# TODO: Show runs with command line option
# TODO: Let user customize root template
# TODO: Add unit tests
import json
from invisibleroads_macros_disk import is_path_in_folder, make_random_folder
from itertools import count
from logging import getLogger
from os.path import basename, exists, join, splitext
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from pyramid.response import FileResponse, Response
from ..constants import (
AUTOMATION_ROUTE,
BATCH_ROUTE,
ID_LENGTH,
MODE_NAME_BY_CODE,
MODE_ROUTE,
RUN_ROUTE,
STYLE_ROUTE,
VARIABLE_ID_PATTERN,
VARIABLE_ROUTE)
from ..exceptions import CrossComputeDataError
from ..macros.iterable import extend_uniquely, find_item
from ..macros.web import get_html_from_markdown
from ..routines.configuration import (
get_css_uris,
get_template_texts,
get_variable_definitions)
from ..routines.variable import (
VariableView,
load_variable_data,
parse_data_by_id)
class AutomationRoutes():
def __init__(
self, automation_definitions, automation_queue, timestamp_object):
self.automation_definitions = automation_definitions
self.automation_queue = automation_queue
self._timestamp_object = timestamp_object
def includeme(self, config):
config.include(self.configure_root)
config.include(self.configure_styles)
config.include(self.configure_automations)
config.include(self.configure_batches)
config.include(self.configure_runs)
def configure_root(self, config):
config.add_route('root', '/')
config.add_view(
self.see_root,
route_name='root',
renderer='crosscompute:templates/root.jinja2')
def configure_styles(self, config):
config.add_route(
'style', STYLE_ROUTE)
config.add_route(
'automation style', AUTOMATION_ROUTE + STYLE_ROUTE)
config.add_view(
self.see_style,
route_name='style')
config.add_view(
self.see_style,
route_name='automation style')
def configure_automations(self, config):
config.add_route(
'automation.json',
AUTOMATION_ROUTE + '.json')
config.add_route(
'automation',
AUTOMATION_ROUTE)
config.add_view(
self.run_automation,
route_name='automation.json',
renderer='json')
config.add_view(
self.see_automation,
route_name='automation',
renderer='crosscompute:templates/automation.jinja2')
def configure_batches(self, config):
config.add_route(
'automation batch',
AUTOMATION_ROUTE + BATCH_ROUTE)
config.add_route(
'automation batch mode',
AUTOMATION_ROUTE + BATCH_ROUTE + MODE_ROUTE)
config.add_route(
'automation batch mode variable',
AUTOMATION_ROUTE + BATCH_ROUTE + MODE_ROUTE + VARIABLE_ROUTE)
config.add_view(
self.see_automation_batch_mode,
route_name='automation batch mode',
renderer='crosscompute:templates/mode.jinja2')
config.add_view(
self.see_automation_batch_mode_variable,
route_name='automation batch mode variable')
def configure_runs(self, config):
config.add_route(
'automation run',
AUTOMATION_ROUTE + RUN_ROUTE)
config.add_route(
'automation run mode',
AUTOMATION_ROUTE + RUN_ROUTE + MODE_ROUTE)
config.add_route(
'automation run mode variable',
AUTOMATION_ROUTE + RUN_ROUTE + MODE_ROUTE + VARIABLE_ROUTE)
config.add_view(
self.see_automation_batch_mode,
route_name='automation run mode',
renderer='crosscompute:templates/mode.jinja2')
config.add_view(
self.see_automation_batch_mode_variable,
route_name='automation run mode variable')
def see_root(self, request):
'Render root with a list of available automations'
automation_definitions = self.automation_definitions
for automation_definition in automation_definitions:
if 'parent' not in automation_definition:
css_uris = get_css_uris(automation_definition)
break
else:
css_uris = []
return {
'automations': automation_definitions,
'css_uris': css_uris,
'timestamp_value': self._timestamp_object.value,
}
def see_style(self, request):
matchdict = request.matchdict
automation_definitions = self.automation_definitions
if 'automation_slug' in matchdict:
automation_definition = self.get_automation_definition_from(
request)
elif automation_definitions:
automation_definition = automation_definitions[0]
if 'parent' in automation_definition:
automation_definition = automation_definition['parent']
else:
raise HTTPNotFound
style_definitions = automation_definition.get('display', {}).get(
'styles', [])
try:
style_definition = find_item(
style_definitions, 'uri', request.environ['PATH_INFO'])
except StopIteration:
raise HTTPNotFound
path = join(automation_definition['folder'], style_definition['path'])
try:
response = FileResponse(path, request)
except TypeError:
raise HTTPNotFound
return response
def run_automation(self, request):
automation_definition = self.get_automation_definition_from(request)
variable_definitions = get_variable_definitions(
automation_definition, 'input')
try:
data_by_id = dict(request.params) or request.json_body
except json.JSONDecodeError:
data_by_id = {}
try:
data_by_id = parse_data_by_id(data_by_id, variable_definitions)
except CrossComputeDataError as e:
raise HTTPBadRequest(e)
runs_folder = join(automation_definition['folder'], 'runs')
folder = make_random_folder(runs_folder, ID_LENGTH)
self.automation_queue.put((automation_definition, {
'folder': folder,
'data_by_id': data_by_id,
}))
run_id = basename(folder)
if 'runs' not in automation_definition:
automation_definition['runs'] = []
run_uri = RUN_ROUTE.format(run_slug=run_id)
automation_definition['runs'].append({
'name': run_id,
'slug': run_id,
'folder': folder,
'uri': run_uri,
})
# TODO: Change target page depending on definition
return {'id': run_id}
def see_automation(self, request):
automation_definition = self.get_automation_definition_from(request)
css_uris = get_css_uris(automation_definition)
return automation_definition | {
'css_uris': css_uris,
'timestamp_value': self._timestamp_object.value,
}
def see_automation_batch_mode(self, request):
automation_definition = self.get_automation_definition_from(request)
automation_folder = automation_definition['folder']
batch_definition = self.get_batch_definition_from(
request, automation_definition)
absolute_batch_folder = join(automation_folder, batch_definition[
'folder'])
mode_name = self.get_mode_name_from(request)
css_uris = get_css_uris(automation_definition)
template_text = '\n'.join(get_template_texts(
automation_definition, mode_name))
variable_definitions = get_variable_definitions(
automation_definition, mode_name, with_all=True)
request_path = request.path
for_print = 'p' in request.params
return {
'automation_definition': automation_definition,
'batch_definition': batch_definition,
'uri': request_path,
'mode_name': mode_name,
'timestamp_value': self._timestamp_object.value,
} | render_mode_dictionary(
mode_name, template_text, variable_definitions,
absolute_batch_folder, css_uris, request_path, for_print)
def see_automation_batch_mode_variable(self, request):
automation_definition = self.get_automation_definition_from(request)
automation_folder = automation_definition['folder']
batch_definition = self.get_batch_definition_from(
request, automation_definition)
mode_name = self.get_mode_name_from(request)
variable_definitions = get_variable_definitions(
automation_definition, mode_name)
matchdict = request.matchdict
variable_id = matchdict['variable_id']
try:
variable_definition = find_item(
variable_definitions, 'id', variable_id,
normalize=str.casefold)
except StopIteration:
raise HTTPNotFound
folder = join(automation_folder, batch_definition[
'folder'], mode_name)
path = join(folder, variable_definition['path'])
if not is_path_in_folder(path, folder):
raise HTTPBadRequest
if not exists(path):
raise HTTPNotFound
L.debug(variable_definition)
if splitext(path)[1] == '.dictionary':
return Response(load_variable_data(path, variable_id))
return FileResponse(path, request=request)
def get_automation_definition_from(self, request):
matchdict = request.matchdict
automation_slug = matchdict['automation_slug']
try:
automation_definition = find_item(
self.automation_definitions, 'slug', automation_slug,
normalize=str.casefold)
except StopIteration:
raise HTTPNotFound
return automation_definition
def get_batch_definition_from(self, request, automation_definition):
matchdict = request.matchdict
if 'batch_slug' in matchdict:
slug = matchdict['batch_slug']
key = 'batches'
else:
slug = matchdict['run_slug']
key = 'runs'
try:
batch_definition = find_item(automation_definition.get(
key, []), 'slug', slug)
except StopIteration:
raise HTTPNotFound
return batch_definition
def get_mode_name_from(self, request):
matchdict = request.matchdict
mode_code = matchdict['mode_code']
try:
mode_name = MODE_NAME_BY_CODE[mode_code]
except KeyError:
raise HTTPNotFound
return mode_name
def render_mode_dictionary(
mode_name, template_text, variable_definitions, absolute_batch_folder,
css_uris, request_path, for_print):
m = {'css_uris': css_uris.copy(), 'js_uris': [], 'js_texts': []}
i = count()
def render_html(match):
matching_text, terms = match.group(0), match.group(1).split('|')
variable_id = terms[0].strip()
try:
d = find_item(variable_definitions, 'id', variable_id)
except StopIteration:
L.warning('%s in template but not in configuration', variable_id)
return matching_text
view = VariableView.get_from(d).load(absolute_batch_folder)
element = view.render(
mode_name, f'v{next(i)}', terms[1:], request_path, for_print)
for k, v in m.items():
extend_uniquely(v, element[k])
return element['body_text']
return m | {
'body_text': get_html_from_markdown(VARIABLE_ID_PATTERN.sub(
render_html, template_text)),
'js_text': '\n'.join(m['js_texts']),
}
L = getLogger(__name__)
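# A minimal wiring sketch (not from this module), assuming a queue.Queue for
# the automation queue and a simple object exposing a .value attribute for the
# timestamp; the Timestamp class and empty definitions list are illustrative.
#
# from queue import Queue
# from pyramid.config import Configurator
#
# class Timestamp:
#     value = 0
#
# routes = AutomationRoutes([], Queue(), Timestamp())
# with Configurator() as config:
#     config.include(routes.includeme)
#     app = config.make_wsgi_app()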
|
'''
Created on May 6, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from devices.device import Device
class SirenDevice(Device):
"""
Siren
"""
def __init__(self, botengine, device_id, device_type, device_description, precache_measurements=True):
"""
Constructor
:param botengine:
:param device_id:
:param device_type:
:param device_description:
:param precache_measurements:
"""
Device.__init__(self, botengine, device_id, device_type, device_description, precache_measurements=precache_measurements)
# Microservice this siren is locked to
self.locked_microservice = None
def get_device_type_name(self):
"""
:return: the name of this device type in the given language, for example, "Entry Sensor"
"""
# NOTE: Device type name
return _("Siren")
def get_icon(self):
"""
:return: the font icon name of this device type
"""
return "siren"
def get_icon_font(self):
"""
Get the icon font package from which to render an icon
:return: The name of the icon font package
"""
import utilities.utilities as utilities
return utilities.ICON_FONT_FONTAWESOME_REGULAR
#===========================================================================
# Capabilities
#===========================================================================
def has_dogbark(self, botengine):
"""
Determine if this siren supports a dog bark sound
:param botengine:
:return: True if this siren supports a dog bark sound
"""
return False
def has_doorbell(self, botengine):
"""
Determine if this siren supports a doorbell sound
:param botengine:
:return:
"""
return False
#===========================================================================
# Commands
#===========================================================================
def play_sound(self, botengine, sound_id, strobe, duration_sec, microservice_identifier=""):
"""
Squawk the given sound ID
:param botengine: BotEngine
:param sound_id: Sound ID to play
:param strobe: True to activate the strobe light
:param duration_sec: 1 = play once; 2+ = play this many seconds.
"""
raise NotImplementedError
def force_silence(self, botengine):
"""
Force silence, even if this is locked by some other service.
:param botengine:
:return:
"""
raise NotImplementedError
def silence(self, botengine, microservice_identifier=""):
"""
Silence
:param botengine:
:return:
"""
raise NotImplementedError
def squawk(self, botengine, warning=False, microservice_identifier=""):
"""
Squawk
:param warning: True for a little warning squawk, False for a more alarming squawk
"""
raise NotImplementedError
def alarm(self, botengine, on, microservice_identifier=""):
"""
Sound the alarm
:param on: True for on, False for off
"""
raise NotImplementedError
def disarmed(self, botengine, microservice_identifier=""):
"""
Make a sound that the home is disarmed
:param botengine:
:return:
"""
raise NotImplementedError
def short_warning(self, botengine, microservice_identifier=""):
"""
Make a short warning sound
:param botengine:
:return:
"""
raise NotImplementedError
def about_to_arm(self, botengine, seconds_left, microservice_identifier=""):
"""
Make a unique aggressive warning noise for the amount of time remaining
:param botengine:
:return:
"""
raise NotImplementedError
def armed(self, botengine, microservice_identifier=""):
"""
Make a sound that the home is armed
:param botengine:
:return:
"""
raise NotImplementedError
def doorbell(self, botengine, microservice_identifier=""):
"""
Doorbell sound
:param botengine:
:return:
"""
raise NotImplementedError
def bark(self, botengine, duration_sec, microservice_identifier=""):
"""
Dog bark
:param botengine:
:param duration_sec: duration of the bark in seconds
:return:
"""
raise NotImplementedError
def door_opened(self, botengine, microservice_identifier=""):
"""
Door opened chime
:param botengine:
:return:
"""
raise NotImplementedError
def lock(self, botengine, microservice_identifier):
"""
Lock the siren to some microservice - for example to use the siren exclusively for security purposes.
:param botengine:
:param microservice_identifier:
:return:
"""
if self.locked_microservice is None:
botengine.get_logger().info("Siren: LOCKING SIREN TO MICROSERVICE {}".format(microservice_identifier))
self.locked_microservice = microservice_identifier
else:
botengine.get_logger().info("Siren: Cannot lock siren again - siren is currently locked by {}".format(self.locked_microservice))
def unlock(self, botengine):
"""
Unlock the siren
:param botengine:
:return:
"""
self.locked_microservice = None
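# A minimal concrete subclass sketch (not from this module); the
# botengine.send_command call is a hypothetical API used for illustration.
#
# class ExampleSiren(SirenDevice):
#
#     def has_dogbark(self, botengine):
#         return True
#
#     def play_sound(self, botengine, sound_id, strobe, duration_sec, microservice_identifier=""):
#         if self.locked_microservice not in (None, microservice_identifier):
#             return  # respect a lock held by another microservice
#         botengine.send_command(self.device_id, "play_sound", sound_id)  # hypothetical API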
|
from .training_runtime_controller import router as TrainingRuntimeEnvironmentController
from .training_runtime_service import TrainingRuntimeService
|
import pytest
from credsweeper.filters import ValueAllowlistCheck
from tests.test_utils.dummy_line_data import get_line_data
class TestValueAllowlistCheck:
def test_value_allowlist_check_p(self, file_path: pytest.fixture, success_line: pytest.fixture) -> None:
line_data = get_line_data(file_path, line=success_line, pattern=r"(?P<value>.*$)")
assert ValueAllowlistCheck().run(line_data) is False
@pytest.mark.parametrize("line", [
"ENC(Crackle123)",
])
def test_value_allowlist_check_n(self, file_path: pytest.fixture, line: str) -> None:
line_data = get_line_data(file_path, line=line, pattern=r"(?P<value>.*$)")
assert ValueAllowlistCheck().run(line_data) is True
def test_value_allowlist_check_none_value_n(self, file_path: pytest.fixture, success_line: pytest.fixture) -> None:
line_data = get_line_data(file_path, line=success_line)
assert ValueAllowlistCheck().run(line_data) is True
|
from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
class MuxinTA1ClassificationTemplate1(DSBoxTemplate):
def __init__(self):
DSBoxTemplate.__init__(self)
self.template = {
"name": "MuxinTA1ClassificationTemplate1",
"taskSubtype": {TaskKeyword.BINARY.name, TaskKeyword.MULTICLASS.name},
"taskType": TaskKeyword.CLASSIFICATION.name,
"inputType": "table", # See SEMANTIC_TYPES.keys() for range of values
"output": "model_step", # Name of the final step generating the prediction
"target": "extract_target_step", # Name of the step generating the ground truth
"steps": [
{
"name": "to_dataframe_step",
"primitives": ["d3m.primitives.data_transformation.dataset_to_dataframe.Common"],
"inputs": ["template_input"]
},
{
"name": "common_profiler_step",
"primitives": ["d3m.primitives.schema_discovery.profiler.Common"],
"inputs": ["to_dataframe_step"]
},
{
"name": "extract_attribute_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
"hyperparameters":
{
'semantic_types': (
'https://metadata.datadrivendiscovery.org/types/PrimaryKey',
'https://metadata.datadrivendiscovery.org/types/Attribute',),
'use_columns': (),
'exclude_columns': ()
}
}],
"inputs": ["common_profiler_step"]
},
{
"name": "extract_target_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
"hyperparameters":
{
'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TrueTarget',),
'use_columns': (),
'exclude_columns': ()
}
}],
"inputs": ["common_profiler_step"]
},
{
"name": "encode1_step",
"primitives": ["d3m.primitives.data_preprocessing.unary_encoder.DSBOX"],
"inputs": ["extract_attribute_step"]
},
{
"name": "encode2_step",
"primitives": ["d3m.primitives.data_preprocessing.encoder.DSBOX"],
"inputs": ["encode1_step"]
},
{
"name": "corex_step",
"primitives": ["d3m.primitives.feature_construction.corex_text.DSBOX"],
"inputs": ["encode2_step"]
},
{
"name": "to_numeric_step",
"primitives": ["d3m.primitives.data_transformation.to_numeric.DSBOX"],
"inputs":["corex_step"],
},
{
"name": "impute_step",
# "primitives": ["d3m.primitives.data_preprocessing.mean_imputation.DSBOX"],
"primitives": ["d3m.primitives.data_preprocessing.greedy_imputation.DSBOX"],
# "primitives": ["d3m.primitives.data_preprocessing.iterative_regression_imputation.DSBOX"],
"inputs": ["to_numeric_step", "extract_target_step" ]
},
{
"name": "model_step",
"runtime": {
"cross_validation": 2,
# "cross_validation":1,
"stratified": True
},
"primitives": [
{
"primitive":
"d3m.primitives.classification.random_forest.SKlearn",
"hyperparameters":
{
'max_depth': [(2), (4)], # (10), #
'n_estimators': [(10), (30)],
'add_index_columns': [True],
'use_semantic_types': [True],
}
},
],
"inputs": ["impute_step", "extract_target_step"]
}
]
}
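# The model_step hyperparameter lists above presumably enumerate candidate
# values for the template search: max_depth in {2, 4} x n_estimators in
# {10, 30} gives 4 configurations, each evaluated with 2-fold stratified
# cross-validation per the "runtime" block (an assumption about DSBox
# template semantics, not stated in this file).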
# @override
|
class A:
@classmethod
def test(cls, param):
return None
class B(A):
@classmethod
def test(cls, param):
if param == 1:
return 1
raise NotImplementedError
class C(B):
pass
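# Usage sketch: C inherits B.test through the normal method resolution order,
# so there is no automatic fallback to A.test when B.test raises.
assert C.test(1) == 1
try:
    C.test(2)
except NotImplementedError:
    pass
assert A.test(2) is None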
|
# Eugene Tin
# TP061195
# ASIA PACIFIC UNIVERSITY OF TECHNOLOGY AND INNOVATION
# GITHUB REPO https://github.com/EuJin03/SCRS_APU
# Chia Wen Xuen
# TP061184
# ASIA PACIFIC UNIVERSITY OF TECHNOLOGY AND INNOVATION
# Copyright (C) 2021 SCRS_APU Open Source Project
# Licensed under the MIT License
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://choosealicense.com/licenses/mit/
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# convention
# 1. return multiple values using a list
# 2. double quotes for str
# 3. current_user variable holds logged in user details
# 4. snake casing variables
# 5. store values in the form of json data
# 6. convert datetime into str datatype
# 7. tabs preferred, default indentation=2
# 8. four classes of function: util/users/vehicles/user_interface
# 9. two data files: userlist/carlist
# admin credentials
# username: admin
# password: admin
# customer credentials
# username: wenxuen
# password: wenxuen
import os
import re
import datetime
from datetime import timedelta
import json
import hashlib
clear = lambda: os.system("cls")
# ---------------------------------------------------------------------------------
# UTILITIES FUNCTIONS
# ---------------------------------------------------------------------------------
def read_file(filename):
# -------------------------
# read txt files
# -------------------------
try:
with open(filename) as f:
data = f.read()
return json.loads(data)
except (OSError, json.JSONDecodeError):
return []
def write_file(filename, content):
# -------------------------
# write txt files
# -------------------------
with open(filename, "w") as f:
f.write(json.dumps(content, indent=2, sort_keys=True, default=str))
def hash_password(password):
# -------------------------
# hash password
# -------------------------
return hashlib.sha256(str.encode(password)).hexdigest()
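# e.g. hash_password("admin") returns the sha256 hex digest
# "8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918",
# so only digests, never plain passwords, are stored in userlist.txt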
def validation(username="*****", email="*****@mail.com", password="*****", confirm_password="*****"):
# -------------------------
# user info validation
# -------------------------
userlist = read_file("userlist.txt")
# password
if len(password) < 5:
return [True, "Password must be at least 5 characters long"]
# password
if password != confirm_password:
return [True, "Passwords do not match, please try again"]
# username
if len(username) < 5:
return [True, "Username must be at least 5 characters long"]
# username
for user in userlist:
if user[0].lower() == username.lower():
return [True, "Username has been taken, please try again"]
# email
REGEX = re.compile(r'[^@]+@[^@]+\.[^@]+')
if not REGEX.match(email):
return [True, "Email format is incorrect, please try again"]
return [False]
def user_input():
# -------------------------
# user info
# extend from register
# -------------------------
clear()
print("REGISTRATION")
print("------------")
# username
while True:
username = input("Username: ")
validated_info = validation(username=username)
if validated_info[0]:
print(validated_info[1])
continue
else:
break
# email
while True:
email = input("Email: ")
validated_info = validation(email=email)
if validated_info[0]:
print(validated_info[1])
continue
else:
break
# password
while True:
password = input("Password: ")
confirm_password = input("Confirm Password: ")
validated_info = validation(password=password, confirm_password=confirm_password)
if validated_info[0]:
print(validated_info[1])
continue
else:
break
clear()
print("----------------")
print("Personal Details")
print("----------------")
# contact
while True:
contact = input("Contact Number: +6")
if not contact.isnumeric():
print("Contact number must contain numbers only...")
continue
else:
break
while True:
city = input("Currently lived in [state]: ")
if len(city) < 4:
print("State not found, please try again...")
continue
else:
break
while True:
print("\nWould you like to make an initial deposit into your wallet?")
print("<Enter> to skip the deposit\n")
wallet = input("Deposit amount: RM")
if wallet == "":
wallet = 0
break
elif not wallet.isnumeric():
print("Invalid amount")
else:
break
return [username.lower(), email, hash_password(password), "0" + contact, city, int(wallet), [], ""]
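# User record layout (list indices), as constructed above and consumed
# throughout this file:
#   0 username, 1 email, 2 hashed password, 3 contact, 4 city/state,
#   5 wallet balance, 6 rental history (list of car records), 7 role
#   ("admin" or ""); index 8 (optional) holds [rating, feedback].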
def field_control(field_text, type, wildcard="404"):
# -------------------------
# input field check with error handling
# extend from add_car()
# -------------------------
while True:
field_input = input(f"{field_text}")
if field_input == "":
return wildcard
# 0 = str
if type == 0:
if len(field_input) < 2:
print("Text unknown, please try again")
continue
else:
break
# 1 = int
if type == 1:
if field_input == "" or not field_input.isnumeric():
print("Please fill in with numbers only...")
continue
else:
break
return field_input
def rental_expire():
# -------------------------
# reset car availability status when expired
# -------------------------
carlist = read_file("carlist.txt")
for car in carlist:
if car[-2]:
if datetime.datetime.strptime(car[-1][3], "%Y-%m-%d %H:%M:%S.%f") < datetime.datetime.now():
car[-1] = False
car[-2] = False
write_file("carlist.txt", carlist)
return
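# Note: write_file serializes datetime objects via json.dumps(default=str),
# which yields "%Y-%m-%d %H:%M:%S.%f" strings (as long as the microsecond
# component is non-zero) that rental_expire parses back with strptime above.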
def vehicle_number():
# -------------------------
# read the latest car id in the file
# -------------------------
carlist = read_file("carlist.txt")
latest_id = 0
for car in carlist:
if car[0] > latest_id:
latest_id = car[0]
return latest_id
# ---------------------------------------------------------------------------------
# USER FUNCTIONS
# ---------------------------------------------------------------------------------
def register():
# -------------------------
# Register
# access: anyone
# -------------------------
clear()
userlist = read_file("userlist.txt")
user_detail = user_input()
userlist.append(user_detail)
write_file("userlist.txt", userlist)
clear()
if user_detail[-3] != 0:
float_price = "{:.2f}".format(user_detail[-3])
print(f"Total amount of RM{float_price} deposited into your account")
print("You have registered successfully, please login now...")
def login(username, password):
# -------------------------
# Login
# access: anyone
# -------------------------
userlist = read_file("userlist.txt")
err = True
for user in userlist:
if user[0] == username.lower():
if user[2] == hash_password(password):
err = False
clear()
print("You have login successfully")
return user
if err:
clear()
input("Username or password is incorrect, please try again...\n\n <Enter> to return back to main menu...")
return ""
def display_user(current_user):
# -------------------------
# Display user information to modify
# access: logged in users
# -------------------------
clear()
username = current_user[0][0]
email = current_user[0][1]
contact = "+6" + str(current_user[0][3])
print("Update Personal Information\n")
print(f"1. Username: [{username}]\n2. Email: [{email}]\n3. Contact Number: [{contact}]\n4. Password\n\n0. Go Back\n")
detail = input("Which detail do you wish to update? ")
clear()
return detail
def update_user(action, current_user):
# -------------------------
# Update user information
# access: logged in users
# -------------------------
if action not in ("0", "1", "2", "3", "4"):
return [False, "something went wrong", current_user[0]]
if action == "0":
return ""
userlist = read_file("userlist.txt")
# update username
while action == "1":
username = input("Enter new username: ")
validated = validation(username=username)
if validated[0]:
clear()
print(validated[1])
if not validated[0]:
for user in userlist:
if user[0] == current_user[0][0]:
user[0] = username
break
write_file("userlist.txt", userlist)
return [False, "User info has been successfully updated!", user]
# update email
while action == "2":
email = input("Enter new email: ")
validated = validation(email=email)
if validated[0]:
clear()
print(validated[1])
if not validated[0]:
for user in userlist:
if user[0] == current_user[0][0]:
user[1] = email
break
write_file("userlist.txt", userlist)
return [False, "User info has been successfully updated!", user]
# update contact
while action == "3":
contact = input("Enter new contact number: +6")
if not contact.isnumeric():
clear()
print("Please insert correct information...")
continue
for user in userlist:
if user[0] == current_user[0][0]:
user[3] = contact
break
write_file("userlist.txt", userlist)
return [False, "User info has been successfully updated!", user]
# update password
while action == "4":
err = False
clear()
old = input("Enter old password: ")
new_password = input("\nEnter new password: ")
new_confirm = input("Confirm new password: ")
validated = validation(password=new_password, confirm_password=new_confirm)
if validated[0]:
clear()
print(validated[1])
continue
for user in userlist:
if user[0] == current_user[0][0]:
if user[2] != hash_password(old):
err = True
clear()
print("Old password incorrect\n\n1. Retry\n0. Quit\n")
choice = input("Choice: ")
if choice == "0":
clear()
return [True, "Please try again later..."]
if choice == "1":
continue
if not validated[0] and not err:
user[2] = hash_password(new_password)
break
write_file("userlist.txt", userlist)
return [True, "User info has been successfully updated, please login again..."]
def modify_wallet(current_user):
# -------------------------
# Deposit money into wallet
# access: anyone
# -------------------------
clear()
balance = current_user[0][5]
decimal_balance = "{:.2f}".format(balance)
print(f"Your total balance remaining: RM{decimal_balance}\n")
print("1. Add fund\n<Enter> to Quit\n")
add_fund = input("Do you wish to add fund into your account? ")
while True:
if add_fund != "1":
return 0
if add_fund == "1":
amount = input("Enter the amount you wished to deposit: RM")
userlist = read_file("userlist.txt")
amount = "{:.2f}".format(int(amount))
for user in userlist:
if user[0] == current_user[0][0]:
user[5] = float(user[5]) + float(amount)
updated_user = user
break
write_file("userlist.txt", userlist)
current_user[0] = updated_user
clear()
print(f"Total fund of RM{amount} has been deposited")
input("<Enter> to return...")
break
def rent_car(id, current_user):
# -------------------------
# Book a car and payment
# access: customer
# -------------------------
clear()
carlist = read_file("carlist.txt")
userlist = read_file("userlist.txt")
for car in carlist:
if car[0] == id:
if car[-2]:
return [True, "Car is already been taken by someone"]
brand = car[2].capitalize()
model = car[3].capitalize()
year = str(car[4])
price = "{:.2f}".format(car[8])
print(f"You have selected {brand} {model}, {year}")
print(f"Rental price for this product will be fixed at the rate of RM{price} per day\n")
confirmation = input("Do you want to confirm order? [yes/No]: ")
if confirmation.lower() == "no":
return ""
duration = input("How many days would you like to rent? ")
while confirmation.lower() == "yes":
total_price = float(price) * int(duration)
for user in userlist:
if user[0] == current_user[0][0]:
if user[5] < total_price:
return [True, "Insufficient balance, you are broke!"]
username = current_user[0][0]
# update car to rented
car[-2] = True
car[-1] = [username, duration, datetime.datetime.now(), datetime.datetime.now() + timedelta(days=int(duration))]
# update user rental history
user[6].append(car)
user[5] -= total_price
write_file("carlist.txt", carlist)
write_file("userlist.txt", userlist)
current_user[0] = user
total_price = "{:.2f}".format(total_price)
print(f"\nTotal payment made RM{total_price}")
print(f"Your booking order for {brand} {model}, {year} for the duration of {duration} days has been confirmed\nEnjoy your ride!")
end = input("Press Enter to return back to home page!")
return end
break
def assign_admin():
# -------------------------
# Assign a new user to be an administrator
# access: admin
# -------------------------
userlist = read_file("userlist.txt")
usernames = [] # list of registered usernames
# display usernames
for user in userlist:
if user[7] != "admin":
usernames.append(user[0])
usernames = list(set(usernames))
usernames.sort()
print("ASSIGN AN ADMINISTRATOR\n")
print(f"Search by usernames:\n")
count = 0
for names in usernames:
print(f"* {names}")
count+=1
print("\n<Enter> to return")
selected_username = input("\nType a username from the list: ")
# return
if selected_username == "":
return selected_username
# error handling
if selected_username.isnumeric():
print("Username does not exist...")
end = input("<Enter> to return")
clear()
return end
# error handling 2
if not selected_username.isnumeric() and selected_username.lower() not in usernames:
print("Username does not exist...")
end = input("<Enter> to return")
clear()
return end
for user in userlist:
if user[0] == selected_username:
confirmation = input(f"\nDo you want to assign {user[0]} as an admin? [yes/No] ")
if confirmation.lower() == "yes":
user[7] = "admin"
write_file("userlist.txt", userlist)
clear()
print("-"*30)
print("SCRS MEMBER MANAGEMENT")
print("-"*30, "\n")
print(f"{user[0]} has successfully promoted as an administrator for SUPER CAR RENTAL SYSTEM\n")
return input("<Enter> to return to admin menu...")
else:
return ""
def display_feedback(current_user=[]):
# -------------------------
# Display feedback from customers who have used our service
# access: everyone
# -------------------------
userlist = read_file("userlist.txt")
print("-"*30)
print("SUPER CAR RENTAL SERVICE (SCRS)")
print("-"*30, "\n")
header = ["Username", "Rating", "Customer Feedback"]
format_row = "|{:^25}|{:^40}|{:^80}|"
print(format_row.format(*header))
print("-"*150)
for user in userlist:
if len(user) == 9:
username = user[0]
rating = user[8][0]
feedback = user[8][1][0:60] + "..."
print(format_row.format(username, "✰ "*rating, feedback))
if len(current_user) > 0:
while len(current_user[6]) != 0 and len(current_user) == 8:
choice = submit_feedback(current_user[0])
if len(choice[1]) > 0:
current_user = choice[1]
if choice[0] == "":
break
end = input("\nPress <Enter> to return to main menu...")
clear()
return [end, current_user]
def submit_feedback(username):
# -------------------------
# Allow customer to submit feedback
# access: customer that used SCRS at least once
# -------------------------
userlist = read_file("userlist.txt")
submission = input("\nDo you want to submit your own feedback or provide any suggestions? [yes/No] ")
if submission.lower() == "yes":
clear()
print("-"*30)
print("SUPER CAR RENTAL SERVICE (SCRS)")
print("-"*30, "\n")
print("Customer Feedback Form\n")
while True:
rating = input("On a scale of 1-5, how would you rate Super Car Rental Service? ")
if rating.isnumeric() and 1 <= int(rating) <= 5:
break
while True:
feedback = input("Feel free to give short opinion on our service: ")
if len(feedback) < 10:
print("Message length should be greater than 10 characters")
else:
break
print("-"*30)
for user in userlist:
if user[0] == username:
review = [int(rating), feedback]
user.append(review)
write_file("userlist.txt", userlist)
end = input("Your feedback has been submitted successfully! Press <Enter> key to return...")
return [end, user]
else:
return ["", []]
# ---------------------------------------------------------------------------------
# CAR FUNCTIONS
# ---------------------------------------------------------------------------------
def display_brand():
# -------------------------
# Display car brand
# access: anyone
# -------------------------
cars = read_file("carlist.txt")
brand = [] # list of registered car brand
# display car brand first
for car in cars:
brand.append(car[2])
brand = list(set(brand))
brand.sort()
print("-"*30)
print("Select Vehicle Brand")
print("-"*30, "\n")
while True:
count = 1
for i in brand:
print(f"{count}. {i}")
count+=1
print("\n0. Go back to home page")
while True:
num = input("Select a brand: ")
if not num.isnumeric():
break
if int(num) < count:
break
print("Brand does not exist, please try again")
if num.isnumeric() and int(num) < count:
break
return [num, brand]
def car_details(brand, default=True):
# -------------------------
# Display car details from car brand selected
# access: anyone
# -------------------------
cars = read_file("carlist.txt")
car_model = [] # display models from car brand
if default:
for car in cars:
if car[2] == brand:
car_model.append(car)
if not default:
for car in cars:
if car[2] == brand:
if car[-2] == False:
car_model.append(car)
header = ["ID", "Number Plate", "Vehicle", "Seats", "Short Description", "Condition", "Owner", "Price Rate", "Rental Status"]
format_row = "{:^6}|{:^15}|{:^30}|{:^8}|{:^35}|{:^13}|{:^10}|{:^10}|{:^16}|"
print(format_row.format(*header))
print("-"*155)
# display selected brand car model
for i in car_model:
id = i[0]
num_plate = i[1]
brand = i[2].capitalize()
model = i[3].capitalize()
year = i[4]
vehicle = brand + " " + model + ", " + str(year)
owner = i[5]
condition = i[6]
desc = i[7][0:30] + "..."
price_rate = i[8]
seats = i[9]
availability = i[10]
if availability:
rent_by = i[11]
status = "Rented by " + str(rent_by[0])
if not availability:
status = "Not rented"
float_price_rate = "{:.2f}".format(price_rate)
print(format_row.format(id, num_plate, vehicle, seats, desc, condition, owner, float_price_rate, status))
if len(car_model) == 0:
print("Oops, nothing is here yet")
car_model.clear()
def add_car():
# -------------------------
# Add car into file
# access: admin
# -------------------------
clear()
print("-"*20)
print("SCRS Vehicle Management")
print("-"*20, "\n")
num_plate = field_control("Number Plate: ", 0)
brand = field_control("Vehicle Brand: ", 0)
model = field_control("Vehicle Model: ", 0)
year = field_control("Manufactured Year: ", 1)
owner = field_control("Owner of the vehicle: ", 0)
condition = field_control("Condition of the car [?]/10: ", 1)
desc = field_control("Short description: ", 0)
price_rate = field_control("Price rate per day: RM", 1)
seats = field_control("Number of seats: ", 1)
carlist = read_file("carlist.txt")
if str(num_plate) == "404":
return ""
latest_id = vehicle_number()
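# Car record layout (list indices), as constructed below and consumed
# throughout this file:
#   0 id, 1 number plate, 2 brand, 3 model, 4 year, 5 owner, 6 condition,
#   7 description, 8 daily price rate, 9 seats, 10 rented flag,
#   11 booking details ([username, duration, start, end]) or False.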
new_car = [int(latest_id) + 1, num_plate.upper(), brand.capitalize().rstrip(), model.capitalize().rstrip(), int(year), owner.capitalize().rstrip(), float(condition), desc, float(price_rate), int(seats), False, False]
carlist.append(new_car)
write_file("carlist.txt", carlist)
detail = input("Car has been successfully added to the system... <Enter> to return:")
clear()
return detail
def modify_car(id):
# -------------------------
# Update car details
# extended from select car
# access: admin
# -------------------------
clear()
print('Car model: ')
carlist = read_file("carlist.txt")
for car in carlist:
if car[0] == id:
print("Modify details of", car[2], car[3], ",", car[4])
print("Current number plate: ", car[1])
print("\n<Enter> to keep previous data...\n")
num_plate = field_control("Number Plate [" + car[1] + "]: ", 0, car[1])
brand = field_control("Vehicle Brand [" + car[2] + "]: ", 0, car[2])
model = field_control("Vehicle Model [" + car[3] + "]: ", 0, car[3])
year = field_control("Manufactured Year [" + str(car[4]) + "]: ", 1, car[4])
owner = field_control("Owner of the vehicle [" + car[5] + "]: ", 0, car[5])
condition = field_control("Condition of the car [" + str(car[6]) + "/10)" + ": ", 1, car[6])
desc = field_control("Short description: [" + car[7] + "]\n: ", 0, car[7])
price_rate = field_control("Price rate per day: [RM" + "{:.2f}".format(car[8]) + "]: ", 1, car[8])
seats = field_control("Number of seats [" + str(car[9]) + "]: ", 1, car[9])
new_car = [car[0], num_plate, brand, model, int(year), owner, float(condition), desc, float(price_rate), int(seats), car[10], car[11]]
break
for i in range(len(carlist)):
if carlist[i][0] == id:
del carlist[i]
break
carlist.append(new_car)
write_file("carlist.txt", carlist)
return [True, "Car's details has been modified successfully"]
def select_car(callback):
# -------------------------
# Select a car to modify
# access: admin
# -------------------------
clear()
print("-"*20)
print("SCRS Vehicle Management")
print("-"*20, "\n")
carlist = read_file("carlist.txt")
action = display_brand()
if action[0] == "0":
clear()
return ""
while action[0] != "0":
clear()
payload = int(action[0]) - 1
car_details(brand=action[1][payload])
latest_id = vehicle_number()
while True:
vehicle_id = input("\nSelect vehicle ID to modify or <Enter> to go back: ")
if not vehicle_id.isnumeric():
break
if int(vehicle_id) <= latest_id:
break
print("Id does not exist, please try again\n")
while len(vehicle_id) > 0:
clear()
status = callback(int(vehicle_id))
if status[0]:
print(status[1])
input("<Enter> to return back to main menu...")
break
if vehicle_id == "":
clear()
break
def rental_history(current_user):
# -------------------------
# View rental history
# access: customer
# -------------------------
clear()
userlist = read_file("userlist.txt")
print("-"*25)
print(f"{current_user[0][0]}'s Rental History")
print("-"*25, "\n")
for user in userlist:
if user[0] == current_user[0][0]:
if len(user[6]) == 0:
print("Start placing order today for exclusive discounts!\n")
return input("<Enter> to return back to home page...")
header = ["Number Plate", "Vehicle", "Booked on", "Expire on", "Duration", "Total Amount"]
format_row = "{:^20}|" * len(header)
print(format_row.format(*header))
print("-"*125)
for rent in user[6]:
num_plate = rent[1].upper()
brand = rent[2].capitalize()
model = rent[3].capitalize()
year = rent[4]
vehicle = brand + " " + model + ", " + str(year)
price_rate = rent[8]
start_date = rent[-1][2][0:11]
end_date = rent[-1][3][0:11]
duration = rent[-1][1]
price_per_order = "{:.2f}".format(float(price_rate) * int(duration))
print(format_row.format(num_plate, vehicle, start_date,end_date,str(duration) + " days", "RM " + price_per_order))
break
end = input("\n<Enter> to return back to home page...")
clear()
return end
def rented_out():
# -------------------------
# View vehicles that are currently rented out
# access: admin
# -------------------------
clear()
print("-"*25)
print("CARS ON TRANSIT RECORDS")
print("-"*25, "\n")
carlist = read_file("carlist.txt")
header = ["Number Plate", "Vehicle", "Booked on", "Expire on", "Owner", "Total Amount", "Rented By"]
format_row = "{:^20}|" * len(header)
print(format_row.format(*header))
print("-"*145)
for car in carlist:
if car[-2]:
if len(car[-1]) > 0:
booking_details = car[-1]
num_plate = car[1].upper()
vehicle = car[2].capitalize() + " " + car[3].capitalize() + ", " + str(car[4])
owner = car[5]
price_rate = car[8]
start_date = booking_details[2][0:11]
end_date = booking_details[3][0:11]
username = booking_details[0]
duration = booking_details[1]
total_price = "{:.2f}".format(int(duration) * float(price_rate))
print(format_row.format(num_plate, vehicle, start_date,end_date, owner, total_price, username))
end = input("\n<Enter> to go back...")
clear()
return end
def rent_available():
# -------------------------
# View vehicles that are available for rent
# access: admin
# -------------------------
clear()
print("-"*20)
print("SCRS Vehicle Management")
print("-"*20, "\n")
action = display_brand()
if action[0] == "0":
clear()
return ""
while action[0] != "0":
clear()
print("-"*20)
print("Available vehicle")
print("-"*20, "\n")
payload = int(action[0]) - 1
car_details(action[1][payload], False)
input("\nPress Enter to quit: ")
clear()
break
def customer_payment():
# -------------------------
# View customer bookings and payments
# access: admin
# -------------------------
clear()
print("-"*20)
print("SCRS Customer Order Record")
print("-"*20, "\n")
userlist = read_file("userlist.txt")
for user in userlist:
if len(user[6]) > 0:
username = user[0]
email = user[1]
total_spent = 0
print("-"*15)
print(f"Username: {username}")
print(f"Email: {email}")
print("-"*15, "\n")
header = ["Number Plate", "Vehicle", "Booked on", "Expire on", "Duration", "Total Amount", "Rented By"]
format_row = "{:^20}|" * len(header)
print(format_row.format(*header))
print("-"*150)
for data in user[6]:
num_plate = data[1]
start_date = data[-1][2][0:11]
end_date = data[-1][3][0:11]
duration = data[-1][1]
vehicle = f"{data[2]} {data[3]}, {data[4]}"
price_per_order = "{:.2f}".format(data[8] * int(duration))
print(format_row.format(num_plate, vehicle, start_date,end_date, duration + " days", "RM " + price_per_order, username))
total_spent += float(price_per_order)
str_spent = "{:.2f}".format(total_spent)
print("-"*5)
print(f"Total amount earned: RM {str_spent}\n")
end = input("<Enter> to go back...")
clear()
return end
def customer_query():
userlist = read_file("userlist.txt")
usernames = [] # list of registered usernames
# display usernames
for user in userlist:
usernames.append(user[0])
usernames = list(set(usernames))
usernames.sort()
print(f"Search by usernames:\n")
count = 0
for names in usernames:
print(f"{count}. {names}")
count+=1
print("\n<Enter> to return")
selected_username = input("\nSelect an user by listed numbers or type the username: ")
# return
if selected_username == "":
return selected_username
# error handling
if selected_username.isnumeric():
if int(selected_username) >= len(usernames) or int(selected_username) < 0:
print("Username does not exist...")
end = input("<Enter> to return")
clear()
return end
# error handling 2
if not selected_username.isnumeric() and not selected_username.lower() in usernames:
print("Username does not exist...")
end = input("<Enter> to return")
clear()
return end
while True:
clear()
if selected_username.isnumeric():
for user in userlist:
if user[0] == usernames[int(selected_username)]:
username = user[0]
email = user[1]
print("-"*15)
print(f"Username: {username}")
print(f"Email: {email}")
print("-"*15, "\n")
header = ["Number Plate", "Vehicle", "Booked on", "Expire on", "duration", "Total Amount"]
format_row = "{:^20}|" * len(header)
print(format_row.format(*header))
print("-"*125)
if len(user[6]) > 0:
for record in user[6]:
start_date = record[-1][2][0:11]
end_date = record[-1][3][0:11]
duration = record[-1][1]
vehicle = f"{record[2]} {record[3]}, {record[4]}"
num_plate = record[1].upper()
price_per_order = "RM " + "{:.2f}".format(record[8] * int(duration))
print(format_row.format(num_plate, vehicle, start_date,end_date,str(duration) + " days", price_per_order))
# query by username
if len(selected_username) > 1:
for user in userlist:
if user[0].lower() == selected_username.lower():
username = user[0]
email = user[1]
print("-"*15)
print(f"Username: {username}")
print(f"Email: {email}")
print("-"*15, "\n")
header = ["Number Plate", "Vehicle", "Booked on", "Expire on", "duration", "Total Amount"]
format_row = "{:^20}|" * len(header)
print(format_row.format(*header))
print("-"*125)
if len(user[6]) > 0:
for record in user[6]:
start_date = record[-1][2][0:11]
end_date = record[-1][3][0:11]
duration = record[-1][1]
vehicle = f"{record[2]} {record[3]}, {record[4]}"
num_plate = record[1].upper()
price_per_order = "RM " + "{:.2f}".format(record[8] * int(duration))
print(format_row.format(num_plate, vehicle, start_date,end_date,str(duration) + " days", price_per_order))
end = input("\n<Enter> to return...")
clear()
return end
# ---------------------------------------------------------------------------------
# USER INTERFACE
# ---------------------------------------------------------------------------------
def main():
current_user = []
clear()
print('-'*30)
print('Super Car Rental Service (SCRS)')
print('-'*30)
# main page without login
while len(current_user) == 0:
print('\n1. Login\n2. Register\n3. View Cars\n4. Feedback/Suggestion\n\n0. Quit\n')
option = input('Please select a choice: ')
while option == "4":
clear()
end = display_feedback()
if end[0] == "":
break
while option == "3":
clear()
action = display_brand()
if action[0] == "0":
clear()
break
while action[0] != "0":
clear()
payload = int(action[0]) - 1
brand = action[1][payload]
print("-"*20)
print(brand)
print("-"*20, "\n")
car_details(brand=brand)
input("\nPress Enter to quit: ")
clear()
break
if option == "2":
register()
if option == "1":
clear()
print("LOGIN\n")
username = input("Username: ")
password = input("Password: ")
login_user = login(username, password)
if login_user == "":
main()
current_user.append(login_user)
if option == "0":
break
# admin interface
while len(current_user) > 0 and current_user[0][7].lower() == "admin":
clear()
print('-'*20)
print('Super Car Rental Service (SCRS)')
print('-'*20, "\n")
print("Welcome, " + current_user[0][0].capitalize(), "\n")
print("1. Add a Vehicle\n2. Modify a Vehicle\'s Details\n3. Update Personal Information\n4. Vehicle Rental Records\n5. Query Customer Record\n\n6. Assign a new administrator\n7. Customer Feedback\n\n0. Logout\n")
admin_option = input("Please enter your choice: ")
# Customer feedback
while admin_option == "7":
clear()
end = display_feedback()
if end[0] == "":
break
# Assign admin
while admin_option == "6":
clear()
end = assign_admin()
if end == "":
break
# customer record query
while admin_option == "5":
clear()
print("-"*20)
print("SCRS Customer Records Management")
print("-"*20, "\n")
end = customer_query()
if end == "":
break
# rental records
while admin_option == "4":
clear()
print("-"*20)
print("SCRS Vehicle Management")
print("-"*20, "\n")
print("1. Vehicles in transit\n2. Vehicles available for Rent\n3. Customer Payments for a specific time duration\n\n0.Back\n")
record_option = input("Please enter your choice: ")
# return
if record_option == "0":
break
# cars booked
while record_option == "1":
clear()
end = rented_out()
if end == "":
clear()
break
# cars available
while record_option == "2":
clear()
end = rent_available()
if end == "":
break
# customer payments
while record_option == "3":
clear()
end = customer_payment()
if end == "":
break
# update personal info
while admin_option == "3":
action = display_user(current_user)
payload = update_user(action, current_user)
if payload == "":
break
if payload[0]:
print(payload[1])
input("<Enter> to continue")
current_user[0] = []
main()
if not payload[0]:
print(payload[1])
current_user[0] = payload[2]
choice = input("<Enter> to continue...")
break
# modify vehicle
while admin_option == "2":
data = select_car(modify_car)
if data == "":
break
# add vehicle
while admin_option == "1":
data = add_car()
if data == "":
break
# quit
if admin_option == "0":
current_user.clear()
main()
break
# customer interface
while len(current_user) > 0 and current_user[0][7].lower() != "admin":
clear()
print("-"*30)
print("SUPER CAR RENTAL SERVICE")
print("-"*30, "\n")
print("Welcome, " + current_user[0][0].capitalize() + "\n")
print('1. Rent a Car\n2. Update Personal Information\n3. Rental History\n4. Check Wallet\n5. Customer Feedback\n\n0. Logout\n')
user_option = input("Please enter your choice: ")
# feedback / suggestion
while user_option == "5":
clear()
choice = display_feedback(current_user[0])
if len(choice[1]) > 0:
current_user[0] = choice[1]
if choice[0] == "":
break
# check wallet
while user_option == "4":
end = modify_wallet(current_user)
if end == 0:
break
# rental history
while user_option == "3":
end = rental_history(current_user)
if end == "":
break
# update personal info
while user_option == "2":
action = display_user(current_user)
payload = update_user(action, current_user)
if payload == "":
break
if payload[0]:
print(payload[1])
input("<Enter> to continue")
current_user[0] = []
main()
if not payload[0]:
print(payload[1])
current_user[0] = payload[2]
choice = input("<Enter> to continue...")
break
if choice:
break
# rent car
while user_option == "1":
clear()
action = display_brand()
if action[0] == "0":
clear()
break
while action[0] != "0":
clear()
payload = int(action[0]) - 1
car_details(brand=action[1][payload])
while True:
latest_id = vehicle_number()
vehicle_id = input("\nSelect vehicle ID to rent or <Enter> to go back: ")
if not vehicle_id.isnumeric():
break
if int(vehicle_id) <= latest_id:
break
print("Id does not exist, please try again\n")
while len(vehicle_id) > 0:
clear()
status = rent_car(int(vehicle_id), current_user)
if status == "":
break
try:
if status[0]:
print("\n", "-"*30)
print(status[1])
print("-"*30, "\n")
retry = input("Please select other car available for rent. <Enter> to continue")
if retry == "":
clear()
break
except (TypeError, IndexError):
return
if vehicle_id == "":
clear()
break
if user_option == "0":
current_user.clear()
main()
break
# ---------------------------------------------------------------------------------
rental_expire() # reset expired rentals before starting
main()
|
# coding: utf-8
import keras
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import time
import tensorflow as tf
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
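# Note: tf.ConfigProto and tf.Session are TensorFlow 1.x APIs; running this
# under TensorFlow 2.x would require the tf.compat.v1 equivalents. This
# script assumes the TF1-era keras / keras-retinanet setup used below.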
# os.environ["CUDA_VISIBLE_DEVICES"] = "0" #specify a GPU if you need
images_path = './testimages/*' # glob pattern for the input images to run inference on
output_dir = './inference/' # directory where the annotated images will be saved
score_threshold = 0.3 # minimum detection score to keep
model_path = './snapshots/resnet152_pascal.h5' # path to the trained h5 file
#os.makedirs(output_dir)
keras.backend.tensorflow_backend.set_session(get_session())
# load retinanet model
model = models.load_model(model_path, backbone_name='resnet152')
# if the model is not converted to an inference model, use the line below
# see: https://github.com/fizyr/keras-retinanet#converting-a-training-model-to-inference-model
model = models.convert_model(model)
model.summary()  # prints the architecture; summary() itself returns None
labels_to_names = {
11: 'Malignant'}
import glob
from PIL import Image, ImageDraw, ImageFont
for imgpath in glob.glob(images_path):
print(imgpath)
image = np.asarray(Image.open(imgpath).convert('RGB'))
# copy to draw on
draw = image.copy()
#draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
# preprocess image for network
image = preprocess_image(image)
image, scale = resize_image(image)
# process image
boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
# correct for image scale
boxes /= scale
# visualize detections
for box, score, label in zip(boxes[0], scores[0], labels[0]):
# scores are sorted so we can break
if score < score_threshold:
break
color = label_color(label)
b = box.astype(int)
draw_box(draw, b, color=color)
print((label, score, b))
caption = "{} {:.3f}".format(labels_to_names[label], score)
draw_caption(draw, b, caption)
imgfile = os.path.basename(imgpath)
Image.fromarray(draw).save(os.path.join(output_dir, imgfile))
|
class TFTFarmingCalculator:
def __init__(self, node_id, node_config, threefold_explorer):
self.threefold_explorer = threefold_explorer
#configuration as used for the node
self.node_config = node_config
#unique node id in the TFGrid
self.node_id = node_id
@property
def is_certified(self):
"""
ThreeFold has created a certification program which farmers can opt in to.
Certified farmers have to buy their hardware from a certified hardware vendor.
ThreeFold makes sure that this hardware is optimal from an energy perspective
and that the security features
are optimally implemented (silicon root of trust, secure bios & boot, ...).
ThreeFold also makes sure that the network connection to the internet is good enough.
If certified farmers are in breach of their farming contract, they lose
their certification and become default farmers.
ThreeFold, with the help of the ThreeFold Explorer nodes, checks the quality achieved in
relation to the certification contract.
The foundation will give free certification to boxes which benefit the distribution
of nodes in the grid, e.g. right now Africa has almost no capacity; whoever puts up
well-distributed boxes bought from a certified partner will not have to pay a monthly
or setup fee for the certification for a certain period.
The boxes will still be certified and their network uptime & capacity measured; it is
not a free pass to get more TFT.
"""
return self.threefold_explorer.is_certified(self.node_id)
@property
def network_capability_zone(self):
"""
South America & Africa are emerging locations; today the explorer returns 10.
ThreeFold uses the best possible technical means to determine the location of the node.
Depending on the location and the network capability map maintained by the foundation,
a number is returned.
@return between 1 and 20; today the check is very simple: return 10 for an emerging country, otherwise 1
"""
return self.threefold_explorer.network_capability_zone_get(self.node_id)
def bandwidth_check(self):
"""
returns between 0 and 1; 1 is 100%, 0 is none
for certified hardware it is always 100% (1)
"""
if self.is_certified:
return 1
# checks the threefold explorer & returns available bandwidth in mbit/sec avg
# measurement done 24 times per day each time from node region (europe to europe, ...)
# 2 MB of data is uploaded and downloaded from a random chosen node in the grid
# local nodes are not used (check is done to see if nodes are local)
bandwidth_availability = self.threefold_explorer.bandwidth_availability_get(self.node_id) #mbit/sec
if bandwidth_availability > 2 * self.network_capability_zone:
return 1
elif bandwidth_availability > 1 * self.network_capability_zone:
return 0.5
else:
return 0
def utilization_check(self, month):
"""
checks the threefold explorer & returns the utilization of the node
returns between 0 and 1; 1 is 100%, 0 is none
for the first 12 months it is always 1
for certified hardware it is always 100%
"""
if self.is_certified:
return 1
startmonth = self.threefold_explorer.month_start_get(self.node_id)
utilization_rate = self.threefold_explorer.utilization_rate_get(self.node_id)
if month - startmonth < 12:
#first 12 months utilization rate is 1, means all is rewarded
return 1
else:
if utilization_rate > 50:
return 1
if utilization_rate > 25:
return 0.5
return 0
def uptime_check(self):
if self.is_certified:
#the threefold explorer return 1 if agreed sla achieved (part of certification)
#the std requested SLA is 99.8% for a certified farmer (1.44h per month)
return self.threefold_explorer.uptime_sla_achieved(self.node_id)
else:
uptime = self.threefold_explorer.uptime_achieved(self.node_id)
if uptime < 99:
#corresponds to 7.2h, so if non certified capacity node was out for more
#than 7.2h then no TFT farmed
return 0
return 1
def difficulty_level_get(self, month):
"""
return difficulty in relation to how many tokens there are
the difficulty factor makes sure that there can never be more than 4 billion tokens
"""
if month == 0:
nr_of_tft_ever_farmed = 800000000 #starting point
else:
nr_of_tft_ever_farmed = int(self.simulation.tft_total(month - 1)) #look at previous month
p = nr_of_tft_ever_farmed / 4000000000
if p > 0.999999:
perc = 1000000
else:
perc = 1 / (1 - p)
return perc
    def farming_cpr_tft(self, month):
        """
        CPR is the cloud production rate, like a hashrate for a bitcoin miner;
        in our case it is a production rate of capacity for the internet.
        The cost to buy 1 CPR of production capability in Q1 2020 = 40 USD;
        we take that figure as the definition of the CPR-to-USD ratio.
        We say the ROI for batch 1 (month 1) is 6 months, which is why we need to divide by 6.
        ROI = Return on Investment
        """
cpr_investment_cost_in_usd_month = j.tools.tfgrid_simulator.simulator_config.tokenomics.cpr_investment_usd / 6
return cpr_investment_cost_in_usd_month / self.simulation.tft_price_get(month)
def tft_farm(self, month):
"""
calculate the farming of tft's
"""
        # CPR is like a hashrate for a bitcoin miner;
        # in our case it represents the capability of a node to produce cloud units (our IT capacity)
        tft_farmed = self.node_config.cpr * self.farming_cpr_tft(month) / self.difficulty_level_get(month)
        return tft_farmed * self.uptime_check() * self.utilization_check(month) * self.bandwidth_check()
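# Illustrative sketch (an addition, not part of the original simulator): the
# difficulty factor above scales farming rewards down as the total farmed TFT
# approaches the 4 billion cap.
def _difficulty_factor_example(nr_of_tft_ever_farmed):
    p = nr_of_tft_ever_farmed / 4000000000
    return 1000000 if p > 0.999999 else 1 / (1 - p)
# e.g. _difficulty_factor_example(800000000) ~= 1.25 and
#      _difficulty_factor_example(3600000000) ~= 10.0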
|
"""Utility functions."""
import networkx as nx
class Utility(object):
"""Base class for all utility functions."""
def __init__(self):
pass
def utility_of_relation(self, model, r, v):
pass
def utility_of_substitution(self, s):
pass
def best(self):
pass
def add(self, u1, u2):
pass
class UtilityNorm(Utility):
"""Normalized utility."""
def utility_of_relation(self, model, r, v):
"""Returns the utility of a relation node r that should substitute the
given variable v.
Utility depends on the variable to substitute (v). Relations may be
executed in a specific direction, i.e, the input nodes change,
consequently, also the properties change, which influence the utility
(e.g., sample rate).
The properties of relations may differ, hence only the available ones
are included in calculation.
"""
# input variables for relation
rin = set(model.predecessors(r)) - set([v])
# relation should have at least one predecessor (i.e., be connected)
assert len(rin) >= 1, """relation node {} must be connected to
variable(s)""".format(r)
# utility function by weighted sum
w = [
0.3,
0.5,
0.1,
0.5,
]
u = [1.0] * len(w)
# only one node, perfect; the more nodes, the worse
u[0] = 1.0 / len(rin)
# penalize computational costs (cost > 0, penalizes more relations)
u[1] = 1.0
if model.has_property(r, 'cost'):
u[1] = 1.0 / model.property_value_of(r, 'cost')
# penalize unprovided variables (penalizes more relations too)
u[2] = 1.0 / (len(model.unprovided(rin)) + 1)
# penalize low sample rate (or difference to desired sample rate?)
# penalize inaccuracy of input variables
u[3] = 1.0
        # use a distinct loop variable to avoid shadowing the parameter `v`
        for vi in rin:
            if model.has_property(vi, 'accuracy'):
                u[3] = u[3] * model.property_value_of(vi, 'accuracy')
# weighted sum and normalize
uf = sum([wi*ui for wi, ui in zip(w, u)]) / sum(w)
assert uf >= 0 and uf <= 1, "utility not normalized"
return uf
def utility_of_substitution(self, s):
"""Returns the product of node-utilities.
Utility of a node depends on the tree structure (execution direction of
relations from root node) and the input variables (e.g., sample rate).
"""
# empty substitution is also fine
if len(s) == 0:
return self.best()
# get substitution tree with intermediate variables (for utility
# calculation we need the predecessor variables for relations)
t, vin = s.tree(collapse_variables=False)
t = t.to_undirected() # ignore direction
# get all predecessors (by bfs)
pre = dict(nx.bfs_predecessors(t, s.root))
# product of utilities (of all relations given their predecessors)
u = 1.0
for r in s:
ui = self.utility_of_relation(s.model, r, pre[r])
assert ui >= 0 and ui <= 1, "Utility must be normalized!"
u = self.add(u, ui)
return u
def best(self):
"""Returns best utility, i.e., initial of an empty substitution."""
return 1.0
def add(self, u1, u2):
"""Adds up utilities, and returns the result.
Defines how the utilities are added up in a substitution (e.g., sum
vs. product).
"""
return u1 * u2
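# Illustrative sketch (an addition, not part of the original module): the weighted
# sum in UtilityNorm.utility_of_relation divides by sum(w), so any utility vector
# with components in [0, 1] maps back into [0, 1]:
# w = [0.3, 0.5, 0.1, 0.5]; u = [0.5, 1.0, 0.25, 0.8]
# sum(wi * ui for wi, ui in zip(w, u)) / sum(w)  # ~0.768, still within [0, 1]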
|
import errno
import os
def create_file_if_needed(file_name):
"""
Create a specified file if it doesn't exist
:param file_name: The file to check and create
"""
if not os.path.exists(os.path.dirname(file_name)):
try:
os.makedirs(os.path.dirname(file_name))
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
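# Illustrative usage sketch (an addition; the path is hypothetical): ensure the
# parent folder exists before writing a file.
# create_file_if_needed('/tmp/myapp/logs/app.log')
# with open('/tmp/myapp/logs/app.log', 'w') as fh:
#     fh.write('ready')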
|
from unittest import TestCase
from regene.expression.character_class import CharacterClassFactory
class TestCharacterSet(TestCase):
def test_only_characters(self):
assert str(CharacterClassFactory.get("[abcde]")) == "a"
def test_a_single_range(self):
assert str(CharacterClassFactory.get("[0-6]")) == "0"
def test_multiple_ranges(self):
assert str(CharacterClassFactory.get("[0-6A-Z]")) == "0"
def test_mixed_ranges_and_characters(self):
assert str(CharacterClassFactory.get("[B0-6A-Z]")) == "B"
def test_mixed_characters_and_ranges(self):
assert str(CharacterClassFactory.get("[A-ZB0-6]")) == "B"
def test_dash(self):
        assert str(CharacterClassFactory.get(r"[\-]")) == "-"
def test_multiplication(self):
assert CharacterClassFactory.get("[abc]") * 4 == "aaaa"
def test_ranged_multiplication(self):
assert CharacterClassFactory.get("[b-z]") * 4 == "bbbb"
|
# Copyright (c) Nanjing University, Vision Lab.
# Last update: 2019.10.04
import tensorflow as tf
import numpy as np
import h5py
tf.enable_eager_execution()
# def select_voxels(vols, points_nums, offset_ratio=1.0, init_thres=-1.0):
# '''Select the top k voxels and generate the mask.
# input: vols: [batch_size, vsize, vsize, vsize, 1] float32
# points numbers: [batch_size]
# output: the mask (0 or 1) representing the selected voxels: [batch_size, vsize, vsize, vsize]
# '''
# vols = tf.squeeze(tf.convert_to_tensor(vols, dtype='float32'), axis=-1)
# points_nums = tf.cast(tf.convert_to_tensor(points_nums), 'float32')
# offset_ratio = tf.convert_to_tensor(offset_ratio, dtype='float32')
# masks = vols[0:1]# just a place holder.
# for i in range(vols.shape[0]):
# vol = tf.gather(vols, i, axis=0)
# num = tf.cast(offset_ratio* points_nums[i], 'int32')
# thres = get_thres(vol, num, init_thres)
# mask = tf.cast(tf.greater(vol, thres), 'float32')
# masks = tf.concat([masks, tf.expand_dims(mask, 0)], axis=0)
# return tf.expand_dims(masks[1:], -1)
# def get_thres(vol, num, init_thres):
# # filter out most values by the initial threshold.
# values = tf.gather_nd(vol, tf.where(vol > init_thres))
# # number of values should be larger than expected number.
# if tf.shape(values)[0] < num:
# values = tf.reshape(vol, [-1])
# # only sort the selected values.
# sorted_values, _ = tf.nn.top_k(values, num)
# thres = sorted_values[-1]
# return thres
def select_voxels(vols, points_nums, offset_ratio=1.0, init_thres=-2.0):
'''Select the top k voxels and generate the mask.
input: vols: [batch_size, vsize, vsize, vsize, 1] float32
points numbers: [batch_size]
output: the mask (0 or 1) representing the selected voxels: [batch_size, vsize, vsize, vsize]
'''
    vols = vols.numpy()
    offset_ratio = float(offset_ratio)  # keep fractional ratios; int() would truncate
masks = []
for idx, vol in enumerate(vols):
num = int(offset_ratio* points_nums[idx])
thres = get_thres(vol, num, init_thres)
mask = np.greater(vol, thres).astype('float32')
masks.append(mask)
return np.stack(masks)
def get_thres(vol, num, init_thres):
    # filter out most values by the initial threshold.
    values = vol[vol > init_thres]
# number of values should be larger than expected number.
if values.shape[0] < num:
values = np.reshape(vol, [-1])
# only sort the selected values.
values.sort()
thres = values[-num]
    return thres
if __name__ == '__main__':
    # note: eager execution is enabled above, so no tf.Session is needed
    # (creating one would raise a RuntimeError in TF 1.x eager mode)
    data = np.random.rand(4, 64, 64, 64, 1) * 100 - 50
    data = tf.convert_to_tensor(data, 'float32')
    points_nums = np.array([1000, 200, 10000, 50])
    offset_ratio = 1.0
    init_thres = -1.0
    mask = select_voxels(data, points_nums, offset_ratio, init_thres)
    print(mask)
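# Illustrative sketch (an addition, not part of the original module): get_thres
# picks a threshold such that roughly `num` entries of `vol` exceed it, so the
# mask from np.greater keeps approximately the top-k voxels (ties aside).
# vol = np.random.rand(8, 8, 8, 1)
# thres = get_thres(vol, 100, -1.0)
# kept = int(np.greater(vol, thres).sum())  # ~100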
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
'''DCS Config params v1 action implementations'''
import logging
import argparse
from osc_lib import utils
from osc_lib.command import command
from otcextensions.common import sdk_utils
from otcextensions.i18n import _
LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {
}
return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map)
class ListInstanceParam(command.Lister):
    _description = _('List configuration parameters of a single DCS '
                     'instance')
columns = ('id', 'name', 'value', 'default_value')
def get_parser(self, prog_name):
parser = super(ListInstanceParam, self).get_parser(prog_name)
parser.add_argument(
'instance',
metavar='<instance>',
help=_('Name or ID of the instance')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.dcs
inst = client.find_instance(name_or_id=parsed_args.instance,
ignore_missing=False)
data = client.instance_params(
instance={'id': inst.id},
)
table = (self.columns,
(utils.get_item_properties(
s, self.columns,
) for s in data))
return table
class UpdateInstanceParam(command.Command):
    _description = _('Update instance configuration parameters')
def get_parser(self, prog_name):
parser = super(UpdateInstanceParam, self).get_parser(prog_name)
parser.add_argument(
'instance',
metavar='<instance>',
            help=_('Name or ID of the DCS instance to update parameters for.')
)
parser.add_argument(
'--param',
metavar='<id:name:value>',
required=True,
action='append',
help=_('Parameter pair in format ID:NAME:VALUE.')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.dcs
params = []
for param in parsed_args.param:
            param_parts = param.split(':')
            if len(param_parts) == 3:
                param_struct = {
                    'param_id': param_parts[0],
                    'param_name': param_parts[1],
                    'param_value': param_parts[2]
                }
                params.append(param_struct)
            else:
                msg = _('Cannot parse parameter information')
                raise argparse.ArgumentTypeError(msg)
        if params:
            LOG.debug('params=%s', params)
inst = client.find_instance(name_or_id=parsed_args.instance,
ignore_missing=False)
return client.update_instance_params(
instance={'id': inst.id}, params=params
)
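# Illustrative sketch (an addition; instance and parameter names are hypothetical):
# the --param option expects ID:NAME:VALUE triples, each split on ':' into the
# param_id/param_name/param_value structure above, e.g.
#   openstack dcs instance param update my-instance --param 1:maxmemory-policy:noeviction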
class ShowInstanceParam(command.ShowOne):
_description = _('Show the details of a single instance parameter')
def get_parser(self, prog_name):
parser = super(ShowInstanceParam, self).get_parser(prog_name)
parser.add_argument(
'instance',
metavar='<instance>',
help=_('Name or UUID of the instance.')
)
parser.add_argument(
'--param',
metavar='<param>',
required=True,
help=_('ID or name of the parameter.')
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.dcs
inst = client.find_instance(name_or_id=parsed_args.instance,
ignore_missing=False)
data = client.instance_params(
instance={'id': inst.id},
)
criteria = parsed_args.param
found = None
for param in data:
if param.id == criteria or criteria in param.name:
found = param
break
if found:
display_columns, columns = _get_columns(found)
data = utils.get_item_properties(found, columns)
return (display_columns, data)
|
import subprocess # noqa: F401
import pytest
from pandas.io.formats.console import detect_console_encoding
from pandas.io.formats.terminal import _get_terminal_size_tput
class MockEncoding(object): # TODO(py27): replace with mock
"""
Used to add a side effect when accessing the 'encoding' property. If the
side effect is a str in nature, the value will be returned. Otherwise, the
side effect should be an exception that will be raised.
"""
def __init__(self, encoding):
super(MockEncoding, self).__init__()
self.val = encoding
@property
def encoding(self):
return self.raise_or_return(self.val)
@staticmethod
def raise_or_return(val):
if isinstance(val, str):
return val
else:
raise val
@pytest.mark.parametrize('empty,filled', [
['stdin', 'stdout'],
['stdout', 'stdin']
])
def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled):
    # Ensures that sys.stdout.encoding or sys.stdin.encoding is used when
    # either has a value filled in.
# GH 21552
with monkeypatch.context() as context:
context.setattr('sys.{}'.format(empty), MockEncoding(''))
context.setattr('sys.{}'.format(filled), MockEncoding(filled))
assert detect_console_encoding() == filled
@pytest.mark.parametrize('encoding', [
AttributeError,
IOError,
'ascii'
])
def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding):
# GH 21552
with monkeypatch.context() as context:
context.setattr('locale.getpreferredencoding', lambda: 'foo')
context.setattr('sys.stdout', MockEncoding(encoding))
assert detect_console_encoding() == 'foo'
@pytest.mark.parametrize('std,locale', [
['ascii', 'ascii'],
['ascii', Exception],
[AttributeError, 'ascii'],
[AttributeError, Exception],
[IOError, 'ascii'],
[IOError, Exception]
])
def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
    # When both the stdout/stdin encoding and locale preferred encoding checks
    # fail (or return 'ascii'), we should default to the sys default encoding.
# GH 21552
with monkeypatch.context() as context:
context.setattr(
'locale.getpreferredencoding',
lambda: MockEncoding.raise_or_return(locale)
)
context.setattr('sys.stdout', MockEncoding(std))
context.setattr('sys.getdefaultencoding', lambda: 'sysDefaultEncoding')
assert detect_console_encoding() == 'sysDefaultEncoding'
@pytest.mark.parametrize("size", ['', ['']])
def test_terminal_unknown_dimensions(monkeypatch, size, mocker):
def communicate(*args, **kwargs):
return size
monkeypatch.setattr('subprocess.Popen', mocker.Mock())
monkeypatch.setattr('subprocess.Popen.return_value.returncode', None)
monkeypatch.setattr(
'subprocess.Popen.return_value.communicate', communicate)
result = _get_terminal_size_tput()
assert result is None
|
import yoloCarAccident as yc

CHUNK_LINES = 80000

def process_chunk(text):
    """Write the chunk to test.txt and run the detector on it."""
    with open('test.txt', 'w') as f2:
        f2.write(text)
    try:
        yc.find('test.txt')
    except ValueError:
        pass

s = ""
i = 0
with open('result2.txt', 'r') as f1:
    for line in f1:
        s += line
        i += 1
        if i >= CHUNK_LINES:
            process_chunk(s)
            s = ""
            i = 0
# process the final partial chunk, which the original loop dropped
if s:
    process_chunk(s)
|
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
import sys
import distutils.sysconfig
print("exec_prefix:%s" % sys.exec_prefix)
print("short_version:%s" % sys.version[:3])
print("long_version:%s" % sys.version.split()[0])
print("py_inc_dir:%s" % distutils.sysconfig.get_python_inc())
print("site_packages_dir:%s" % distutils.sysconfig.get_python_lib(plat_specific=1))
|
" Implementations of Reaction Network Pathway "
|
"""
SystemctlShow - command ``systemctl show``
==========================================
Parsers included in this module are:
SystemctlShowServiceAll - command ``systemctl show *.service``
--------------------------------------------------------------
Parses the output of ``systemctl show *.service`` for all services running
on the host.
"""
from insights import parser, CommandParser
from insights.parsers import split_kv_pairs, SkipException, ParseException
from insights.specs import Specs
from insights.util import deprecated
@parser(Specs.systemctl_show_all_services)
class SystemctlShowServiceAll(CommandParser, dict):
"""
Class for parsing ``systemctl show *.service`` command output.
Empty properties are suppressed.
Sample Input::
Id=postfix.service
Names=postfix.service
TimeoutStartUSec=1min 30s
LimitNOFILE=65536
LimitMEMLOCK=
LimitLOCKS=18446744073709551615
Id=postgresql.service
Names=postgresql.service
Requires=basic.target
LimitMSGQUEUE=819200
LimitNICE=0
Sample Output::
{
"postfix.service": {
"Id" : "postfix.service",
"Names" : "postfix.service",
"LimitNOFILE" : "65536",
"TimeoutStartUSec" : "1min 30s",
"LimitLOCKS" : "18446744073709551615",
},
"postgresql.service": {
"Id" : "postgresql.service",
"Names" : "postgresql.service",
"Requires" : "basic.target",
"LimitMSGQUEUE" : "819200",
"LimitNICE" : "0",
}
}
Examples:
>>> 'postfix' in systemctl_show_all # ".service" is needed
False
>>> 'postfix.service' in systemctl_show_all
True
>>> systemctl_show_all['postfix.service']['Id']
'postfix.service'
>>> 'LimitMEMLOCK' in systemctl_show_all['postfix.service']
False
>>> systemctl_show_all['postfix.service']['LimitLOCKS']
'18446744073709551615'
>>> 'postgresql.service' in systemctl_show_all
True
>>> systemctl_show_all['postgresql.service']['LimitNICE']
'0'
Raises:
        SkipException: When there is no content to parse
ParseException: When something cannot be parsed
"""
def parse_content(self, content):
if not content:
raise SkipException
sidx = 0
idx_list = []
for i, l in enumerate(content):
if l.strip() == '':
idx_list.append((sidx, i))
sidx = i + 1
idx_list.append((sidx, len(content)))
for s, e in idx_list:
data = split_kv_pairs(content[s:e], use_partition=False)
name = data.get('Names', data.get('Id'))
if not name:
raise ParseException('"Names" or "Id" not found!')
self[name] = dict((k, v) for k, v in data.items() if v)
if len(self) == 0:
raise SkipException
class SystemctlShow(CommandParser, dict):
"""
.. warning::
This class is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for parsing ``systemctl show <Service_Name>`` command output.
Empty properties are suppressed.
Sample Input::
TimeoutStartUSec=1min 30s
LimitNOFILE=65536
LimitMEMLOCK=
LimitLOCKS=18446744073709551615
Sample Output::
{"LimitNOFILE" : "65536",
"TimeoutStartUSec" : "1min 30s",
"LimitLOCKS" : "18446744073709551615"}
    In the command's output, empty properties are suppressed by default.
"""
def __init__(self, *args, **kwargs):
deprecated(SystemctlShow, "Deprecated. Use 'SystemctlShowServiceAll' instead.")
super(SystemctlShow, self).__init__(*args, **kwargs)
    def parse_content(self, content):
        data = split_kv_pairs(content, use_partition=False)
        # remove keys with empty values
        self.update(dict((k, v) for k, v in data.items() if v != ''))
@property
def data(self):
return self
@parser(Specs.systemctl_cinder_volume)
class SystemctlShowCinderVolume(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show openstack-cinder-volume``.
Typical output of ``/bin/systemctl show openstack-cinder-volume`` command is::
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=1min 30s
WatchdogUSec=0
LimitCORE=18446744073709551615
LimitRSS=18446744073709551615
LimitNOFILE=65536
LimitAS=18446744073709551615
LimitNPROC=63391
Transient=no
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_cinder_volume["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_mariadb)
class SystemctlShowMariaDB(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show mariadb``.
Typical output of ``/bin/systemctl show mariadb`` command is::
Type=simple
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStopUSec=5min
ExecStartPre={ path=/usr/libexec/mariadb-prepare-db-dir ; argv[]=/usr/libexec/mariadb-prepare-db-dir %n ; ignore_errors=no ; start_time=[Mon 2017-05-22 06:49:01 EDT] ; stop_time=[Mon 2017-05-22 06:49:02 EDT] ; pid=1946 ; code=exited ; status=0 }
ExecStart={ path=/usr/bin/mysqld_safe ; argv[]=/usr/bin/mysqld_safe --basedir=/usr ; ignore_errors=no ; start_time=[Mon 2017-05-22 06:49:02 EDT] ; stop_time=[n/a] ; pid=2304 ; code=(null) ; status=0/0 }
ExecStartPost={ path=/usr/libexec/mariadb-wait-ready ; argv[]=/usr/libexec/mariadb-wait-ready $MAINPID ; ignore_errors=no ; start_time=[Mon 2017-05-22 06:49:02 EDT] ; stop_time=[Mon 2017-05-22 06:49:12 EDT] ; pid=2305 ; code=exited ; status=0 }
Slice=system.slice
ControlGroup=/system.slice/mariadb.service
After=network.target -.mount systemd-journald.socket tmp.mount basic.target syslog.target system.slice
MemoryCurrent=18446744073709551615
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_mariadb["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_pulp_workers)
class SystemctlShowPulpWorkers(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show pulp_workers``.
Typical output of ``/bin/systemctl show pulp_workers`` command is::
Type=oneshot
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=0
TimeoutStopUSec=1min 30s
WatchdogUSec=0
WatchdogTimestampMonotonic=0
ExecMainStartTimestamp=Thu 2018-01-11 14:22:33 CST
ExecMainStartTimestampMonotonic=105521850
ExecMainExitTimestamp=Thu 2018-01-11 14:22:33 CST
ExecMainExitTimestampMonotonic=105593405
ExecStart={ path=/usr/libexec/pulp-manage-workers ; argv[]=/usr/libexec/pulp-manage-workers start ; ignore_err
ExecStop={ path=/usr/libexec/pulp-manage-workers ; argv[]=/usr/libexec/pulp-manage-workers stop ; ignore_error
Slice=system.slice
After=systemd-journald.socket system.slice network.target basic.target
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_pulp_workers["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_pulp_resmg)
class SystemctlShowPulpResourceManager(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show pulp_resource_manager``.
Typical output of ``/bin/systemctl show pulp_resource_manager`` command is::
Type=simple
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=1min 30s
ExecMainStartTimestamp=Thu 2018-01-11 14:22:33 CST
ExecMainStartTimestampMonotonic=105028117
ExecMainExitTimestampMonotonic=0
ExecMainPID=2810
ExecMainCode=0
ExecMainStatus=0
ExecStart={ path=/usr/bin/celery ; argv[]=/usr/bin/celery worker -A pulp.server.async.app -n resource_manager@
Slice=system.slice
After=basic.target network.target system.slice -.mount systemd-journald.socket
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_pulp_resource_manager["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_pulp_celerybeat)
class SystemctlShowPulpCelerybeat(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show pulp_celerybeat``.
Typical output of ``/bin/systemctl show pulp_celerybeat`` command is::
Type=simple
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=1min 30s
ExecMainStartTimestamp=Thu 2018-01-11 14:22:32 CST
ExecMainStartTimestampMonotonic=104261679
ExecMainExitTimestampMonotonic=0
ExecMainPID=2747
ExecMainCode=0
ExecMainStatus=0
ExecStart={ path=/usr/bin/celery ; argv[]=/usr/bin/celery beat --scheduler=pulp.server.async.scheduler.Schedul
Slice=system.slice
After=basic.target network.target system.slice -.mount systemd-journald.socket
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_pulp_celerybeat["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_httpd)
class SystemctlShowHttpd(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show httpd``.
Typical output of ``/bin/systemctl show httpd`` command is::
Type=simple
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=1min 30s
ExecMainStartTimestamp=Thu 2018-01-11 14:22:32 CST
ExecMainStartTimestampMonotonic=104261679
ExecMainExitTimestampMonotonic=0
ExecMainPID=2747
ExecMainCode=0
ExecMainStatus=0
ExecStart={ path=/usr/sbin/httpd ; argv[]=/usr/sbin/httpd $OPTIONS -DFOREGROUND ; ignore_errors=no ; start_time=[Tue 2018-05-15 09:30:08 CST] ; stop_time=[n/a] ; pid=1605 ; code=(null) ; status=0/0 }
ExecReload={ path=/usr/sbin/httpd ; argv[]=/usr/sbin/httpd $OPTIONS -k graceful ; ignore_errors=no ; start_time=[Wed 2018-05-16 03:07:01 CST] ; stop_time=[Wed 2018-05-16 03:07:01 CST] ; pid=21501 ; code=exited ; status=0 }
ExecStop={ path=/bin/kill ; argv[]=/bin/kill -WINCH ${MAINPID} ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }
Slice=system.slice
ControlGroup=/system.slice/httpd.service
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_httpd["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_nginx)
class SystemctlShowNginx(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show nginx``.
Typical output of ``/bin/systemctl show nginx`` command is::
Type=forking
Restart=no
PIDFile=/run/nginx.pid
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=5s
RuntimeMaxUSec=infinity
WatchdogUSec=0
WatchdogTimestampMonotonic=0
PermissionsStartOnly=no
RootDirectoryStartOnly=no
RemainAfterExit=no
GuessMainPID=yes
MainPID=0
ControlPID=0
FileDescriptorStoreMax=0
NFileDescriptorStore=0
StatusErrno=0
Result=success
UID=[not set]
GID=[not set]
NRestarts=0
ExecMainStartTimestampMonotonic=0
ExecMainExitTimestampMonotonic=0
ExecMainPID=0
ExecMainCode=0
ExecMainStatus=0
ExecStartPre={ path=/usr/bin/rm ; argv[]=/usr/bin/rm -f /run/nginx.pid ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }
ExecStartPre={ path=/usr/sbin/nginx ; argv[]=/usr/sbin/nginx -t ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }
ExecStart={ path=/usr/sbin/nginx ; argv[]=/usr/sbin/nginx ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }
ExecReload={ path=/bin/kill ; argv[]=/bin/kill -s HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }
...
Examples:
>>> systemctl_show_nginx["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_qpidd)
class SystemctlShowQpidd(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show qpidd``.
Typical output of ``/bin/systemctl show qpidd`` command is::
Type=simple
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=1min 30s
ExecMainStartTimestamp=Thu 2018-01-11 14:22:32 CST
ExecMainStartTimestampMonotonic=104261679
ExecMainExitTimestampMonotonic=0
ExecMainPID=2747
ExecMainCode=0
ExecMainStatus=0
ExecStart={ path=/usr/sbin/qpidd ; argv[]=/usr/sbin/qpidd --config /etc/qpid/qpi
Slice=system.slice
ControlGroup=/system.slice/qpidd.service
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_qpidd["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_qdrouterd)
class SystemctlShowQdrouterd(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show qdrouterd``.
Typical output of ``/bin/systemctl show qdrouterd`` command is::
Type=simple
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=1min 30s
ExecMainStartTimestamp=Thu 2018-01-11 14:22:32 CST
ExecMainStartTimestampMonotonic=104261679
ExecMainExitTimestampMonotonic=0
ExecMainPID=2747
ExecMainCode=0
ExecMainStatus=0
Slice=system.slice
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_qdrouterd["LimitNOFILE"]
'4096'
"""
pass
@parser(Specs.systemctl_smartpdc)
class SystemctlShowSmartpdc(SystemctlShow):
"""
.. warning::
This parser is deprecated, please use :py:class:`SystemctlShowServiceAll` instead.
Class for ``systemctl show smart_proxy_dynflow_core``.
Typical output of ``/bin/systemctl show smart_proxy_dynflow_core`` command is::
Type=simple
Restart=no
NotifyAccess=none
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=1min 30s
ExecMainStartTimestamp=Thu 2018-01-11 14:22:32 CST
ExecMainStartTimestampMonotonic=104261679
ExecMainExitTimestampMonotonic=0
ExecMainPID=2747
ExecMainCode=0
ExecMainStatus=0
Slice=system.slice
LimitNOFILE=4096
...
Examples:
>>> systemctl_show_smartpdc["LimitNOFILE"]
'4096'
"""
pass
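# Illustrative sketch (an addition, not part of the original module):
# SystemctlShowServiceAll.parse_content splits the output into per-service blocks
# on blank lines, e.g.
# content = ['Id=a.service', 'LimitNOFILE=1024', '', 'Id=b.service', 'LimitNICE=0']
# yields idx_list == [(0, 2), (3, 5)], one (start, end) slice per service block.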
|
from fastapi import APIRouter, Depends
from starlette.responses import JSONResponse
router = APIRouter()
@router.get(
"",
summary="Health Check",
tags=["Health Check"],
)
async def health_check():
return JSONResponse(status_code=200, content={
"status_code": 200,
"msg": "Health Check OK"
})
|
from celery.schedules import crontab
CELERY_ACKS_LATE = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_IMPORTS = (
    'openpds.questions.tasks',
    'openpds.questions.socialhealth_tasks',
    'openpds.questions.places_tasks',
    'openpds.meetup.tasks',
    'openpds.questions.probedatavisualization_tasks',
    'openpds.questions.mitfit_tasks',
    'openpds.questions.gfsa_tasks',
    'openpds.questions.auth_tasks',
    'openpds.questions.hotspots_tasks',
)
BROKER_URL = "mongodb://celery:celery@localhost:27017/lpd_celery_broker"
CELERYBEAT_SCHEDULE = {
# "check-data-and-notify": {
# "task": "openpds.questionstasks.checkDataAndNotify",
# "schedule": crontab(hour="*", minute="0")
# },
"compute-social-health-scores": {
"task": "openpds.questions.socialhealth_tasks.recentSocialHealthScores",
"schedule": crontab(hour="*", minute="0")
},
"ensure-funf-indexes": {
"task": "openpds.questions.tasks.ensureFunfIndexes",
"schedule": crontab(hour="*/2", minute="5")
},
"find-recent-places": {
"task": "openpds.questions.places_tasks.findRecentPlaces",
"schedule": crontab(hour="*/2", minute="15")
},
"find-hourly-places": {
"task": "openpds.questions.places_tasks.findHourlyPlaces",
"schedule": crontab(hour="*/2", minute="20")
},
"probe-summaries": {
"task": "openpds.questions.probedatavisualization_tasks.recentProbeDataScores",
"schedule": crontab(hour="*", minute="25")
},
"high-active-locations": {
"task": "openpds.questions.mitfit_tasks.findActiveLocationsTask",
"schedule": crontab(hour="*", minute="30")
},
"high-active-times": {
"task": "openpds.questions.mitfit_tasks.findActiveTimesTask",
"schedule": crontab(hour="*", minute="35")
},
"leaderboard-computation": {
"task": "openpds.questions.mitfit_tasks.leaderboardComputationTask",
"schedule": crontab(hour="*", minute="40")
},
"wifi-auth-fingerprints": {
"task": "openpds.questions.auth_tasks.computeAllFingerprints",
"schedule": crontab(hour="*/2", minute="45")
},
"compute-gfsa": {
"task": "openpds.questions.gfsa_tasks.recentGfsaScores",
"schedule": crontab(hour="*", minute="55")
},
"hotspots-computation": {
"task": "openpds.questions.hotspots_tasks.findHotSpotsTask",
"schedule": crontab(hour="*", minute="30")
},
}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Embedding Keras layer
~~~~~~~~~~~~~~~~~~~~~
"""
from functools import reduce
from operator import mul
from tensorflow.python.ipu.ops import embedding_ops
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
class Embedding(Layer):
"""
This is designed to be a replacement for the typical use cases of the
Keras Embedding layer.
Args:
input_dim: int > 0. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: int >= 0. Dimension of the dense embedding.
embeddings_initializer: Initializer for the `embeddings` matrix.
Input shape:
2D tensor with shape: `(batch_size, input_length)`.
Output shape:
3D tensor with shape: `(batch_size, input_length, output_dim)`.
"""
# pylint: disable=useless-super-delegation
def __init__(self,
input_dim,
output_dim,
embeddings_initializer='uniform',
**kwargs):
kwargs['autocast'] = False
super(Embedding, self).__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.embeddings_initializer = initializers.get(embeddings_initializer)
@tf_utils.shape_type_conversion
def build(self, input_shape):
if len(input_shape) != 2:
raise ValueError(
"The input shape should be a tensor of shape [batch, input_length]")
self.embeddings = self.add_weight(shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
name='embeddings')
self.built = True
# pylint: disable=arguments-differ
def call(self, inputs):
"""
Perform an embedding lookup.
Args:
inputs: An integer tensor of indices into the embedding variable.
Returns:
The entries of the embedding tensor corresponding to the ids tensor
indices.
"""
return embedding_ops.embedding_lookup(self.embeddings,
ids=inputs,
name=self.name)
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape + (self.output_dim,)
def get_config(self):
config = {
'input_dim':
self.input_dim,
'output_dim':
self.output_dim,
'embeddings_initializer':
initializers.serialize(self.embeddings_initializer)
}
base_config = super(Embedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
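# Illustrative usage sketch (an addition, not part of the original module);
# assumes an IPU-enabled TensorFlow build where this layer is available.
# layer = Embedding(input_dim=10000, output_dim=64)
# out = layer(ids)  # ids: int32 tensor of shape (batch, input_length)
# out.shape         # -> (batch, input_length, 64)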
|
from conans import ConanFile, CMake, tools
import os, subprocess
class HelloTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "virtualrunenv"
requires = "ghc/8.10.1"
def build(self):
currentDirectory = os.path.dirname(os.path.realpath(__file__))
self.run("ghc -o example {}{}example.hs".format(currentDirectory,os.sep),run_environment=True)
def imports(self):
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.dylib*", dst="bin", src="lib")
self.copy('*.so*', dst='bin', src='lib')
def test(self):
if not tools.cross_building(self.settings):
output = subprocess.check_output([".{}example".format(os.sep)])
            if b'Hello, World!\n' not in output:
raise Exception(output)
|
import os
import sys
import string
from PyQt5 import QtCore
self = sys.modules[__name__]
self._path = os.path.dirname(__file__)
self._current_task = None
class FormatDict(dict):
def __missing__(self, key):
return "{" + key + "}"
def resource(*path):
path = os.path.join(self._path, "res", *path)
return path.replace("\\", "/")
def schedule(task, delay=10):
"""Delay execution of `task` by `delay` milliseconds
As opposed to a plain `QTimer.singleShot`, this will also
ensure that only one task is ever queued at any one time.
"""
try:
self._current_task.stop()
except AttributeError:
# No task currently running
pass
timer = QtCore.QTimer()
timer.setSingleShot(True)
timer.timeout.connect(task)
timer.start(delay)
self._current_task = timer
def stream(stream):
for line in iter(stream.readline, ""):
yield line
def get_apps(project):
"""Define dynamic Application classes for project using `.toml` files
Args:
project (dict): project document from the database
Returns:
list: list of dictionaries
"""
import avalon.lib
import avalon.api as api
apps = []
for app in project["config"]["apps"]:
try:
app_definition = avalon.lib.get_application(app['name'])
except Exception as exc:
print("Unable to load application: %s - %s" % (app['name'], exc))
continue
# Get from app definition, if not there from app in project
icon = app_definition.get("icon", app.get("icon", "folder-o"))
color = app_definition.get("color", app.get("color", None))
order = app_definition.get("order", app.get("order", 0))
label = app.get("label", app_definition.get("label", app["name"]))
preactions = app_definition.get(
"preactions", app.get("preactions", None)
)
action = type(
"app_%s" % app["name"],
(api.Application,),
{
"name": app['name'],
"label": label,
"icon": icon,
"color": color,
"order": order,
"preactions": preactions,
"config": app_definition.copy()
}
)
apps.append(action)
return apps
def partial_format(s, mapping):
formatter = string.Formatter()
mapping = FormatDict(**mapping)
return formatter.vformat(s, (), mapping)
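# Illustrative sketch (an addition, not part of the original module): thanks to
# FormatDict.__missing__, partial_format leaves unknown placeholders intact, e.g.
# partial_format("{project}/{asset}", {"project": "alpha"})  # -> "alpha/{asset}"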
|
# import the necessary packages
import numpy as np
import base64
import sys
import cv2
def base64_encode_image(a):
# base64 encode the input NumPy array
return base64.b64encode(a)
def base64_decode_image(a, h):
    # decode the base64 string back into a flat byte buffer, then reshape to
    # (rows, h, 3); here `h` is the second image dimension (pixel columns)
    img_ = base64.b64decode(a)
    img = np.frombuffer(img_, dtype=np.uint8)
    img = np.reshape(img, (-1, h, 3))
    return img
if __name__ == "__main__":
pass
# img=cv2.imread("69300_1950-05-11_2009.jpg")
# img=cv2.resize(img,(64,64))
# cv2.imshow("",img)
# cv2.waitKey(0)
# print(img.shape)
# jpg_as_text = base64.b64encode(img)
# print(jpg_as_text[:80])
# # Convert back to binary
# jpg_original = base64.b64decode(jpg_as_text)
# jpg_as_np = np.frombuffer(jpg_original, dtype=np.uint8)
# print(jpg_as_np.shape)
# jpg= np.reshape(jpg_as_np,(-1,64,3))
# cv2.imshow("",jpg)
# cv2.waitKey(0)
|
import bcrypt
def encrypt_password(password: str) -> str:
"""
Encryption on a password
:param password: the password to encrypt
:return: the hashed password
"""
return bcrypt.hashpw(password.encode("utf8"),
bcrypt.gensalt()).decode("utf8")
def decrypt_password(password: str, hashed_password: str) -> bool:
"""
    Checks if the entered password matches the hashed password stored in the
    database
    :param password: the inputted plaintext password
    :param hashed_password: the password hash stored in the database
    :return: whether or not the password matches the hash
"""
return bcrypt.checkpw(password.encode("utf8"),
hashed_password.encode("utf8"))
|
# Copyright 2019 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import time
from flask import Flask, request, jsonify, g
from flask_api import status
from waitress import serve
from cortex.lib import util, Context, api_utils
from cortex.lib.log import cx_logger, debug_obj, refresh_logger
from cortex.lib.exceptions import CortexException, UserRuntimeException
app = Flask(__name__)
app.json_encoder = util.json_tricks_encoder
local_cache = {"ctx": None, "api": None, "class_set": set()}
@app.before_request
def before_request():
g.start_time = time.time()
@app.after_request
def after_request(response):
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Headers"] = "*"
if request.path != "/predict":
return response
api = local_cache["api"]
ctx = local_cache["ctx"]
cx_logger().info(response.status)
prediction = None
if "prediction" in g:
prediction = g.prediction
api_utils.post_request_metrics(
ctx, api, response, prediction, g.start_time, local_cache["class_set"]
)
return response
def prediction_failed(reason):
message = "prediction failed: {}".format(reason)
cx_logger().error(message)
return message, status.HTTP_406_NOT_ACCEPTABLE
@app.route("/healthz", methods=["GET"])
def health():
return jsonify({"ok": True})
@app.route("/predict", methods=["POST"])
def predict():
debug = request.args.get("debug", "false").lower() == "true"
    try:
        sample = request.get_json()
    except Exception:
        return "malformed json", status.HTTP_400_BAD_REQUEST
api = local_cache["api"]
predictor = local_cache["predictor"]
try:
try:
debug_obj("sample", sample, debug)
output = predictor.predict(sample, api["predictor"]["metadata"])
debug_obj("prediction", output, debug)
except Exception as e:
raise UserRuntimeException(api["predictor"]["path"], "predict", str(e)) from e
except Exception as e:
cx_logger().exception("prediction failed")
return prediction_failed(str(e))
g.prediction = output
return jsonify(output)
@app.errorhandler(Exception)
def exceptions(e):
cx_logger().exception(e)
return jsonify(error=str(e)), 500
def start(args):
api = None
try:
ctx = Context(s3_path=args.context, cache_dir=args.cache_dir, workload_id=args.workload_id)
api = ctx.apis_id_map[args.api]
local_cache["api"] = api
local_cache["ctx"] = ctx
if api.get("predictor") is None:
raise CortexException(api["name"], "predictor key not configured")
local_cache["predictor"] = ctx.get_predictor_impl(api["name"], args.project_dir)
if util.has_function(local_cache["predictor"], "init"):
try:
model_path = None
if api["predictor"].get("model") is not None:
_, prefix = ctx.storage.deconstruct_s3_path(api["predictor"]["model"])
model_path = os.path.join(
args.model_dir, os.path.basename(os.path.normpath(prefix))
)
local_cache["predictor"].init(model_path, api["predictor"]["metadata"])
except Exception as e:
raise UserRuntimeException(api["predictor"]["path"], "init", str(e)) from e
finally:
refresh_logger()
    except Exception:
cx_logger().exception("failed to start api")
sys.exit(1)
cx_logger().info("init ran successfully")
if api.get("tracker") is not None and api["tracker"].get("model_type") == "classification":
try:
local_cache["class_set"] = api_utils.get_classes(ctx, api["name"])
        except Exception:
            cx_logger().warning("an error occurred while attempting to load classes", exc_info=True)
cx_logger().info("API is ready")
serve(app, listen="*:{}".format(args.port))
def main():
parser = argparse.ArgumentParser()
na = parser.add_argument_group("required named arguments")
na.add_argument("--workload-id", required=True, help="Workload ID")
na.add_argument("--port", type=int, required=True, help="Port (on localhost) to use")
na.add_argument(
"--context",
required=True,
help="S3 path to context (e.g. s3://bucket/path/to/context.json)",
)
na.add_argument("--api", required=True, help="Resource id of api to serve")
na.add_argument("--model-dir", required=True, help="Directory to download the model to")
na.add_argument("--cache-dir", required=True, help="Local path for the context cache")
na.add_argument("--project-dir", required=True, help="Local path for the project zip file")
parser.set_defaults(func=start)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
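# Illustrative request sketch (an addition; port and payload are hypothetical):
#   curl -X POST "http://localhost:8888/predict?debug=true" \
#        -H "Content-Type: application/json" -d '{"feature": 1}'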
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import pkg_resources
from cryptography.hazmat.backends.multibackend import MultiBackend
_available_backends_list = None
def _available_backends():
global _available_backends_list
if _available_backends_list is None:
_available_backends_list = [
# DeprecatedIn16
# setuptools 11.3 deprecated support for the require parameter to
# load(), and introduced the new resolve() method instead.
# We previously removed this fallback, but users are having issues
# where Python loads an older setuptools due to various syspath
# weirdness.
ep.resolve() if hasattr(ep, "resolve") else ep.load(require=False)
for ep in pkg_resources.iter_entry_points(
"cryptography.backends"
)
]
return _available_backends_list
_default_backend = None
def default_backend():
global _default_backend
if _default_backend is None:
_default_backend = MultiBackend(_available_backends())
return _default_backend
|
#
# Copyright 2017 , UT-Battelle, LLC
# All rights reserved
# [Home Assistant- VOLTTRON Integration, Version 1.0]
# OPEN SOURCE LICENSE (Permissive)
#
# Subject to the conditions of this License, UT-Battelle, LLC (the “Licensor”)
# hereby grants, free of charge, to any person (the “Licensee”) obtaining a copy
# of this software and associated documentation files (the "Software"), a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license
# to use, copy, modify, merge, publish, distribute, and/or sublicense copies of the
# Software.
#
# 1. Redistributions of Software must retain the above open source license grant,
# copyright and license notices, this list of conditions, and the disclaimer listed
# below. Changes or modifications to, or derivative works of the Software must be
# noted with comments and the contributor and organization’s name.
#
# 2. Neither the names of Licensor, the Department of Energy, or their employees may
# be used to endorse or promote products derived from this Software without their
# specific prior written permission.
#
# 3. If the Software is protected by a proprietary trademark owned by Licensor or the
# Department of Energy, then derivative works of the Software may not be distributed
# using the trademark without the prior written approval of the trademark owner.
#
#
#
# ****************************************************************************************************************
# DISCLAIMER
#
# UT-Battelle, LLC AND THE GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES,
# BOTH EXPRESSED AND IMPLIED. THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY
# OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY
# PATENT, COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL
# ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY
# OR DAMAGE. The user assumes responsibility for all liabilities, penalties, fines, claims,
# causes of action, and costs and expenses, caused by, resulting from or arising out of, in
# whole or in part the use, storage or disposal of the SOFTWARE.
#
# ****************************************************************************************************************
#
import logging
import sys
import json
import requests
from csv import DictReader
from StringIO import StringIO
from volttron.platform.vip.agent import Agent, Core, PubSub
from volttron.platform.agent import utils
from master_driver.interfaces import BaseInterface, BasicRevert, BaseRegister
utils.setup_logging()
_log = logging.getLogger(__name__)
class Register(BaseRegister):
def __init__(self, read_only, volttron_point_name, units, description, point_name):
super(Register, self).__init__("byte",
read_only,
volttron_point_name,
units,
description=description)
self.path = point_name
class Interface(BasicRevert, BaseInterface):
def __init__(self, **kwargs):
super(Interface, self).__init__(**kwargs)
self.data = []
self.url = ''
def configure(self, config_dict, registry_config_str=''):
self.url = config_dict['device_address']
self.hassClimate = HASSClimate(self.url)
self.GetData()
self.register_components_information()
def GetData(self):
'''
Get the current state for loaded components
from Home Assistant API
'''
urlStates = self.url+'states'
try:
self.data = requests.get(urlStates).json()
except requests.exceptions.RequestException as e:
print(e)
def register_components_information(self):
'''
Registers the information about components loaded on HASS API
'''
msg = []
self.GetData()
try:
            if self.data == []:
                msg = ("No data was received from the HASS API. Please check the connection "
                       "to the API and the agent configuration file.")
                _log.error(msg)
else:
msg = []
for entry in self.data:
entityId = entry['entity_id']
################################################################################################################################################################
if(entityId.startswith("climate.")):
'''
registers data about the climate device with entity ID
'''
msg = entry['attributes']
climatePointName = 'climate#' + entityId + '#'
for key,value in msg.items():
if key == "away_mode":
self.register_device(False, climatePointName + key, 'string','Shows the away mode value')
elif key == "operation_mode":
self.register_device(False, climatePointName + key, 'string','Shows the operation mode value')
elif key == "fan_mode":
self.register_device(False, climatePointName + key, 'string','Shows the fan mode value')
elif key == "unit_of_measurement":
self.register_device(False, climatePointName + key, 'string','Shows the temperature unit of measurement')
elif key == "current_temperatuure":
self.register_device(False, climatePointName + key, 'float','Shows the current temperature value')
elif key == "aux_heat":
self.register_device(False, climatePointName + key, 'string','Shows the aux heat value value')
elif key == "max_temp":
self.register_device(False, climatePointName + key, 'float','Shows the max temperature value')
elif key == "min_temp":
self.register_device(False, climatePointName + key, 'float','Shows the min temperature value')
elif key == "temperature":
self.register_device(False, climatePointName + key, 'float','Shows the target temperature value')
elif key == "swing_mode":
self.register_device(False, climatePointName + key, 'string','Shows the swing mode value')
elif key == "target_temp_low":
self.register_device(False, climatePointName + key, 'float','Shows the target temperature low value')
elif key == "target_temp_high":
self.register_device(False, climatePointName + key, 'float','Shows the target temperature high value')
################################################################################################################################################################
except requests.exceptions.RequestException as e:
print(e)
def register_device(self, read_only, point_name, units,description):
'''
Register the information about the point name
'''
register = Register(
read_only,
point_name,
units,
description,
point_name)
self.insert_register(register)
def get_point(self, point_name, **kwargs):
'''
returns the value for the point_name
'''
pointNameInfo = point_name.split('#')
if(len(pointNameInfo) < 3):
_log.error("invalid point_name format")
return
val = self.get_point_name_value(pointNameInfo[0], point_name)
return str(val)
def get_point_name_value(self, component_type, point_name):
'''
Get the current value for loaded point_name
with component type loaded on Home Assistant API
'''
msg = []
self.GetData()
try:
            if self.data == []:
                msg = ("No data was received from the HASS API. Please check the connection "
                       "to the API and the agent configuration file.")
                _log.error(msg)
else:
msg = []
pointNameInfo = point_name.split('#')
if(len(pointNameInfo) < 3):
_log.error("invalid point_name format")
return
                entityId = pointNameInfo[1]
                attribute = pointNameInfo[2]
                for entry in self.data:
                    if entityId == entry['entity_id']:
                        deviceInfo = entry['attributes']
                        if attribute in deviceInfo:
                            if attribute == 'unit_of_measurement':
                                return deviceInfo[attribute].encode('utf8')
                            return deviceInfo[attribute]
                        else:
                            return "N/A"
except requests.exceptions.RequestException as e:
print(e)
def _set_point(self, point_name, value, **kwargs):
'''
sets the value for the point_name
'''
pointNameInfo = point_name.split('#')
if(len(pointNameInfo) < 3):
_log.error("invalid point_name format")
return
        componentType = pointNameInfo[0]
        entityId = pointNameInfo[1]
        attribute = pointNameInfo[2]
        if componentType == "climate":
            if attribute == "away_mode":
                self.hassClimate.SetAwayMode(entityId, value)
                return str(value)
            elif attribute == "aux_heat":
                self.hassClimate.SetAuxHeat(entityId, value)
                return str(value)
            elif attribute == "fan_mode":
                self.hassClimate.SetFanMode(entityId, value)
                return str(value)
            elif attribute == "swing_mode":
                self.hassClimate.SetSwingMode(entityId, value)
                return str(value)
            elif attribute == "operation_mode":
                self.hassClimate.SetOperationMode(entityId, value)
                return str(value)
            elif attribute == "temperature":
                pointNamePrefix = pointNameInfo[0] + '#' + pointNameInfo[1] + '#'
                operation_mode = self.get_point(pointNamePrefix + 'operation_mode')
                self.hassClimate.SetTargetTemperature(entityId, value, operation_mode)
                return str(value)
            elif attribute == "target_temp_low":
                pointNamePrefix = pointNameInfo[0] + '#' + pointNameInfo[1] + '#'
                operation_mode = self.get_point(pointNamePrefix + 'operation_mode')
                self.hassClimate.SetSetPointLow(entityId, value, operation_mode)
                return str(value)
            elif attribute == "target_temp_high":
                pointNamePrefix = pointNameInfo[0] + '#' + pointNameInfo[1] + '#'
                operation_mode = self.get_point(pointNamePrefix + 'operation_mode')
                self.hassClimate.SetSetPointHigh(entityId, value, operation_mode)
                return str(value)
def _scrape_all(self):
results = {}
        for point in self.point_map.keys():
            results[point] = self.get_point(point)
return results
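# Illustrative sketch (an addition; the entity id is hypothetical): point names
# follow '<component>#<entity_id>#<attribute>', e.g.
#   'climate#climate.living_room#temperature'
# which get_point and _set_point split on '#' into component type, entity id,
# and attribute.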
class HASSClimate(object):
def __init__(self, url):
self.url = url
def SetTargetTemperature(self, entityId, targetTemp, opMode):
'''
Sets temperature value for target temperature
for the climate.entityId device in operation mode opMode
'''
if targetTemp is None or (targetTemp == "N/A"):
return
urlServices = self.url+'services/climate/set_temperature'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "temperature": targetTemp, "operation_mode": opMode})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
def SetSetPointLow(self, entityId, setPointLow, opMode):
'''
Sets temperature value for set point low
for the climate.entityId device at operation mode
'''
if setPointLow is None or setPointLow == "N/A":
return
urlServices = self.url+'services/climate/set_temperature'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "target_temp_low":setPointLow, "operation_mode": opMode})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
def SetSetPointHigh(self, entityId, setpointHigh, opMode):
'''
Sets temperature value for set point high for the climate.entityId device
in current operation mode
'''
if setpointHigh is None or setpointHigh == "N/A":
return
urlServices = self.url+'services/climate/set_temperature'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "target_temp_high": setpointHigh, "operation_mode": opMode})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
def SetFanMode(self, entityId, fanMode):
'''
Sets fan mode value for the climate.entityId device
'''
if fanMode is None:
return
urlServices = self.url+'services/climate/set_fan_mode'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "fan": fanMode})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
def SetOperationMode(self, entityId, opMode):
'''
Sets operation mode value for the climate.entityId device
'''
if opMode is None:
return
urlServices = self.url+'services/climate/set_operation_mode'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "operation_mode": opMode})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
def SetAuxHeat(self, entityId, auxHeatOn):
'''
Turn aux heat on/ off for the climate.entityId device
'''
if auxHeatOn is None:
return
urlServices = self.url+'services/climate/set_aux_heat'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "aux_heat": auxHeatOn})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
def SetAwayMode(self, entityId, awayMode):
'''
Sets away mode value for the climate.entityId device
'''
if awayMode is None:
return
urlServices = self.url+'services/climate/set_away_mode'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "away_mode": awayMode})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
def SetHumidityValue(self, entityId, humidityValue):
'''
Sets the humidity value for the climate.entityId device
'''
if humidityValue is None:
return
urlServices = self.url+'services/climate/set_humidity'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "humidity": humidityValue})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
def SetSwingMode(self, entityId, swingMode):
'''
Sets swing mode value for the climate.entityId device
'''
if swingMode is None:
return
urlServices = self.url+'services/climate/set_swing_mode'
try:
jsonMsg = json.dumps({"entity_id" : entityId, "swing_mode": swingMode})
header = {'Content-Type': 'application/json'}
requests.post(urlServices, data = jsonMsg, headers = header)
except requests.exceptions.RequestException as e:
print(e)
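# Hedged usage sketch (not part of the original module). The enclosing class
# name and constructor are defined earlier in this file; the class name, base
# URL, and entity id below are hypothetical placeholders.
#
# ha = HomeAssistantApi(url='http://localhost:8123/api/')  # hypothetical name
# ha.SetOperationMode('climate.living_room', 'heat')
# ha.SetFanMode('climate.living_room', 'auto')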
|
# Global settings for photologue example project.
import os
DEBUG = TEMPLATE_DEBUG = True
# Top level folder - the one created by the startproject command.
TOP_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
ADMINS = ()
MANAGERS = ADMINS
# Default dev database is SQLite. In production I use PostgreSQL.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(TOP_FOLDER, 'database.sql3')
}
}
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
# TODO: setting this to True in Django 1.4 causes runtime warnings; when 1.4
# is end-of-lifed in 2014 we can change this setting to True.
USE_TZ = False
MEDIA_ROOT = os.path.join(TOP_FOLDER, 'public', 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(TOP_FOLDER, 'public', 'static')
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(TOP_FOLDER, 'example_project/static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '3p0f5q)l$=gt++#z0inpfh%bm_ujl6(-yogbzw2)(xea48@70d'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'example_project.urls'
from photologue import PHOTOLOGUE_TEMPLATE_DIR
TEMPLATE_DIRS = (
os.path.join(TOP_FOLDER, 'example_project/templates'),
PHOTOLOGUE_TEMPLATE_DIR
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
)
FIXTURE_DIRS = (
os.path.join(TOP_FOLDER, 'example_project/fixtures'),
)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'photologue',
'example_project',
'south',
]
SOUTH_TESTS_MIGRATE = False
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 19 13:47:01 2019
@author: abibeka
Read data from travel time files
"""
#0.0 Housekeeping. Clear variable space
from IPython import get_ipython #run magic commands
ipython = get_ipython()
ipython.magic("reset -f")
ipython = get_ipython()
import os
import pandas as pd
import numpy as np
import glob
import datetime
# Use consistent naming in VISSIM
def TTSegName(x):
TTSeg= {1: 'EB',
2: 'WB',
3: 'EB LPGA (Tomoka Rd to I-95 SB Ramp)',
4: 'EB LPGA (I-95 SB Ramp to I-95 NB Ramp)',
5: 'EB LPGA (I-95 NB Ramp to Technology Blvd)',
6: 'EB LPGA (Technology Blvd to Willamson Blvd)',
7: 'EB LPGA (Willamson Blvd to Clyde-Morris Blvd)',
8: 'WB LPGA (Clyde-Morris Blvd to Willamson Blvd)',
9: 'WB LPGA (Willamson Blvd to Technology Blvd)',
10:'WB LPGA (Technology Blvd to I-95 NB Ramp)',
11:'WB LPGA (I-95 NB Ramp to I-95 SB Ramp)',
12:'WB LPGA (I-95 SB Ramp to Tomoka Rd)',
13:'SB I-95',
14:'NB I-95',
15:'SB I-95 (SR40 to SB OffRamp)',
16:'SB I-95 (SB OffRamp to SB LoopRamp)',
17:'SB I-95 (SB LoopRamp to SB On-Ramp)',
18:'SB I-95 (SB On-Ramp to US92)',
19:'NB I-95 (US92 to NB OffRamp)',
20:'NB I-95 (NB OffRamp to NB LoopRamp)',
21:'NB I-95 ( NB LoopRamp to NB On-Ramp)',
22:'NB I-95 (NB On-Ramp to SR40)'}
if(x<23):
Nm = TTSeg[x]
else: Nm = None
return Nm
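# Example: TTSegName(3) -> 'EB LPGA (Tomoka Rd to I-95 SB Ramp)'; TTSegName(23) -> None.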
# VISSIM File
#*********************************************************************************
PathToExist = r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\LPGA\July-6-2020\Models---July-2020\VISSIM\Existing'
ExistingPMfi = '20834_Existing_PM_Vehicle Travel Time Results.att'
ExistingPMfi = os.path.join(PathToExist,ExistingPMfi)
ExistingAMfi ='20834_Existing_AM_Vehicle Travel Time Results.att'
ExistingAMfi = os.path.join(PathToExist,ExistingAMfi)
def PreProcessVissimTT(file = ExistingAMfi):
# Define the PM reference file; used later to decide whether `file` is the PM file.
# Note: this is hard-coded and will break if the PM file is renamed.
RefFile = ExistingPMfi
# Read VISSIM results
ExistingAMDat=pd.read_csv(file,sep =';',skiprows=17)
ExistingAMDat.columns
ExistingAMDat.rename(columns={'TRAVTM(ALL)':'VissimTT','VEHS(ALL)':'Veh'},inplace=True)
mask=ExistingAMDat["$VEHICLETRAVELTIMEMEASUREMENTEVALUATION:SIMRUN"]=="AVG"
ExistingAMDat = ExistingAMDat[mask]
ExistingAMDat["TTSegNm"]=ExistingAMDat['VEHICLETRAVELTIMEMEASUREMENT'].apply(TTSegName)
WB_TTSegs = ['WB LPGA (Clyde-Morris Blvd to Willamson Blvd)',
'WB LPGA (Willamson Blvd to Technology Blvd)',
'WB LPGA (Technology Blvd to I-95 NB Ramp)',
'WB LPGA (I-95 NB Ramp to I-95 SB Ramp)',
'WB LPGA (I-95 SB Ramp to Tomoka Rd)']
#'4500-5400','5400-6300','6300-7200','7200-8100' are peak periods
if file == RefFile:
mask1 = (ExistingAMDat.TIMEINT.isin(['4500-5400','5400-6300','6300-7200','7200-8100'])) & (~ExistingAMDat.TTSegNm.isin(WB_TTSegs))
#Can include '8100-9000' as WB TT Run includes 15 min after the peak
mask2 = (ExistingAMDat.TIMEINT.isin(['4500-5400','5400-6300','6300-7200','7200-8100','8100-9000'])) & (ExistingAMDat.TTSegNm.isin(WB_TTSegs))
mask = mask1 | mask2
else:
mask = ExistingAMDat.TIMEINT.isin(['4500-5400','5400-6300','6300-7200','7200-8100'])
ExistingAMDat = ExistingAMDat[mask]
# Get weighted average over the 4/5 intervals
ExistingAMDat.loc[:,'VissimTotalTT'] = ExistingAMDat.VissimTT * ExistingAMDat.Veh
ExistingAMDat = ExistingAMDat.groupby(['VEHICLETRAVELTIMEMEASUREMENT','TTSegNm'])[['VissimTotalTT','Veh']].sum().reset_index()
ExistingAMDat.loc[:,'WeightedVissimTT'] = ExistingAMDat.VissimTotalTT/ExistingAMDat.Veh
return(ExistingAMDat)
ExistingAMDat = PreProcessVissimTT(ExistingAMfi)
ExistingPMDat = PreProcessVissimTT(ExistingPMfi)
#*********************************************************************************
#TTRun Files
#*********************************************************************************
def ProcessObsData(TimePeriod,VissimDat):
#Read the observed TT data
TTRunFile = os.path.join(r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\LPGA\VISSIM-Files\TTData-by-Intersection.xlsx')
x1 = pd.ExcelFile(TTRunFile)
x1.sheet_names
Dat = x1.parse(TimePeriod, index_col=0,nrows= 5,usecols=['ClosestInt', 'DistFromSpd', 'DistBtwPnt', 'TimeDiff', 'SMS_mph',
'SegName'])
#Merge with ViSSIM TT data
Dat = pd.merge(Dat,VissimDat,left_on=['ClosestInt'],right_on=['VEHICLETRAVELTIMEMEASUREMENT'], how ='outer')
Dat.loc[:,'DiffInTravelTime'] = Dat.TimeDiff- Dat.WeightedVissimTT
Dat.rename(columns={'TimeDiff':'ObsTravelTime'},inplace=True)
Dat = Dat[['TTSegNm','ObsTravelTime','WeightedVissimTT','DiffInTravelTime']]
Dat = Dat.round(1)
return(Dat)
FinTTDat = {}
FinTTDat['ExistingAM_EB_TT'] =ProcessObsData('AM_EB',ExistingAMDat)
FinTTDat['ExistingAM_WB_TT'] = ProcessObsData('AM_WB',ExistingAMDat).sort_index(ascending=False)
FinTTDat['ExistingPM_EB_TT'] = ProcessObsData('PM_EB',ExistingPMDat)
FinTTDat['ExistingPM_WB_TT'] = ProcessObsData('PM_WB',ExistingPMDat).sort_index(ascending=False)
#*********************************************************************************
# Write to excel
#*********************************************************************************
PathToKeyVal = r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\LPGA\July-6-2020\Models---July-2020\VISSIM'
OutFi = "Report-TT-GEH-Results.xlsx"
OutFi = os.path.join(PathToKeyVal,OutFi)
writer=pd.ExcelWriter(OutFi)
startrow1 = 1
for key,val in FinTTDat.items():
# Write tables on the same sheet with 2-row spacing
val.to_excel(writer,'TTResults', startrow = startrow1+3)
worksheet = writer.sheets['TTResults']
worksheet.cell(startrow1+2, 1, key)
startrow1 = worksheet.max_row
writer.save()
|
#Compound Selection Behavior
#https://kivy.org/docs/api-kivy.uix.behaviors.compoundselection.html
from kivy.uix.behaviors.compoundselection import CompoundSelectionBehavior
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.behaviors import FocusBehavior
from kivy.core.window import Window
from kivy.app import App
class SelectableGrid(FocusBehavior, CompoundSelectionBehavior, GridLayout):
def keyboard_on_key_down(self, window, keycode, text, modifiers):
"""Based on FocusBehavior that provides automatic keyboard
access, key presses will be used to select children.
"""
print("down keycode",keycode)
if super(SelectableGrid, self).keyboard_on_key_down(
window, keycode, text, modifiers):
print(True)
return True
if self.select_with_key_down(window, keycode, text, modifiers):
print(True)
return True
print(False)
return False
def keyboard_on_key_up(self, window, keycode):
"""Based on FocusBehavior that provides automatic keyboard
access, key release will be used to select children.
"""
print("up keycode",keycode)
if super(SelectableGrid, self).keyboard_on_key_up(window, keycode):
print(True)
return True
if self.select_with_key_up(window, keycode):
print(True)
return True
print(False)
return False
def add_widget(self, widget):
""" Override the adding of widgets so we can bind and catch their
*on_touch_down* events. """
widget.bind(on_touch_down=self.button_touch_down,
on_touch_up=self.button_touch_up)
return super(SelectableGrid, self).add_widget(widget)
def button_touch_down(self, button, touch):
""" Use collision detection to select buttons when the touch occurs
within their area. """
if button.collide_point(*touch.pos):
self.select_with_touch(button, touch)
def button_touch_up(self, button, touch):
""" Use collision detection to de-select buttons when the touch
occurs outside their area and *touch_multiselect* is not True. """
if not (button.collide_point(*touch.pos) or
self.touch_multiselect):
self.deselect_node(button)
def select_node(self, node):
print("node",node,'selected')
node.background_color = (1, 0, 0, 1)
super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
print("node",node,'deselected')
node.background_color = (1, 1, 1, 1)
super(SelectableGrid, self).deselect_node(node)
def on_selected_nodes(self, grid, nodes):
print("Selected nodes = {0}".format(nodes))
class TestApp(App):
def build(self):
grid = SelectableGrid(cols=3, rows=2, touch_multiselect=True,
multiselect=True)
for i in range(0, 6):
grid.add_widget(Button(text="Button {0}".format(i)))
return grid
TestApp().run()
|
# -*- coding: utf-8 -*-
# Simulate standards and thin film specimens of ZnO on Si for
# measuring K-ratios for thin film measurement.
#
# For 500 trajectories at 4 kV:
# This script required 1.248 min
# Elapse: 0:01:14.9
#
# For 50000 trajectories at 4kV:
# This script required 144.035 min
# ...or 2.401 hr
# Elapse: 2:24:02.7
import sys
import os
import time
import shutil
import gov.nist.microanalysis.Utility as nu
import dtsa2.mcSimulate3 as mc3
import dtsa2.jmGen as jmg
# adjustable parameters
e0 = 4.0
det = findDetector("Oxford p4 05eV 2K")
lThNm = [ 0.1, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0,
70.0, 80.0, 90.0, 100.0, 110.0, 115.0, 120.0,
125.0, 130.0, 135.0, 140.0, 145.0, 150.0]
nTraj = 50000 # trajectories
dose = 5000 # nA-sec
nDigits = 6
# directories
gitDir = os.environ['GIT_HOME']
relPrj = "/dtsa2Scripts/utility"
prjDir = gitDir + relPrj
resDir = prjDir + '/sim-zno-on-si-4kV'
jmg.ensureDir(resDir)
rptDir = prjDir + '/sim-zno-on-si-4kV Results/'
DataManager.clearSpectrumList()
start = time.time()
zno = material("ZnO", density=5.61)
si = material("Si", density=2.33)
xrts = []
trs = mc3.suggestTransitions(zno, e0)
for tr in trs:
xrts.append(tr)
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(xrts,True, True, True))
# xtraParams.update(mc3.configureOutput(simDir))
spc_zno_std = mc3.simulate(zno, det, e0, dose, True, nTraj, True, True, xtraParams)
sName = "ZnO std-%g-kV" % (e0)
spc_zno_std.rename(sName)
spc_zno_std.setAsStandard(zno)
spc_zno_std.display()
fi = resDir + "/"
fi += sName
fi += "-%g-Traj.msa" % (nTraj)
spc_zno_std.save(fi)
xrts = []
trs = mc3.suggestTransitions(si, e0)
for tr in trs:
xrts.append(tr)
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(xrts,True, True, True))
# xtraParams.update(mc3.configureOutput(simDir))
spc_si_std = mc3.simulate(si, det, e0, dose, True, nTraj, True, True, xtraParams)
sName = "Si std-%g-kV" % (e0)
spc_si_std.rename(sName)
spc_si_std.setAsStandard(si)
spc_si_std.display()
fi = resDir + "/"
fi += sName
fi += "-%g-Traj.msa" % (nTraj)
spc_si_std.save(fi)
xrts = []
trs = mc3.suggestTransitions(zno, e0)
for tr in trs:
xrts.append(tr)
trs = mc3.suggestTransitions(si, e0)
for tr in trs:
xrts.append(tr)
xtraParams={}
xtraParams.update(mc3.configureXRayAccumulators(xrts,True, True, True))
stds = { element("O"): spc_zno_std, element("Zn"): spc_zno_std, element("Si"): spc_si_std }
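# Note: 'stds' maps each element to its standard spectrum; it feeds the
# commented-out kratios() call near the end of the loop below. The FilterFit
# references are what this script actually uses.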
print("Specimen Name \t\t Oxygen Ka1 K-Ratio \t Zinc La1 K-Ratio \t Si Ka K-Ratio ")
#5 nm ZnO on Si at 4 kV 0.073166 ± 0.000133 0.075862 ± 0.000106 0.877289 ± 0.000202
for thNm in lThNm:
thUm = thNm/1000.
spc = mc3.coatedSubstrate(zno, thUm, si, det, e0,
True, nTraj, dose, True,
True, xtraParams)
sName = "%g nm ZnO on Si at %g kV" % (thNm, e0)
spc.rename(sName)
spc.display()
fi = resDir + "/"
fi += sName
fi += "-%g-Traj.msa" % (nTraj)
spc.save(fi)
ff=epq.FilterFit(det,epq.ToSI.keV(e0))
ff.addReference(element("O"), spc_zno_std)
ff.addReference(element("Zn"), spc_zno_std)
ff.addReference(element("Si"), spc_si_std)
kr = ff.getKRatios(spc)
kO = kr.getKRatioU(epq.XRayTransition(epq.Element.O, epq.XRayTransition.KA1))
kZn = kr.getKRatioU(epq.XRayTransition(epq.Element.Zn,epq.XRayTransition.LA1))
kSi = kr.getKRatioU(epq.XRayTransition(epq.Element.Si,epq.XRayTransition.KA1))
print (u"%s\t%g \u00B1 %g\t%g \u00B1 %g\t%g \u00B1 %g" % ( sName,
round(kO.doubleValue(), nDigits ),
round(kO.uncertainty(), nDigits ),
round(kZn.doubleValue(), nDigits ),
round(kZn.uncertainty(), nDigits ),
round(kSi.doubleValue(), nDigits ),
round(kSi.uncertainty(), nDigits )))
# kr = kratios(spc, stds, refs={})
# print(kr[0])
# clean up cruft
shutil.rmtree(rptDir)
print "Done!"
end = time.time()
delta = (end-start)/60
msg = "This script required %.3f min" % delta
print msg
if(delta > 60):
delta = delta/60
msg = "...or %.3f hr" % delta
print msg
|
"""See https://docs.python.org/3/library/unittest.html#assert-methods
for more information about 'unittest' framework.
"""
import unittest
class TestEmptyCM(unittest.TestCase):
"""Class with all the unit test made on the CM."""
def test_data(self):
"""Function performing a unit test."""
pass
if __name__ == "__main__":
unittest.main()
|
""" Helper functions for the test suite
"""
# Standard library imports
import json
import sys
# Local imports
from spotifython.album import Album
from spotifython.artist import Artist
from spotifython.playlist import Playlist
from spotifython.track import Track
from spotifython.user import User
#pylint: disable=wrong-import-position
sys.path.append('../spotifython')
import spotifython.constants as const
BASE_PATH = 'tests/dummy_data/'
FILE_MAPPINGS = {
const.ALBUMS: 'albums.json', # 100 total
const.ARTISTS: 'artists.json', # 336 total
const.PLAYLISTS: 'playlists.json', # 38 total
const.TRACKS: 'tracks.json', # 205 total
const.USERS: 'users.json' # 200 total
}
CLASS_MAPPINGS = {
const.ALBUMS: Album,
const.ARTISTS: Artist,
const.PLAYLISTS: Playlist,
const.TRACKS: Track,
const.USERS: User
}
def get_dummy_data(data_type: str,
limit: int = 50,
to_obj: bool = False):
""" Helper function for the test suite to get dummy data
Dummy data was captured by calling the Spotify REST API and saving the responses
Args:
data_type: one of the supported data types listed in FILE_MAPPINGS.
limit: the maximum number of items to return; may return fewer
to_obj: if True, will construct the objects
if False, will return raw jsons
Returns:
A list of JSON dicts from Spotify (or constructed objects when to_obj is True), one per data_type item.
"""
if data_type not in [const.ARTISTS,
const.ALBUMS,
const.PLAYLISTS,
const.USERS,
const.TRACKS]:
raise TypeError(data_type)
with open(BASE_PATH + FILE_MAPPINGS[data_type], 'r') as fp:
result = json.load(fp)
result = result['items'][:limit]
if to_obj:
map_func = lambda elem: CLASS_MAPPINGS[data_type](None, elem)
result = [map_func(elem) for elem in result]
return result
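# Hedged usage sketch (assumes the dummy-data JSON files exist under
# tests/dummy_data/):
#
# albums_json = get_dummy_data(const.ALBUMS, limit=10)           # raw dicts
# albums = get_dummy_data(const.ALBUMS, limit=10, to_obj=True)   # Album objects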
|
## Sid Meier's Civilization 4
from CvPythonExtensions import *
import CvUtil
import ScreenInput
import CvScreenEnums
import math
# globals
gc = CyGlobalContext()
ArtFileMgr = CyArtFileMgr()
localText = CyTranslator()
class TechTree:
"Creates Tech Tree"
def __init__ (self):
self.dIsRequiredPrereqFor = {}
self.dIsOptionalPrereqFor = {}
self.dRequiredPrereqTechs = {}
self.dOptionalPrereqTechs = {}
for iLoopTech in range (gc.getNumTechInfos()):
RequiredPrereqs = []
OptionalPrereqs = []
for k in range(gc.getNUM_OR_TECH_PREREQS()):
eOrPrereq = gc.getTechInfo (iLoopTech).getPrereqOrTechs(k)
if eOrPrereq != -1:
OptionalPrereqs.append (eOrPrereq)
for k in range(gc.getNUM_AND_TECH_PREREQS()):
eAndPrereq = gc.getTechInfo (iLoopTech).getPrereqAndTechs(k)
if eAndPrereq != -1:
RequiredPrereqs.append (eAndPrereq)
if len(OptionalPrereqs) == 1:
RequiredPrereqs.extend (OptionalPrereqs)
self.dOptionalPrereqTechs [iLoopTech] = []
else:
self.dOptionalPrereqTechs [iLoopTech] = OptionalPrereqs
self.dRequiredPrereqTechs [iLoopTech] = RequiredPrereqs
for iTech in range (gc.getNumTechInfos()):
RequiredPrereqFor = []
OptionalPrereqFor = []
for iLoopTech in range(gc.getNumTechInfos()):
if iTech in self.dRequiredPrereqTechs[iLoopTech]:
RequiredPrereqFor.append (iLoopTech)
elif iTech in self.dOptionalPrereqTechs [iLoopTech]:
OptionalPrereqFor.append (iLoopTech)
self.dIsRequiredPrereqFor[iTech] = RequiredPrereqFor
self.dIsOptionalPrereqFor[iTech] = OptionalPrereqFor
# CvUtil.pyPrint ("%s" % self.dIsRequiredPrereqFor)
def RequiredLeadsTo (self, iTech):
return self.dIsRequiredPrereqFor[iTech]
def OptionalLeadsTo (self, iTech):
return self.dIsOptionalPrereqFor[iTech]
def RequiredPrereqs (self, iTech):
return self.dRequiredPrereqTechs[iTech]
def OptionalPrereqs (self, iTech):
return self.dOptionalPrereqTechs[iTech]
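# Hedged usage sketch (only runs inside Civilization 4's Python environment):
# tree = TechTree()
# tree.RequiredPrereqs(iTech)   # AND-prerequisite techs of iTech
# tree.OptionalPrereqs(iTech)   # OR-prerequisite techs of iTech
# tree.RequiredLeadsTo(iTech)   # techs for which iTech is a required prereq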
|
import torch
import torch.onnx
import torchvision.models as models
from torch import nn
from torch.autograd import Variable
from resnet import resnet18
from vgg import vgg16_bn
import onnx as nx
def main():
device = torch.device('cpu')
model = vgg16_bn()
# PATH = 'weights/resnet18_acc_89.078.pt'
PATH = 'weights/imagenet_vgg16_acc_92.096.pt'
model_state = torch.load(PATH, map_location=device)
new_model_state = {}
""" cuda trained models need to be loaded with the nn
DataParallel module. prefix incompatibility is caused by
the module key word
"""
for key in model_state.keys():
new_model_state[key[7:]] = model_state[key]
try:
model.load_state_dict(new_model_state)
except Exception as e:
print("Could not load pytorch model:", e)
return
# convert torch to onnx
input_size = Variable(torch.randn(1, 3, 224, 224, device='cpu'))
file_name = PATH.split('/')[1].rsplit('.', 1)[0]  # rsplit, not rstrip: rstrip('.pt') strips characters, not the suffix
torch.onnx.export(model, input_size, "resources/{0:}.onnx".format(file_name), export_params=True, keep_initializers_as_inputs=True, verbose=True, input_names=["input_1"], output_names=["output_1"])
onnx_model = nx.load("resources/{0:}.onnx".format(file_name))
nx.checker.check_model(onnx_model)
print("model has been verified")
if __name__ == '__main__':
main()
|
import pandas as pd
import numpy as np
import math
import os, sys
import random
import tensorflow as tf
from sklearn.model_selection import train_test_split
import dataset_utils
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('output_dir',
'./airbus/tfrecords',
'output tfrecords directory')
tf.app.flags.DEFINE_string('image_folder',
None,
'Folder containing images.')
tf.app.flags.DEFINE_string('csv_file',
None,
'segmentation csv file')
tf.app.flags.DEFINE_integer('num_shards',
50,
'number of shards for train/val set')
"""
Train size: 42556 - 1000 = 41556
Val size: 1000
Total size: 192556 including noship (empty) images
"""
_SEED = 97
_IMAGE_SIZE = 768
_NUM_CHANNELS = 3
# The names of the classes.
_CLASS_NAMES = [
'noship',
'ship'
]
masks_rle = pd.read_csv(FLAGS.csv_file)
def _extract_image(filename):
"""Extract the images into a numpy array.
Args:
filename: The path to an airbus images file.
Returns:
A binary format of image jpg string
"""
# print('Extracting images from: ', filename)
return tf.gfile.FastGFile(filename, 'rb').read()
def _extract_label(imageId):
"""Extract label for the given image id
Returns:
A numpy array of shape [number_of_labels]
"""
# print('Extracting labels for image: ', imageId)
mask = masks_rle.loc[masks_rle['ImageId'] == imageId, 'EncodedPixels'].dropna().tolist()
if len(mask) == 0:
return 0
else:
return 1
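# Example: an ImageId with at least one non-empty EncodedPixels row -> 1 ('ship');
# an ImageId with no masks -> 0 ('noship'), matching the _CLASS_NAMES indices.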
def _prepare_filenames(image_folder, dataset_split, seed):
filenames = os.listdir(image_folder)
# perform the train-val split based on the same constant seed
train_imgs, val_imgs = train_test_split(filenames, test_size=0.1, random_state=seed)
if dataset_split == 'train':
filenames = train_imgs
elif dataset_split == 'val':
filenames = val_imgs
else:
raise ValueError('Unknown dataset split')
tf.logging.info('dataset split: {} dataset size: {}'.format(dataset_split, len(filenames)))
tf.logging.info('dataset files are like: {}'.format(filenames[:5]))
return filenames
def _convert_dataset(dataset_split):
filenames = _prepare_filenames(FLAGS.image_folder, dataset_split, _SEED)
num_images = len(filenames)
num_shards = FLAGS.num_shards
num_per_shard = int(math.ceil(num_images / float(num_shards)))
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=(_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS))
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for shard_id in range(num_shards):
output_filename = os.path.join(FLAGS.output_dir,'%s-%05d-of-%05d.tfrecord' % (dataset_split, shard_id, num_shards))
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, num_images)
for i in range(start_idx, end_idx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i + 1, len(filenames), shard_id))
sys.stdout.flush()
image_filename = os.path.join(FLAGS.image_folder, filenames[i])
image_id = filenames[i]
img_data = _extract_image(image_filename)
label = _extract_label(image_id)
# png_string = sess.run(encoded_png, feed_dict={image: img})
example = dataset_utils.image_to_tfexample(img_data, 'jpeg'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, label)
tfrecord_writer.write(example.SerializeToString())
print('Finished processing set: ', dataset_split, ' converted images: ', num_images)
def main(unused_argv):
if len(os.listdir(FLAGS.output_dir)) != 0:
raise RuntimeError('Remember to clear output dir: {} before you run this'.format(FLAGS.output_dir))
_convert_dataset('train')
_convert_dataset('val')
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, FLAGS.output_dir)
print('finished converting airbus dataset')
if __name__ == '__main__':
tf.app.run()
|
#!python3
import os, sys
import queue
import shlex
import threading
import time
import zmq
from . import config
from . import logging
log = logging.logger(__package__)
from . import outputs
class RobotError(Exception): pass
class NoSuchActionError(RobotError): pass
class Robot(object):
def __init__(
self,
output,
stop_event=None,
listen_on_ip=config.LISTEN_ON_IP, listen_on_port=config.LISTEN_ON_PORT
):
log.info("Setting up Robot on %s:%s", listen_on_ip, listen_on_port)
log.info("Outputting to %s", output)
self.stop_event = stop_event or threading.Event()
self._init_socket(listen_on_ip, listen_on_port)
self.output = output
self.output._init()
def _init_socket(self, listen_on_ip, listen_on_port):
context = zmq.Context()
self.socket = context.socket(zmq.REP)
self.socket.bind("tcp://%s:%s" % (listen_on_ip, listen_on_port))
def get_command(self):
"""Attempt to return a unicode object from the command socket
If no message is available without blocking (as opposed to a blank
message), return None
"""
try:
message_bytes = self.socket.recv(zmq.NOBLOCK)
log.debug("Received message: %r", message_bytes)
except zmq.ZMQError as exc:
if exc.errno == zmq.EAGAIN:
return None
else:
raise
else:
return message_bytes.decode(config.CODEC)
def send_response(self, response):
"""Send a unicode object as reply to the most recently-issued command
"""
response_bytes = response.encode(config.CODEC)
log.debug("About to send reponse: %r", response_bytes)
self.socket.send(response_bytes)
def parse_command(self, command):
"""Break a multi word command up into an action and its parameters
"""
words = shlex.split(command.lower())
return words[0], words[1:]
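# Example: parse_command('Forward 10 "two words"') -> ('forward', ['10', 'two words'])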
def dispatch(self, command):
"""Pass a command along with its params to a suitable handler
If the command is blank, succeed silently
If the command has no handler, succeed silently
If the handler raises an exception, fail with the exception message
"""
log.info("Dispatch on %s", command)
if not command:
return "OK"
action, params = self.parse_command(command)
log.debug("Action = %s, Params = %s", action, params)
try:
function = getattr(self, "do_" + action, None)
if function:
function(*params)
return "OK"
except KeyboardInterrupt:
raise
except Exception as exc:
log.exception("Problem executing action %s", action)
return "ERROR: %s" % exc
def do_output(self, *args):
"""Pass a command directly to the current output processor
"""
if args:
action, params = args[0], args[1:]
log.debug("Pass %s directly to output with %s", action, params)
function = getattr(self.output, "do_" + action, None)
if function:
function(*params)
def do_finish(self):
self.stop_event.set()
#
# Main loop
#
def start(self):
while not self.stop_event.is_set():
try:
command = self.get_command()
if command is not None:
response = self.dispatch(command.strip())
self.send_response(response)
except KeyboardInterrupt:
log.warn("Closing gracefully...")
self.stop_event.set()
break
except:
log.exception("Problem in main loop")
self.stop_event.set()
raise
def main(args):
if not hasattr(outputs, args.output):
raise RuntimeError("Invalid output: %s" % args.output)
output = getattr(outputs, args.output)
robot = Robot(output=output)
robot.start()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output", default="text")
args = parser.parse_args()
sys.exit(main(args))
|
#!/usr/bin/env python3
import re
from reporter.connections import RedcapInstance
from reporter.emailing import (
RECIPIENT_CVLPRIT_ADMIN as RECIPIENT_ADMIN,
RECIPIENT_CVLPRIT_MANAGER as RECIPIENT_MANAGER,
)
from reporter.application_abstract_reports.redcap.data_quality import (
RedcapFieldMatchesRegularExpression,
RedcapInvalidDate,
RedcapInvalidHeightInM,
RedcapInvalidWeightInKg,
RedcapInvalidBmi,
RedcapInvalidBloodPressure,
)
REDCAP_PROJECT_ID = 18
class CvlpritRedcapRecordId(RedcapFieldMatchesRegularExpression):
def __init__(self):
super().__init__(
redcap_instance=RedcapInstance.uol_lamp,
project_id=REDCAP_PROJECT_ID,
fields=['record_id'],
regular_expression=r'^\d{1,4}$',
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
class CvlpritRedcapGlobalPatientId(RedcapFieldMatchesRegularExpression):
def __init__(self):
super().__init__(
redcap_instance=RedcapInstance.uol_lamp,
project_id=REDCAP_PROJECT_ID,
fields=['patient_id'],
regular_expression=r'^X\d{3}$',
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
class CvlpritRedcapLocalSitePatientId(RedcapFieldMatchesRegularExpression):
def __init__(self):
super().__init__(
redcap_instance=RedcapInstance.uol_lamp,
project_id=REDCAP_PROJECT_ID,
fields=['local_id'],
regular_expression=r'\d{1,3}',
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
class CvlpritRedcapInvalidDate(RedcapInvalidDate):
def __init__(self):
super().__init__(
redcap_instance=RedcapInstance.uol_lamp,
project_id=REDCAP_PROJECT_ID,
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
class CvlpritRedcapInvalidHeightInM(RedcapInvalidHeightInM):
def __init__(self):
super().__init__(
redcap_instance=RedcapInstance.uol_lamp,
project_id=REDCAP_PROJECT_ID,
fields=['height'],
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
class CvlpritRedcapInvalidWeightInKg(RedcapInvalidWeightInKg):
def __init__(self):
super().__init__(
redcap_instance=RedcapInstance.uol_lamp,
project_id=REDCAP_PROJECT_ID,
fields=['weight'],
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
class CvlpritRedcapInvalidBloodPressure(RedcapInvalidBloodPressure):
def __init__(self):
super().__init__(
redcap_instance=RedcapInstance.uol_lamp,
project_id=REDCAP_PROJECT_ID,
systolic_field_name=['systolic_bp'],
diastolic_field_name=['diastolic_bp'],
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
|
"""Unit tests for numbering.py."""
import itertools
import unittest
import pmix.numbering as numbering
class NumberingFormatTest(unittest.TestCase):
"""Test the string format for fixed numberings."""
def match_re(self, prog, expr, match):
found = prog.match(expr)
if match:
msg = 'Expected "{}" to be accepted.'.format(expr)
self.assertIsNotNone(found, msg=msg)
else:
msg = 'Expected "{}" not to be accepted.'.format(expr)
self.assertIsNone(found, msg=msg)
def test_upper_re(self):
"""Regex-ify uppercase numbering."""
this_prog = numbering.Numbering.letter_prog
good = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
for item in good:
self.match_re(this_prog, item, True)
bad_single = tuple("ÁÉ01")
other_bad = ("", "AA", "A1", "1A", "_", "-", "A.", " A", "A ")
for item in itertools.chain(bad_single, other_bad):
self.match_re(this_prog, item, False)
def test_number_re(self):
"""Regex-ify pure number numbering."""
this_prog = numbering.Numbering.number_prog
good = ("001", "101", "201", "LCL_301", "A1", "FLW801", "PHC105", "1")
for item in good:
self.match_re(this_prog, item, True)
bad = ("", "001a", "10.", "_", "-", "SN_101i")
for item in bad:
self.match_re(this_prog, item, False)
def test_ext_letter_re(self):
"""Regex-ify extended numbering with letter."""
this_prog = numbering.Numbering.ext_letter_prog
good = ("001a", "PHC101d", "101z", "LCL_308a", "SN_101.i")
for item in good:
self.match_re(this_prog, item, True)
bad = ("001", "A", "", "_", "-", "PHC101.ix", "101a.", " 101a", "1a ")
for item in bad:
self.match_re(this_prog, item, False)
def test_ext_roman_re(self):
"""Regex-ify extended numbering with roman numeral."""
this_prog = numbering.Numbering.ext_roman_prog
good = (
"1a.i",
"2b.ii",
"3c.iii",
"4d:iv",
"5e_v",
"6f-vi",
"7g)vii",
"8h_viii",
"9i_ix",
"10j_x",
"1.a.i",
)
for item in good:
self.match_re(this_prog, item, True)
bad = ("", "_", "-", "001", "A", "2ii", "1iiii", "25y", "1i ", " 2ii", "21av")
for item in bad:
self.match_re(this_prog, item, False)
def test_decompose(self):
"""Decompose numbering."""
answers = (
("a", "a", "", "", "", "", "", ""),
("A", "A", "", "", "", "", "", ""),
("001", "", "", "001", "", "", "", ""),
("001a", "", "", "001", "", "a", "", ""),
("001.a", "", "", "001", ".", "a", "", ""),
("3a.iii", "", "", "3", "", "a", ".", "iii"),
("PHC101-i", "", "PHC", "101", "-", "i", "", ""),
("FLW801", "", "FLW", "801", "", "", "", ""),
("LCL_101", "", "LCL_", "101", "", "", "", ""),
)
for expr, let, lead, number, p0, low, p1, rom in answers:
msg = 'Working with "{}"'.format(expr)
num = numbering.Numbering(expr)
self.assertEqual(let, num.letter, msg=msg)
self.assertEqual(lead, num.leader, msg=msg)
self.assertEqual(number, num.number, msg=msg)
self.assertEqual(p0, num.punc0, msg=msg)
self.assertEqual(low, num.lower, msg=msg)
self.assertEqual(p1, num.punc1, msg=msg)
self.assertEqual(rom, num.roman, msg=msg)
class NumberingIncrementTest(unittest.TestCase):
"""Test numbering increments."""
def test_naive_number_increment(self):
"""A naive test of incrementing."""
num = numbering.Numbering("001")
num.increment("^1")
self.assertEqual(str(num), "002")
num.increment("^2")
self.assertEqual(str(num), "004")
num.increment("^a")
self.assertEqual(str(num), "004a")
num.increment("^1")
self.assertEqual(str(num), "005")
num.increment("^1a")
self.assertEqual(str(num), "006a")
num.increment("^1a")
self.assertEqual(str(num), "007a")
num = numbering.Numbering("101")
num.increment("^1")
self.assertEqual(str(num), "102")
def compare_chains(self, chains):
"""Compare commands to answers in batches."""
for chain, answers in chains:
context = numbering.NumberingContext()
for cmd, answer in zip(chain, answers):
context.next(cmd)
num_now = context.numbers[-1].to_string()
msg = "Mistake on chain {}".format(chain)
self.assertEqual(num_now, answer, msg=msg)
def compare_chains_entirely(self, chains):
"""Compare chains in their entirety."""
for chain, answers in chains:
context = numbering.NumberingContext()
for cmd in chain:
context.next(cmd)
results = tuple(context.string_iter())
self.assertEqual(results, answers)
def test_increment_lookback(self):
"""Increment and lookback operators and their interplay."""
chains = (
(
("001", "^1", "^1", "^1", "<", "<", "^1a", "^a"),
("001", "002", "003", "004", "004", "004", "005a", "005b"),
),
(
("101", "^1", "^1", "^1", "<", "<", "^1a", "^a"),
("101", "102", "103", "104", "104", "104", "105a", "105b"),
),
(
("101", "^1a", "^a", "^1", "201", "<", "^a", "<"),
("101", "102a", "102b", "103", "201", "201", "201a", "201a"),
),
(("323a", "^a", "<2", "<2"), ("323a", "323b", "323a", "323b")),
(("323a", "^1a", "<2^a", "<2^a"), ("323a", "324a", "323b", "324b")),
(("001a", "<^i"), ("001a", "001a.i")),
(("711a.ii", "<^ai"), ("711a.ii", "711b.i")),
)
self.compare_chains(chains)
def test_letter_increment(self):
"""Increment upper and lower case letters."""
chains = (
(("A", "^A", "^A"), ("A", "B", "C")),
(("a", "^a", "^a"), ("a", "b", "c")),
)
self.compare_chains(chains)
def test_all_increment(self):
"""Increment with ^1ai."""
chains = ((("1", "^1ai"), ("1", "2a.i")),)
self.compare_chains(chains)
def test_sticky(self):
"""Sticky operator correctness."""
chains = (
(
("PHC101", "^1", "#LCL_301", "^1"),
("PHC101", "PHC102", "LCL_301", "PHC103"),
),
(("BF012", "^1", "#NS012", "^1"), ("BF012", "BF013", "NS012", "BF014")),
(("001a", "#099", "101a"), ("001a", "099", "101a")),
)
self.compare_chains(chains)
def test_blanks(self):
"""Blanks mixed in with commands."""
chains = (
(("", "001a", "", "^1"), ("", "001a", "", "002")),
(
("PHC_101", "^a", "", "#LCL_100", "", "^1"),
("PHC_101", "PHC_101a", "", "LCL_100", "", "PHC_102"),
),
)
self.compare_chains_entirely(chains)
def test_silent(self):
"""Silent numbers."""
chains = (
(("", "~000", "^1", "^1a"), ("", "", "001", "002a")),
(("~PHC100", "", "^1a", "^a"), ("", "", "PHC101a", "PHC101b")),
)
self.compare_chains_entirely(chains)
def test_resume(self):
"""Resume previous series."""
chains = (
(("PHC101", "a", "^a", "*^1a"), ("PHC101", "a", "b", "PHC102a")),
(("~000", "^1", "a", "^a", "*<"), ("", "001", "a", "b", "001")),
)
self.compare_chains_entirely(chains)
|
from base import InternedCollection
from base import InternedNamedCollection
from chat import ChatServer
from .discordchatchannel import DiscordChatChannel
from .discorduser import DiscordUser
class DiscordPrivateMessagingServer(ChatServer):
def __init__(self, chatService, client):
super(DiscordPrivateMessagingServer, self).__init__(chatService)
self.client = client
self._channels = InternedNamedCollection(lambda x: DiscordChatChannel(self.chatService, self, x), lambda x: x.id)
self._users = InternedCollection(lambda x: DiscordUser(self.chatService, self, x), lambda x: x.id)
# IServer
# Identity
@property
def id(self): return "PM"
@property
def globalId(self): return "Discord." + self.id
@property
def name(self): return "Private Messages"
# Server
# Channels
@property
def channelCount(self):
return len(self.channels)
@property
def channels(self):
self.updateChannels()
return self._channels
def getChannelById(self, id):
return self.channels.get(id)
def getChannelByName(self, name):
return self.channels.getByName(name)
# Users
@property
def userCount(self):
return len(self.users)
@property
def users(self):
return self._users
def getUserById(self, id):
return self.users.get(id)  # was self.usersById, which is never defined
@property
def localUser(self):
return self.users.intern(self.client.user)
# DiscordPrivateMessagingServer
# Internal
def updateChannels(self):
self._channels.update(self.client.private_channels)
|
from .api_resource import ApiResource
from .defaults import * # flake8: noqa
class ReportType(object):
IdentityReport = "identity"
DocumentReport = "document"
EmploymentReport = "employment"
EducationReport = "education"
NegativeMediaReport = "negative_media"
DirectorshipReport = "directorship"
CriminalRecordReport = "criminal_history"
PEPSanctionReport = "watchlist"
AntiMoneyLaunderingReport = "anti_money_laundering"
StreetLevelReport = "street_level"
SexOffenderReport = "sex_offender"
WatchlistReport = "watchlist"
NationalCriminalReport = "national_criminal"
EvictionReport = "eviction"
CountyCriminalReport = "county_criminal"
DrivingRecord = "Report driving_record"
class Reports(ApiResource):
def find(self, check_id, report_id):
return self.get("checks/{0}/reports/{1}".format(check_id, report_id))
def all(self, check_id):
return self.get("checks/{0}/reports".format(check_id))
|
"""An unofficial Python wrapper for coinzo exchange Rest API
.. moduleauthor:: tolgamorf
"""
name = "coinzo"
|
from lib.Evaluator_line import *
from lib.utils import *
import matplotlib.pyplot as plt
import os
import numpy as np
import scipy.io as sio
import matplotlib as mpl
from scipy import interpolate
import sys
mpl.rcParams.update({"font.size": 12})
plt.rcParams["font.family"] = "Times New Roman"
del mpl.font_manager.weight_dict["roman"]
mpl.font_manager._rebuild()
plt.figure(figsize=(5, 4))
save_path = 'result/'
TP_LSD = 'result/TP-LSD-wire-0.5.npz'
path = [TP_LSD]
label = ['TP-LSD']
color = ['slateblue']
i=0
for p, l in zip(path, label):
evaluator = Evaluator(None)
evaluator.PlotPrecisionRecallCurveFromNPZ(
p, # Object containing all bounding boxes (ground truths and detections)
method=MethodAveragePrecision.EveryPointInterpolation, # matches the official MATLAB code
showAP=True, # Show Average Precision in the title of the plot
showInterpolatedPrecision=False,
label=l, color=color[i]) # Plot the interpolated precision curve
# Get metrics with PASCAL VOC metrics
metricsPerClass = evaluator.results
print("Average precision values per class:\n")
# Loop through classes to obtain their metrics
i+= 1
for mc in metricsPerClass:
# Get metric values per each class
c = mc['class']
precision = mc['precision']
recall = mc['recall']
average_precision = mc['AP']
ipre = mc['interpolated precision']
irec = mc['interpolated recall']
# Print AP per class
print('%s: %f' % (c, average_precision))
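# Draw iso-F1 contours: solving F1 = 2PR/(P+R) for precision gives
# P = F1*R / (2R - F1), which is the curve plotted for each f_score below.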
f_scores = np.linspace(0.2, 0.8, num=8)
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color="green", alpha=0.3)
plt.annotate("f={0:0.1}".format(f_score), xy=(0.9, y[45] + 0.02), alpha=0.4)
plt.grid(True)
plt.axis([0.0, 1.0, 0.0, 1.0])
plt.xticks(np.arange(0, 1.0, step=0.1))
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.yticks(np.arange(0.0, 1.0, step=0.1))
plt.legend(loc=1)
#plt.title("PR Curve for Heatmap in Wireframe dataset")
plt.savefig(save_path + "/wire_lap.pdf", format="pdf", bbox_inches="tight")
plt.show()
|
import bokeh.io
import bokeh_garden
import bokeh.models.sources
from bokeh.models import Div
import logging
class LoggingBG(bokeh_garden.application.AppWidget, bokeh.models.layouts.Column):
def __init__(self, app, **kw):
self._app = app
self._records = ''
self._queue = []
self._logtext = Div(text='', style={"overflow": "auto", "width": "100%"}, **kw)
self._link = bokeh_garden.download.Download(
text="Download log",
filename="log.txt",
content=DownloadContent(self))
bt = bokeh.models.widgets.Button(label='Clear log')
bokeh.models.layouts.Column.__init__(self, self._logtext, self._link, bt, **kw)
bt.on_click(self.clear_log)
def _attach_document(self, doc):
res = super(LoggingBG, self)._attach_document(doc)
bokeh_garden.logging_handler.LoggingHandler.register_widget(self)
doc.add_periodic_callback(lambda : self._handle_queue(), 500)
return res
def _handle_queue(self):
queue = self._queue
self._queue = []
if queue:
self._records = self._records + "\n".join(queue) + "\n"
self._logtext.text = "<pre>%s</pre>" % self._records
def add_record(self, record):
self._queue.append(record)
def clear_log(self):
self._records = ''
self._logtext.text = ''
class DownloadContent(object):
def __init__(self, widget):
self.widget = widget
def __bytes__(self):
f = self.widget._records
return f.encode("utf-8")
|
# -*- coding: utf-8 -*-
from torch import optim
from torch import tensor,save
from torch import cuda
from torch.nn.utils import clip_grad_value_
from dataloader import read_data,DataLoader,load_init
from cdkt import CDKT
use_cuda = True
if use_cuda:
cuda.empty_cache()
""" training mode"""
results = []
f = 3
model = CDKT()
if use_cuda:
model = model.cuda()
optimizer = optim.Adam(model.parameters(),5*1e-4)
DL = DataLoader(read_data(f'/data/train.{f}.dat'),load_init())
for r in range(10): # 10 epochs
i = 0
for x,y in DL.samples(72):
X = tensor(x)
Y = tensor(y)
if use_cuda:
X = X.cuda()
Y = Y.cuda()
loss = model.forward(X,Y,True)
optimizer.zero_grad()
loss.backward()
clip_grad_value_(model.parameters(),10) # clip after backward, once gradients exist
optimizer.step()
i += 1
if i%100 == 0:
loss_val = loss.data.to('cpu').numpy()
print(f'{r:5d}--{i:5d}--{loss_val:.3f}')
loss_val = loss.data.to('cpu').numpy()
print(f'{r:5d}--{i:5d}--{loss_val:.3f}')
"""on testing """
results = []
DL = DataLoader(read_data(f'/data/test.{f}.dat'),load_init())
for x,y in DL.samples(100):
X = tensor(x)
Y = tensor(y)
if use_cuda:
X = X.cuda()
Y = Y.cuda()
acc = model.forward(X,Y,False)
results.append(acc.tolist())
total_acc = sum(results) / len(results)
print(total_acc)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-06-17 08:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_cowhite_blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='blogpost',
name='cover_image',
field=models.ImageField(blank=True, null=True, upload_to='blog_post_images'),
),
migrations.AlterField(
model_name='blogpost',
name='categories',
field=models.ManyToManyField(blank=True, to='django_cowhite_blog.Category'),
),
migrations.AlterField(
model_name='blogpost',
name='related_posts',
field=models.ManyToManyField(blank=True, to='django_cowhite_blog.BlogPost'),
),
migrations.AlterField(
model_name='blogpost',
name='seo_title',
field=models.CharField(blank=True, help_text='Optional. This title is inserted in HTML Title tag. If not filled, blog title will be used.', max_length=500),
),
migrations.AlterField(
model_name='blogpost',
name='slug',
field=models.SlugField(blank=True, max_length=500, unique=True),
),
migrations.AlterField(
model_name='blogpost',
name='status',
field=models.CharField(choices=[('D', 'Draft'), ('P', 'Published')], default='D', max_length=1),
),
]
|
"""Plotting utilities."""
import networkx as nx
import matplotlib.pyplot as plt
from .nodes import Node
def _get_node_colors(nodes, path, colors):
"""For each node, assign color based on membership in path."""
node_colors = []
for x in nodes:
if x in path:
node_colors.append(colors['active'])
else:
node_colors.append(colors['inactive_node'])
return node_colors
def _get_edge_colors(G, path, colors):
"""For each edge, assign color based on membership in path_edges."""
path_edges = [(path[i], path[i + 1]) for i in range(len(path) - 1)]
edge_colors = []
for x in G.edges:
if x in path_edges:
edge_colors.append(colors['active'])
else:
edge_colors.append(colors['inactive_edge'])
return edge_colors
def _graphviz_layout(G):
"""
Hack because even though FakeDiGraph is subclassed from nx.DiGraph,
nx.nx_pydot.graphviz_layout() doesn't like it.
"""
if type(G) != nx.classes.digraph.DiGraph:
G = nx.from_dict_of_dicts(nx.to_dict_of_dicts(G))
return nx.nx_pydot.graphviz_layout(G, prog='dot')
def _draw_node_shapes(G, node_map, path, pos, colors, node_size):
"""
Hack to draw node shapes because nx.draw() does not accept a list for
`node_shape`.
"""
for node_type in Node.get_subclasses():
node_list = {ix for ix, node in node_map.items()
if isinstance(node, node_type)}
node_colors = _get_node_colors(node_list, path, colors)
nx.draw_networkx_nodes(
G,
pos,
node_shape=node_type.plot_shape,
node_size=node_size,
node_color=node_colors,
nodelist=node_list,
)
# TODO: add legend for active vs. inactive node colors
def plot_dag(G, title=None, show_ids=False, path=None, pos=None, figsize=(20, 24), fpath=None):
"""
Generate Matplotlib rendering of graph structure.
Args:
G: networkx DiGraph whose nodes carry a 'node' data attribute
title: plot title
show_ids: if True, prefix node labels with node ID
path: array of node indices representing a path through the graph
pos: map of node indices to (x, y) coordinates for the plot
figsize: figure size
fpath: file path to save plot
"""
colors = {
'active': 'lightskyblue',
'inactive_node': 'thistle',
'inactive_edge': 'black',
}
path = path or []
plt.figure(figsize=figsize)
plt.title(title)
pos = pos or _graphviz_layout(G)
node_map = {ix: data['node'] for ix, data in G.nodes(data=True)}
if show_ids:
node_labels = {ix: f'({node.id}) {node.label}' for ix,
node in node_map.items()}
else:
node_labels = {ix: node.label for ix, node in node_map.items()}
edge_colors = _get_edge_colors(G, path, colors)
node_size = 4000
plt_config = {
'pos': pos,
'width': 1.5,
'font_size': 10,
'font_color': 'black',
'edge_color': edge_colors,
# making invisible nodes so edges draw correctly,
# then nodes are drawn later
'node_size': node_size + 200,
'node_color': 'white',
'node_shape': 's',
'labels': node_labels,
# https://matplotlib.org/stable/gallery/userdemo/connectionstyle_demo.html
'connectionstyle': 'arc3,rad=0.',
}
nx.draw(G, **plt_config)
_draw_node_shapes(G, node_map, path, pos, colors, node_size)
edge_labels = {
'false': {k: v for k, v in nx.get_edge_attributes(G, 'label').items() if not v},
'true': {k: v for k, v in nx.get_edge_attributes(G, 'label').items() if v},
}
nx.draw_networkx_edge_labels(
G,
pos,
edge_labels=edge_labels['false'],
font_color='red',
)
nx.draw_networkx_edge_labels(
G,
pos,
edge_labels=edge_labels['true'],
font_color='green',
)
if fpath:
plt.savefig(fpath)
else:
plt.show()
|
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""risto.py -- Robot Framework's Historical Reporting Tool
Version: <VERSION>
Usage: risto.py options input_files
or: risto.py options1 --- options2 --- optionsN --- input files
or: risto.py --argumentfile path
risto.py plots graphs about test execution history based on statistics
read from Robot Framework output files. Actual drawing is handled by
the Matplotlib tool, which must be installed separately. More information
about it, including installation instructions, can be found at
http://matplotlib.sourceforge.net.
By default risto.py draws total, passed and failed graphs for critical
tests and all tests, but it is possible to omit some of these graphs
and also to add graphs by tags. Names of test rounds that are shown on
the x-axis are, by default, got from the paths to input files.
Alternatively, names can be got from the metadata of the top level
test suite (see Robot Framework's '--metadata' option for more details).
The graph is saved into a file specified with '--output' option, and the
output format is got from the file extension. Supported formats depend on the
installed Matplotlib back-ends, but at least PNG ought to be always available.
If the output file is omitted, the graph is opened into Matplotlib's image
viewer (which requires Matplotlib to be installed with some graphical
front-end).
It is possible to draw multiple graphs with different options at once. This
is done by separating different option groups with three or more hyphens
('---'). Note that in this case also paths to input files need to be
separated from last options similarly.
Instead of giving all options from the command line, it is possible to read
them from a file specified with '--argument' option. In an argument file
options and their possible argument are listed one per line, and option
groups are separated with lines of three or more hyphens. Empty lines and
lines starting with a hash mark ('#') are ignored.
Options:
-C --nocritical Do not plot graphs for critical tests.
-A --noall Do not plot graphs for all tests.
-T --nototals Do not plot total graphs.
-P --nopassed Do not plot passed graphs.
-F --nofailed Do not plot failed graphs.
-t --tag name * Add graphs for these tags. Name can contain '*' and
'?' as wildcards.
-o --output path Path to the image file to create. If not given, the
image is opened into Matplotlib's image viewer.
-i --title title Title of the graph. Underscores in the given title
are converted to spaces. By default there is no
title.
-w --width pixels Width of the image in pixels. Default is 800.
-h --height pixels Height of the image in pixels. Default is 400.
-f --font size Font size used for legends and labels. Default is 8.
-m --marker size Size of markers used with tag graphs. Default is 5.
-x --xticks num Maximum number of ticks in x-axis. Default is 15.
-n --namemeta name Name of the metadata of the top level test suite
where to get name of the test round. By default names
are got from paths to input files.
--- Used to group options when creating multiple images
at once.
--argumentfile path Read arguments from the specified file.
--verbose Verbose output.
--help Print this help.
--version Print version information.
Examples:
risto.py --output history.png output1.xml output2.xml output3.xml
risto.py --title My_Report --noall --namemeta Date --output out.png *.xml
risto.py --nopassed --tag smoke --tag iter-* results/*/output.xml
risto.py -CAP -t tag1 --- -CAP -t tag2 --- -CAP -t tag3 --- outputs/*.xml
risto.py --argumentfile arguments.txt
====[arguments.txt]===================
--title Overview
--output overview.png
----------------------
--nocritical
--noall
--nopassed
--tag smoke1
--title Smoke Tests
--output smoke.png
----------------------
path/to/*.xml
======================================
"""
from __future__ import with_statement
import os.path
import sys
import glob
try:
from matplotlib import pylab
from matplotlib.lines import Line2D
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import get_current_fig_manager
except ImportError:
raise ImportError('Could not import Matplotlib modules. Install it from '
'http://matplotlib.sourceforge.net/')
try:
from robot import utils
from robot.errors import DataError, Information
except ImportError:
raise ImportError('Could not import Robot Framework modules. '
'Make sure you have Robot Framework installed.')
__version__ = '1.0.2'
class AllStatistics(object):
def __init__(self, paths, namemeta=None, verbose=False):
self._stats = self._get_stats(paths, namemeta, verbose)
self._tags = self._get_tags()
def _get_stats(self, paths, namemeta, verbose):
paths = self._glob_paths(paths)
if namemeta:
return [Statistics(path, namemeta=namemeta, verbose=verbose)
for path in paths]
return [Statistics(path, name, verbose=verbose)
for path, name in zip(paths, self._get_names(paths))]
def _glob_paths(self, orig):
paths = []
for path in orig:
paths.extend(glob.glob(path))
if not paths:
raise DataError("No valid paths given.")
return paths
def _get_names(self, paths):
paths = [os.path.splitext(os.path.abspath(p))[0] for p in paths]
path_tokens = [p.replace('\\', '/').split('/') for p in paths]
min_tokens = min(len(t) for t in path_tokens)
index = -1
while self._tokens_are_same_at_index(path_tokens, index):
index -= 1
if abs(index) > min_tokens:
index = -1
break
names = [tokens[index] for tokens in path_tokens]
return [utils.printable_name(n, code_style=True) for n in names]
def _tokens_are_same_at_index(self, token_list, index):
first = token_list[0][index]
for tokens in token_list[1:]:
if first != tokens[index]:
return False
return len(token_list) > 1
def _get_tags(self):
stats = {}
for statistics in self._stats:
stats.update(statistics.tags)
return [stat.name for stat in sorted(stats.values())]
def plot(self, plotter):
plotter.set_axis(self._stats)
plotter.critical_tests([s.critical_tests for s in self._stats])
plotter.all_tests([s.all_tests for s in self._stats])
for tag in self._tags:
plotter.tag([s[tag] for s in self._stats])
class Statistics(object):
def __init__(self, path, name=None, namemeta=None, verbose=False):
if verbose:
print path
root = utils.ET.ElementTree(file=path).getroot()
self.name = self._get_name(name, namemeta, root)
stats = root.find('statistics')
crit_node, all_node = list(stats.find('total'))
self.critical_tests = Stat(crit_node)
self.all_tests = Stat(all_node)
self.tags = dict((n.text, Stat(n)) for n in stats.find('tag'))
def _get_name(self, name, namemeta, root):
if namemeta is None:
if name is None:
raise TypeError("Either 'name' or 'namemeta' must be given")
return name
metadata = root.find('suite').find('metadata')
if metadata:
for item in metadata:
if item.get('name','').lower() == namemeta.lower():
return item.text
raise DataError("No metadata matching '%s' found" % namemeta)
def __getitem__(self, name):
try:
return self.tags[name]
except KeyError:
return EmptyStat(name)
class Stat(object):
def __init__(self, node):
self.name = node.text
self.passed = int(node.get('pass'))
self.failed = int(node.get('fail'))
self.total = self.passed + self.failed
self.doc = node.get('doc', '')
info = node.get('info', '')
self.critical = info == 'critical'
self.non_critical = info == 'non-critical'
self.combined = info == 'combined'
def __cmp__(self, other):
if self.critical != other.critical:
return self.critical is True and -1 or 1
if self.non_critical != other.non_critical:
return self.non_critical is True and -1 or 1
if self.combined != other.combined:
return self.combined is True and -1 or 1
return cmp(self.name, other.name)
class EmptyStat(Stat):
def __init__(self, name):
self.name = name
self.passed = self.failed = self.total = 0
self.doc = ''
self.critical = self.non_critical = self.combined = False
class Legend(Line2D):
def __init__(self, **attrs):
styles = {'color': '0.5', 'linestyle': '-', 'linewidth': 1}
styles.update(attrs)
Line2D.__init__(self, [], [], **styles)
class Plotter(object):
_total_color = 'blue'
_pass_color = 'green'
_fail_color = 'red'
_background_color = '0.8'
_xtick_rotation = 20
_default_width = 800
_default_height = 400
_default_font = 8
_default_marker = 5
_default_xticks = 15
_dpi = 100
_marker_symbols = 'o s D ^ v < > d p | + x 1 2 3 4 . ,'.split()
def __init__(self, tags=None, critical=True, all=True, totals=True,
passed=True, failed=True, width=None, height=None, font=None,
marker=None, xticks=None):
self._xtick_limit, self._font_size, self._marker_size, width, height \
= self._get_sizes(xticks, font, marker, width, height)
self._figure = pylab.figure(figsize=(width, height))
self._axes = self._figure.add_axes([0.05, 0.15, 0.65, 0.70])
# axes2 is used only for getting ytick labels also on right side
self._axes2 = self._axes.twinx()
self._axes2.set_xticklabels([], visible=False)
self._tags = tags or []
self._critical = critical
self._all = all
self._totals = totals
self._passed = passed
self._failed = failed
self._legends = []
self._markers = iter(self._marker_symbols)
def _get_sizes(self, xticks, font, marker, width, height):
xticks = xticks or self._default_xticks
font = font or self._default_font
marker = marker or self._default_marker
width = width or self._default_width
height = height or self._default_height
try:
return (int(xticks), int(font), int(marker),
float(width)/self._dpi, float(height)/self._dpi)
except ValueError:
raise DataError('Width, height, font and xticks must be numbers.')
def set_axis(self, stats):
slen = len(stats)
self._indexes = range(slen)
self._xticks = self._get_xticks(slen, self._xtick_limit)
self._axes.set_xticks(self._xticks)
self._axes.set_xticklabels([stats[i].name for i in self._xticks],
rotation=self._xtick_rotation,
size=self._font_size)
self._scale = (slen-1, max(s.all_tests.total for s in stats))
def _get_xticks(self, slen, limit):
if slen <= limit:
return range(slen)
interval, extra = divmod(slen-1, limit-1) # 1 interval less than ticks
if interval < 2:
interval = 2
limit, extra = divmod(slen-1, interval)
limit += 1
return [ self._get_index(i, interval, extra) for i in range(limit) ]
def _get_index(self, count, interval, extra):
if count < extra:
extra = count
return count * interval + extra
def critical_tests(self, stats):
if self._critical:
line = {'linestyle': '--', 'linewidth': 1}
self._plot(self._indexes, stats, **line)
self._legends.append(Legend(label='critical tests', **line))
def all_tests(self, stats):
if self._all:
line = {'linestyle': ':', 'linewidth': 1}
self._plot(self._indexes, stats, **line)
self._legends.append(Legend(label='all tests', **line))
def tag(self, stats):
if utils.MultiMatcher(self._tags).match(stats[0].name):
line = {'linestyle': '-', 'linewidth': 0.3}
mark = {'marker': self._get_marker(),
'markersize': self._marker_size}
self._plot(self._indexes, stats, **line)
markers = [stats[index] for index in self._xticks]
self._plot(self._xticks, markers, linestyle='', **mark)
line.update(mark)
label = self._get_tag_label(stats)
self._legends.append(Legend(label=label, **line))
def _get_tag_label(self, stats):
label = stats[0].name
# need to go through all stats because first can be EmptyStat
for stat in stats:
if stat.critical:
return label + ' (critical)'
if stat.non_critical:
return label + ' (non-critical)'
return label
def _get_marker(self):
try:
return self._markers.next()
except StopIteration:
return ''
def _plot(self, xaxis, stats, **attrs):
total, passed, failed \
= zip(*[(s.total, s.passed, s.failed) for s in stats])
if self._totals:
self._axes.plot(xaxis, total, color=self._total_color, **attrs)
if self._passed:
self._axes.plot(xaxis, passed, color=self._pass_color, **attrs)
if self._failed:
self._axes.plot(xaxis, failed, color=self._fail_color, **attrs)
def draw(self, output=None, title=None):
self._set_scale(self._axes)
self._set_scale(self._axes2)
self._set_legends(self._legends[:])
if title:
title = title.replace('_', ' ')
self._axes.set_title(title, fontsize=self._font_size*1.8)
if output:
self._figure.savefig(output, facecolor=self._background_color,
dpi=self._dpi)
else:
if not hasattr(self._figure, 'show'):
raise DataError('Could not find a graphical front-end for '
'Matplotlib.')
self._figure.show()
if title:
figman = get_current_fig_manager()
figman.set_window_title(title)
def _set_scale(self, axes):
width, height = self._scale
axes.axis([-width*0.01, width*1.01, -height*0.04, height*1.04])
def _set_legends(self, legends):
legends.insert(0, Legend(label='Styles:', linestyle=''))
legends.append(Legend(label='', linestyle=''))
legends.append(Legend(label='Colors:', linestyle=''))
if self._totals:
legends.append(Legend(label='total', color=self._total_color))
if self._passed:
legends.append(Legend(label='passed', color=self._pass_color))
if self._failed:
legends.append(Legend(label='failed', color=self._fail_color))
labels = [l.get_label() for l in legends]
self._figure.legend(legends, labels, loc='center right',
numpoints=3, borderpad=0.1,
prop=FontProperties(size=self._font_size))
class Ristopy(object):
def __init__(self):
self._arg_parser = utils.ArgumentParser(__doc__, version=__version__)
def main(self, args):
args = self._process_possible_argument_file(args)
try:
opt_groups, paths = self._split_to_option_groups_and_paths(args)
except ValueError:
viewer_open = self._plot_one_graph(args)
else:
viewer_open = self._plot_multiple_graphs(opt_groups, paths)
if viewer_open:
try:
raw_input('Press enter to exit.\n')
except (EOFError, KeyboardInterrupt):
pass
pylab.close('all')
def _plot_one_graph(self, args):
opts, paths = self._arg_parser.parse_args(args)
stats = AllStatistics(paths, opts['namemeta'], opts['verbose'])
output = self._plot(stats, opts)
return output is None
def _plot_multiple_graphs(self, opt_groups, paths):
viewer_open = False
stats = AllStatistics(paths, opt_groups[0]['namemeta'],
opt_groups[0]['verbose'])
for opts in opt_groups:
output = self._plot(stats, opts)
viewer_open = output is None or viewer_open
return viewer_open
def _plot(self, stats, opts):
plotter = Plotter(opts['tag'], not opts['nocritical'],
not opts['noall'], not opts['nototals'],
not opts['nopassed'], not opts['nofailed'],
opts['width'], opts['height'], opts['font'],
opts['marker'], opts['xticks'])
stats.plot(plotter)
plotter.draw(opts['output'], opts['title'])
if opts['output']:
print os.path.abspath(opts['output'])
return opts['output']
def _process_possible_argument_file(self, args):
try:
index = args.index('--argumentfile')
except ValueError:
return args
path = args[index+1]
try:
lines = open(path).readlines()
except IOError:
raise DataError("Invalid argument file '%s'" % path)
fargs = []
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
elif line.startswith('-'):
fargs.extend(line.split(' ', 1))
else:
fargs.append(line)
args[index:index+2] = fargs
return args
def _split_to_option_groups_and_paths(self, args):
opt_groups = []
current = []
for arg in args:
if arg.replace('-', '') == '' and len(arg) >= 3:
opts = self._arg_parser.parse_args(current)[0]
opt_groups.append(opts)
current = []
else:
current.append(arg)
if opt_groups:
return opt_groups, current
raise ValueError("Nothing to split")
if __name__ == '__main__':
try:
Ristopy().main(sys.argv[1:])
except Information, msg:
print str(msg)
except DataError, err:
print '%s\n\nTry --help for usage information.' % err
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys, os, platform, argparse
from datetime import datetime
from util import util
# command-line option handling
parser = argparse.ArgumentParser (description=u'Imagine interface - MQTT broker')
group = parser.add_argument_group ('MQTT', 'Mosquitto connection options')
group.add_argument ('--broker', '-b', default='127.0.0.1', help=u'server address - localhost by default')
group.add_argument ('--port', '-p', default=1883, type=int, help=u'listening port - 1883 by default')
group.add_argument ('--id', '-i', default='smart-', help=u'client id prefix - smart- by default')
group.add_argument ('--login', '-l', default='imagine', help=u'user login')
group = parser.add_argument_group ('Arduino', 'Arduino board connection options')
group.add_argument ('--serie', '-s', default='COM4', help=u'serial port - COM4 by default')
group.add_argument ('--vitesse', '-v', default=9600, type=int, help=u'serial line speed - 9600 baud by default')
group = parser.add_argument_group ('Debug', 'Debug options')
group.add_argument ('--trace', '-t', action='store_true', help=u'enable MQTT traces')
group.add_argument ('--quiet', '-q', action='store_true', help=u'disable console traces')
args = parser.parse_args ()
# log handling
logger = util.init_log (args.login, args.quiet)
# retrieve the password for the MQTT broker connection
try:
mqtt_passwd = os.environ['MQTT_PASSWD']
except KeyError as e:
    logger.error (u'MQTT_PASSWD environment variable is missing')
exit (1)
# import third-party libraries
try:
import serial
import paho.mqtt.client as mqtt
except ImportError as e:
logger.critical (e)
exit (1)
# MQTT Mosquitto callback definitions ('userdata' is the logger)
def on_connect (client, userdata, flags, rc):
    if rc != 0:
        if rc == 1:
            userdata.error (u'MQTT connection: incorrect protocol')
        elif rc == 2:
            userdata.error (u'MQTT connection: invalid client identifier')
        elif rc == 3:
            userdata.critical (u'MQTT connection: server unavailable')
        elif rc == 4:
            userdata.error (u'MQTT connection: bad username or password')
        elif rc == 5:
            userdata.error (u'MQTT connection: not authorized')
        else:
            userdata.critical (u'MQTT connection: error {code}'.format(code=str(rc)))
        return  # do not report success after a failed connection
    userdata.info (u'MQTT connection successful')
def on_publish (client, userdata, mid):
    userdata.info (u'MQTT: msg {0} published'.format(str(mid)))
def on_disconnect (client, userdata, rc):
    userdata.info (u'MQTT - disconnect: {code}'.format (code=str(rc)))
def on_log (client, userdata, level, buf):
userdata.info ('MQTT log: {l}-{b}'.format (l=level, b=buf))
# OS check
platform_name = platform.system ()
if (platform_name != 'Windows') and (platform_name != 'Linux'):
    logger.critical (u'unsupported system: {0}'.format (platform_name))
exit (1)
logger.info (u'Starting - MQTT: {0}-{1}-{2} Arduino: {3}-{4}'.format (
args.broker, args.port, args.login, args.serie, args.vitesse))
# create the Serial object
ser = serial.Serial ()
ser.port = args.serie
ser.baudrate = args.vitesse
ser.timeout = 10
# open the serial port for Arduino communication
try:
ser.open ()
except serial.SerialException as e:
logger.critical (e)
exit (1)
# create the MQTT client, set the 'will' and the connection credentials
client = mqtt.Client (client_id=args.id+args.login, clean_session=True, userdata=logger)
client.will_set (topic='monitor/imagine', payload='hs', qos=0, retain=True)
client.username_pw_set (username=args.login, password=mqtt_passwd)
# install the callbacks
client.on_connect = on_connect
client.on_publish = on_publish
client.on_disconnect = on_disconnect
if args.trace is True:
client.on_log = on_log
# attempt to connect to the MQTT broker
try:
client.connect (host=args.broker, port=args.port, keepalive=60)
except IOError as e:
logger.critical (e)
exit (1)
# start the MQTT event loop
client.loop_start ()
(rc, mid) = client.publish (topic='monitor/imagine', payload='in', qos=0, retain=True)
if rc != mqtt.MQTT_ERR_SUCCESS:
    logger.critical (u'MQTT publish error: check connection settings')
sys.exit (1)
# main loop processing messages from the Arduino serial line
try:
ser.reset_input_buffer ()
while True:
msg = ser.readline ()
if not msg:
            logger.warning (u'serial line read timed out')
else:
            data = msg.decode ().split (';')  # readline() returns bytes under Python 3
print (data)
ts = datetime.fromtimestamp(float(data[0])).strftime('%Y-%m-%d %H:%M:%S')
print (ts)
temp = data[2].split (':')
            logger.info (u'temperature: {t}'.format (t=float(temp[1])))
(rc, mid) = client.publish (topic=data[1]+'/temp', payload=float(temp[1]), qos=0, retain=False)
if rc != mqtt.MQTT_ERR_SUCCESS:
                logger.critical (u'MQTT publish error: check connection settings')
sys.exit (1)
print (rc, mid)
lum = data[3].split(':')
            logger.info (u'luminosity: {l}'.format (l=float(lum[1])))
(rc, mid) = client.publish (topic=data[1]+'/lum', payload=int(lum[1]), qos=0, retain=False)
if rc != mqtt.MQTT_ERR_SUCCESS:
                logger.critical (u'MQTT publish error: check connection settings')
sys.exit (1)
print (rc, mid)
except KeyboardInterrupt as e:
ser.close ()
client.disconnect ()
    logger.info (u'program stopped')
|
import random
def random_hash(length=64) -> str:
return "".join(random.choice("0123456789abcdef") for i in range(length))
|
"""Holds documentation for the bot in dict form."""
def help_book(p):
return [
# Table of Contents
{
"title": "Table of Contents",
"description": f"Navigate between pages with the reaction buttons",
"1. Vibrant Tutorial": ["Learn how to use the bot and its commands"],
"2. Theme Tutorial": ["Learn how to use themes"],
"3. General Commands": ["Shows a list of general commands"],
"4. Color Commands": ["Shows a list of color related commands"],
"5. Theme Commands": ["Shows a list of theme related commands"],
"6. Macros": ["Shows a list of macros the bot has"],
"7. Alias Dictionary": ["All of the aliases that commands have to make input easier"],
"-----------------------------": ["[Vote for Vibrant](https://top.gg/bot/821278454711320576/vote) | [Support Server](https://discord.gg/rhvyup5) | [Github](https://github.com/Gumbachi/Vibrant)"]
},
# Tutorial
{
"title": "Vibrant Tutorial",
"description": " ",
"Manage Colors": [
f"1. Type `{p}add #ff0000 My Color` to add a color",
f"2. Type `{p}colors` to view your colors",
f"3. Type `{p}rename 1 | Blue` to rename the color",
f"4. Type `{p}recolor 1 | #0000ff` to change the look of a color"
],
"Assign Colors": [
f"1. Type `{p}colorme 1` to color yourself",
f"2. Type `{p}color @user 1` to color someone else",
f"3. Type `{p}splash` to color everyone without a color"
],
"Remove Colors": [
f"1. Type `{p}remove 1` to remove the first color",
f"2. Type `{p}clear_colors` to remove all colors"
],
"Color New People": [
f"Welcome people by typing `{p}welcome` in the desired channel. Typing the command again will turn welcoming off",
"The \"welcome channel\" is where the bot will send Hello/Goodbye messages",
"With a welcome channel set the bot will randomly color any new member"
],
"Themes": [f"Click the ➡️ to learn about themes"]
},
# Theme Tutorial
{
"title": "Theme Tutorial",
"description": "Themes work like save states. They record your colors and the members they are applied to so you can save your setup and use different ones without having to rebuild them",
"Using Presets": [
f"1. Type `{p}imports` to see available presets",
f"2. Type `{p}import vibrant` to import a theme",
f"3. Type `{p}load vibrant` to set your colors",
f"4. Type `{p}splash` to apply your colors to the server"
],
"Custom Themes": [
f"1. Type `{p}save My Theme` to save your current color setup",
f"2. Type `{p}themes` to view all of your themes",
f"3. Type `{p}theme.rename My Theme | Custom Theme` to rename a theme"
],
"Manage Themes": [
f"1. Type `{p}overwrite 1` to replace a theme with your current setup",
f"2. Type `{p}erase 1` to remove one of your themes"
]
},
# Commands
{
"title": "Commands",
"description": f"`{p}command <argument>`.\n`*` indicates an optional argument\n`<color>` can be a name or index",
"General Commands": [
f"`{p}prefix <new prefix*>` -- Changes the prefix the bot uses",
f"`{p}welcome` -- Toggles a channel for greeting users"
],
"Color Management Commands": [
f"`{p}colors` -- Shows available colors",
f"`{p}add <hexcode> <name*>` -- Adds a color",
f"`{p}remove <color>` -- Removes a color",
f"`{p}rename <color>|<name>` -- Changes a color's name",
f"`{p}recolor <color>|<hexcode>` -- Changes a color's looks",
f"`{p}clear_colors` -- Clears all of the colors"
],
"Color Assignment Commands": [
f"`{p}colorme <color*>` -- Assigns you your desired color or random",
f"`{p}color <user> <color*>` -- Gives a specific user a color",
f"`{p}uncolorme` -- Removes your color if you have one",
f"`{p}splash <color*>` -- Gives a color to everyone in the server without one",
f"`{p}unsplash` -- Uncolors everyone"
]
},
# Theme Commands
{
"title": "Theme Commands",
"description": f"{p}command <argument>.\n`*` indicates an optional argument\n`<theme>` can be a name or index",
"Theme Commands": [
f"`{p}themes` -- Draws a pretty list of themes",
f"`{p}imports` - - Shows available presets"
],
"Theme Management Commands": [
f"`{p}save <name*>` -- Saves your theme",
f"`{p}theme.remove <theme>` -- Deletes a theme",
f"`{p}overwrite <theme>` -- Replaces a theme",
f"`{p}load <theme>` -- Applies a saved theme to your server",
f"`{p}theme.rename <theme>|<name>` -- Changes a theme's name",
f"`{p}import <name>` -- Adds a preset as a theme"
]
},
# Macros
{
"title": "Macros",
"description": f"Macros are a way to execute multiple commands with one single command.\nThey make things clean and convenient",
"Import, Load, Splash, Overwrite": [
f"`{p}ilso <name>` -- Imports a preset, Loads that preset, Splashes everyone with a random color, Overwrites the imported theme with member colors"
],
"Load, Splash, Overwrite": [f"`{p}lso <name>` -- Same as ILSO but with an already saved theme"],
"Add, Colorme": [f"`{p}acm <hexcode> <name*>` -- Adds a color and then applies it to you"],
"Resplash": [f"`{p}resplash` -- Uncolors everyone and then splashes the server"],
"Suggestions": ["If you have suggestions for macros you would like then please let me know in the support server"]
},
# Alias Dictionary
{
"title": "Command Aliases",
"description": "Most commands have shorthand aliases\nAny commands with `color` in the name have an alias with `colour`",
"Color Commands": [
f"`{p}colors` -- `{p}c`",
f"`{p}color` -- `{p}cu`",
f"`{p}colorme` -- `{p}me`, `{p}cm`",
f"`{p}uncolorme` -- `{p}ucm`",
f"`{p}add` -- `{p}new`",
f"`{p}remove` -- `{p}delete`",
f"`{p}rename` -- `{p}rn`",
f"`{p}recolor` -- `{p}rc`"
],
"Theme Commands": [
f"`{p}themes` -- `{p}t`, `{p}temes`",
f"`{p}imports` -- `{p}presets`",
f"`{p}load` -- `{p}theme.load`",
f"`{p}save` -- `{p}saveas`, `{p}theme.save`",
f"`{p}erase` -- `{p}t.r`, `{p}theme.remove`",
f"`{p}overwrite` -- `{p}theme.overwrite`",
f"`{p}trn` - - `{p}t.rn`, `{p}theme.rename`"
],
"Other Commands": [
f"`{p}prefix` -- `{p}vibrantprefix`"
]
}
]
change_log = {
"0.1": {
"title": "Vibrant 0.1",
"description": " ",
"@Vibrant for help": "Users can mention the bot to give info about help",
"Changeable Prefixes": "Users can change prefix with prefix command to avoid prefix conflict with other bots",
"Added patch notes": "you can see what I'm doing and I can see what I've done",
"Color adding prompts removed": "They no longer show up",
"Changed some help command things": "Made it so they show default prefixes"
},
"0.2": {
"title": "Vibrant 0.2",
"description": " ",
"Optimization": "Made many functions like prefix run faster",
"Optimized Data storage": "improved function input to be more specific to make it faster",
"Optimized splash command": "Splash runs faster due to better math",
},
"0.3": {
"title": "Vibrant 0.3",
"description": " ",
"Overhauled help command": "Gave help a bunch of useful stuff like setup and individual command help",
"`clear_all_colors` and `set` changed": "Commands now send a backup just incase",
"Changed data command name": "Changed it to channels since it only shows channel data",
"Added a force prefix change": "can use vibrantprefix command to avoid overlap"
},
"0.4": {
"title": "Vibrant 0.4",
"description": " ",
"Aliased Commands": "Gave a bunch of commands alternate names like add/remove can be create/delete if you want",
"Removed redundant commands": "removed redundant commands because I figured out how to alias commands",
"Better Error Handling": "ignores things like command not found and has specific error handling for add command",
},
"0.5": {
"title": "Vibrant 0.5",
"description": " ",
"Black color now works": "black no longer shows up as transparent because hex value is auto converted to #000001",
"Added more presets": "presets work differently and thus there are many more like Bootstrap, Metro, and Icecream",
"Better Drawing": "Made drawing images for commands like colors look better and more open",
"Preview command": "new command to show preset colors"
},
"0.6": {
"title": "Vibrant 0.6",
"description": " ",
"Changed the look of channels and expose": "Commands are simpler and easier to read",
"DM Commands": "Some commands like help and howdy work in a DM channel now",
"Less verbose": "Some commands are less verbose to clear up clutter",
"More error handling": "Some more errors are handled",
"Destroyed some bugs": "General stuff like me being stupid"
},
"0.7": {
"title": "Vibrant 0.7",
"description": " ",
"The return of reaction based UX": "Reaction based UX is back and works this time",
"updated pfp algorithm": "Algorithm is more accurate now",
"DBL integration": "better integration with the API",
"Hyperlinks": "inline links for help to clean things up"
},
"0.8": {
"title": "Vibrant 0.8",
"description": " ",
"Themes(alpha)": "Themes not ready yet but kind of work",
"Housekeeping": "Cleaned up a bunch of things that weren't necessary",
"Added some functions to classes": "less imports, better looking",
"Code documentation": "I can see what everything does easier. so can you if you care",
"Splash changed": "Splash command now colors in an even distribution of colors",
"Patchnotes": "Patchnotes doesnt bypass disabled channels now",
"Help works": "help wont give setup every time",
},
"0.9": {
"title": "Vibrant 0.9",
"description": " ",
"Themes": "Themes allow you to save presets which allows switching the feel of the server",
"Serialization": "Custom serialization per object to allow for the use of sets",
"The use of python sets": "No more duplicate role members",
"Clearing colors faster": "Fixed a bug that massively slowed down clearing colors",
"Smarter updates": "The database is updated less but at better times to save your time",
"Changed some functions": "Some functions within the code are now faster and smarter",
},
"1.0": {
"title": "Vibrant 1.0",
"description": " ",
"Themes Documentation": "Get help with using themes",
"Segmented help": "More help categories",
"Importing presets": "Can import named presets as themes",
},
"1.1": {
"title": "Vibrant 1.1",
"description": " ",
"Housekeeping": "New techniques for cleaner/faster code",
"Exceptions": "New way to handle errors should be more descriptive",
"Less prone to breaking": "Stricter error handling so less confusing errors",
"Fixed major bug with missing guild problems": "Should handle data better"
},
"1.2": {
"title": "Vibrant 1.2",
"description": " ",
"Overlapping data": "Member data should be handled properly due to a fixed constructor error",
"Unsplash is faster": "unsplash just deletes roles which should make it faster",
"Help update": "Help command is simplified and now works like a book with buttons",
"Overwrite simpler": "Overwrite just overwrite a theme now without changing name",
"imports command": "You can now view all presets",
"Pages for everything": "Everything that can be paginated is",
"Better UX": "Asks for hexcode and then colors the user you wanted"
},
"1.3": {
"title": "Vibrant 1.3",
"description": " ",
"Smarter data handling": "Tries to fill in gaps with the data before reporting error",
"Paginated Images changed": "No more double images its just one now for simplicity",
"Back to PNG": "Apparently WebP doesn't work on iOS :(",
"Visual Changes": "Switched a lot of responses to embeds"
},
"1.4": {
"title": "Vibrant 1.4",
"description": " ",
"Role positioning": "Creates roles under the bot role instead of at the bottom",
"Theme limit": "Changed default limit to 5"
},
"2.0": {
"title": "Vibrant 2.0(rewrite)",
"description": "Same bot but written better",
"No more channel disabling": "got rid of a useless feature",
"Better databasing": "better database interaction",
"less code": "less code = better",
"less data": "bot stores less data",
},
"2.1": {
"title": "Vibrant 2.1",
"description": "Added Macros",
"Macros": "Run multiple commands in common patterns with one command",
"Caught some errors": "Caught 50001 and 50013 API Errors"
}
}
|
from django.conf.urls.defaults import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'pyclass.views.index'),
url(r'^profile/', include('pyclass.profiles.urls')),
url(r'^todo/', include('pyclass.todo.urls')),
url(r'^comments/', include('django.contrib.comments.urls')),
# Enables the default backend for django-registration.
# TODO Set up (or remove) Site and correct e-mail and password before using
# url(r'^accounts/', include('registration.backends.default.urls')),
# Url for search form in navbar
url(r'^search/', 'pyclass.views.search'),
#Simplified registration (no email sent)
url(r'^accounts/', include('registration.backends.simple.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from cloudify.manager import get_rest_client
from cloudify.workflows import ctx, parameters
instance = next(ctx.node_instances)
instance.execute_operation('custom_lifecycle.custom_operation',
kwargs={'update_id': parameters.update_id})
rest_client = get_rest_client()
rest_client.deployment_updates.finalize_commit(parameters.update_id)
|
"""Posts forms"""
# Django
from django import forms
# Models
from posts.models import Post
class PostForm(forms.ModelForm):
"""Post model form"""
class Meta:
"""Form settings."""
model = Post
fields = ('profile', 'title', 'photo')
|
from ....cost.redshift_common import RedshiftPerformanceIterator
def test_init():
rpi = RedshiftPerformanceIterator()
assert True # no exception
from moto import mock_redshift
@mock_redshift
def test_iterateCore_none(mocker):
# mock the get regions part
mockreturn = lambda service: ['us-east-1']
mockee = 'boto3.session.Session.get_available_regions'
mocker.patch(mockee, side_effect=mockreturn)
# test
rpi = RedshiftPerformanceIterator()
x = list(rpi.iterate_core())
assert len(x) == 0
@mock_redshift
def test_iterateCore_exists(mocker):
# mock the get regions part
mockreturn = lambda service: ['us-east-1']
mockee = 'boto3.session.Session.get_available_regions'
mocker.patch(mockee, side_effect=mockreturn)
# undo some region settings from before
import boto3
boto3.setup_default_session(region_name='us-east-1')
# create mock redshift
redshift_client = boto3.client('redshift')
redshift_client.create_cluster(
ClusterIdentifier='abc',
NodeType='abc',
MasterUsername='abc',
MasterUserPassword='abc'
)
# test
rpi = RedshiftPerformanceIterator()
rpi.region_include = ['us-east-1']
x = list(rpi.iterate_core())
assert len(x) == 1
# cannot name this function "test_iterator" because the filename already uses that name:
# pytest .../test_iterator.py -k 'test_iterator' would then run all tests, not just this one
def test_iteratorBuiltin(mocker):
import datetime as dt
dt_now = dt.datetime.utcnow()
# patch 1
ex_iterateCore = [
{'ClusterIdentifier': 'abc'}, # no creation time
{'ClusterIdentifier': 'abc', 'ClusterCreateTime': dt_now}, # with creation time
]
mockreturn = lambda *args, **kwargs: ex_iterateCore
mockee = 'isitfit.cost.redshift_common.RedshiftPerformanceIterator.iterate_core'
mocker.patch(mockee, side_effect=mockreturn)
# patch 2
#mockreturn = lambda *args, **kwargs: 1
#mockee = 'isitfit.cost.redshift_common.RedshiftPerformanceIterator.handle_cluster'
#mocker.patch(mockee, side_effect=mockreturn)
# patch 3
##import pandas as pd
#mockreturn = lambda *args, **kwargs: 'a dataframe' #pd.DataFrame()
#mockee = 'isitfit.cost.redshift_common.RedshiftPerformanceIterator.handle_metric'
#mocker.patch(mockee, side_effect=mockreturn)
# test
rpi = RedshiftPerformanceIterator()
x = list(rpi)
assert len(x) == 1
assert x[0][0] == ex_iterateCore[1]
assert x[0][1] == 'abc' # 'a dataframe'
|
#!/usr/bin/env python
"""
================================================
ABElectronics Expander Pi | RTC memory integer demo
Requires python smbus to be installed
For Python 2 install with: sudo apt-get install python-smbus
For Python 3 install with: sudo apt-get install python3-smbus
run with: python demo_rtcmemory_int.py
================================================
This demo shows how to write to and read from the internal battery
backed memory on the DS1307 RTC chip
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
try:
import ExpanderPi
except ImportError:
print("Failed to import ExpanderPi from python system path")
print("Importing from parent folder instead")
try:
import sys
sys.path.append('..')
import ExpanderPi
except ImportError:
raise ImportError(
"Failed to import library from parent folder")
def int_to_array(val):
'''
convert an integer into a four byte array
'''
arraybytes = [0, 0, 0, 0]
arraybytes[3] = val & 0xFF
val >>= 8
arraybytes[2] = val & 0xFF
val >>= 8
arraybytes[1] = val & 0xFF
val >>= 8
arraybytes[0] = val & 0xFF
return arraybytes
def array_to_int(arraybytes):
'''
convert a four byte array into an integer
'''
val = (arraybytes[0] << 24) + (arraybytes[1] << 16) + \
(arraybytes[2] << 8) + arraybytes[3]
return val
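# Note (illustrative, not used by this demo): the two helpers above match the
# standard-library struct module for unsigned 32-bit big-endian values:
#   import struct
#   list(bytearray(struct.pack('>I', 176247)))            # [0, 2, 176, 119] == int_to_array(176247)
#   struct.unpack('>I', bytes(bytearray([0, 2, 176, 119])))[0]  # 176247 == array_to_int(...)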
def main():
'''
Main program function
'''
# create a new instance of the RTC class
rtc = ExpanderPi.RTC()
# integer to be written to the RTC memory
writeval = 176247
print("Writing to memory: ", writeval)
# convert the integer into an array of bytes
writearray = int_to_array(writeval)
# write the array to the RTC memory
rtc.write_memory(0x08, writearray)
# read four bytes from the RTC memory into an array
readarray = rtc.read_memory(0x08, 4)
# combine the array values into an integer and print it
print("Reading from memory: ", array_to_int(readarray))
if __name__ == "__main__":
main()
|
import dash
from dash.dependencies import Input, Output, State, MATCH
import dash_html_components as html
import dash_core_components as dcc
import plotly.express as px
df = px.data.gapminder()
app = dash.Dash(__name__)
app.layout = html.Div([
html.Div(children=[
dcc.Dropdown(
options=[{
'label': i,
'value': i
} for i in df.country.unique()],
value='Canada',
id='country',
style={'display': 'inline-block', 'width': 200}
),
html.Button(
'Add Chart', id='add-chart', n_clicks=0,
style={'display': 'inline-block'}
),
]),
html.Div(id='container', children=[])
])
def create_figure(column_x, column_y, country):
chart_type = px.line if column_x == 'year' else px.scatter
return chart_type(
df.query("country == '{}'".format(country)),
x=column_x,
y=column_y,
)\
.update_layout(
title='{} {} vs {}'.format(country, column_x, column_y),
margin_l=10, margin_r=0, margin_b=30)\
.update_xaxes(title_text='').update_yaxes(title_text='')
@app.callback(
Output('container', 'children'),
[Input('add-chart', 'n_clicks')],
[State('container', 'children'),
State('country', 'value')])
def display_dropdowns(n_clicks, children, country):
default_column_x = 'year'
default_column_y = 'gdpPercap'
new_element = html.Div(
style={'width': '23%', 'display': 'inline-block', 'outline': 'thin lightgrey solid', 'padding': 10},
children=[
dcc.Graph(
id={
'type': 'dynamic-output',
'index': n_clicks
},
style={'height': 300},
figure=create_figure(default_column_x, default_column_y, country)
),
dcc.Dropdown(
id={
'type': 'dynamic-dropdown-x',
'index': n_clicks
},
options=[{'label': i, 'value': i} for i in df.columns],
value=default_column_x
),
dcc.Dropdown(
id={
'type': 'dynamic-dropdown-y',
'index': n_clicks
},
options=[{'label': i, 'value': i} for i in df.columns],
value=default_column_y
),
]
)
children.append(new_element)
return children
@app.callback(
Output({'type': 'dynamic-output', 'index': MATCH}, 'figure'),
[Input({'type': 'dynamic-dropdown-x', 'index': MATCH}, 'value'),
Input({'type': 'dynamic-dropdown-y', 'index': MATCH}, 'value'),
Input('country', 'value')],
)
def display_output(column_x, column_y, country):
return create_figure(column_x, column_y, country)
if __name__ == '__main__':
app.run_server(debug=True)
|
import m2
import subpkg.m3
print(m2, subpkg.m3.VAR)
|
"""
Code adapted from Dan Krause.
https://gist.github.com/dankrause/6000248
http://github.com/dankrause
"""
import socket
from http.client import HTTPResponse
from io import BytesIO
ST_DIAL = 'urn:dial-multiscreen-org:service:dial:1'
ST_ECP = 'roku:ecp'
class _FakeSocket(BytesIO):
def makefile(self, *args, **kw):
return self
class SSDPResponse(object):
def __init__(self, response):
self.location = response.getheader('location')
self.usn = response.getheader('usn')
self.st = response.getheader('st')
self.cache = response.getheader('cache-control').split('=')[1]
def __repr__(self):
        return '<SSDPResponse({location}, {st}, {usn})>'.format(**self.__dict__)
def discover(timeout=2, retries=1, st=ST_ECP):
group = ('239.255.255.250', 1900)
message = '\r\n'.join([
'M-SEARCH * HTTP/1.1',
'HOST: {0}:{1}'.format(*group),
'MAN: "ssdp:discover"',
'ST: {st}', 'MX: 3', '', ''])
socket.setdefaulttimeout(timeout)
responses = {}
for _ in range(retries):
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
m = message.format(st=st)
sock.sendto(m.encode(), group)
while 1:
try:
rhttp = HTTPResponse(_FakeSocket(sock.recv(1024)))
rhttp.begin()
if rhttp.status == 200:
rssdp = SSDPResponse(rhttp)
responses[rssdp.location] = rssdp
except socket.timeout:
break
return responses.values()
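if __name__ == '__main__':
    # Illustrative usage, not part of the original module: search for Roku
    # devices on the local network and print where they can be reached.
    for response in discover(st=ST_ECP):
        print(response.location)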
|
import random
from src.base.globalmaptiles import GlobalMercator
class UrlBuilder:
def __init__(self, zoom_level=19):
self._url_first_part = 'https://t'
self._url_second_part = '.ssl.ak.tiles.virtualearth.net/tiles/a'
self._url_last_part = '.jpeg?g=4401&n=z'
self._zoom_level = zoom_level
self._mercator = GlobalMercator()
def get_urls_by_tiles(self, t_minx, t_miny, t_maxx, t_maxy):
urls = []
for ty in range(t_miny, t_maxy + 1):
for tx in range(t_minx, t_maxx + 1):
quad_tree = self._mercator.QuadTree(tx, ty, self._zoom_level)
url = self._build_url(quad_tree)
urls.append(url)
return urls
def _build_url(self, quadtree):
server = random.randint(0, 7)
return self._url_first_part + str(server) + self._url_second_part + str(
quadtree) + self._url_last_part
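if __name__ == '__main__':
    # Illustrative usage, not part of the original module; the tile
    # coordinates below are hypothetical and assume src.base is importable.
    builder = UrlBuilder(zoom_level=19)
    for url in builder.get_urls_by_tiles(0, 0, 1, 1):
        print(url)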
|
#!/usr/bin/env python3
"""Train a model to classify herbarium traits."""
import argparse
import textwrap
from pathlib import Path
from pylib import db
from pylib import model_util as mu
from pylib.const import ALL_TRAITS
from pylib.herbarium_model import BACKBONES
from pylib.herbarium_model import HerbariumModel
from pylib.herbarium_runner import HerbariumTrainingRunner
def parse_args():
"""Process command-line arguments."""
description = """Train a herbarium phenology classifier model."""
arg_parser = argparse.ArgumentParser(
description=textwrap.dedent(description), fromfile_prefix_chars="@"
)
arg_parser.add_argument(
"--database",
"--db",
type=Path,
metavar="PATH",
required=True,
help="""Path to the SQLite3 database (angiosperm data).""",
)
arg_parser.add_argument(
"--save-model",
type=Path,
metavar="PATH",
required=True,
help="""Save best models to this path.""",
)
arg_parser.add_argument(
"--split-run",
metavar="NAME",
required=True,
help="""Which data split to use. Splits are saved in the database and each
one is used for a specific purpose.""",
)
arg_parser.add_argument(
"--trait",
choices=ALL_TRAITS,
required=True,
help="""Train to classify this trait.""",
)
arg_parser.add_argument(
"--backbone",
choices=list(BACKBONES.keys()),
default=list(BACKBONES.keys())[0],
help="""Which neural network backbone to use.""",
)
arg_parser.add_argument(
"--load-model",
type=Path,
metavar="PATH",
help="""Continue training with weights from this model.""",
)
arg_parser.add_argument(
"--log-dir",
type=Path,
metavar="PATH",
help="""Output log files to this directory.""",
)
arg_parser.add_argument(
"--learning-rate",
"--lr",
type=float,
metavar="FLOAT",
default=0.001,
help="""Initial learning rate. (default: %(default)s)""",
)
arg_parser.add_argument(
"--batch-size",
type=int,
metavar="INT",
default=16,
help="""Input batch size. (default: %(default)s)""",
)
arg_parser.add_argument(
"--workers",
type=int,
metavar="INT",
default=4,
help="""Number of workers for loading data. (default: %(default)s)""",
)
arg_parser.add_argument(
"--epochs",
type=int,
metavar="INT",
default=100,
help="""How many epochs to train. (default: %(default)s)""",
)
arg_parser.add_argument(
"--limit",
type=int,
metavar="INT",
help="""Limit the input to this many records.""",
)
args = arg_parser.parse_args()
mu.validate_split_runs(args)
return args
def main():
"""Train a model using just pytorch."""
args = parse_args()
orders = db.select_all_orders(args.database)
model = HerbariumModel(orders, args.backbone, args.load_model)
runner = HerbariumTrainingRunner(model, orders, args)
runner.run()
if __name__ == "__main__":
main()
|
""" This module defines the class QueryHMDBTimestamp.
It is written to get the release date of HMDB Database.
http://www.hmdb.ca/release-notes
"""
__author__ = ""
__copyright__ = ""
__credits__ = ['Deqing Qu', 'Stephen Ramsey']
__license__ = ""
__version__ = ""
__maintainer__ = ""
__email__ = ""
__status__ = "Prototype"
from ScrapingHelper import retrieve
class QueryHMDBTimestamp:
@staticmethod
def get_timestamp():
url = "http://www.hmdb.ca/release-notes"
soup = retrieve(url)
main_tag = soup.find('main')
if main_tag is None or len(main_tag) == 0:
return None
date_tag = main_tag.findChildren('h2')
if date_tag is None or len(date_tag) == 0:
return None
r = date_tag[0].text.split()
return r[-2] + "01," + r[-1]
if __name__ == '__main__':
print(QueryHMDBTimestamp.get_timestamp())
|
#!/usr/bin/env python
""" test_aes_cmac.py
Tests for AES CMAC from NIST 800-38B
References:
RFC 4493 - http://www.rfc-editor.org/rfc/rfc4493.txt
NIST - http://csrc.nist.gov/publications/nistpubs/800-38B/Updated_CMAC_Examples.pdf
test_aes_cmac.py(c) 2015 by Paul A. Lambert
test_aes_cmac.py is licensed under a
Creative Commons Attribution 4.0 International License.
"""
import unittest
if __name__ == '__main__' and __package__ is None:
from os import sys, path
p = path.abspath(__file__) # ./cryptopy/cipher/test/test_aes_cmac.py
    for i in range(4): p = path.dirname( p )  # four levels up to project '.'
sys.path.append( p )
from cryptopy.cipher.aes_cmac import aes_cmac, subkey
class TestVectorsNIST_SP_800_38B(unittest.TestCase):
""" Test Vectors from NIST Special Publication 800-38B """
def test_AES128_CMAC(self):
""" FIPS 800-38B D.1 AES-128 """
# For Examples 1 to 4 below, the block cipher is the
# AES algorithm with the following 128 bit key:
key = '2b7e1516 28aed2a6 abf71588 09cf4f3c'
cmac_gen_test(
test = 'NIST SP_800-38B D.1 AES-128 - Subkey Generation',
key = key,
k1 = 'fbeed618 35713366 7c85e08f 7236a8de',
k2 = 'f7ddac30 6ae266cc f90bc11e e46d513b')
cmac_mac_test(
test = 'NIST SP_800-38B D.1 AES-128 - Example 1 MLen = 0',
key = key,
m = '',
t = 'bb1d6929 e9593728 7fa37d12 9b756746' )
cmac_mac_test(
test = 'NIST SP_800-38B D.1 AES-128 - Example 2 MLen = 128',
key = key,
m = '6bc1bee2 2e409f96 e93d7e11 7393172a',
t = '070a16b4 6b4d4144 f79bdd9d d04a287c' )
cmac_mac_test(
test = 'NIST SP_800-38B D.1 AES-128 - Example 3 MLen = 320',
key = key,
m = '''6bc1bee2 2e409f96 e93d7e11 7393172a
ae2d8a57 1e03ac9c 9eb76fac 45af8e51
30c81c46 a35ce411''',
t = 'dfa66747 de9ae630 30ca3261 1497c827' )
cmac_mac_test(
test = 'NIST SP_800-38B D.1 AES-128 - Example 4 MLen = 512',
key = key,
m = '''6bc1bee2 2e409f96 e93d7e11 7393172a
ae2d8a57 1e03ac9c 9eb76fac 45af8e51
30c81c46 a35ce411 e5fbc119 1a0a52ef
f69f2445 df4f9b17 ad2b417b e66c3710''',
t = '51f0bebf 7e3b9d92 fc497417 79363cfe' )
def test_AES192_CMAC(self):
""" FIPS 800-38B D.2 AES-192 """
key = '8e73b0f7 da0e6452 c810f32b 809079e5 62f8ead2 522c6b7b'
cmac_gen_test(
test = 'NIST SP_800-38B D.2 AES-192 - Subkey Generation',
key = key,
k1 = '448a5b1c 93514b27 3ee6439d d4daa296',
k2 = '8914b639 26a2964e 7dcc873b a9b5452c')
cmac_mac_test(
test = 'NIST SP_800-38B D.2 AES-192 - Example 5 MLen = 0',
key = key,
m = '',
t = 'd17ddf46 adaacde5 31cac483 de7a9367' )
cmac_mac_test(
test = 'NIST SP_800-38B D.2 AES-192 - Example 6 MLen = 128',
key = key,
m = '6bc1bee2 2e409f96 e93d7e11 7393172a',
t = '9e99a7bf 31e71090 0662f65e 617c5184' )
cmac_mac_test(
test = 'NIST SP_800-38B D.2 AES-192 - Example 7 MLen = 320',
key = key,
m = '''6bc1bee2 2e409f96 e93d7e11 7393172a
ae2d8a57 1e03ac9c 9eb76fac 45af8e51
30c81c46 a35ce411''',
t = '8a1de5be 2eb31aad 089a82e6 ee908b0e' )
cmac_mac_test(
test = 'NIST SP_800-38B D.2 AES-192 - Example 8 MLen = 512',
key = key,
m = '''6bc1bee2 2e409f96 e93d7e11 7393172a
ae2d8a57 1e03ac9c 9eb76fac 45af8e51
30c81c46 a35ce411 e5fbc119 1a0a52ef
f69f2445 df4f9b17 ad2b417b e66c3710''',
t = 'a1d5df0e ed790f79 4d775896 59f39a11' )
def test_AES256_CMAC(self):
""" FIPS 800-38B D.3 AES-256 """
key = '''603deb10 15ca71be 2b73aef0 857d7781
1f352c07 3b6108d7 2d9810a3 0914dff4''' # for examples 9 to 12
cmac_gen_test(
test = 'NIST SP_800-38B D.3 AES-256 - Subkey Generation',
key = key,
k1 = 'cad1ed03 299eedac 2e9a9980 8621502f',
k2 = '95a3da06 533ddb58 5d353301 0c42a0d9')
cmac_mac_test(
test = 'NIST SP_800-38B D.3 AES-256 - Example 9 MLen = 0',
key = key,
m = '',
t = '028962f6 1b7bf89e fc6b551f 4667d983' )
cmac_mac_test(
test = 'NIST SP_800-38B D.3 AES-256 - Example 10 MLen = 128',
key = key,
m = '6bc1bee2 2e409f96 e93d7e11 7393172a',
t = '28a7023f 452e8f82 bd4bf28d 8c37c35c' )
cmac_mac_test(
test = 'NIST SP_800-38B D.3 AES-256 - Example 11 MLen = 320',
key = key,
m = '''6bc1bee2 2e409f96 e93d7e11 7393172a
ae2d8a57 1e03ac9c 9eb76fac 45af8e51
30c81c46 a35ce411 ''',
t = 'aaf3d8f1 de5640c2 32f5b169 b9c911e6' )
cmac_mac_test(
test = 'NIST SP_800-38B D.3 AES-256 - Example 12 MLen = 512',
key = key,
m = '''6bc1bee2 2e409f96 e93d7e11 7393172a
ae2d8a57 1e03ac9c 9eb76fac 45af8e51
30c81c46 a35ce411 e5fbc119 1a0a52ef
f69f2445 df4f9b17 ad2b417b e66c3710''',
t = 'e1992190 549f6ed5 696a2c05 6c315410' )
def cmac_gen_test(test, key, k1='', k2=''):
""" NIST test of subkey generation of k1 and k2 from key """
key = decode_vector( key ) # key in string of hex
k1_known = decode_vector( k1 )
k2_known = decode_vector( k2 )
    k1, k2 = subkey( key)           # test this function
# against known results
assert k1 == k1_known
assert k2 == k2_known
def cmac_mac_test(test='',key='', m='', t=''):
""" NIST test vector validation of CMAC output """
key = decode_vector( key )
m = decode_vector( m )
t = decode_vector( t )
cm = aes_cmac(key, m)
assert t == cm
def decode_vector(string):
""" Covert readable test vector string to an octet string """
return ''.join( string.split() ).decode('hex')
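# e.g. decode_vector('2b7e 1516') -> '\x2b\x7e\x15\x16' as a Python 2 byte
# string (str.decode('hex') only exists under Python 2, like this module)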
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQL dialects."""
if '.' not in __package__:
from compiler.dialect_libraries import bq_library
from compiler.dialect_libraries import psql_library
from compiler.dialect_libraries import sqlite_library
from compiler.dialect_libraries import trino_library
from compiler.dialect_libraries import presto_library
else:
from ..compiler.dialect_libraries import bq_library
from ..compiler.dialect_libraries import psql_library
from ..compiler.dialect_libraries import sqlite_library
from ..compiler.dialect_libraries import trino_library
from ..compiler.dialect_libraries import presto_library
def Get(engine):
return DIALECTS[engine]()
class Dialect(object):
pass
class BigQueryDialect(Dialect):
"""BigQuery SQL dialect."""
def Name(self):
return 'BigQuery'
def BuiltInFunctions(self):
return {}
def InfixOperators(self):
return {
'++': 'CONCAT(%s, %s)',
}
def Subscript(self, record, subscript):
return '%s.%s' % (record, subscript)
def LibraryProgram(self):
return bq_library.library
def UnnestPhrase(self):
return 'UNNEST({0}) as {1}'
def ArrayPhrase(self):
return 'ARRAY[%s]'
def GroupBySpecBy(self):
return 'name'
class SqLiteDialect(Dialect):
"""SqLite SQL dialect."""
def Name(self):
return 'SqLite'
def BuiltInFunctions(self):
return {
'Set': None,
'Element': "JSON_EXTRACT({0}, '$[{1}]')",
'Range': ('(select json_group_array(n) from (with recursive t as'
'(select 0 as n union all '
'select n + 1 as n from t where n + 1 < {0}) '
'select n from t))'),
'ValueOfUnnested': '{0}.value',
'List': 'JSON_GROUP_ARRAY({0})'
}
def InfixOperators(self):
return {
'++': '(%s) || (%s)',
'%' : '(%s) %% (%s)'
}
def Subscript(self, record, subscript):
return 'JSON_EXTRACT(%s, "$.%s")' % (record, subscript)
def LibraryProgram(self):
return sqlite_library.library
def UnnestPhrase(self):
return 'JSON_EACH({0}) as {1}'
def ArrayPhrase(self):
return 'JSON_ARRAY(%s)'
def GroupBySpecBy(self):
return 'name'
class PostgreSQL(Dialect):
"""PostgreSQL SQL dialect."""
def Name(self):
return 'PostgreSQL'
def BuiltInFunctions(self):
return {
'Range': '(SELECT ARRAY_AGG(x) FROM GENERATE_SERIES(0, {0} - 1) as x)',
'ToString': 'CAST(%s AS TEXT)',
'Element': '({0})[{1} + 1]',
'Size': 'ARRAY_LENGTH(%s, 1)',
'Count': 'COUNT(DISTINCT {0})'
}
def InfixOperators(self):
return {
'++': 'CONCAT(%s, %s)',
}
def Subscript(self, record, subscript):
return '(%s).%s' % (record, subscript)
def LibraryProgram(self):
return psql_library.library
def UnnestPhrase(self):
return 'UNNEST({0}) as {1}'
def ArrayPhrase(self):
return 'ARRAY[%s]'
def GroupBySpecBy(self):
return 'name'
class Trino(Dialect):
"""Trino analytic engine dialect."""
def Name(self):
return 'Trino'
def BuiltInFunctions(self):
return {
'Range': 'SEQUENCE(0, %s - 1)',
'ToString': 'CAST(%s AS VARCHAR)',
'ToInt64': 'CAST(%s AS BIGINT)',
'ToFloat64': 'CAST(%s AS DOUBLE)',
'AnyValue': 'ARBITRARY(%s)'
}
def InfixOperators(self):
return {
'++': 'CONCAT(%s, %s)',
}
def Subscript(self, record, subscript):
return '%s.%s' % (record, subscript)
def LibraryProgram(self):
return trino_library.library
def UnnestPhrase(self):
return 'UNNEST({0}) as pushkin({1})'
def ArrayPhrase(self):
return 'ARRAY[%s]'
def GroupBySpecBy(self):
return 'index'
class Presto(Dialect):
def Name(self):
return 'Presto'
def BuiltInFunctions(self):
return {
'Range': 'SEQUENCE(0, %s - 1)',
'ToString': 'CAST(%s AS VARCHAR)',
'ToInt64': 'CAST(%s AS BIGINT)',
'ToFloat64': 'CAST(%s AS DOUBLE)',
'AnyValue': 'ARBITRARY(%s)'
}
def InfixOperators(self):
return {
'++': 'CONCAT(%s, %s)',
}
def Subscript(self, record, subscript):
return '%s.%s' % (record, subscript)
def LibraryProgram(self):
return presto_library.library
def UnnestPhrase(self):
return 'UNNEST({0}) as pushkin({1})'
def ArrayPhrase(self):
return 'ARRAY[%s]'
def GroupBySpecBy(self):
return 'index'
DIALECTS = {
'bigquery': BigQueryDialect,
'sqlite': SqLiteDialect,
'psql': PostgreSQL,
'presto': Presto,
'trino': Trino
}
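if __name__ == '__main__':
  # Illustrative usage, not part of the original module: look up a dialect
  # and render an array literal in its SQL flavor.
  dialect = Get('sqlite')
  print(dialect.Name(), dialect.ArrayPhrase() % '1, 2, 3')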
|
# Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/python-m5p>
#
# License: 3-clause BSD, <https://github.com/smarie/python-m5p/blob/main/LICENSE>
from m5py.main import M5Prime
from m5py.export import export_text_m5
try:
# -- Distribution mode --
# import from _version.py generated by setuptools_scm during release
from ._version import version as __version__
except ImportError:
# -- Source mode --
# use setuptools_scm to get the current version from src using git
from setuptools_scm import get_version as _gv
from os import path as _path
__version__ = _gv(_path.join(_path.dirname(__file__), _path.pardir))
__all__ = [
"__version__",
# submodules
"main",
"export",
# symbols
"M5Prime",
"export_text_m5"
]
|
import pytest
from tokki.abc import Client, Project, Repo
AGENT = "Tests for Tokki +(https://github.com/ChomusukeBot/Tokki)"
@pytest.mark.asyncio
async def test_no_agent():
with pytest.raises(TypeError, match=r": 'useragent'"):
Client()
@pytest.mark.asyncio
async def test_rest():
client = Client(AGENT)
await client._get_request("https://httpbin.org/get")
await client._post_request("https://httpbin.org/post", {})
@pytest.mark.asyncio
async def test_not_implemented():
client = Client(AGENT)
with pytest.raises(TypeError):
Project(None, client)
with pytest.raises(TypeError):
Repo(None, client)
|
from gdsfactory.component import Component, ComponentReference
try:
import dphox as dp
DPHOX_IMPORTED = True
except ImportError:
DPHOX_IMPORTED = False
def from_dphox(device: "dp.Device", foundry: "dp.foundry.Foundry") -> Component:
"""Converts a Dphox Device into a gdsfactory Component.
Note that you need to install dphox `pip install dphox`
https://dphox.readthedocs.io/en/latest/index.html
Args:
device: Dphox device
foundry: Dphox foundry object
"""
c = Component(device.name)
for layer_name, shapely_multipolygon in device.layer_to_polys.items():
for poly in shapely_multipolygon:
layer = foundry.layer_to_gds_label[layer_name]
c.add_polygon(points=poly, layer=layer)
for ref in device.child_to_device:
child = from_dphox(device.child_to_device[ref], foundry)
for gds_transform in device.child_to_transform[ref][-1]:
new_ref = ComponentReference(
component=child,
origin=(gds_transform.x, gds_transform.y),
rotation=gds_transform.angle,
magnification=gds_transform.mag,
x_reflection=gds_transform.flip_y,
)
new_ref.owner = c
c.add(new_ref)
for port_name, port in device.port.items():
c.add_port(
name=port_name,
midpoint=(port.x, port.y),
orientation=port.a,
width=port.w,
layer=foundry.layer_to_gds_label.get(port.layer, (1, 0)),
)
return c
if __name__ == "__main__":
from dphox.demo import mzi
c = from_dphox(mzi, foundry=dp.foundry.FABLESS)
c.show()
|
import argparse
import numpy as np
import matplotlib.pyplot as plt
from auto_utils import (
get_kw_count,
get_word_count,
load_keywords,
get_wset,
get_wlen,
get_wfreq,
CAP,
CUP,
arrange_into_freq_bins,
)
PRE = "/home/santosh/tools/kaldi/egs/indic/"
PATHS = {
"tel": [
"tel_keywords/OLD/sel_iter_22/iter_22_588.txt",
"data/train/text",
"data/test/text",
],
"tam": ["tam_keywords/OLD/572_keywords.txt", "data/train/text", "data/test/text"],
"guj": ["guj/596_keywords_Guj.txt", "data/train/text", "data/test/text"],
}
def main():
keywords = load_keywords(args.keyword_file)
print("# keywords", len(keywords))
train_kw_count = get_kw_count(args.train_text, keywords)
dev_kw_count = get_kw_count(args.dev_text, keywords)
test_kw_count = get_kw_count(args.test_text, keywords)
train_wset = get_wset(train_kw_count)
dev_wset = get_wset(dev_kw_count)
test_wset = get_wset(test_kw_count)
train_count = get_wfreq(train_kw_count)
dev_count = get_wfreq(dev_kw_count)
test_count = get_wfreq(test_kw_count)
test_w_count = get_word_count(args.train_text)
arrange_into_freq_bins(get_wfreq(test_w_count), 25)
tdt_set = (test_wset & train_wset) & dev_wset
print(f"C = (train {CAP} dev {CAP} test):", len(tdt_set))
tt_set = (train_wset & test_wset) - tdt_set
print(f"T = (train {CAP} test) - C :", len(tt_set))
dt_set = (dev_wset & test_wset) - tdt_set
print(f"D = (dev {CAP} test) - C :", len(dt_set))
t_set = test_wset - (tdt_set | tt_set | dt_set)
print(f"test - (C {CUP} T {CUP} D) :", len(t_set))
not_in_train = {}
not_in_train_count = []
for kw in keywords:
if kw not in train_kw_count:
not_in_train[kw] = test_kw_count[kw]
not_in_train_count.append(test_kw_count[kw])
print("Not in train:", len(not_in_train))
density = False
plt.rc("text", usetex=True)
plt.style.use("thesis")
plt.figure()
plt.hist(
train_count,
bins=np.unique(train_count),
color="C0",
density=density,
label="Training",
alpha=0.5,
)
plt.hist(
test_count,
bins=np.unique(test_count),
color="C1",
density=density,
label="Test",
alpha=0.5,
)
plt.hist(
not_in_train_count,
bins=np.unique(not_in_train_count),
color="C2",
density=density,
label="Not in training",
alpha=0.5,
)
plt.xlabel("Keyword occurrence bin")
plt.ylabel("Number of occurrences")
plt.grid()
plt.legend(loc="best")
plt.show()
print(np.histogram(test_count, bins=np.unique(test_count))[1].tolist())
print(not_in_train_count)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("keyword_file")
parser.add_argument("-train_text", default="data/train/text")
parser.add_argument("-dev_text", default="data/dev/text")
parser.add_argument("-test_text", default="data/test/text")
args = parser.parse_args()
main()
|
import scrypt
from typing import Union
class ArgumentError(Exception):
pass
def generate_digest(message: str,
password: str = None,
maxtime: Union[float, int] = 0.5,
salt: str = "",
length: int = 64) -> bytes:
"""Multi-arity function for generating a digest.
Use KDF symmetric encryption given a password.
Use deterministic hash function given a salt (or lack of password).
"""
if password and salt:
raise ArgumentError("only provide a password or a salt, not both")
if salt != "" and len(salt) < 16:
raise ArgumentError("salts need to be minimum of 128bits (~16 characters)")
if password:
return scrypt.encrypt(message, password, maxtime=maxtime)
else:
return scrypt.hash(message, salt, buflen=length)
def decrypt_digest(digest: bytes,
password: str,
maxtime: Union[float, int] = 0.5) -> bytes:
"""Decrypts digest using given password."""
return scrypt.decrypt(digest, password, maxtime)
def validate_digest(digest: bytes,
password: str,
maxtime: Union[float, int] = 0.5) -> bool:
"""Validate digest using given password."""
try:
scrypt.decrypt(digest, password, maxtime)
return True
except scrypt.error:
return False
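if __name__ == "__main__":
    # Illustrative round trip, not part of the original module; assumes the
    # `scrypt` package is installed and uses a throwaway password. Validation
    # may need a larger maxtime on slower machines.
    digest = generate_digest("secret message", password="hunter2")
    assert validate_digest(digest, "hunter2")
    assert not validate_digest(digest, "wrong password")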
|
try:
from importlib_resources import files
except ImportError:
from importlib.resources import files
__version__ = files("pyjet").joinpath("VERSION.txt").read_text().strip()
version = __version__
version_info = __version__.split(".")
FASTJET_VERSION = "3.3.4"
FJCONTRIB_VERSION = "1.045"
|
#!/usr/bin/env python3
import glob, os, sys
from collections import defaultdict
import random
from shutil import copyfile
#List of schema parts to allow us to generate our own with dynamic tables
schemaparts = {
"PART": """
BEGIN;
CREATE TABLE PART (
P_PARTKEY SERIAL PRIMARY KEY,
P_NAME VARCHAR(55),
P_MFGR CHAR(25),
P_BRAND CHAR(10),
P_TYPE VARCHAR(25),
P_SIZE INTEGER,
P_CONTAINER CHAR(10),
P_RETAILPRICE DECIMAL,
P_COMMENT VARCHAR(23){}
);
COPY part FROM '{}' WITH (FORMAT csv, DELIMITER '|');
COMMIT;\n""",
"REGION": """
BEGIN;
CREATE TABLE REGION (
R_REGIONKEY SERIAL PRIMARY KEY,
R_NAME CHAR(25),
R_COMMENT VARCHAR(152){}
);
COPY region FROM '{}' WITH (FORMAT csv, DELIMITER '|');
COMMIT;\n""",
"NATION": """
BEGIN;
CREATE TABLE NATION (
N_NATIONKEY SERIAL PRIMARY KEY,
N_NAME CHAR(25),
N_REGIONKEY BIGINT NOT NULL, -- references R_REGIONKEY
N_COMMENT VARCHAR(152){}
);
COPY nation FROM '{}' WITH (FORMAT csv, DELIMITER '|');
COMMIT;\n""",
"SUPPLIER": """
BEGIN;
CREATE TABLE SUPPLIER (
S_SUPPKEY SERIAL PRIMARY KEY,
S_NAME CHAR(25),
S_ADDRESS VARCHAR(40),
S_NATIONKEY BIGINT NOT NULL, -- references N_NATIONKEY
S_PHONE CHAR(15),
S_ACCTBAL DECIMAL,
S_COMMENT VARCHAR(101){}
);
COPY supplier FROM '{}' WITH (FORMAT csv, DELIMITER '|');
COMMIT;\n""",
"CUSTOMER": """
BEGIN;
CREATE TABLE CUSTOMER (
C_CUSTKEY SERIAL PRIMARY KEY,
C_NAME VARCHAR(25),
C_ADDRESS VARCHAR(40),
C_NATIONKEY BIGINT NOT NULL, -- references N_NATIONKEY
C_PHONE CHAR(15),
C_ACCTBAL DECIMAL,
C_MKTSEGMENT CHAR(10),
C_COMMENT VARCHAR(117){}
);
COPY customer FROM '{}' WITH (FORMAT csv, DELIMITER '|');
COMMIT;\n""",
"PARTSUPP": """
BEGIN;
CREATE TABLE PARTSUPP (
PS_PARTKEY BIGINT NOT NULL, -- references P_PARTKEY
PS_SUPPKEY BIGINT NOT NULL, -- references S_SUPPKEY
PS_AVAILQTY INTEGER,
PS_SUPPLYCOST DECIMAL,
PS_COMMENT VARCHAR(199){},
PRIMARY KEY (PS_PARTKEY, PS_SUPPKEY)
);
COPY partsupp FROM '{}' WITH (FORMAT csv, DELIMITER '|');
COMMIT;\n""",
"ORDERS": """
BEGIN;
CREATE TABLE ORDERS (
O_ORDERKEY SERIAL PRIMARY KEY,
O_CUSTKEY BIGINT NOT NULL, -- references C_CUSTKEY
O_ORDERSTATUS CHAR(1),
O_TOTALPRICE DECIMAL,
O_ORDERDATE DATE,
O_ORDERPRIORITY CHAR(15),
O_CLERK CHAR(15),
O_SHIPPRIORITY INTEGER,
O_COMMENT VARCHAR(79){}
);
COPY orders FROM '{}' WITH (FORMAT csv, DELIMITER '|');
COMMIT;\n""",
"LINEITEM": """
BEGIN;
CREATE TABLE LINEITEM (
L_ORDERKEY BIGINT NOT NULL, -- references O_ORDERKEY
L_PARTKEY BIGINT NOT NULL, -- references P_PARTKEY (compound fk to PARTSUPP)
L_SUPPKEY BIGINT NOT NULL, -- references S_SUPPKEY (compound fk to PARTSUPP)
L_LINENUMBER INTEGER,
L_QUANTITY DECIMAL,
L_EXTENDEDPRICE DECIMAL,
L_DISCOUNT DECIMAL,
L_TAX DECIMAL,
L_RETURNFLAG CHAR(1),
L_LINESTATUS CHAR(1),
L_SHIPDATE DATE,
L_COMMITDATE DATE,
L_RECEIPTDATE DATE,
L_SHIPINSTRUCT CHAR(25),
L_SHIPMODE CHAR(10),
L_COMMENT VARCHAR(44){},
PRIMARY KEY (L_ORDERKEY, L_LINENUMBER)
);
COPY lineitem FROM '{}' WITH (FORMAT csv, DELIMITER '|');
COMMIT;\n"""
}
#Density defines the percentage of rows that take value `1`
def generate_density_distribution(numrows, density):
#Determine the count of each number
numones = int(round(numrows*(density/100)))
numzeros = int(round(numrows*((100-density)/100)))
#Generate the deck that has the correct counts
deck = ([1] * numones) + ([0] * numzeros)
#Shuffle the deck
random.shuffle(deck)
return deck
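#Illustrative example (not part of the original script): a 75% density over
#eight rows yields six 1s and two 0s in shuffled order, e.g.
#   generate_density_distribution(8, 75) -> [1, 1, 0, 1, 1, 1, 0, 1]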
def get_file_lines(fname):
    #returns 0 for an empty file instead of raising NameError
    i = -1
    with open(fname) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
def parseArguments():
#Check the arguments
if len(sys.argv) == 1:
print("\n\nNo config file or .tbl directory specified!")
print("Run this script eg: `python insertchoice.py /path/to/config.csv /path/to/.tbl/directory/`")
print("or `python insertchoice.py help` for more information.")
sys.exit()
    elif len(sys.argv) == 2 and sys.argv[1] != "help":
        print("\n\nNo .tbl directory specified!")
        print("Run this script eg: `python insertchoice.py /path/to/config.csv /path/to/.tbl/directory/`")
        print("or `python insertchoice.py help` for more information.")
        sys.exit()
elif sys.argv[1] == "help":
print("\n\nTo run this script, use `python insertchoice.py /path/to/config.csv /path/to/.tbl/directory/`")
print("...or `python insertchoice.py /path/to/config.csv /path/to/.tbl/directory/ /path/to/output/directory/`")
print("\nThis script adds 'Choice' columns to a given schema.")
print("Choice columns are columns that represent a user's consent for a given column to be shared.")
print("This script autopopulates choice columns to a given density.")
print("For a density of `99`, this means that 99\% of the choice rows will be consent (represented as integer 1) " \
"and the remaining 1% will be non-consent (represented as integer 0).")
print("\nThis script generates a schema.sql file that can be used to import the schema into postgres "\
"using the `\i /path/to/my/schema.sql` command. " \
"Note that this schema uses paths to the csv files generated by this script. " \
"This means that moving these csv files will cause the schema.sql file to fail." \
"You may optionally specify an output path as the third argument to facilitate movement.")
print("\nThe config.csv file specified in the arguments is a csv file (without column headings) representing the (table,column,density), eg:")
print("\ncustomer,c_name,99")
print("customer,c_address,50")
print("customer,c_phone,33")
print("\nThe /path/to/the/.tbl/directory/ should be the directory where the .tbl files " \
"generated by a program like tpch's dbgen are stored.")
sys.exit()
elif len(sys.argv) == 3:
configfile = os.path.abspath(sys.argv[1])
datadir = os.path.abspath(sys.argv[2])
outputdir = os.path.abspath(datadir)
elif len(sys.argv) == 4:
configfile = os.path.abspath(sys.argv[1])
datadir = os.path.abspath(sys.argv[2])
outputdir = os.path.abspath(sys.argv[3])
else:
print("\n\nToo many arguments specified!")
print("Run this script eg: `python insertchoice.py /path/to/config.csv /path/to/.tbl/directory/`")
print("or `python insertchoice.py help` for more information.")
sys.exit()
return (configfile, datadir, outputdir)
def parseConfigLine(line, choices):
parts = line.split(",")
    if len(parts) != 3:
        raise ValueError("Choice lines must have 3 fields!")
table = parts[0].upper()
column = parts[1].upper()
density = int(parts[2])
choices[table][column].append(density)
def parseConfig(configfile):
#Parse the config file to generate the internal choices density representation
#Map of FileName (Table Name) -> (ColumnName -> list[densities])
choices = defaultdict(lambda: defaultdict(list))
generating = "both"
with open(configfile) as config:
for index, line in enumerate(config):
#First line should indicate which of internal, external (or both) to generate
            if index == 0 and len(line.split(",")) == 1:
generating = line.split("\n")[0]
continue
parseConfigLine(line, choices)
if generating not in ["both", "internal", "external"]:
raise ValueError("The selection of whether to generate `internal` choices, " +
"`external` choices, or `both` was not valid. (Recieved `" + generating + "`)\n" +
"Please replace the first line of your config file (" + configfile + ") " +
"with one of `internal`, `external`, or `both`.")
return (generating, choices)
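#Example resulting structure for the config lines shown in `help`:
#choices == {"CUSTOMER": {"C_NAME": [99], "C_ADDRESS": [50], "C_PHONE": [33]}}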
def generate_new_line(line, index, density_dist_list):
newline = line.split("\n")[0]
for density_dist in density_dist_list:
newline += "|" + str(density_dist[index])
return newline + "\n"
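#Example (illustrative): with two density distributions, the line "1|foo\n" at
#index 0 may become "1|foo|1|0\n" (one appended value per choice column)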
def createCSVInternal(datadir, outputdir, choices):
#Generate the new CSV files with the new data appended per line
filepaths = {}
for filename in os.listdir(datadir):
if not filename.endswith(".tbl"):
continue
filename_noext = filename.split(".")[0]
filepath = os.path.join(datadir, filename)
tablename = filename_noext.upper()
#Define a file to save the new data to
newfilepath = os.path.join(outputdir, filename_noext + "_internal.csv")
        if len(choices[tablename]) == 0:
newfilepath = os.path.join(outputdir, filename_noext + ".csv")
copyfile(filepath, newfilepath)
print("Created " + newfilepath)
continue
filepaths[tablename] = newfilepath
#Get the number of lines in this file
file_lines = get_file_lines(filepath)
#Generate a list of density distributions for the choices columns in this table
density_dist_list = []
for column, density_list in choices[tablename].items():
for percentage in density_list:
density_dist_list.append(generate_density_distribution(file_lines,int(percentage)))
with open(filepath) as file:
with open(newfilepath, "w") as file2:
for index, line in enumerate(file):
file2.write(generate_new_line(line, index, density_dist_list))
print("Created " + newfilepath)
return filepaths
def getPrimaryKeyLine(table, line):
primaryKeyIndexes = {
"PART": [0],
"REGION": [0],
"NATION": [0],
"SUPPLIER": [0],
"CUSTOMER": [0],
"PARTSUPP": [0,1],
"ORDERS": [0],
"LINEITEM": [0,3]
}
primaryKey = ""
splitline = line.split("|")
for i in primaryKeyIndexes[table]:
primaryKey += splitline[i] + "|"
return primaryKey
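#Example (illustrative): for a LINEITEM row "1|155190|7706|1|17|..." the primary
#key indexes are 0 and 3, so this returns "1|1|" -- the trailing delimiter lets
#a choice value be appended directly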
def getPrimaryKeyNames(table):
allPrimaryKeyNames = {
"PART": ["P_PARTKEY BIGINT NOT NULL"],
"REGION": ["R_REGIONKEY BIGINT NOT NULL"],
"NATION": ["N_NATIONKEY BIGINT NOT NULL"],
"SUPPLIER": ["S_SUPPKEY BIGINT NOT NULL"],
"CUSTOMER": ["C_CUSTKEY BIGINT NOT NULL"],
"PARTSUPP": ["PS_PARTKEY BIGINT NOT NULL", "PS_SUPPKEY BIGINT NOT NULL"],
"ORDERS": ["O_ORDERKEY BIGINT NOT NULL"],
"LINEITEM": ["L_ORDERKEY BIGINT NOT NULL", "L_LINENUMBER INTEGER"],
}
return allPrimaryKeyNames[table]
def createCSVExternalMultiple(datadir, outputdir, choices):
filepaths = {}
choiceTablePaths = {}
for filename in os.listdir(datadir):
if not filename.endswith(".tbl"):
continue
filename_noext = filename.split(".")[0]
filepath = os.path.join(datadir, filename)
tablename = filename_noext.upper()
newfilepath = os.path.join(outputdir, filename_noext + ".csv")
filepaths[tablename] = newfilepath
copyfile(filepath, newfilepath)
print("Created " + newfilepath)
#Get the number of lines in this file
file_lines = get_file_lines(filepath)
density_dist_dict = {}
files = {}
for column, density_list in choices[tablename].items():
for percentage in density_list:
choiceTableFilename = tablename.lower() + "_" + column.lower() + "_" + str(percentage) + ".csv"
choiceTablePath = os.path.join(outputdir, choiceTableFilename)
density_dist_dict[choiceTablePath] = generate_density_distribution(file_lines,int(percentage))
choiceTablePaths[choiceTablePath] = (tablename, column, percentage)
files[choiceTablePath] = open(choiceTablePath, "w")
with open(filepath) as sourcefile:
for index, line in enumerate(sourcefile):
for filepath, density_dist in density_dist_dict.items():
file = files[filepath]
output = getPrimaryKeyLine(tablename, line) + str(density_dist[index]) + "\n"
file.write(output)
for filepath in density_dist_dict:
print("Created " + filepath)
return (filepaths, choiceTablePaths)
#Generate the schema to load in these files
def generateInternalSchema(choices, filepaths, outputdir):
schema = ""
#Define a bunch of data to insert into the string format
for tablename, filepath in filepaths.items():
sql_column_definition = ","
for column, densities in choices[tablename].items():
for density in densities:
sql_column_definition += "\n" + (" "*16) + column + "_CHOICE_" + str(density) + " INTEGER,"
#Append the column definition
schema += schemaparts[tablename.upper()].format(sql_column_definition[:-1], filepaths[tablename])
if "SUPPLIER" in filepaths and "NATION" in filepaths:
schema += "ALTER TABLE SUPPLIER ADD FOREIGN KEY (S_NATIONKEY) REFERENCES NATION(N_NATIONKEY);\n"
if "PARTSUPP" in filepaths and "PART" in filepaths:
schema += "ALTER TABLE PARTSUPP ADD FOREIGN KEY (PS_PARTKEY) REFERENCES PART(P_PARTKEY);\n"
if "PARTSUPP" in filepaths and "SUPPLIER" in filepaths:
schema += "ALTER TABLE PARTSUPP ADD FOREIGN KEY (PS_SUPPKEY) REFERENCES SUPPLIER(S_SUPPKEY);\n"
if "CUSTOMER" in filepaths and "NATION" in filepaths:
schema += "ALTER TABLE CUSTOMER ADD FOREIGN KEY (C_NATIONKEY) REFERENCES NATION(N_NATIONKEY);\n"
if "ORDERS" in filepaths and "CUSTOMER" in filepaths:
schema += "ALTER TABLE ORDERS ADD FOREIGN KEY (O_CUSTKEY) REFERENCES CUSTOMER(C_CUSTKEY);\n"
if "LINEITEM" in filepaths and "ORDERS" in filepaths:
schema += "ALTER TABLE LINEITEM ADD FOREIGN KEY (L_ORDERKEY) REFERENCES ORDERS(O_ORDERKEY);\n"
if "LINEITEM" in filepaths and "PARTSUPP" in filepaths:
schema += "ALTER TABLE LINEITEM ADD FOREIGN KEY (L_PARTKEY,L_SUPPKEY) REFERENCES PARTSUPP(PS_PARTKEY,PS_SUPPKEY);\n"
if "NATION" in filepaths and "REGION" in filepaths:
schema += "ALTER TABLE NATION ADD FOREIGN KEY (N_REGIONKEY) REFERENCES REGION(R_REGIONKEY);\n"
schemafilelocation = os.path.join(outputdir, "schema_internal.sql")
with open(schemafilelocation, "w") as schemafile:
schemafile.write(schema)
print("Created " + schemafilelocation)
def generateExternalSchema(choices, filepathsTables, filepathsChoices, outputdir):
#Generate the base schema
schema = ""
for tablename, filepath in filepathsTables.items():
schema += schemaparts[tablename.upper()].format("", filepathsTables[tablename])
if "SUPPLIER" in filepathsTables and "NATION" in filepathsTables:
schema += "ALTER TABLE SUPPLIER ADD FOREIGN KEY (S_NATIONKEY) REFERENCES NATION(N_NATIONKEY);\n"
if "PARTSUPP" in filepathsTables and "PART" in filepathsTables:
schema += "ALTER TABLE PARTSUPP ADD FOREIGN KEY (PS_PARTKEY) REFERENCES PART(P_PARTKEY);\n"
if "PARTSUPP" in filepathsTables and "SUPPLIER" in filepathsTables:
schema += "ALTER TABLE PARTSUPP ADD FOREIGN KEY (PS_SUPPKEY) REFERENCES SUPPLIER(S_SUPPKEY);\n"
if "CUSTOMER" in filepathsTables and "NATION" in filepathsTables:
schema += "ALTER TABLE CUSTOMER ADD FOREIGN KEY (C_NATIONKEY) REFERENCES NATION(N_NATIONKEY);\n"
if "ORDERS" in filepathsTables and "CUSTOMER" in filepathsTables:
schema += "ALTER TABLE ORDERS ADD FOREIGN KEY (O_CUSTKEY) REFERENCES CUSTOMER(C_CUSTKEY);\n"
if "LINEITEM" in filepathsTables and "ORDERS" in filepathsTables:
schema += "ALTER TABLE LINEITEM ADD FOREIGN KEY (L_ORDERKEY) REFERENCES ORDERS(O_ORDERKEY);\n"
if "LINEITEM" in filepathsTables and "PARTSUPP" in filepathsTables:
schema += "ALTER TABLE LINEITEM ADD FOREIGN KEY (L_PARTKEY,L_SUPPKEY) REFERENCES PARTSUPP(PS_PARTKEY,PS_SUPPKEY);\n"
if "NATION" in filepathsTables and "REGION" in filepathsTables:
schema += "ALTER TABLE NATION ADD FOREIGN KEY (N_REGIONKEY) REFERENCES REGION(R_REGIONKEY);\n"
#Generate the schema for our choice tables.
for choiceTablePath, infoTuple in filepathsChoices.items():
foreignTableName = infoTuple[0]
columnName = infoTuple[1]
density = infoTuple[2]
#Create the name for our new table
choiceTableName = foreignTableName + "_" + columnName + "_" + str(density)
schema += "\n" + (" "*8) + "BEGIN;\n"
schema += (" "*12) + "CREATE TABLE " + choiceTableName + " (\n"
#Create the primary key column definitions
primaryKeyNames = getPrimaryKeyNames(foreignTableName)
for primaryKeyPart in primaryKeyNames:
schema += (" "*16) + primaryKeyPart + ",\n"
#Add our choice column
schema += (" "*16) + columnName + "_CHOICE INTEGER,\n"
#Get just the name, eg `C_CUSTKEY` (from `C_CUSTKEY BIGINT NOT NULL`)
#and join it with any other parts that form the primary key
#Also useful for defining foreign key
joinedNames = ",".join([name.split(" ")[0] for name in primaryKeyNames])
#Add the primaryKey definition (referencing the primary key columns)
schema += (" "*24) + "PRIMARY KEY (" + joinedNames + ")\n"
#Add the copy command
schema += (" "*12) + ");\n"
schema += (" "*12) + "COPY " + choiceTableName + " FROM '" + choiceTablePath + "' "
schema += "WITH (FORMAT csv, DELIMITER '|');\n"
#Commit and alter the table to add FK
schema += (" "*8) + "COMMIT;\n"
schema += "ALTER TABLE " + choiceTableName + " ADD FOREIGN KEY (" + joinedNames + ") "
schema += "REFERENCES " + foreignTableName + "(" + joinedNames + ");\n"
schemafilelocation = os.path.join(outputdir, "schema_external.sql")
with open(schemafilelocation, "w") as schemafile:
schemafile.write(schema)
print("Created " + schemafilelocation)
args = parseArguments()
configfile = args[0]
datadir = args[1]
outputdir = args[2]
config = parseConfig(configfile)
generating = config[0]
choices = config[1]
if generating == "internal" or generating == "both":
internalFilepaths = createCSVInternal(datadir, outputdir, choices)
generateInternalSchema(choices, internalFilepaths, outputdir)
if generating == "external" or generating == "both":
externalFilepathsTuple = createCSVExternalMultiple(datadir, outputdir, choices)
externalFilepathsTables = externalFilepathsTuple[0]
externalFilepathsChoiceTables = externalFilepathsTuple[1]
generateExternalSchema(choices, externalFilepathsTables, externalFilepathsChoiceTables, outputdir)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .models import Person
from django.shortcuts import render
# Create your views here.
def index(request):
"""
This function takes a request and renders an html in templates for main page.
:param request: A request
:return: a render
"""
people = Person.objects.all()
return render(request, 'index.html', {'people': people})
|
import json
import logging
from typing import List, Optional, Tuple
from sqlalchemy.orm import Session
from telegram.bot import Bot
from app import crud
from app.database import get_db
from app.settings import settings
from app.web import post_async
bot = Bot(token=settings.telegram_bot_token)
TELEGRAM_SET_WEBHOOK_URL = "https://api.telegram.org/bot{token}/setWebhook"
VALID_COMMANDS = ["start", "sub", "unsub", "registered"]
async def set_telegram_webhook_url():
token = settings.telegram_bot_token
data = {"url": f"{settings.telegram_webhook_host}/webhook/{token}"}
res = await post_async(TELEGRAM_SET_WEBHOOK_URL.format(token=token), data)
if res.status_code == 200:
logging.info("Successfully set Telegram webhook URL")
return
content = json.loads(res.content.decode("utf-8"))
if content.get("description") == "Webhook is already set":
return
raise Exception("Failed to set Telegram webhook URL", content)
def send_message(chat_id: int, message: str, parse_mode: Optional[str] = "Markdown"):
bot.send_message(
chat_id, message, parse_mode=parse_mode, disable_web_page_preview=True
)
def handle_commands(chat_id: int, text: str):
command, args = _parse_command(text)
if not command:
send_message(chat_id, "Unrecognized command 😿")
db = next(get_db())
if command == "start":
send_message(chat_id, "hello frens!")
elif command == "sub":
_handle_sub(db, chat_id, args)
elif command == "unsub":
_handle_unsub(db, chat_id, args)
elif command == "registered":
_handle_registered(db, chat_id)
def _parse_command(text: str) -> Tuple[Optional[str], List[str]]:
# Returns command and args if valid, otherwise None
tokens = [token for token in text.strip().split() if token != ""]
if len(tokens) == 0:
return None, []
command = tokens[0].strip("/")
if "@" in command:
command = command.split("@")[0]
if command in VALID_COMMANDS:
return command, tokens[1:]
return None, []
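# Examples (illustrative): "/sub foo bar" -> ("sub", ["foo", "bar"]),
# "/sub@SomeBot foo" -> ("sub", ["foo"]), "/frobnicate" -> (None, []).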
def _handle_sub(db: Session, chat_id: int, args: List[str]):
if len(args) == 0:
send_message(chat_id, "Please provide a keyword to subscribe to")
return
keyword = " ".join(args)
alert = crud.add_contract_alert(db, keyword, chat_id)
if alert:
send_message(chat_id, f"Added alert for `{keyword}`")
else:
send_message(chat_id, f"Alert already exists for `{keyword}`")
def _handle_unsub(db: Session, chat_id: int, args: List[str]):
if len(args) == 0:
send_message(chat_id, "Please provide a keyword to unsubscribe to")
return
keyword = " ".join(args)
removed = crud.remove_contract_alert(db, keyword, chat_id)
if removed:
send_message(chat_id, f"Removed alert for `{keyword}`")
else:
send_message(chat_id, f"No alert exists for `{keyword}`")
def _handle_registered(db: Session, chat_id: int):
alerts = crud.get_registered_contract_alerts(db, chat_id)
keywords = [alert.keyword for alert in alerts]
if len(keywords) > 0:
keyword_str = ", ".join(keywords)
send_message(chat_id, f"Current alerts: `[{keyword_str}]`")
else:
send_message(chat_id, f"No alerts registered in this chat")
|
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import TemplateView
admin.autodiscover()
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
# Examples:
# url(r'^$', 'mendelmd.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.upload, name='upload'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
def rule(event):
return (event['eventName'] == 'ConsoleLogin' and
event['userIdentity'].get('type') == 'IAMUser' and
event.get('responseElements', {}).get('ConsoleLogin') == 'Failure')
def dedup(event):
return event['userIdentity'].get('arn')
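# An event shape that would match this rule (illustrative values only):
# {
#     "eventName": "ConsoleLogin",
#     "userIdentity": {"type": "IAMUser", "arn": "arn:aws:iam::123456789012:user/alice"},
#     "responseElements": {"ConsoleLogin": "Failure"},
# }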
|
from django.db import models
from hashid_field import HashidAutoField, HashidField
try:
from django.urls import reverse
except ImportError:
    from django.core.urlresolvers import reverse  # Django < 1.10
class Author(models.Model):
id = HashidAutoField(primary_key=True)
name = models.CharField(max_length=40)
uid = models.UUIDField(null=True, blank=True)
def __str__(self):
return self.name
class Editor(models.Model):
id = HashidAutoField(primary_key=True, salt="A different salt", min_length=20)
name = models.CharField(max_length=40)
def __str__(self):
return self.name
class Book(models.Model):
name = models.CharField(max_length=40)
author = models.ForeignKey(Author, on_delete=models.CASCADE, null=True, blank=True, related_name='books')
reference_id = HashidField(salt="alternative salt", allow_int_lookup=True)
key = HashidField(min_length=10, alphabet="abcdlmnotuvwxyz0123789", null=True, blank=True)
some_number = models.IntegerField(null=True, blank=True)
editors = models.ManyToManyField(Editor, blank=True)
def get_absolute_url(self):
return reverse("library:book-detail", kwargs={'pk': self.pk})
def __str__(self):
return "{} ({})".format(self.name, self.reference_id)
|
import sys
import unittest
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../src'))
from RtmTokenBuilder import *
from AccessToken import *
appID = "970CA35de60c44645bbae8a215061b33"
appCertificate = "5CFd2fd1755d40ecb72977518be15d3b"
userAccount = "test_user"
expireTimestamp = 1446455471
salt = 1
ts = 1111111
class RtmTokenBuilderTest(unittest.TestCase):
def test_(self):
token = RtmTokenBuilder.buildToken(appID, appCertificate, userAccount, Role_Rtm_User, expireTimestamp)
parser = AccessToken()
parser.fromString(token)
self.assertEqual(parser.messages[kRtmLogin], expireTimestamp)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from abc import ABCMeta, abstractmethod
from prettytable import PrettyTable
def print_progress_bar(iteration: int, total: int, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filled_length = int(length * iteration // total)
bar = fill * filled_length + '-' * (length - filled_length)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
if iteration == total:
print()
def line_plot(x_data, y_data, x_value, y_value, x_label="", y_label="", title=""):
    '''
    Plot the function values and the forecast
    '''
_, ax = plt.subplots()
    # the function itself
    ax.plot(x_data, y_data, lw = 2, color = '#539caf', alpha = 0.5)
    # discrete values used for training (first half of the points)
    ax.scatter(x_data[:(len(x_data) // 2 + 1)],
               y_data[:(len(y_data) // 2 + 1)],
               marker='o', s=15, c='g', alpha=0.6)
    # forecast values
    ax.scatter(x_value, y_value, marker='o', s=15, c='r', alpha=0.6)
# Label the axes and provide a title
ax.set_title(title)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
plt.grid()
plt.show()
# Abstract activation function class
class Func(metaclass=ABCMeta):
@abstractmethod
    def compute(self, income: float):
        """Value of the activation function"""
    @abstractmethod
    def derivative(self, income: float):
        """Derivative of the function"""
    @abstractmethod
    def get_id(self):
        """Function ID"""
# Linear (identity) activation function: f(net) = net
class AsIs(Func):
def compute(self, income: float):
return income
def derivative(self, income: float):
return 1
def get_id(self):
return 1
assert issubclass(AsIs, Func)
assert isinstance(AsIs(), Func)
# Neuron class
class Neuron:
    def __init__(self, count: int, function: Func, bias = 1.):
        self.weights = np.random.rand(count + 1)
        self.f = function # activation function
        self.bias = bias # bias input
        self.output = 0. # neuron output
    # Compute the neuron output
    def compute(self, inputs: list):
assert len(self.weights) == len(inputs) + 1
# net = Sum Wi*Xi
net = np.dot(self.weights[1:], inputs) + self.bias * self.weights[0]
self.output = self.f.compute(net)
return self.output
    # Adjust the weights according to the Widrow-Hoff rule (delta rule)
    def correct_weights(self, learning_rate: float, local_error: float, inputs: list):
assert learning_rate > 0. and learning_rate <= 1.
# Wi = Wi + n*d*Xi
self.weights += np.append([self.bias], inputs) * (learning_rate * local_error)
def generate_time_points(a: float, b: float, count: int):
return list(np.linspace(a, b, count))
# Time function to be forecast
def function(t: float):
# return 0.5 * np.sin(0.5 * t) - 0.5
return 0.4 * np.sin(0.3 * t) + 0.5
def neuron_training(neuron: Neuron, learning_set: list, era_count: int, learning_rate: float):
window_size = len(neuron.weights) - 1
for _ in range(era_count):
last_index = len(learning_set) - window_size - 1
for i in range(last_index):
window = learning_set[i:i + window_size]
next_x = neuron.compute(window)
local_error = learning_set[i + window_size] - next_x
            neuron.correct_weights(learning_rate, local_error, window)
def get_forecast(neuron: Neuron, last_values: list, points: list):
window_size = len(neuron.weights) - 1
values = last_values[:]
for i in range(len(points)):
window = values[i:i + window_size]
values.append(neuron.compute(window))
return values[window_size:]
def compute_total_standard_error(real: list, received: list):
assert len(real) == len(received)
error_vector = (np.array(real) - np.array(received)) ** 2
return np.sqrt(error_vector.sum())
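# Example: compute_total_standard_error([1.0, 2.0], [1.0, 1.0]) == 1.0,
# since E = sqrt(sum((real_i - received_i)**2)) = sqrt(0 + 1).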
def research(points: list, real_values: list, N: int):
ptb = PrettyTable()
    ptb.field_names = ['Epoch count', 'Window size', 'Learning rate', 'Total error']
i = 0
k = 10*7*20
print_progress_bar(i, k, prefix='Research:', suffix='Complete', length=50)
for M in range(20000, 0, -2000):
for ws in range(2, 9, 1):
lr = 0.05
while lr < 1.05:
neuron = Neuron(ws, AsIs())
neuron_training(neuron, real_values[:N], M, round(lr, 2))
predicted_values = get_forecast(neuron, real_values[N-ws:N], points[N:])
E = compute_total_standard_error(real_values[N:], predicted_values)
ptb.add_row([M, ws, round(lr, 2), E])
lr += 0.05; i += 1
print_progress_bar(i, k, prefix='Research:', suffix='Complete', length=50)
print_progress_bar(i, k, prefix='Research:', suffix='Complete', length=50)
return ptb
if __name__ == "__main__":
    a = -4 # left boundary of the training interval of t
    b = 4 # right boundary of the training interval of t
    c = 2 * b - a # right boundary of the forecast interval of t
    N = 20 # number of evenly spaced points on [a,b]
    window_size = int(input('Enter the sliding window length: '))
    learning_rate = float(input('Enter the learning rate: '))
    number_of_eras = int(input('Enter the number of training epochs: '))
    # points on the interval [a,c]
    points = generate_time_points(a, c, 2 * N)
    # values of the forecast function at the points of [a,c]
    real_values = [function(t) for t in points]
    # points on the interval [a,b]
    learning_points = points[:N]
    # values of the forecast function at the points of [a,b]
    learning_values = real_values[:N]
neuron = Neuron(window_size, AsIs())
neuron_training(neuron, learning_values, number_of_eras, learning_rate)
    # points on the interval (b, c = 2b-a]
    predicted_points = points[N:]
    # values of the last training window, from which the forecast starts
    last_values = learning_values[N-window_size:]
    # forecast values
    predicted_values = get_forecast(neuron, last_values, predicted_points)
    # total root-mean-square error of the forecast
    E = compute_total_standard_error(real_values[N:], predicted_values)
    # weight vector
    print(neuron.weights)
    title = f"Window size: {window_size} Learning rate: {learning_rate}\nEpoch count: {number_of_eras} RMS error: {round(E, 4)}"
    line_plot(points, real_values, predicted_points, predicted_values, 'x', 'X(t)', title)
    # study how the error depends on the epoch count, window size, and learning rate
    tb = research(points, real_values, N)
    # write the results to a file
with open('res.txt', 'w') as f:
f.write(tb.get_string())
print(tb)
|
import re
import time
import datetime
import pandas as pd
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.common.keys import Keys
from airflow import DAG
from airflow.models import Variable
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
import sendemail
from datetime import timedelta
args = {
'owner': 'Glynn Family',
}
dag = DAG(
dag_id='Permit_Scrape',
default_args=args,
schedule_interval='13 */12 * * *',
start_date=days_ago(1),
catchup=False,
dagrun_timeout=timedelta(minutes=180),
tags=['Permits', 'Web Scraping'],
)
# [START permit_scrape]
def permit_scrape():
permit_file = Variable.get("PERMIT_FILE")
ffx_driver = Variable.get("FIREFOX_DRIVER_PATH")
df = pd.read_csv(permit_file)
url = Variable.get("PERMIT_URL")
options = FirefoxOptions()
options.add_argument("--headless")
    user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
    options.add_argument(f'user-agent={user_agent}')
print('starting webdriver')
driver = webdriver.Firefox(executable_path=ffx_driver, options=options)
driver.set_window_size(1920, 1080)
driver.get(url)
# Bypass splash page
driver.find_element_by_xpath("//span[contains(@id, 'cbNewCheckbox')]").click()
driver.find_element_by_xpath("//input[contains(@name, 'btnEnterSite')]").click()
time.sleep(2)
Email_Text = ''
for i, row in df.iterrows():
print(row['Neighborhood'] + ' - ' + row['Property Address'])
searchbar = driver.find_element_by_xpath("//input[contains(@id, 'Gpin')]")
searchbar.clear()
searchbar.send_keys(row['GPIN'])
searchbar.send_keys(Keys.RETURN)
time.sleep(4)
infobox = driver.find_element_by_xpath("//div[contains(@id, 'info')]")
topbox = infobox.find_element_by_xpath(".//div")
topleftbox = topbox.find_element_by_xpath(".//tbody")
topleftfirstline = topleftbox.find_element_by_xpath(".//tr")
owner_name = topleftfirstline.find_elements_by_xpath(".//td")[1]
if owner_name.text == row['Owner']:
pass
else:
print('New Owner: ' + owner_name.text)
df.at[i, 'Owner'] = str(owner_name.text)
bottombox = infobox.find_elements_by_xpath(".//div")[3]
sale_date = bottombox.find_elements_by_xpath(".//td")[1]
print('Updating sale date to: ' + sale_date.text)
df.at[i, 'Sale Date'] = str(sale_date.text)
Email_Text += '\n' + str(row['Property Address']) + ' - New Owner: ' + owner_name.text + ' on ' + sale_date.text
        driver.find_element_by_partial_link_text("Permits").click()
time.sleep(0.5)
table_header = 0
permit_table = driver.find_element_by_xpath("//div[contains(@id, 'Permits')]")
permit_rows = permit_table.find_elements_by_xpath(".//tr")
for permit in permit_rows:
if table_header == 0:
table_header = 1
else:
permit_type = permit.find_elements_by_xpath(".//td")[6].text
permit_status = permit.find_elements_by_xpath(".//td")[2].text
if permit_type == 'Building':
permit_date = permit.find_elements_by_xpath(".//td")[4].text
                    cur_y = int(re.search(r'\d+/\d+/(\d+)', permit_date).group(1))
                    cur_d = int(re.search(r'\d+/(\d+)/\d+', permit_date).group(1))
                    cur_m = int(re.search(r'(\d+)/\d+/\d+', permit_date).group(1))
cur_date = datetime.datetime(cur_y, cur_m, cur_d)
if row['Permit Date'] != "Unk":
                        prev_y = int(re.search(r'\d+/\d+/(\d+)', row['Permit Date']).group(1))
                        prev_d = int(re.search(r'\d+/(\d+)/\d+', row['Permit Date']).group(1))
                        prev_m = int(re.search(r'(\d+)/\d+/\d+', row['Permit Date']).group(1))
prev_date = datetime.datetime(prev_y, prev_m, prev_d)
else:
prev_date = datetime.datetime(2000, 1, 1)
if prev_date == cur_date:
if permit_status == row['Permit Status']:
pass
else:
df.at[i, 'Permit Status'] = str(permit_status)
print('Updating permit status to ' + permit_status)
Email_Text += '\n' + str(row['Property Address']) + ' - New Permit Status: ' + str(permit_status) + ' on ' + str(permit_date)
elif prev_date > cur_date:
pass
else:
df.at[i, 'Permit Date'] = str(permit_date)
print('Updating permit date to ' + permit_date)
if permit_status == row['Permit Status']:
pass
else:
df.at[i, 'Permit Status'] = str(permit_status)
print('Updating permit status to ' + permit_status)
Email_Text += '\n' + str(row['Property Address']) + ' - New Permit Status: ' + str(permit_status) + ' on ' + str(permit_date)
        driver.find_element_by_xpath('//a[@href="#searchtab"]').click()
time.sleep(0.5)
df.to_csv(permit_file, index=False)
if len(Email_Text) > 0:
sendemail.sendEmail(Variable.get("PERMIT_EMAIL"), Variable.get("PERMIT_RECIPIENTS"), Variable.get("PERMIT_SUBJECT"), Email_Text )
permit_scrape = PythonOperator(
task_id='permit_scrape',
python_callable=permit_scrape,
dag=dag,
)
# [END permit_scrape]
permit_scrape
|
"""
Author: Arthur Wesley
"""
from unittest import TestCase
import sente
import numpy as np
from assert_does_not_raise import DoesNotRaiseTestCase
class TestBasicMethods(DoesNotRaiseTestCase):
def test_constructor(self):
"""
checks that all possible constructors work
:return:
"""
with self.assertDoesNotRaise(Exception):
sente.Game()
sente.Game(13)
sente.Game(19)
sente.Game(9, sente.rules.CHINESE)
sente.Game(rules=sente.rules.CHINESE)
sente.Game(rules=sente.rules.JAPANESE)
sente.Game(rules=sente.rules.JAPANESE, board_size=13)
with self.assertRaises(ValueError):
sente.Game(15, sente.rules.JAPANESE)
def test_play(self):
"""
checks to see that the syntax is valid
:return:
"""
game = sente.Game()
with self.assertDoesNotRaise(Exception):
game.play(3, 15, sente.stone.BLACK)
game.play(15, 15)
with self.assertRaises(sente.exceptions.IllegalMoveException):
game.play(3, 15)
def test_get_active_player(self):
"""
tests to see if the get_active_player() method works
:return:
"""
game = sente.Game()
self.assertEqual(sente.stone.BLACK, game.get_active_player())
game.play(4, 4)
self.assertEqual(sente.stone.WHITE, game.get_active_player())
def test_score_unfinished_game(self):
"""
try to score an unfinished game
:return:
"""
game = sente.Game()
with self.assertRaises(ValueError):
game.score()
with self.assertRaises(ValueError):
game.get_winner()
def test_score_resigned_game(self):
"""
try to score a game that has been resigned
:return:
"""
game = sente.Game()
game.resign()
with self.assertRaises(ValueError):
game.score()
with self.assertDoesNotRaise(ValueError):
game.get_winner()
def test_get_legal_moves(self):
"""
        tests to see if the legal moves are counted correctly
:return:
"""
game = sente.Game(9)
self.assertEqual(83, len(game.get_legal_moves()))
def test_score_empty_game(self):
"""
tests to see if we can score an empty game
:return:
"""
game = sente.Game()
game.pss()
game.pss()
with self.assertDoesNotRaise(Exception):
game.score()
def test_get_point(self):
"""
tests to see if the get point() method works
:return:
"""
game = sente.Game()
game.play(4, 5)
self.assertEqual(game.get_point(4, 5), sente.stone.BLACK)
with self.assertRaises(IndexError):
game.get_point(20, 20)
with self.assertRaises(IndexError):
game.get_point(30, 30)
def test__str__(self):
"""
tests to see if __str__ can be called on the object
:return: None
"""
game = sente.Game()
self.assertEqual(str(game.get_board()), str(game))
class TestMetadata(TestCase):
def test_add_metadata(self):
"""
tests to see if metadata can be added to a game
:return:
"""
game = sente.Game()
game.set_property("C", "This is a comment")
self.assertEqual(game.comment, "This is a comment")
def test_non_root_metadata(self):
"""
tests to see if non root metadata is only available at the node it is put at
:return:
"""
game = sente.Game()
game.comment = "TILL THEN I WALK ALONG"
game.play(4, 4)
self.assertEqual(game.comment, "")
game.step_up()
self.assertEqual(game.comment, "TILL THEN I WALK ALONG")
def test_add_not_at_root(self):
"""
tests to see if root attributes can be added at places other than the root
:return:
"""
game = sente.Game()
game.play(4, 4)
game.set_property("AP", "Sente")
self.assertEqual(game.get_properties()["AP"], "Sente")
game.step_up()
self.assertEqual(game.get_properties()["AP"], "Sente")
def test_closing_bracket_backslash_invisible(self):
"""
        makes sure that the backslash SGF inserts to escape brackets in a field is not visible in the comment
:return:
"""
game = sente.Game()
game.set_property("C", "[]")
self.assertEqual(game.comment, "[]")
class TestTreeNavigation(TestCase):
def test_advance_to_root(self):
"""
tests to see if the reset_to_root method works
:return:
"""
game = sente.Game()
game.play(3, 3)
game.play(3, 4)
game.play(4, 3)
game.play(4, 4)
game.play(3, 15)
game.play(15, 3)
game.play(15, 15)
game.advance_to_root()
self.assertEqual(sente.stone.EMPTY, game.get_point(3, 3))
self.assertEqual(sente.stone.EMPTY, game.get_point(3, 4))
self.assertEqual(sente.stone.EMPTY, game.get_point(4, 3))
self.assertEqual(sente.stone.EMPTY, game.get_point(4, 4))
self.assertEqual(sente.stone.EMPTY, game.get_point(3, 15))
self.assertEqual(sente.stone.EMPTY, game.get_point(15, 3))
self.assertEqual(sente.stone.EMPTY, game.get_point(15, 15))
def test_undo_move(self):
"""
tests to see if moves can be undone
:return:
"""
game = sente.Game()
# create a basic tree
game.play(3, 3)
game.play(3, 15)
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
self.assertEqual(sente.stone.WHITE, game.get_point(3, 15))
game.step_up()
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
self.assertEqual(sente.stone.EMPTY, game.get_point(3, 15))
def test_undo_redo(self):
"""
        tests to see if a move can be undone and then replayed
:return:
"""
game = sente.Game()
# create a basic tree
game.play(3, 3)
game.play(3, 15)
game.step_up()
game.play(3, 15)
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
self.assertEqual(sente.stone.WHITE, game.get_point(3, 15))
def test_undo_multiple(self):
"""
tests to see if multiple undos works
:return:
"""
game = sente.Game()
game.play(3, 3)
game.play(3, 15)
game.play(15, 3)
game.play(15, 15)
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
self.assertEqual(sente.stone.WHITE, game.get_point(3, 15))
self.assertEqual(sente.stone.BLACK, game.get_point(15, 3))
self.assertEqual(sente.stone.WHITE, game.get_point(15, 15))
game.step_up(3)
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
self.assertEqual(sente.stone.EMPTY, game.get_point(3, 15))
self.assertEqual(sente.stone.EMPTY, game.get_point(15, 3))
self.assertEqual(sente.stone.EMPTY, game.get_point(15, 15))
def test_simple_fork(self):
"""
tests to see if the tree can navigate a simple fork
:return:
"""
game = sente.Game()
game.play(3, 3)
game.play(3, 15)
game.step_up()
game.play(15, 3)
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
self.assertEqual(sente.stone.WHITE, game.get_point(15, 3))
self.assertEqual(sente.stone.EMPTY, game.get_point(3, 15))
# switch back to the other branch
game.step_up()
game.play(3, 15)
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
self.assertEqual(sente.stone.EMPTY, game.get_point(15, 3))
self.assertEqual(sente.stone.WHITE, game.get_point(3, 15))
def test_get_branches(self):
"""
tests to see if the get_branches() method works
:return:
"""
game = sente.Game()
game.play(3, 3)
game.play(3, 15)
game.step_up()
game.play(15, 3)
game.step_up()
game.play(15, 15)
game.step_up()
branches = game.get_branches()
self.assertEqual(3, len(branches))
self.assertIn(sente.Move(14, 2, sente.stone.WHITE), branches)
self.assertIn(sente.Move(14, 14, sente.stone.WHITE), branches)
self.assertIn(sente.Move(2, 14, sente.stone.WHITE), branches)
def test_play_moves(self):
"""
tests to see if the play_moves method is working
:return:
"""
game = sente.Game()
moves = [sente.Move(2, 2, sente.stone.BLACK), sente.Move(4, 4, sente.stone.WHITE), sente.Move(6, 6, sente.stone.BLACK)]
game.play_sequence(moves)
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
self.assertEqual(sente.stone.WHITE, game.get_point(5, 5))
self.assertEqual(sente.stone.BLACK, game.get_point(7, 7))
def test_get_move_sequence(self):
"""
checks to see if the game generates the correct sequence of moves
:return:
"""
game = sente.Game()
moves = [sente.Move(3, 3, sente.stone.BLACK), sente.Move(5, 5, sente.stone.WHITE), sente.Move(7, 7, sente.stone.BLACK)]
game.play_sequence(moves)
self.assertEqual(moves, game.get_current_sequence())
def test_illegal_move_sequence(self):
"""
checks to see if playing an illegal sequence of moves does not play a single move
:return:
"""
game = sente.Game()
moves = [sente.Move(3, 3, sente.stone.BLACK), sente.Move(5, 5, sente.stone.WHITE), sente.Move(3, 3, sente.stone.BLACK)]
with self.assertRaises(sente.exceptions.IllegalMoveException):
game.play_sequence(moves)
self.assertEqual(sente.stone.EMPTY, game.get_point(3, 3))
def test_get_default_sequence_root(self):
"""
tests to see if the game correctly obtains the default sequence of moves
:return:
"""
game = sente.Game()
game.play(4, 4)
game.play(16, 4)
game.play(4, 16)
game.advance_to_root()
default_branch = game.get_default_sequence()
moves = [sente.Move(3, 3, sente.stone.BLACK), sente.Move(15, 3, sente.stone.WHITE), sente.Move(3, 15, sente.stone.BLACK)]
self.assertEqual(moves, default_branch)
def test_get_default_sequence_child(self):
"""
makes sure that the "default sequence" refers to the sequence of moves *continuing* from the current node
:return:
"""
game = sente.Game()
game.play(4, 4)
game.play(16, 4)
game.play(4, 16)
game.step_up()
default_branch = game.get_default_sequence()
moves = [sente.Move(3, 15, sente.stone.BLACK)]
self.assertEqual(moves, default_branch)
def test_get_all_sequences(self):
"""
tests the get_all_sequences() method
:return:
"""
game = sente.Game()
game.play(4, 4)
game.play(4, 16)
game.advance_to_root()
game.play(16, 4)
game.play(16, 16)
game.advance_to_root()
sequences = game.get_all_sequences()
self.assertEqual([[sente.Move(3, 3, sente.stone.BLACK), sente.Move(3, 15, sente.stone.WHITE)],
[sente.Move(15, 3, sente.stone.BLACK), sente.Move(15, 15, sente.stone.WHITE)]], sequences)
def test_get_all_sequences_does_not_move(self):
"""
        makes sure that calling the get_all_sequences() method does not move the current position in the game tree
:return:
"""
game = sente.Game()
game.play(4, 4)
game.play(4, 16)
game.advance_to_root()
game.play(16, 4)
game.play(16, 16)
game.step_up()
sequences = game.get_all_sequences()
# we should only get one branch
self.assertEqual([[sente.Move(15, 15, sente.stone.WHITE)]], sequences)
# and that branch should not have been played
self.assertEqual(sente.stone.BLACK, game.get_point(16, 4))
self.assertEqual(sente.stone.EMPTY, game.get_point(16, 16))
def test_is_at_root(self):
"""
tests to see if the is at root method works
:return:
"""
game = sente.Game()
self.assertTrue(game.is_at_root())
game.play(4, 4)
self.assertFalse(game.is_at_root())
game.play(3, 3)
game.step_up()
self.assertFalse(game.is_at_root())
game.step_up()
self.assertTrue(game.is_at_root())
def test_resign(self):
"""
tests to see if resignation works
:return:
"""
game = sente.Game()
game.resign()
self.assertEqual(sente.stone.WHITE, game.get_winner())
def test_get_children(self):
"""
        tests to see if the get_branches() method lists the children of the current node
:return:
"""
game = sente.Game()
self.assertEqual(game.get_branches(), [])
game.play(3, 3)
game.step_up()
game.play(2, 3)
game.step_up()
self.assertEqual(game.get_branches(), [sente.Move(2, 2, sente.stone.BLACK), sente.Move(1, 2, sente.stone.BLACK)])
def test_step_up_0_steps(self):
"""
check to see if we can step up once
:return:
"""
game = sente.Game()
game.play(3, 3)
game.step_up(0)
self.assertEqual(sente.stone.BLACK, game.get_point(3, 3))
def test_illegal_step_up(self):
"""
tests to see if we get a value error when we try to step up past the root node
:return:
"""
game = sente.Game()
game.play(3, 3)
with self.assertRaises(ValueError):
game.step_up(2)
def test_undo_resign(self):
"""
tests to see if resignation can be undone
:return:
"""
game = sente.Game()
game.resign()
self.assertTrue(game.is_legal(3, 3))
def test_undo_double_pass(self):
"""
        tests to see if a double pass can be undone
:return:
"""
game = sente.Game()
game.pss()
game.pss()
game.advance_to_root()
self.assertTrue(game.is_legal(3, 3))
def test_resign_move(self):
"""
checks to see if the resign move object causes resignation
:return:
"""
game = sente.Game()
game.play(sente.moves.Resign(sente.stone.BLACK))
self.assertTrue(game.is_over())
def test_comment_write(self):
"""
tests to see if comments can be added and stored
:return:
"""
game = sente.Game()
game.comment = "This is the Root"
self.assertEqual(game.comment, "This is the Root")
def test_comment_long_term(self):
"""
tests to see if comments persist after moves are undone and other comments are set
:return:
"""
game = sente.Game()
game.comment = "This is the Root"
game.play(4, 4)
game.comment = "this is the first branch"
game.step_up()
game.play(16, 4)
game.comment = "this is the second branch"
game.advance_to_root()
self.assertEqual(game.comment, "This is the Root")
game.play(4, 4)
self.assertEqual(game.comment, "this is the first branch")
game.step_up()
game.play(16, 4)
self.assertEqual(game.comment, "this is the second branch")
def test_comment_override(self):
"""
tests to see if comments can be overridden
:return:
"""
game = sente.Game()
game.comment = "this is the original string"
game.comment = "this is the new string"
self.assertEqual("this is the new string", game.comment)
def test_comment_brackets(self):
"""
tests to see if comments can have brackets in them
:return:
"""
game = sente.Game()
game.comment = "here are some brackets [[]] ]["
self.assertEqual("here are some brackets [[]] ][", game.comment)
def test_backslashes_not_ignored(self):
"""
tests to see if backslashes in comments are legal
:return:
"""
game = sente.Game()
game.comment = "here is a backslash \\"
self.assertEqual("here is a backslash \\", game.comment)
class TestNumpy(DoesNotRaiseTestCase):
def test_9x9_numpy(self):
game = sente.Game(9)
game.play(4, 4)
game.play(9, 4)
game.play(4, 9)
game.play(9, 9)
correct_board = np.zeros((9, 9, 4), dtype=np.uint8)
for i in range(9):
for j in range(9):
if game.get_point(i + 1, j + 1) == sente.stone.BLACK:
correct_board[i][j][0] = 1
elif game.get_point(i + 1, j + 1) == sente.stone.WHITE:
correct_board[i][j][1] = 1
                elif game.get_point(i + 1, j + 1) == sente.stone.EMPTY:
                    correct_board[i][j][2] = 1
        # the generated array should match the board
        self.assertTrue(np.array_equal(correct_board, game.numpy()))
def test_13x13_numpy(self):
game = sente.Game(13)
game.play(4, 4)
game.play(9, 4)
game.play(4, 9)
game.play(9, 9)
correct_board = np.zeros((13, 13, 4), dtype=np.uint8)
for i in range(13):
for j in range(13):
if game.get_point(i + 1, j + 1) == sente.stone.BLACK:
correct_board[i][j][0] = 1
elif game.get_point(i + 1, j + 1) == sente.stone.WHITE:
correct_board[i][j][1] = 1
                elif game.get_point(i + 1, j + 1) == sente.stone.EMPTY:
                    correct_board[i][j][2] = 1
        # the generated array should match the board
        self.assertTrue(np.array_equal(correct_board, game.numpy()))
def test_ko(self):
"""
tests to see if ko points are accurately recorded
:return:
"""
game = sente.Game()
game.play(2, 2)
game.play(2, 1)
game.play(3, 1)
game.play(1, 2)
game.play(1, 1)
ko_numpy = game.numpy(["ko_points"])
correct = np.zeros((19, 19, 1), dtype=np.uint8)
correct[1, 0, 0] = 1
self.assertTrue(np.array_equal(correct, ko_numpy))
def test_full_game(self):
"""
tests to see if a fully completed game matches the expected numpy array
:return:
"""
game = sente.sgf.load("sgf/Lee Sedol ladder game.sgf")
game.play_default_sequence()
numpy = game.numpy()
correct_board = np.zeros((19, 19, 4), dtype=np.uint8)
for i in range(19):
for j in range(19):
if game.get_point(i + 1, j + 1) == sente.stone.BLACK:
correct_board[i][j][0] = 1
elif game.get_point(i + 1, j + 1) == sente.stone.WHITE:
correct_board[i][j][1] = 1
elif game.get_point(i + 1, j + 1) == sente.stone.EMPTY:
correct_board[i][j][2] = 1
self.assertTrue(np.array_equal(correct_board, numpy))
|
"""
Dataset augmentation functionality
NOTE: This module is much more free to change than many other modules
in CleverHans. CleverHans is very conservative about changes to any
code that affects the output of benchmark tests (attacks, evaluation
methods, etc.). This module provides *dataset augmentation* code for
building models to be benchmarked, not *benchmarks,* and
thus is free to change rapidly to provide better speed, accuracy,
etc.
"""
import tensorflow as tf
# Convenient renaming of existing function
random_horizontal_flip = tf.image.random_flip_left_right
def random_shift(x, pad=(4, 4), mode='REFLECT'):
"""Pad a batch of images and then crop to the original size with a random
offset."""
assert mode in 'REFLECT SYMMETRIC CONSTANT'.split()
xp = tf.pad(x, [[pad[0], pad[0]], [pad[1], pad[1]], [0, 0]], mode)
return tf.random_crop(xp, tf.shape(x))
def batch_augment(x, func, device='/CPU:0'):
"""
    Apply dataset augmentation to a batch of examples.
:param x: Tensor representing a batch of examples.
:param func: Callable implementing dataset augmentation, operating on
a single image.
:param device: String specifying which device to use.
"""
with tf.device(device):
return tf.map_fn(func, x)
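# Example usage (illustrative): flip every image in a batch, mapping the
# per-image augmentation over the batch on the CPU:
#   augmented = batch_augment(images, random_horizontal_flip)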
def random_crop_and_flip(x, pad_rows=4, pad_cols=4):
"""Augment a batch by randomly cropping and horizontally flipping it."""
rows = tf.shape(x)[1]
cols = tf.shape(x)[2]
def _pad_img(img):
"""Pad an individual image"""
return tf.image.resize_image_with_crop_or_pad(img, rows + pad_rows,
cols + pad_cols)
def _rand_crop_img(img):
"""Randomly crop an individual image"""
channels = img.get_shape()[2]
return tf.random_crop(img, [rows, cols, channels])
def random_crop_and_flip_image(img):
"""Pad, randomly crop, and randomly flip an individual image"""
return tf.image.random_flip_left_right(_rand_crop_img(_pad_img(img)))
# Some of these ops are only on CPU.
# This function will often be called with the device set to GPU.
# We need to set it to CPU temporarily to avoid an exception.
with tf.device('/CPU:0'):
x = tf.map_fn(random_crop_and_flip_image, x)
return x
|
from django.core.urlresolvers import resolve, reverse
from rest_framework.test import APIRequestFactory, force_authenticate
from scrappyr.users.testing.factories import AdminUserFactory
def json_post_to_view(viewname, **kwargs):
"""Post json request to view given by viewname.
Args:
viewname (str): Name of view, which can be passed to Django's `reverse` function.
Kwargs:
Any key-value pairs required to resolve the view url.
"""
url = reverse(viewname, kwargs=kwargs)
request_factory = APIRequestFactory()
request = request_factory.post(url, content_type='application/json')
force_authenticate(request, user=AdminUserFactory(), token='test-token-1234')
view_func, args, kwargs = resolve(url)
return view_func(request, **kwargs)
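# Example (hypothetical view name and kwargs):
#   response = json_post_to_view('scrap-detail', pk=1)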
|
from __future__ import unicode_literals
import os
from django.conf import settings
from django.test import override_settings
from django.test import TestCase
from django.urls import clear_url_caches
from django.urls import reverse
from django.utils import translation
from django.utils._os import upath
from mock import patch
from kolibri.core.auth.test.helpers import clear_process_cache
from kolibri.utils.conf import OPTIONS
from kolibri.utils.tests.helpers import override_option
settings_override_dict = {
"USE_I18N": True,
"LANGUAGE_CODE": "en",
"LANGUAGES": [("en", "English"), ("fr-fr", "French")],
"MIDDLEWARE": [
"django.contrib.sessions.middleware.SessionMiddleware",
"kolibri.core.device.middleware.KolibriLocaleMiddleware",
"django.middleware.common.CommonMiddleware",
],
"ROOT_URLCONF": "kolibri.core.device.test.locale_middleware_urls",
"TEMPLATES": [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(os.path.dirname(upath(__file__)), "templates")],
}
],
}
prefixed_settings_override_dict = settings_override_dict.copy()
prefixed_settings_override_dict[
"ROOT_URLCONF"
] = "kolibri.core.device.test.prefixed_locale_middleware_urls"
def get_url(url):
return "/" + OPTIONS["Deployment"]["URL_PATH_PREFIX"].lstrip("/") + url
class URLTestCaseBase(TestCase):
"""
TestCase base-class for the URL tests.
"""
def setUp(self):
# Make sure the cache is empty before we are doing our tests.
clear_url_caches()
clear_process_cache()
def tearDown(self):
# Make sure we will leave an empty cache for other testcases.
clear_url_caches()
clear_process_cache()
class URLPrefixTestsBase(object):
"""
Tests if the `i18n_patterns` is adding the prefix correctly.
"""
def test_not_prefixed(self):
with translation.override("en"):
self.assertEqual(reverse("not-prefixed"), get_url("not-prefixed/"))
self.assertEqual(
reverse("not-prefixed-included-url"),
get_url("not-prefixed-include/foo/"),
)
with translation.override("fr-fr"):
self.assertEqual(reverse("not-prefixed"), get_url("not-prefixed/"))
self.assertEqual(
reverse("not-prefixed-included-url"),
get_url("not-prefixed-include/foo/"),
)
def test_prefixed(self):
with translation.override("en"):
self.assertEqual(reverse("prefixed"), get_url("en/prefixed/"))
with translation.override("fr-fr"):
self.assertEqual(reverse("prefixed"), get_url("fr-fr/prefixed/"))
with translation.override(None):
self.assertEqual(
reverse("prefixed"), get_url("%s/prefixed/") % settings.LANGUAGE_CODE
)
@override_settings(**settings_override_dict)
class URLPrefixTests(URLPrefixTestsBase, URLTestCaseBase):
pass
@override_settings(**prefixed_settings_override_dict)
@override_option("Deployment", "URL_PATH_PREFIX", "test/")
class PrefixedURLPrefixTests(URLPrefixTestsBase, URLTestCaseBase):
pass
class URLRedirectTestsBase(object):
"""
Tests if the user gets redirected to the right URL when there is no
language-prefix in the request URL.
"""
def test_no_prefix_response(self):
response = self.client.get(get_url("not-prefixed/"))
self.assertEqual(response.status_code, 200)
def test_en_prefixed_redirect(self):
response = self.client.get(
get_url("prefixed/"), HTTP_ACCEPT_LANGUAGE="en", follow=True
)
self.assertRedirects(response, get_url("en/prefixed/"), 302)
def test_fr_fr_prefixed_redirect(self):
response = self.client.get(
get_url("prefixed/"), HTTP_ACCEPT_LANGUAGE="fr-fr", follow=True
)
self.assertRedirects(response, get_url("fr-fr/prefixed/"), 302)
def test_fr_fr_prefixed_redirect_session(self):
session = self.client.session
session[translation.LANGUAGE_SESSION_KEY] = "fr-fr"
session.save()
response = self.client.get(get_url("prefixed/"), follow=True)
self.assertRedirects(response, get_url("fr-fr/prefixed/"), 302)
def test_fr_fr_prefixed_redirect_device_setting(self):
with patch(
"kolibri.core.device.translation.get_device_language", return_value="fr-fr"
):
response = self.client.get(get_url("prefixed/"), follow=True)
self.assertRedirects(response, get_url("fr-fr/prefixed/"), 302)
@override_settings(**settings_override_dict)
class URLRedirectTests(URLRedirectTestsBase, URLTestCaseBase):
pass
@override_settings(**prefixed_settings_override_dict)
@override_option("Deployment", "URL_PATH_PREFIX", "test/")
class PrefixedURLRedirectTests(URLRedirectTestsBase, URLTestCaseBase):
pass
class URLRedirectWithoutTrailingSlashTestsBase(object):
"""
Tests the redirect when the requested URL doesn't end with a slash
"""
def test_not_prefixed_redirect(self):
response = self.client.get(get_url("not-prefixed"), HTTP_ACCEPT_LANGUAGE="en")
self.assertRedirects(response, get_url("not-prefixed/"), 301)
def test_en_prefixed_redirect(self):
response = self.client.get(
get_url("prefixed"), HTTP_ACCEPT_LANGUAGE="en", follow=True
)
self.assertRedirects(response, get_url("en/prefixed/"), 302)
def test_fr_fr_prefixed_redirect(self):
response = self.client.get(
get_url("prefixed"), HTTP_ACCEPT_LANGUAGE="fr-fr", follow=True
)
self.assertRedirects(response, get_url("fr-fr/prefixed/"), 302)
def test_en_redirect(self):
response = self.client.get(
get_url("prefixed.xml"), HTTP_ACCEPT_LANGUAGE="en", follow=True
)
self.assertRedirects(response, get_url("en/prefixed.xml"), 302)
def test_fr_fr_redirect(self):
response = self.client.get(
get_url("prefixed.xml"), HTTP_ACCEPT_LANGUAGE="fr-fr", follow=True
)
self.assertRedirects(response, get_url("fr-fr/prefixed.xml"), 302)
@override_settings(**settings_override_dict)
class URLRedirectWithoutTrailingSlashTests(
URLRedirectWithoutTrailingSlashTestsBase, URLTestCaseBase
):
pass
@override_settings(**prefixed_settings_override_dict)
@override_option("Deployment", "URL_PATH_PREFIX", "test/")
class PrefixedURLRedirectWithoutTrailingSlashTests(
URLRedirectWithoutTrailingSlashTestsBase, URLTestCaseBase
):
pass
class URLResponseTestsBase(object):
"""
Tests if the response has the right language-code.
"""
def test_not_prefixed_with_prefix(self):
response = self.client.get(get_url("en/not-prefixed/"))
self.assertEqual(response.status_code, 404)
@override_settings(**settings_override_dict)
class URLResponseTests(URLResponseTestsBase, URLTestCaseBase):
pass
@override_settings(**prefixed_settings_override_dict)
@override_option("Deployment", "URL_PATH_PREFIX", "test/")
class PrefixedURLResponseTests(URLResponseTestsBase, URLTestCaseBase):
pass
|
from prob_14 import prob_14
n = int(input("Ingrese un numero: "))
print (prob_14(n))
|
""" Monolithic game server function. This file contains all the backend logic to execute moves and
push the observations to the agents. """
import dill, json
import argparse
from multiprocessing import Event
from typing import Type, Dict, List, NamedTuple
from time import sleep
from spacetime import Node, Dataframe
from .data_model import ServerState, Player, _Observation, Observation
from .rl_logging import init_logging, get_logger
from .FrameRateKeeper import FrameRateKeeper
from .BaseEnvironment import BaseEnvironment
from .config import get_environment, ENVIRONMENT_CLASSES, available_environments
from .util import log_params
logger = get_logger()
class Timeout(NamedTuple):
connect: float = 30.0
start: float = 5.0
move: float = 5.0
end: float = 10.0
def server_app(dataframe: Dataframe,
env_class: Type[BaseEnvironment],
observation_type: Type,
args: dict,
whitelist: list = None,
ready_event: Event = None):
timeout = Timeout()
fr: FrameRateKeeper = FrameRateKeeper(max_frame_rate=args['tick_rate'])
# Keep track of each player and their associated observations
observation_dataframes: Dict[int, Dataframe] = {}
observations: Dict[int, _Observation] = {}
players: Dict[int, Player] = {}
# Function to help push all observations
def push_observations():
for df in observation_dataframes.values():
df.commit()
# Add the server state to the master dataframe
server_state = ServerState(env_class.__name__, args["config"], env_class.observation_names())
dataframe.add_one(ServerState, server_state)
dataframe.commit()
# Function to help clean up server if it ever needs to shutdown
def close_server(message: str):
server_state.terminal = True
logger.error(message)
dataframe.commit()
sleep(5)
# Create the environment and start the server
env: BaseEnvironment = env_class(args["config"])
logger.info("Waiting for enough players to join ({} required)...".format(env.min_players))
# Add whitelist support, players will be rejected if their key does not match the expected keys
whitelist = [] if whitelist is None else whitelist
whitelist_used = len(whitelist) > 0
whitelist_connected = {key: False for key in whitelist}
# If we were created by some server manager, inform them we are ready for players
if ready_event is not None:
ready_event.set()
# -----------------------------------------------------------------------------------------------
# Wait for all players to connect
# -----------------------------------------------------------------------------------------------
fr.start_timeout(timeout.connect)
while len(players) < env.min_players:
if fr.tick():
close_server("Game could not find enough players. Shutting down game server.")
return 1
dataframe.sync()
new_players: Dict[int, Player] = dict((p.pid, p) for p in dataframe.read_all(Player))
        # Any players that have connected but have not been acknowledged yet
for new_id in new_players.keys() - players.keys():
name = new_players[new_id].name
auth_key = new_players[new_id].authentication_key
if whitelist_used and auth_key not in whitelist_connected:
logger.info("Player tried to join with invalid authentication_key: {}".format(name))
dataframe.delete_one(Player, new_id)
del new_players[new_id]
continue
if whitelist_used and whitelist_connected[auth_key]:
logger.info("Player tried to join twice with the same authentication_key: {}".format(name))
dataframe.delete_one(Player, new_id)
del new_players[new_id]
continue
logger.info("New player joined with name: {}".format(name))
# Create new observation dataframe for the new player
obs_df = Dataframe("{}_observation".format(name), [observation_type])
obs = observation_type(new_id)
obs_df.add_one(observation_type, obs)
# Add the dataframes to the database
observation_dataframes[new_id] = obs_df
observations[new_id] = obs
whitelist_connected[auth_key] = True
        # Remove any players that we added earlier but that have since dropped out
for remove_id in players.keys() - new_players.keys():
logger.info("Player {} has left.".format(players[remove_id].name))
auth_key = players[remove_id].authentication_key
whitelist_connected[auth_key] = False
del observations[remove_id]
del observation_dataframes[remove_id]
players = new_players
# -----------------------------------------------------------------------------------------------
# Create all of the player data and wait for the game to begin
# -----------------------------------------------------------------------------------------------
logger.info("Finalizing players and setting up new environment.")
server_state.server_no_longer_joinable = True
# Create the initial state for the environment and push it if enabled
state, player_turns = env.new_state(num_players=len(players))
if not args["observations_only"] and env.serializable():
server_state.serialized_state = env.serialize_state(state)
# Set up each player
for i, (pid, player) in enumerate(players.items()):
# Add the initial observation to each player
observations[pid].set_observation(env.state_to_observation(state=state, player=i))
# Finalize each player by giving it a player number and a port for the dataframe
player.finalize_player(number=i, observation_port=observation_dataframes[pid].details[1])
if i in player_turns:
player.turn = True
# Push all of the results to the player
players_by_number: Dict[int, Player] = dict((p.number, p) for p in players.values())
push_observations()
dataframe.sync()
# Wait for all players to be ready
fr.start_timeout(timeout.start)
while not all(player.ready_for_start for player in players.values()):
if fr.tick():
close_server("Players have dropped out between entering the game and starting the game.")
return 2
dataframe.checkout()
# -----------------------------------------------------------------------------------------------
# Primary game loop
# -----------------------------------------------------------------------------------------------
logger.info("Game started...")
terminal = False
winners = None
dataframe.commit()
fr.start_timeout(timeout.move)
while not terminal:
# Wait for a frame to tick
move_timeout = fr.tick()
# Get new data
dataframe.checkout()
        # Get the player dataframes of the players whose turn it is right now
current_players: List[Player] = [p for p in players.values() if p.number in player_turns]
current_actions: List[str] = []
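        # Proceed when running in realtime mode (never wait), when the move
        # timeout has expired, or once every current player has submitted an action.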
ready = args['realtime'] or move_timeout or all(p.ready_for_action_to_be_taken for p in current_players)
if not ready:
continue
# Queue up each players action if it is legal
# If the player failed to respond in time, we will simply execute the previous action
# If it is invalid, we will pass in a blank string
for player in current_players:
if player.action == '' or env.is_valid_action(state=state, player=player.number, action=player.action):
current_actions.append(player.action)
else:
logger.info("Player #{}, {}'s, action of {} was invalid, passing empty string as action"
.format(player.number, player.name, player.action))
current_actions.append('')
# Execute the current move
state, player_turns, rewards, terminal, winners = (
env.next_state(state=state, players=player_turns, actions=current_actions)
)
# Update true state if enabled
if not args["observations_only"] and env.serializable():
server_state.serialized_state = env.serialize_state(state)
# Update the player data from the previous move.
for player, reward in zip(current_players, rewards):
player.reward_from_last_turn = float(reward)
player.ready_for_action_to_be_taken = False
player.turn = False
        # Tell the new players that it's their turn and provide an observation
for player_number in player_turns:
player = players_by_number[player_number]
observations[player.pid].set_observation(env.state_to_observation(state=state, player=player_number))
player.turn = True
if terminal:
server_state.terminal = True
server_state.winners = dill.dumps(winners)
for player_number in winners:
players_by_number[player_number].winner = True
logger.info("Player: {} won the game.".format(winners))
push_observations()
dataframe.commit()
fr.start_timeout(timeout.move)
# -----------------------------------------------------------------------------------------------
# Clean up after game
# -----------------------------------------------------------------------------------------------
for player in players.values():
player.turn = True
    # Compute final rankings for this game and push them to the players so they can see how they performed
rankings = env.compute_ranking(state, list(range(len(players))), winners)
ranking_dict = {players_by_number[number].name: ranking for number, ranking in rankings.items()}
server_state.rankings = json.dumps(ranking_dict)
dataframe.commit()
dataframe.push()
# TODO| The code below attempts to ensure that the players have the final state of the game before the server quits.
# TODO| However, an error is thrown when players disconnect during the checkout. If this snippet was removed,
# TODO| players would have a similar error when the server would quit while they are pulling.
# TODO| May need to talk to Rohan about cleanly exiting this kind of situation.
# TODO| It would also be great if we could instead properly confirm that recipients got a message.
fr.start_timeout(timeout.end)
for player in players.values():
while not player.acknowledges_game_over and not fr.tick():
dataframe.checkout()
logger.info("Game has ended. Player {} is the winner.".format([key for key, value in ranking_dict.items() if value == 0]))
return ranking_dict
if __name__ == '__main__':
logger = init_logging(logfile=None, redirect_stdout=True, redirect_stderr=True)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="""
Script for launching match servers manually. This will launch a game server and wait for players to connect.
""")
parser.add_argument("--environment", '-e', type=str, default='tron',
help="The name of the environment. Choices are: {}".format(available_environments()))
parser.add_argument("--config", '-c', type=str, default="",
help="Config string that will be passed into the environment constructor.")
parser.add_argument("--port", "-p", type=int, default=7777,
help="Server Port.")
parser.add_argument("--tick-rate", "-t", type=int, default=60,
help="The max frame rate that the server will run on.")
parser.add_argument("--realtime", "-r", action="store_true",
help="With this flag on, the server will not wait for all of the clients to respond.")
parser.add_argument("--observations-only", '-f', action='store_true',
help="With this flag on, the server will not push the true state of the game to the clients "
"along with observations")
parser.add_argument("--loop", '-l', action='store_true',
help="If this flag is set, the script will continually launch game servers. If not, the "
"program will exit after the game has ended.")
args = parser.parse_args()
log_params(args)
try:
env_class = get_environment(args.environment)
except KeyError:
raise ValueError("The \'environment\' argument must must be chosen from the following list: {}".format(
available_environments()
))
observation_type: Type[_Observation] = Observation(env_class.observation_names())
while True:
app = Node(server_app,
server_port=args.port,
Types=[Player, ServerState])
app.start(env_class, observation_type, vars(args))
del app
if not args.loop:
break
|
#!/usr/bin/env python3
from math import cos,pi
def size_inscrit_polygon(size, n):
    # Side length of a regular n-gon inscribed in a circle of radius `size`,
    # from the law of cosines: side = r * sqrt(2 - 2*cos(2*pi/n)).
    return size * (2 - 2 * cos(2*pi / n))**0.5
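# Hypothetical sanity check: a regular hexagon inscribed in a unit circle has
# side length equal to the radius, so this should print roughly 1.0:
# print(size_inscrit_polygon(1, 6))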
|
from setuptools import setup, find_packages
from codecs import open
from os import path
VERSION = 'v2.1.0'
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='random-password-generator',
version=VERSION,
description='Simple and custom random password generator for python',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/suryasr007/random-password-generator',
author='Surya Teja Reddy Valluri',
author_email='94suryateja@gmail.com',
license='MIT',
py_modules=['password_generator'],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Utilities',
'Operating System :: POSIX'
],
keywords='random password generator different lengths'
)
|
# Time: O(nlogn + n * t), t is the value of target.
# Space: O(t)
# 377
# Given an integer array with all positive numbers and no duplicates,
# find the number of possible combinations that add up to a positive integer target.
#
# Example:
#
# nums = [1, 2, 3]
# target = 4
#
# The possible combination ways are:
# (1, 1, 1, 1)
# (1, 1, 2)
# (1, 2, 1)
# (1, 3)
# (2, 1, 1)
# (2, 2)
# (3, 1)
#
# Note that different sequences are counted as different combinations.
#
# Therefore the output is 7.
# Follow up:
# What if negative numbers are allowed in the given array?
# How does it change the problem?
# What limitation do we need to add to the question to allow negative numbers?
from functools import lru_cache
class Solution(object):
def combinationSum4(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
dp = [1] + [0] * target
nums.sort()
for i in range(1, target+1):
for n in nums:
if n > i:
break
dp[i] += dp[i - n]
return dp[target]
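    # Worked trace (for the example above, nums=[1, 2, 3], target=4):
    # dp = [1, 1, 2, 4, 7], since dp[i] sums dp[i - n] over every n <= i,
    # so combinationSum4([1, 2, 3], 4) returns 7.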
# Follow up: The problem with negative numbers is that now the combinations could be potentially of infinite length.
# E.g. nums = [-1, 1] and target = 1. So we should limit the length of the combination sequence (give a bound to the problem).
def combinationSum4WithLength(self, nums, target, length):
@lru_cache(None)
def foo(target, length):
ans = 0
if target == 0 and length >= 0:
ans += 1
if length == 1:
ans += 1 * (target in nums)
elif length > 1:
for num in nums:
ans += foo(target-num, length-1)
return ans
return foo(target, length)
    ''' OR use memoization
def combinationSum4WithLength(self, nums, target, length):
import collections
memo = collections.defaultdict(int)
def recur(target, length):
if (target, length) not in memo:
if target == 0 and length >= 0:
memo[target, length] += 1 # shorter than length limit is ok as target is reached
if length == 1:
memo[target, length] += 1 * (target in nums)
elif length > 1:
for num in nums:
                    memo[target, length] += recur(target - num, length - 1)
return memo[target, length]
return recur(target, length)
'''
print(Solution().combinationSum4([1,2,3], 4)) # 7
print(Solution().combinationSum4WithLength([-1, 1], 1, 3)) # 4: [1], [-1,1,1], [1,-1,1], [1,1,-1]
print(Solution().combinationSum4WithLength([-1, 1, 0], 1, 3)) # 9
print(Solution().combinationSum4WithLength([-1, 1], 1, 5)) # 14:
# lengths 1 and 3: [1], [-1,1,1], [1,-1,1], [1,1,-1]
# length 5: the two -1s can occupy any 2 of the 5 slots: C(5,2) = 10
|
#!/usr/bin/env pypy
import sys
def mex(S):
S = set(S)
v = 0
while v in S:
v += 1
return v
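# Example: mex({0, 1, 3}) == 2, the minimum excludant (smallest non-negative
# integer not in the set), the building block of Sprague-Grundy values.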
n = int(sys.argv[1])
sg = [0] * (n + 1)
cnt = 0
for i in range(2, n + 1):
S = []
for j in range(0, i - 1):
S.append(sg[j] ^ sg[i - j - 2])
sg[i] = mex(S)
if sg[i]:
cnt += 1
def p(s, l=34):
    for i in range(0, len(s), l):
        print(s[i:i + l])
p("".join(map(str, sg)))
print(cnt)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import sys
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
about = {}
with open(os.path.join(here, "incubator", "__version__.py")) as f:
exec(f.read(), about)
if sys.argv[-1] == "publish":
os.system("python setup.py sdist bdist_wheel upload")
sys.exit()
required = [
"arcgis > 1.6",
"numpy >= 1.16.2",
"scikit-image >= 0.16",
"laspy >= 1.2",
"rpy2 >= 2.9.4",
"docopt >= 0.6.2"
]
setup(
name="incubator",
version=about["__version__"],
description="Python command line application (CLI) containing a genetic algorithm to find best parameters for arbitrary assignments.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Adrian Kuhn",
author_email="adrian.kuhn@lu.ch",
url="https://app.geo.lu.ch/redmine/projects/incubator",
packages=find_packages(exclude=["tests"]),
entry_points={},
package_data={},
python_requires=">=3.6",
setup_requires=[],
install_requires=required,
extras_require={},
include_package_data=True,
license="Other",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: Other/Proprietary License",
"Natural Language :: English",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Utilities"
],
cmdclass={},
)
|
from logging import critical, debug, error, info, warning
from src.Player import Player
from src.utils import get_stars, query_leaderboard_API, truncate_name
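# Two-row day ruler shown above the scores: the first row carries the tens
# digits and the second the ones digits for days 1-25.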
HEADER = """
Day 1111111111222222
1234567890123456789012345\
"""
class Leaderboard:
def __init__(self, db) -> None:
self.players = []
self.query_leaderboard()
self.merge_shelf(db)
def query_leaderboard(self):
data = query_leaderboard_API()['members']
for person in data.values():
self.players.append(Player(person))
self.players = sorted(self.players, key=lambda player: player.score, reverse=True)
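    # Custom scoring (mirrors, as an assumption, AoC-style local leaderboards):
    # per day and per part, the fastest player earns len(self.players) points,
    # the next fastest one less, down to 1 point for the slowest finisher.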
def custom_leaderboard(self):
scores = {player: 0 for player in self.players}
for day in range(0,25):
part1 = sorted([
(player, player.days[day].part1_time) for player in self.players if player.days[day].part1_time
], key=lambda tup: tup[-1])
part2 = sorted([
(player, player.days[day].part2_time) for player in self.players if player.days[day].part2_time
], key=lambda tup: tup[-1])
for pos, (player, _) in enumerate(part1):
scores[player] += len(self.players) - pos
for pos, (player, _) in enumerate(part2):
scores[player] += len(self.players) - pos
return self.build_embed({k:v for k, v in sorted(scores.items(), key=lambda entry: entry[-1], reverse=True)})
def public_leaderboard(self) -> str:
return self.build_embed({player: player.score for player in self.players})
def build_embed(self, scores: dict):
ls = [HEADER]
for pos, (player, score) in enumerate(scores.items()):
ls.append(f"{pos: 3}) {score: 4} {get_stars(player)} {truncate_name(player.name)}")
return "```\n{}```".format("\n".join(ls))
def merge_shelf(self, db):
registered_players = [x['username'].lower() for x in db.values()]
inverted_db = {v['username'].lower(): {'start_times': v['start_times'], 'discord': k} for k,v in db.items()}
for player in self.players:
if player.name.lower() not in registered_players:
debug(f"in merge_self: '{player.name}' is not in registered_players")
continue
player.discord = inverted_db[player.name.lower()]['discord']
            for problem in player.days:
                if inverted_db[player.name.lower()]['start_times'][problem.day - 1]:
                    problem.start_time = inverted_db[player.name.lower()]['start_times'][problem.day - 1]
|
"""
virenamer
"""
import subprocess
import sys
from argparse import ONE_OR_MORE, ArgumentParser
from os import getenv
from pathlib import Path
from shutil import rmtree
from tempfile import NamedTemporaryFile
from typing import List, Optional
from colorama import Fore as F
from . import __version__
DEFAULT_EDITOR = getenv("EDITOR", "vim")
def bulk_rename(
sources: List[Path],
destinations: List[Optional[Path]],
delete: bool = False,
dryrun: bool = False,
force: bool = False,
):
"""
function that renames the source files to destination files given the options
"""
assert len(sources) == len(
destinations
), "File count has changed, cannot rename any file"
# iterate the two lists
for source, dest in zip(sources, destinations):
if dest is None:
if not delete:
# delete is not allowed
print(
f"{F.RED}'{source}' won't be deleted, use --delete to enable file deletion{F.RESET}"
)
elif dryrun:
# dryrun mode
print(f"{F.MAGENTA}(dryrun) Delete '{source}'{F.RESET}")
else:
# delete the source
print(f"{F.GREEN}Delete '{source}'{F.RESET}")
if source.is_dir():
rmtree(source)
else:
source.unlink()
else:
if source == dest:
# same source/dest, skip
pass
elif dest.exists() and not force:
# file already exists
print(
f"{F.RED}'{dest}' already exists, skip renaming, use --force to overwrite'{source}'{F.RESET}"
)
elif dryrun:
# dryrun mode
print(f"{F.MAGENTA}(dryrun) Rename '{source}' --> '{dest}'{F.RESET}")
else:
# rename the file
print(f"{F.GREEN}Rename '{source}' --> '{dest}'{F.RESET}")
dest.parent.mkdir(parents=True, exist_ok=True)
source.rename(dest)
def run():
"""
cli entrypoint
"""
parser = ArgumentParser(description="File renamer")
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"-e",
"--editor",
default=DEFAULT_EDITOR,
action="store",
help=f"editor used to edit file list (default is {DEFAULT_EDITOR})",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
help="overwrite if target file already exists",
)
parser.add_argument(
"-d", "--delete", action="store_true", help="delete file if line is empty"
)
parser.add_argument(
"-n", "--dryrun", action="store_true", help="dryrun mode, don't rename any file"
)
parser.add_argument("files", nargs=ONE_OR_MORE, type=Path, help="files to rename")
args = parser.parse_args()
try:
        # filter existing files from input and avoid duplicates
input_files = [f for f in dict.fromkeys(args.files) if f.exists()]
assert len(input_files) > 0, "No valid file to rename"
# edit the files list with editor
with NamedTemporaryFile() as tmp:
tmpfile = Path(tmp.name)
# write the file list to file
tmpfile.write_text("".join(f"{f}\n" for f in input_files), encoding="utf8")
# user edit the file list
subprocess.check_call([args.editor, tmp.name])
# read the new file list
output_files = [
Path(l)
if len(l.strip()) > 0
else None # handle file delete with None destination
for l in tmpfile.read_text(encoding="utf8").splitlines()
]
# rename the files
bulk_rename(
input_files,
output_files,
delete=args.delete,
dryrun=args.dryrun,
force=args.force,
)
    except BaseException as exc:  # pylint: disable=broad-except
        print(f"{F.RED}ERROR: {exc}{F.RESET}", file=sys.stderr)
        sys.exit(1)
|
from elf.types.base.int.elf_int_8_type import ElfInt8Type
from elf.types.base.int.elf_int_16_type import ElfInt16Type
from elf.types.base.int.elf_int_32_type import ElfInt32Type
from elf.types.base.int.elf_int_64_type import ElfInt64Type
from elf.types.base.int.elf_int_n_type import ElfIntNType
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
class ConstantGlobalParameter:
def __init__(self, name: str, value: str):
self.name = name
self.value = value
|
# creating a Class of name Employee
class Employee:
# creating a Class Variable
    increment = 1.5
# Creating a Constructor of the Class Employee
def __init__(self, fname, lname, salary):
# setting Values to the variables
self.fname = fname
# setting Values to the variables
self.lname = lname
# setting Values to the variables
self.salary = salary
    def increase(self):  # Creating a method that raises the salary
        # applying the Class variable to update the instance's salary
        self.salary = int(self.salary * Employee.increment)
# creating Objects of the Class Employee
alex = Employee("Alex", "Mercer", 44000)
harry = Employee("Harry", "Bhai", 44000)
# Increasing the Salary of the Alex Object
alex.increase()
# printing the Salary of the alex Object
print(alex.salary)  # 66000 (44000 * 1.5)
# printing all the Elements of the Alex Object
print(alex.__dict__)
# printing all the Elements of the Employee Class
print(Employee.__dict__)
|
from django.shortcuts import render, redirect
from django.contrib import messages
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.forms.models import modelformset_factory
from basic_newsletter.models import Issue, NewsItem, Newsletter
from basic_newsletter.forms import IssueForm, NewsItemForm, \
HeadlineNewsItemForm, CreateIssueForm, EmailTestForm
# Display the list of issues on the homepage.
# Also display a form to create a new issue for a pre-existing newsletter type
@login_required
def home(request, template='basic_newsletter/home.html'):
newsletter_list = Newsletter.objects.all()
newsletter_issue_list = []
for newsletter in newsletter_list:
newsletter_issue_list.append((newsletter,
Issue.objects
.filter(published_state='Draft',
newsletter=newsletter)
.order_by('-issue_date'),
Issue.objects
.filter(published_state='Ready to Test',
newsletter=newsletter)
.order_by('-issue_date'),
Issue.objects
.filter(published_state='Published',
newsletter=newsletter)
.order_by('-issue_date'),
))
context = dict(newsletter_issue_list=newsletter_issue_list, )
return render(request, template, context)
# Create a new issue of a newsletter as selected from the homepage
@login_required
def create_issue(request, template='basic_newsletter/issue_create.html'):
if request.method == 'POST':
form = CreateIssueForm(request.POST)
if form.is_valid():
issue = Issue()
newsletter = Newsletter.objects.get(
id=request.POST['newsletter_type'])
issue.newsletter = newsletter
issue_date = datetime(year=int(request.POST['issue_date_year']),
month=int(request.POST['issue_date_month']),
day=1)
issue.issue_date = issue_date
issue.published_state = 'Draft'
issue.save()
issue.story_categories = request.POST.getlist('sections')
issue.save()
return redirect('newsletter:edit_issue', issue_id=issue.id)
else:
form = CreateIssueForm()
context = dict(
form=form,
)
return render(request, template, context)
@login_required
def edit_issue(request, issue_id, template='basic_newsletter/issue_edit.html'):
issue = Issue.objects.get(id=issue_id)
headline_formsets = []
non_headline_formsets = []
if request.method == 'POST':
errors = False
form = IssueForm(request.POST)
if form.is_valid():
issue = form.save(commit=False)
issue.template = issue.newsletter.template
issue.save()
for headline_category in issue.headline_categories:
headline_news_item_form_set = modelformset_factory(NewsItem,
form=HeadlineNewsItemForm,
can_delete=True)
form_headline = headline_news_item_form_set(request.POST, request.FILES,
prefix=headline_category.code_name)
headline_formsets.append((headline_category, form_headline))
if form_headline.is_valid():
new_headlines = form_headline.save(commit=False)
for new_headline in new_headlines:
new_headline.feature_type = headline_category
new_headline.issue = issue
new_headline.save()
for deleted_headlines in form_headline.deleted_objects:
deleted_headlines.delete()
else:
errors = True
for category in issue.non_headline_categories:
news_item_form_set = modelformset_factory(NewsItem,
form=NewsItemForm,
can_delete=True)
form = news_item_form_set(request.POST, request.FILES,
prefix=category.code_name)
non_headline_formsets.append((category, form))
if form.is_valid():
new_stories = form.save(commit=False)
for story in new_stories:
story.feature_type = category
story.issue = issue
story.save()
for deleted_story in form.deleted_objects:
deleted_story.delete()
else:
errors = True
if not errors:
if request.POST.get('draft'):
issue.published_state = 'Draft'
issue.save()
return redirect('newsletter:home')
elif request.POST.get('preview'):
return redirect('newsletter:preview_issue', issue_id=issue.id)
elif request.POST.get('save_published'):
issue.published_state = 'Published'
issue.save()
return redirect('newsletter:home')
else:
for headline_category in issue.headline_categories:
feature_queryset = NewsItem.objects.filter(issue=issue,
feature_type=headline_category)
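            # Render one blank extra form only when the category has no
            # existing items; otherwise show just the saved items.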
extra_forms = 1
if len(feature_queryset) > 0:
extra_forms = 0
headline_news_item_form_set = modelformset_factory(NewsItem,
form=HeadlineNewsItemForm,
can_delete=True,
extra=extra_forms)
form_headline = headline_news_item_form_set(queryset=feature_queryset,
prefix=headline_category.code_name)
headline_formsets.append((headline_category, form_headline))
for category in issue.non_headline_categories:
feature_queryset = NewsItem.objects.filter(issue=issue,
feature_type=category)
extra_forms = 1
if len(feature_queryset) > 0:
extra_forms = 0
news_item_form_set = modelformset_factory(NewsItem,
form=NewsItemForm,
can_delete=True,
extra=extra_forms)
form = news_item_form_set(queryset=feature_queryset,
prefix=category.code_name)
non_headline_formsets.append((category, form))
context = dict(
issue=issue,
headline_formsets=headline_formsets,
non_headline_formsets=non_headline_formsets,
)
return render(request, template, context)
# Display the preview of an issue in the corresponding template.
@login_required
def preview_issue(request, issue_id, template='basic_newsletter/preview_issue.html'):
issue = Issue.objects.get(id=issue_id)
if request.method == 'POST':
if request.POST.get('to_test'):
errors = issue.is_complete()
for error in errors:
messages.error(request, error)
if len(errors) == 0:
issue.mark_ready_to_publish(request)
return redirect('newsletter:home')
        elif request.POST.get('send_email'):
            return redirect('newsletter:test_issue', issue_id=issue.id)
elif request.POST.get('draft'):
issue.published_state = 'Draft'
issue.save()
return redirect('newsletter:edit_issue', issue_id=issue.id)
elif request.POST.get('close'):
return redirect('newsletter:home')
context = dict(
issue=issue,
)
return render(request, template, context)
@login_required
def preview_issue_html(request, issue_id):
issue = Issue.objects.get(id=issue_id)
template = issue.html_email_template
context = dict(
issue=issue,
tracking=False,
clicks=False,
)
return render(request, template, context)
@login_required
def preview_issue_text(request, issue_id):
issue = Issue.objects.get(id=issue_id)
template = issue.plain_text_email_template
context = dict(
issue=issue,
)
return render(request, template, context)
@login_required
def click_analysis(request, issue_id):
issue = Issue.objects.get(id=issue_id)
    # template = issue.html_email_template
    template = 'basic_newsletter/preview_issue.html'
context = dict(
issue=issue,
tracking=False,
clicks=True,
)
return render(request, template, context)
@login_required
def analyze_issue_html(request, issue_id):
issue = Issue.objects.get(id=issue_id)
template = issue.html_email_template
context = dict(
issue=issue,
tracking=False,
clicks=True,
)
return render(request, template, context)
@login_required
def delete_issue(request, issue_id, template='basic_newsletter/delete_issue.html'):
issue = Issue.objects.get(id=issue_id)
if request.POST.get('delete'):
issue.delete()
return redirect('newsletter:home')
elif request.POST.get('cancel_delete'):
return redirect('newsletter:home')
else:
context = dict(
issue=issue,
)
return render(request, template, context)
@login_required
def test_issue(request, issue_id, template='basic_newsletter/test_email.html'):
issue = Issue.objects.get(id=issue_id)
if request.method == 'POST':
form = EmailTestForm(request.POST)
if request.POST.get('send'):
recipient = form["recipient"].value()
sender = form["sender"].value()
issue.send_test(recipient, sender)
messages.success(request,
"Test email sent to %s for %s." % (recipient,
issue))
return redirect('newsletter:home')
else:
form = EmailTestForm()
context = dict(
form=form,
issue=issue,
)
return render(request, template, context)
@login_required
def publish_issue(request, issue_id, template="basic_newsletter/publish_issue.html"):
issue = Issue.objects.get(id=issue_id)
if request.POST.get('publish'):
issue.published_date = datetime.now()
issue.publish(request)
return redirect('newsletter:home')
elif request.POST.get('cancel'):
return redirect('newsletter:home')
else:
context = dict(
issue=issue,
)
return render(request, template, context)
@login_required
def reupload_issue(request, issue_id):
issue = Issue.objects.get(id=issue_id)
issue.upload(re_upload=True)
return redirect('newsletter:home')
|
import logging
import networkx as nx
from iteration_utilities import duplicates
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split
from stellargraph.data import EdgeSplitter
logger = logging.getLogger(__name__)
def get_edge_list(G, sort=True):
if sort:
sorted_edges = sorted(G.edges(), key=lambda x: x[0], reverse=True)
return sorted_edges
else:
return list(G.edges())
def list_duplicate_edges(G):
dup = list(duplicates(get_edge_list(G)))
if len(dup) > 0:
logger.info(f"{len(dup)} duplicates found")
return dup
def create_network_from_df(df):
df = df.rename(columns={df.columns[0]: "source", df.columns[1]: "target"})
graph_type = nx.Graph()
G = nx.from_pandas_edgelist(df, create_using=graph_type)
return G
def remove_nodes_with_low_degree(G, n):
degree_sorted = sorted(G.degree(), key=lambda x: x[1], reverse=True)
    # keep only the top n nodes by degree
    node_list_remove = [node for node, v in degree_sorted[n:]]
    G.remove_nodes_from(node_list_remove)
return G
def get_subgraph(G, node_list):
subgraph = G.subgraph(node_list)
diameter = nx.diameter(subgraph)
print("Network diameter of largest component:", diameter)
return subgraph
def created_weighted_network_with_cosine_similarity(df):
S = cosine_similarity(df.values)
G = nx.from_numpy_array(S)
names = list(df.index)
mapping = dict(zip(G, names))
return nx.relabel_nodes(G, mapping)
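# Usage sketch (hypothetical data, assumes pandas is available): given a
# DataFrame of row feature vectors indexed by entity name, this builds a fully
# connected graph whose edge weights are pairwise cosine similarities:
#   import pandas as pd
#   df = pd.DataFrame([[1, 0], [1, 1]], index=["a", "b"])
#   G = created_weighted_network_with_cosine_similarity(df)
#   print(G.edges(data=True))  # weight of ("a", "b") is ~0.707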
def set_attributes(G, **kwargs):
for key in kwargs.keys():
nx.set_node_attributes(G, kwargs[key], key)
return G
def edge_splitter_graph_train_test(graph):
edge_splitter_test = EdgeSplitter(graph)
graph_test, examples_test, labels_test = edge_splitter_test.train_test_split(
p=0.1, method="global"
)
print(graph_test.info())
# Do the same process to compute a training subset from within the test graph
edge_splitter_train = EdgeSplitter(graph_test, graph)
graph_train, examples, labels = edge_splitter_train.train_test_split(
p=0.1, method="global"
)
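    # The two-argument form hands EdgeSplitter the full graph alongside
    # graph_test, presumably so negative samples drawn here are non-edges of
    # the original graph as well, not just of the reduced test graph.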
(
examples_train,
examples_model_selection,
labels_train,
labels_model_selection,
) = train_test_split(examples, labels, train_size=0.75, test_size=0.25)
print(graph_train.info())
return (
graph_train,
graph_test,
examples_train,
examples_test,
examples_model_selection,
labels_train,
labels_test,
labels_model_selection,
)
def link_examples_to_features(link_examples, transform_node, binary_operator):
return [
binary_operator(transform_node(src), transform_node(dst))
for src, dst in link_examples
]
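# Illustrative helpers (an assumption, not part of the original module): typical
# binary operators passed to link_examples_to_features over node embeddings.
def operator_hadamard(u, v):
    return u * v  # element-wise product of the two embedding vectors
def operator_l2(u, v):
    return (u - v) ** 2  # element-wise squared difference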
|
import torch
from model.base_semantic_segmentation_model import SemanticSegmentationModel
from metrics.iou import iou_coefficient
class UNET(SemanticSegmentationModel):
def __init__(
self, in_channels=3, out_channels=1, features=[64, 128, 256, 512], normalize_output=False
):
super().__init__(in_channels, out_channels, features, normalize_output)
def process_prediction(self, predictions):
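        # Softmax is monotonic, so the argmax below is unchanged by it; it is
        # applied only so the intermediate tensor reads as class probabilities.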
predictions = torch.nn.functional.softmax(predictions, dim=1)
predictions = torch.argmax(predictions, dim=1)
return predictions
def evaluate_metrics(self, loader):
self.eval()
with torch.no_grad():
iou_coefficients = []
            for x, y in loader:
                predictions = self(x)
                predictions = self.process_prediction(predictions)
                for i, pred in enumerate(predictions):
                    iou_coefficients.append(iou_coefficient(y[i].numpy(), pred.numpy()))
        print(f"Average IoU coefficient: {sum(iou_coefficients) / len(iou_coefficients)}")
self.train()
|