content stringlengths 5 1.05M |
|---|
import os
from flask import request, send_file, current_app
from app.core import ApiResponse, ApiPagedResponse
from . import api
from ..import db
from ..models import Deliverable, DeliverableFile, Permission
from .decorators import permission_required
from flask_jsonschema import validate
from sqlalchemy import or_
def create_get_files_query():
    """Build the base ``DeliverableFile`` query for listings.

    When the request carries a ``query`` argument, the result is filtered
    with a case-insensitive substring match against both the file name and
    its parent deliverable's name; otherwise the unfiltered query is
    returned.
    """
    search_term = request.args.get('query', None, type=str)
    if search_term is None:
        return DeliverableFile.query
    pattern = "%{}%".format(search_term)
    return (
        DeliverableFile.query
        .join(DeliverableFile.deliverable_)
        .filter(or_(DeliverableFile.name.ilike(pattern),
                    Deliverable.name.ilike(pattern)))
    )
@api.route('/files', methods=['GET'])
def get_files():
    """Return a paginated list of available files

    **Example request**:

    .. sourcecode:: http

        GET /api/1.0/files?page=1 HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 200 OK
        Content-Type: application/json
        DO-Page-Next: http://do.cert.europa.eu/api/1.0/files?page=1
        DO-Page-Prev: None
        DO-Page-Current: 1
        DO-Page-Item-Count: 8

        {
          "count": 8,
          "first": "http://do.cert.europa.eu/api/1.0/files?per_page=20&page=1",
          "items": [
            {
              "created": "2016-08-08T15:28:28",
              "id": 2,
              "name": "CIMBL-244-EU.zip",
              "type": "CIMBL"
            },
            {
              "created": "2016-08-08T10:36:31",
              "id": 1,
              "name": "CIMBL-244-EU.zip",
              "type": "CIMBL"
            }
          ],
          "last": "http://127.0.0.1:5001/api/1.0/files?per_page=20&page=1",
          "next": null,
          "page": 1,
          "per_page": 20,
          "prev": null
        }

    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request
    :resheader DO-Page-Next: Next page URL
    :resheader DO-Page-Prev: Previous page URL
    :resheader DO-Page-Curent: Current page number
    :resheader DO-Page-Item-Count: Total number of items
    :>json array items: Files
    :>jsonarr integer id: File unique ID
    :>jsonarr string name: File name
    :>jsonarr string type: Deliverable type
        For the list of available types see :http:get:`/api/1.0/deliverables`
    :>jsonarr string created: Creation date
    :>json integer page: Current page number
    :>json integer prev: Previous page number
    :>json integer next: Next page number
    :>json integer count: Total number of items
    :status 200: File found
    :status 404: Resource not found
    """
    # Optional free-text filtering is delegated to the query builder;
    # pagination (headers + envelope) is handled by ApiPagedResponse.
    deliverable_files = create_get_files_query()
    return ApiPagedResponse(deliverable_files)
@api.route('/files/<int:file_id>', methods=['GET'])
def get_file(file_id):
    """Get file from database

    **Example request**:

    .. sourcecode:: http

        GET /api/1.0/files/67 HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 200 OK
        Content-Type: application/json

        {
          "created": "2016-08-09T12:56:40",
          "id": 8,
          "name": "CIMBL-244-EU.zip",
          "type": "CIMBL"
        }

    :param file_id: file's unique ID
    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request
    :>json integer id: File unique ID
    :>json string name: File name
    :>json string type: Deliverable type
    :>json string date: Creation date
    :status 200: File found
    :status 404: Resource not found
    """
    # Primary-key lookup; aborts with 404 when the id is unknown.
    deliverable_file = DeliverableFile.query.get_or_404(file_id)
    return ApiResponse(deliverable_file.serialize())
@api.route('/files/<int:file_id>/contents', methods=['GET'])
def download_file(file_id):
    """Download file

    **Example request**:

    .. sourcecode:: http

        GET /api/1.0/files/1 HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 200 OK
        Content-Type: application/json
        Content-Disposition: attachment; filename=CIMBL-244-EU.zip
        Content-Length: 55277
        Content-Type: application/zip

    :param file_id: file's unique ID
    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request
    :status 200: File found
    :status 404: Resource not found
    """
    dfile = DeliverableFile.query.filter_by(id=file_id).first_or_404()
    cfg = current_app.config
    # The on-disk file is located by joining the uploads directory with the
    # stored name. NOTE(review): ``attachment_filename`` was renamed to
    # ``download_name`` in Flask 2.0 and removed in 2.2 — confirm the pinned
    # Flask version before upgrading.
    return send_file(os.path.join(cfg['APP_UPLOADS'], dfile.name),
                     attachment_filename=dfile.name, as_attachment=True)
@api.route('/files', methods=['POST', 'PUT'])
@validate('deliverables', 'add_files')
def add_file():
    """Save file names to database.

    This endpoint should be called only after files have been uploaded via
    :http:post:`/api/1.0/upload`

    **Example request**:

    .. sourcecode:: http

        POST /api/1.0/files HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json
        Content-Type: application/json

        {
          "deliverable_id": 2,
          "is_sla": 1,
          "files": [
            "test.gz"
          ]
        }

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 201 CREATED
        Content-Type: application/json

        {
          "files": [
            "test.gz"
          ],
          "message": "Files added"
        }

    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request
    :<json string deliverable_id: ID of deliverable.
        For a full list of available IDs see :http:get:`/api/1.0/deliverables`
    :<json integer is_sla: When set file is for SLA constituents only
    :<json array files: List of files to save
    :>json array files: List of saved files
    :>json string message: Status message
    :status 201: Files successfully saved
    :status 400: Bad request
    """
    # ``files`` is popped so the remaining payload (deliverable_id, is_sla,
    # ...) can be applied to every DeliverableFile via fromdict().
    files = request.json.pop('files')
    for file_name in files:
        dfile = DeliverableFile.fromdict(request.json)
        dfile.name = file_name
        db.session.add(dfile)
    # Single commit: either all file records are saved or none are.
    db.session.commit()
    return ApiResponse({'files': files, 'message': 'Files added'}, 201)
@api.route('/files/<int:file_id>', methods=['DELETE'])
@permission_required(Permission.ADMINISTER)
def delete_file(file_id):
    """Delete file from database

    Files are not currently removed from the filesystem.

    **Example request**:

    .. sourcecode:: http

        DELETE /api/1.0/files/67 HTTP/1.1
        Host: do.cert.europa.eu
        Accept: application/json

    **Example response**:

    .. sourcecode:: http

        HTTP/1.0 200 OK
        Content-Type: application/json

        {
          "message": "File deleted"
        }

    :param file_id: file's unique ID.
    :reqheader Accept: Content type(s) accepted by the client
    :resheader Content-Type: this depends on `Accept` header or request
    :>json string message: File delete status
    :status 200: File was deleted
    :status 404: File was not found
    """
    # Soft delete: the row is only flagged, never removed from the table.
    dfile = DeliverableFile.query.filter(
        DeliverableFile.id == file_id).first_or_404()
    dfile.deleted = 1
    db.session.add(dfile)
    db.session.commit()
    return ApiResponse({'message': 'File deleted'})
|
from pathlib import Path
from unittest.mock import call, patch, mock_open
import unittest
from adk.exceptions import InvalidPathName, JsonFileNotFound, MalformedJsonFile
from adk.utils import copy_files, get_dummy_application, get_py_dummy, read_json_file, reorder_data, write_json_file, \
write_file, validate_path_name
class TestUtils(unittest.TestCase):
    """Unit tests for the helper functions in ``adk.utils``."""

    def setUp(self) -> None:
        # Shared fixtures: a dummy path, a role list, and a path name that
        # contains a separator (rejected by validate_path_name).
        self.path = Path("dummy")
        self.roles = ["role1", "role2"]
        self.invalid_name = "invalid/name"

    def test_write_json_file(self):
        # Both the file open and the JSON serialisation are mocked; we only
        # verify that each collaborator is invoked exactly once.
        with patch("adk.utils.open") as open_mock, \
             patch("adk.utils.json.dump") as json_dump_mock:
            write_json_file(self.path, {})
            open_mock.assert_called_once()
            json_dump_mock.assert_called_once()

    def test_write_file(self):
        with patch("adk.utils.open") as open_mock:
            write_file(self.path, {})
            open_mock.assert_called_once()

    def test_read_json_file_valid(self):
        # Well-formed two-entry config; read_json_file should parse both apps.
        dummy_apps_config = "{" \
                            "\"app_1\" : {\"path\": \"/some/path/\"," \
                            "\"application_id\": 1}," \
                            "\"app_2\" : {\"path\": \"/some/path/\"," \
                            "\"application_id\": 2}" \
                            "}"
        with patch('adk.utils.open', mock_open(read_data=dummy_apps_config)):
            data = read_json_file(self.path)
            self.assertEqual(len(data), 2)
            self.assertIn('app_1', data)
            self.assertIn('app_2', data)

    def test_read_json_file_invalid(self):
        # Same structure but with the colons removed — must raise
        # MalformedJsonFile rather than a bare JSONDecodeError.
        malformed_apps_config = "{" \
                                "\"app_1\" {\"path\" \"/some/path/\"," \
                                "\"application_id\" 1}," \
                                "\"app_2\" {\"path\" \"/some/path/\"," \
                                "\"application_id\" 2}" \
                                "}"
        with patch('adk.utils.open', mock_open(read_data=malformed_apps_config)):
            self.assertRaises(MalformedJsonFile, read_json_file, self.path)

    def test_read_json_file_not_found(self):
        # A missing file is translated into the package's JsonFileNotFound.
        with patch('adk.utils.open', side_effect=FileNotFoundError):
            self.assertRaises(JsonFileNotFound, read_json_file, self.path)

    def test_reorder_data(self):
        # Each dict holds the same keys in a different order; after
        # reorder_data every item must expose keys in ``desired_order``.
        dict_1 = {'k1': 1, "k3": 3, "k2": 2}
        dict_2 = {'k1': 2, "k2": 4, "k3": 6}
        dict_3 = {'k2': 6, "k3": 9, "k1": 3}
        data_list = [dict_1, dict_2, dict_3]
        desired_order = ["k3", "k2", "k1"]
        reordered_data = reorder_data(data_list, desired_order)
        for item in reordered_data:
            key_list = list(item)
            for i, key in enumerate(desired_order):
                self.assertIn(key, item)
                self.assertEqual(key_list[i], key)

    def test_get_dummy_application(self):
        # Smoke test only: must not raise for a simple role list.
        get_dummy_application(self.roles)

    def test_get_py_dummy(self):
        # Smoke test only: must not raise.
        get_py_dummy()

    def test_validate_path_name(self):
        self.assertRaises(InvalidPathName, validate_path_name, "object", self.invalid_name)

    def test_copy_files(self):
        # All filesystem access is mocked; verify the join/copy call pattern
        # for two regular files in the source directory.
        with patch("adk.utils.os.path.isfile") as isfile_mock, \
             patch("adk.utils.os.listdir") as listdir_mock, \
             patch("adk.utils.shutil.copy") as copy_mock, \
             patch("adk.utils.os.path.join") as join_mock:
            isfile_mock.return_value = True
            listdir_mock.return_value = ['file1', 'file2']
            join_mock.side_effect = ['file1_path', 'file2_path']
            copy_files(Path("source"), Path("dest"))
            join_calls = [call(Path("source"), 'file1'), call(Path("source"), 'file2')]
            join_mock.assert_has_calls(join_calls)
            copy_calls = [call("file1_path", Path("dest")), call("file2_path", Path("dest"))]
            copy_mock.assert_has_calls(copy_calls)
|
# Generated by Django 2.2.8 on 2020-01-12 10:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an ``email`` field (empty-string default) to AdminProfile."""

    dependencies = [
        ('post', '0004_auto_20200112_0959'),
    ]

    operations = [
        migrations.AddField(
            model_name='adminprofile',
            name='email',
            # default='' lets the column be added to existing rows without a prompt.
            field=models.EmailField(default='', max_length=254),
        ),
    ]
|
from __future__ import annotations
from ctc import binary
from ctc import spec
from . import function_parsing
def get_event_hash(event_abi: spec.EventABI) -> str:
    """Return the keccak hash of the event's canonical signature string."""
    return binary.keccak_text(get_event_signature(event_abi=event_abi))
def get_event_signature(event_abi: spec.EventABI) -> str:
    """Build the canonical signature, e.g. ``Transfer(address,address,uint256)``.

    Each input type is first normalized through the selector-type resolver
    (e.g. expanding aliases) before being joined.
    """
    selector_types = [
        function_parsing.get_function_selector_type(var['type'])
        for var in event_abi['inputs']
    ]
    return event_abi['name'] + '(' + ','.join(selector_types) + ')'
def get_event_unindexed_types(
    event_abi: spec.EventABI,
) -> list[spec.ABIDatumType]:
    """Return the types of the event's non-indexed (data) inputs."""
    types = []
    for entry in event_abi['inputs']:
        if not entry['indexed']:
            types.append(entry['type'])
    return types
def get_event_unindexed_names(event_abi: spec.EventABI) -> list[str]:
    """Return the names of the event's non-indexed (data) inputs."""
    names = []
    for entry in event_abi['inputs']:
        if not entry['indexed']:
            names.append(entry['name'])
    return names
def get_event_indexed_names(event_abi: spec.EventABI) -> list[str]:
    """Return the names of the event's indexed (topic) inputs."""
    names = []
    for entry in event_abi['inputs']:
        if entry['indexed']:
            names.append(entry['name'])
    return names
def get_event_indexed_types(
    event_abi: spec.EventABI,
) -> list[spec.ABIDatumType]:
    """Return the types of the event's indexed (topic) inputs."""
    types = []
    for entry in event_abi['inputs']:
        if entry['indexed']:
            types.append(entry['type'])
    return types
|
import unittest
import time
from pidevices.mcp23017 import MCP23017
from pidevices import Mcp23017GPIO
from pidevices.exceptions import NotInputPin
class TestMcp23017GPIO(unittest.TestCase):
    """Hardware-in-the-loop tests for the MCP23017 GPIO interface.

    These tests talk to a real MCP23017 expander; several of them require
    physical signals on the configured pins.
    """

    def test_add_pins(self):
        # 'A_1' maps to pin 1, 'B_2' to pin 10 (bank B offset of 8).
        interface = Mcp23017GPIO(echo='A_1', trigger='B_2')
        self.assertEqual(interface.pins['echo'].pin_num, 1, "Should be 1")
        self.assertEqual(interface.pins['trigger'].pin_num, 10, "Should be 10")

    def test_remove_pins(self):
        interface = Mcp23017GPIO(echo='A_1', trigger='B_2')
        num_pins = len(interface.pins)
        self.assertEqual(num_pins, 2, "Len should be 2")
        interface.remove_pins("echo", "trigger")
        num_pins = len(interface.pins)
        # Fixed message: the assertion checks for 0 remaining pins.
        self.assertEqual(num_pins, 0, "Len should be 0")

    def test_read(self):
        interface = Mcp23017GPIO(echo='A_1', trigger='B_2')
        interface.set_pin_function("echo", "input")
        value = interface.read("echo")
        # Value depends on the wiring, so no assertion is made here.
        #self.assertEqual(value, 1, "The read value should be 1.")

    def test_set_pin_function(self):
        interface = Mcp23017GPIO(echo='A_1', trigger='B_2')
        function = "input"
        interface.set_pin_function("echo", function)
        self.assertEqual(interface.pins["echo"].function, function,
                         "Should be {}".format(function))
        function = "output"
        interface.set_pin_function("trigger", function)
        self.assertEqual(interface.pins["trigger"].function, function,
                         "Should be {}".format(function))
        # An unknown function name must be rejected.
        with self.assertRaises(TypeError):
            interface.set_pin_function("trigger", "aa")

    def test_set_pin_pull(self):
        interface = Mcp23017GPIO(echo='A_1', trigger='B_2')
        pull = "up"
        pin = "echo"
        interface.init_input(pin, pull)
        self.assertEqual(interface.pins[pin].pull, pull,
                         "Pull should be {}".format(pull))
        # Invalid pull value -> TypeError; pull on a non-input pin -> NotInputPin.
        with self.assertRaises(TypeError):
            interface.set_pin_pull("echo", "aa")
        with self.assertRaises(NotInputPin):
            interface.set_pin_pull("trigger", "up")

    def test_set_pin_edge(self):
        interface = Mcp23017GPIO(echo='A_1', trigger='B_2')
        interface.init_input("echo", "down")
        # Test exception
        with self.assertRaises(TypeError):
            interface.set_pin_edge("echo", "aaa")
        pin_num = interface.PIN_NUMBER_MAP[interface.pins["echo"].pin_num]
        # Check rising: compare against DEFVAL=0 (interrupt when pin != 0).
        interface.set_pin_edge("echo", "rising")
        intcon = interface._device.get_pin_intcon(pin_num)
        def_val = interface._device.get_pin_def_val(pin_num)
        self.assertEqual(intcon, 1, "Should be 1")
        self.assertEqual(def_val, 0, "Should be 0")
        # Check falling: compare against DEFVAL=1.
        interface.set_pin_edge("echo", "falling")
        intcon = interface._device.get_pin_intcon(pin_num)
        def_val = interface._device.get_pin_def_val(pin_num)
        self.assertEqual(intcon, 1, "Should be 1")
        self.assertEqual(def_val, 1, "Should be 1")
        # Check both: interrupt on any change (INTCON=0).
        interface.set_pin_edge("echo", "both")
        intcon = interface._device.get_pin_intcon(pin_num)
        def_val = interface._device.get_pin_def_val(pin_num)
        self.assertEqual(intcon, 0, "Should be 0")
        self.assertEqual(def_val, 0, "Should be 0")

    def test_set_pin_bounce(self):
        interface = Mcp23017GPIO(echo='A_1', trigger='B_2')
        interface.init_input("echo", "down")
        # Test exception: bounce must be an int (milliseconds).
        with self.assertRaises(TypeError):
            interface.set_pin_bounce("echo", 12.2)
        pin_num = interface.PIN_NUMBER_MAP[interface.pins["echo"].pin_num]
        val = 100
        interface.set_pin_bounce("echo", 100)
        bounce = interface._device._debounce[pin_num]
        # Stored internally in seconds.
        self.assertEqual(bounce, val/1000, "Should be 100")

    def test_set_pin_event(self):
        # Manual/visual test: prints edge events for 5 seconds of polling.
        interface = Mcp23017GPIO(echo='A_0', trigger='A_1')
        interface.init_input("echo", "down")
        interface.init_input("trigger", "down")
        def f(pin):
            print("{} Rising edge signal on pin {}.".format(f.c, pin))
            f.c += 1
        f.c = 0
        def f_1(pin):
            print("{} Falling edge signal on pin {}.".format(f_1.c, pin))
            f_1.c += 1
        f_1.c = 0
        echo_pin_num = interface.PIN_NUMBER_MAP[interface.pins["echo"].pin_num]
        trigger_pin_num = \
            interface.PIN_NUMBER_MAP[interface.pins["trigger"].pin_num]
        interface.set_pin_edge("echo", "rising")
        interface.set_pin_edge("trigger", "falling")
        interface.set_pin_bounce("echo", 200)
        interface.set_pin_bounce("trigger", 1000)
        interface.set_pin_event("echo", f, echo_pin_num)
        interface.set_pin_event("trigger", f_1, trigger_pin_num)
        interface.start_polling(["echo", "trigger"])
        time.sleep(5)
        interface.stop_polling()
        interface.close()

    def test_wait_pin_for_edge(self):
        # Requires an external rising edge on 'echo'; the second wait is
        # expected to time out (return 0).
        interface = Mcp23017GPIO(echo='A_0', trigger='A_1')
        interface.init_input("echo", "down")
        interface.set_pin_edge("echo", "rising")
        val = interface.wait_pin_for_edge("echo")
        self.assertEqual(val, 1, "Should be 1")
        val = interface.wait_pin_for_edge("echo", timeout=2000)
        self.assertEqual(val, 0, "Should be 0")


if __name__ == "__main__":
    unittest.main()
|
import cvxpy as cp
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from shapely.geometry import box, Point, LineString, Polygon, MultiPolygon
from shapely.affinity import scale
from scipy.spatial import distance_matrix
from matplotlib.animation import FuncAnimation
from ortools.constraint_solver import routing_enums_pb2, pywrapcp
class Room:
    """Represents the geometries of a room and its guarded region."""

    def __init__(self, filename, room_res=1000, guard_res=1000, guard_scale=1):
        # Features in the file are distinguished by their 'type' column
        # ('room' vs 'obstacle'); exactly one 'room' feature is expected.
        self.gdf = gpd.read_file(filename)
        self.guard_scale = guard_scale
        self.room = self.gdf[self.gdf['type'] == 'room'].iloc[0].geometry
        # Guard region: bounding box of the scaled room, minus obstacles,
        # then clipped back to the room itself.
        self.guard = box(*(scale(self.room, guard_scale, guard_scale).bounds))
        for obs in self.gdf[self.gdf['type'] == 'obstacle'].geometry:
            self.guard = self.guard.difference(obs)
        self.guard = self.guard.intersection(self.room)
        # Discretize both geometries into point grids with their epsilons.
        self.room_grid, self.room_cells, self.room_epsilon = self._grid(self.room, room_res)
        self.guard_grid, self.guard_cells, self.guard_epsilon = self._grid(self.guard, guard_res)

    @property
    def guard_geodesic_center(self):
        """Finds the best guard grid approximation of the room grid's geodesic center."""
        # The geodesic center minimizes the maximum distance to any point.
        # NOTE(review): distance_matrix is Euclidean, not geodesic — this is
        # only exact when no obstacle blocks the straight-line path; confirm.
        dist = distance_matrix(self.guard_grid, self.room_grid)
        return np.argmin(np.max(dist, axis=1))

    def _grid(self, geom, res):
        """Returns points within a geometry (gridded over its bounding box).

        Points on the grid inside the bounding box but outside the geometry
        are rejected.

        :param res: The number of points in the bounding box's grid (approx.)
        """
        minx, miny, maxx, maxy = geom.bounds
        aspect = (maxy - miny) / (maxx - minx)
        # NOTE(review): n_y_points ignores ``aspect`` while n_x_points divides
        # by it — possibly intended sqrt(res * aspect); confirm the density.
        n_x_points = int(np.ceil(np.sqrt(res / aspect)))
        n_y_points = int(np.ceil(np.sqrt(res)))
        x_arr, x_epsilon = np.linspace(minx, maxx, n_x_points, retstep = True)
        y_arr, y_epsilon = np.linspace(miny, maxy, n_y_points, retstep = True)
        xx, yy = np.meshgrid(x_arr, y_arr)
        filtered_points = []
        filtered_cells = []
        for x, y in zip(xx.flatten(), yy.flatten()):
            is_in_geom, data = self._get_grid_cell(x, y, x_epsilon, y_epsilon, geom)
            if is_in_geom:
                cells, cell_points = data
                filtered_points.extend([(point.x, point.y) for point in cell_points])
                filtered_cells.extend(cells)
        # Every point in the room is within epsilon of a point in the grid
        grid_epsilon = np.sqrt(x_epsilon**2 + y_epsilon**2)
        return np.array(filtered_points), np.array(filtered_cells), grid_epsilon

    def _get_grid_cell(self, x, y, x_epsilon, y_epsilon, geom):
        """Computes a grid cell, the intersection of geom and rectangle centered on (x, y)

        Returns a boolean indicating if the grid cell is empty and a data object.
        If the grid cell is not empty, `data` is tuple that contains
        a list of simple polygons (shapely.Polygon) that compose the interseciton
        and a list of representatives points (shapely.Point) inside the polygons

        Throws an error if the grid cell is not a simple polygon.
        """
        minx = x - x_epsilon/2
        maxx = x + x_epsilon/2
        miny = y - y_epsilon/2
        maxy = y + y_epsilon/2
        unfiltered_cell = box(minx = minx, miny = miny, maxx = maxx, maxy = maxy)
        intersection = geom.intersection(unfiltered_cell)
        if intersection.is_empty:
            is_in_geom = False
            data = None
        elif isinstance(intersection, Polygon):
            assert intersection.is_simple, "Increase grid resolution to ensure grid cells are simple polygons"
            is_in_geom = True
            cells = [intersection]
            cell_points = [intersection.representative_point()]
            data = (cells, cell_points)
        elif isinstance(intersection, MultiPolygon):
            is_in_geom = True
            # NOTE(review): iterating a MultiPolygon directly was removed in
            # shapely 2.0 — use ``list(intersection.geoms)`` when upgrading.
            cells = list(intersection)
            cell_points = [cell.representative_point() for cell in cells]
            data = (cells, cell_points)
        else:
            # Intersection of two polygons should always be empty, a Polygon,
            # or a MultiPolygon; anything else indicates a geometry bug.
            assert(False)
        return is_in_geom, data
|
# -*- coding: utf-8 -*-
# Time : 2022/1/30 1:49
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
from services.bricklayer import Bricklayer
from services.explorer import Explorer
from services.settings import logger
from services.utils import (
ToolBox
)
# Shared, silent (headless) service instances used by run().
SILENCE = True
bricklayer = Bricklayer(silence=SILENCE)
explorer = Explorer(silence=SILENCE)


def run():
    """Claim the store's current limited-time free ("0% off") games."""
    # Scan store promotions; returns names and store links of free games.
    limited_free_game_objs = explorer.get_the_limited_free_game()
    if not limited_free_game_objs.get("urls"):
        return
    urls = limited_free_game_objs["urls"]
    # Refresh the identity token (cookies); abort if the refresh fails.
    if not bricklayer.cookie_manager.refresh_ctx_cookies(verify=True):
        return
    ctx_cookies = bricklayer.cookie_manager.load_ctx_cookies()
    # Common case first: urls usually holds a single entry.
    for url in urls:
        logger.debug(ToolBox.runtime_report(
            motive="STARTUP",
            action_name="ScaffoldClaim",
            message="🍜 正在为玩家领取周免游戏",
            game=f"『{limited_free_game_objs[url]}』"
        ))
        bricklayer.get_free_game(
            page_link=url,
            ctx_cookies=ctx_cookies,
            challenge=True
        )
|
import glob
import logging
import os
import re
from itertools import chain
from django.core.management.base import BaseCommand
from cms.models import CMSPlugin, Placeholder, StaticPlaceholder
from djangocms_alias.models import Category, Alias, AliasContent
from djangocms_alias.constants import DEFAULT_STATIC_ALIAS_CATEGORY_NAME
from djangocms_versioning.models import Version
from djangocms_versioning.constants import DRAFT, PUBLISHED
from djangocms_4_migration.helpers import get_or_create_migration_user
logger = logging.getLogger(__name__)
"""
Steps for migrating static_placeholders to aliases from CMS 3.5 to CMS 4.0
--------------------------------------------------------------------------
for each existing static_placeholder:
Remap the static_placeholder's contents to the newly formed static_alias
for each template:
Edit the template file to replace 'static_placeholder' with 'static_alias' and remove any 'site' attribute
Remove all static_placeholders (their contents will now be empty)
"""
def _process_templates(process_addons=True):
    """
    Processes templates in a project and optionally in packages to:
    - Replace the template tag "static_placeholder" with "static_alias"
    - Add the load tag "djangocms_alias_tags" to provide the functionality required for "static_alias"
    """
    logger.info('Started processing templates')
    # Scan all template files and replace '{% static_placeholder <name> [site] %}'
    # with '{% static_alias <name> [site] %}'.
    templates = []
    paths = ['templates']
    # If this is a project with local addons, we may need to convert them too.
    if process_addons:
        addon_template_dirs = glob.glob('./addons-dev/**/templates', recursive=True)
        paths = [*paths, *addon_template_dirs]
    for root, dirs, files in chain.from_iterable(os.walk(path) for path in paths):
        logger.info(f'Searching path: {root}')
        for file in files:
            if not file.endswith('.html'):
                continue
            filename = os.path.join(root, file)
            logger.info(f'Analysing template: {filename}')
            with open(filename, 'r') as f:
                contents = f.read()
            # Attempt to replace static_placeholder tags with static_alias tags
            contents, placeholder_replacement_count = re.subn(
                r'{% *static_placeholder ', r'{% static_alias ', contents)
            # If no replacements happened, continue
            if not placeholder_replacement_count:
                continue
            # If replacements were made we need to be sure that the alias tag import is present
            contents, alias_load_tag_count = re.subn(
                r'{% load cms_tags ', r'{% load cms_tags djangocms_alias_tags ', contents)
            with open(filename, 'w') as f:
                f.write(contents)
            logger.info(f'Changes made to template: {filename}')
            templates.append(file)
    logger.info(f'Templates modified: {templates}')
def _get_or_create_alias_category():
    """Fetch the default static-alias Category, creating it if missing.

    Parler's get_or_create doesn't work well with translations, so the
    lookup and creation are performed manually.
    """
    category = Category.objects.filter(
        translations__name=DEFAULT_STATIC_ALIAS_CATEGORY_NAME).first()
    if not category:
        category = Category.objects.create(name=DEFAULT_STATIC_ALIAS_CATEGORY_NAME)
    return category
def _get_or_create_alias(category, static_code, site):
    """Find the Alias matching (static_code, site), creating it if absent.

    A falsy ``site`` means a site-agnostic alias (site IS NULL).
    """
    lookup = {'static_code': static_code}
    if site:
        lookup['site'] = site
    else:
        lookup['site_id__isnull'] = True
    # Try and find an Alias to render.
    alias = Alias.objects.filter(**lookup).first()
    if alias:
        return alias
    # None found — create one, flagged as template-created.
    creation_kwargs = {
        'static_code': static_code,
        'creation_method': Alias.CREATION_BY_TEMPLATE,
    }
    if site:
        creation_kwargs['site'] = site
    alias = Alias.objects.create(category=category, **creation_kwargs)
    logger.info(f'Created Alias: {alias}')
    return alias
def _create_alias_content(alias, name, language, user, state=PUBLISHED):
    """Create an AliasContent for one language plus its Version record."""
    content = AliasContent.objects.create(
        alias=alias,
        name=name,
        language=language,
    )
    # Versioning record ties the content to a user and a publication state.
    Version.objects.create(content=content, created_by=user, state=state)
    logger.info(f'Created AliasContent {content}')
    return content
def _remap_static_placeholder_plugins_to_static_alias(static_placeholder_id, static_placeholder_code,
                                                      alias, migration_user, version_state=PUBLISHED):
    """Move the plugins of a cms3 static placeholder into per-language alias contents.

    :param static_placeholder_id: id of the source (cms3) placeholder
    :param static_placeholder_code: static code, reused as the AliasContent name
    :param alias: target Alias instance
    :param migration_user: user recorded as creator of the new Versions
    :param version_state: versioning state for the created contents
    """
    published_plugins = CMSPlugin.objects.filter(placeholder_id=static_placeholder_id)
    # Group the plugins by language: cms3 placeholders contain all languages
    # together, while cms4 gives each language its own placeholder.
    plugin_language_groups = {}
    for plugin in published_plugins:
        plugin_language_groups.setdefault(plugin.language, []).append(plugin)
    for language, plugin_set in plugin_language_groups.items():
        logger.info(f'Processing plugin language: {language}')
        alias_content = _create_alias_content(alias, static_placeholder_code, language, migration_user, version_state)
        alias_placeholder_id = alias_content.placeholder.id
        # Move the plugins into the Alias.
        for plugin in plugin_set:
            plugin.placeholder_id = alias_placeholder_id
            plugin.save()
def _process_static_placeholders():
    """For every StaticPlaceholder, create/find an Alias and move its plugins across."""
    alias_category = _get_or_create_alias_category()
    # Rescan each page, this will create a <slot> placeholder for each page if it doesn't already exist!
    migration_user, created = get_or_create_migration_user()
    # for each existing static_placeholder:
    for static_placeholder in StaticPlaceholder.objects.all():
        logger.info(f'Processing static_placeholder {static_placeholder}')
        # Get or create Alias
        alias = _get_or_create_alias(alias_category, static_placeholder.code, static_placeholder.site)
        # Published contents first (default PUBLISHED version state).
        _remap_static_placeholder_plugins_to_static_alias(
            static_placeholder.public_id, static_placeholder.code, alias, migration_user)
        # If new draft changes are pending "dirty" create a new draft version with those changes
        if static_placeholder.dirty:
            _remap_static_placeholder_plugins_to_static_alias(
                static_placeholder.draft_id, static_placeholder.code, alias, migration_user, version_state=DRAFT)
def _cleanup():
    """Remove every StaticPlaceholder and its draft/public Placeholder rows."""
    logger.info('Cleaning up')
    for static_placeholder in StaticPlaceholder.objects.all():
        # Drop both underlying placeholder rows before the wrapper object.
        for placeholder_id in (static_placeholder.public_id, static_placeholder.draft_id):
            Placeholder.objects.filter(id=placeholder_id).delete()
        static_placeholder.delete()
class Command(BaseCommand):
    """Management command migrating static_placeholders to static_alias."""
    help = 'Convert static_placeholders to static_alias tags'

    def handle(self, *args, **options):
        # Order matters: rewrite templates first, then move the data,
        # then remove the now-empty static placeholders.
        logger.info('Starting conversion from static_placeholder to static_alias')
        _process_templates()
        _process_static_placeholders()
        _cleanup()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``host_path`` and ``mode`` fields to ProjectVolume."""

    dependencies = [
        ('projects', '0008_auto_20160310_1222'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectvolume',
            name='host_path',
            field=models.CharField(max_length=512, blank=True),
        ),
        migrations.AddField(
            model_name='projectvolume',
            name='mode',
            # NOTE(review): the 'RO' choice is labelled '\u53ea\u5199' ("write only")
            # but 'RO' presumably means read-only ('\u53ea\u8bfb') — confirm
            # against the current model definition before correcting.
            field=models.CharField(default=b'RW', max_length=2, choices=[(b'RW', '\u8bfb\u5199'), (b'RO', '\u53ea\u5199')]),
        ),
    ]
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegisterForm(UserCreationForm):
    """Registration form: UserCreationForm plus email and name fields."""

    email = forms.EmailField()
    # NOTE(review): Django's User model calls these ``first_name``/``last_name``;
    # declared as ``firstname``/``lastname`` they are form-only fields and will
    # not be saved onto the User instance by form.save() — confirm intent.
    firstname = forms.CharField(max_length=20)
    lastname = forms.CharField(max_length=20)

    class Meta:
        model = User
        fields = ['username', 'email', 'firstname', 'lastname', 'password1', 'password2']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Friendlier labels than the auto-generated defaults.
        self.fields['username'].label = 'Display Name'
        self.fields['firstname'].label = 'Firstname'
        self.fields['lastname'].label = 'Lastname'
        self.fields['email'].label = 'Email Address'
|
from objects.CSCG._3d.mesh.domain.regions.region.types_wrt_metric.base import TypeWr2MetricBase
import numpy as np
from root.config.main import cOmm, rAnk
from objects.CSCG._3d.mesh.elements.element.types_wrt_metric.chaotic import ChaoticElement
from objects.CSCG._3d.mesh.trace.elements.element.types_wrt_metric.chaotic import ChaoticTraceElement
class Chaotic(TypeWr2MetricBase):
    """
    Chaotic regions is the default regions type. If we do not mention the regions type in the domain input file,
    we will use this regions type as its type.

    If a regions is classified as a chaotic regions, then all the elements in this region will also be chaotic.
    Therefore, we say that all elements are different. As a result, when we compute, for example, the Jacobian
    of elements, we have to do it for all elements. So, we should better avoid this.
    """
    def __init__(self, region):
        super().__init__(region)
        # Touch ``mark`` once so the broadcast happens during construction.
        _ = self.mark
        self._freeze_self_()

    @property
    def mark(self):
        """Unique mark string, generated on rank 0 and broadcast to all ranks."""
        if self._mark_ is None:
            if rAnk == 0:
                # id(self) guarantees uniqueness within this process/run.
                self._mark_ = 'chaotic:' + str(id(self))
            else:
                self._mark_ = None
            # All ranks must agree on the same mark value.
            self._mark_ = cOmm.bcast(self._mark_, root=0)
        return self._mark_

    def ___CLASSIFY_ELEMENT_of_spacing___(self, spacing: tuple) -> ChaoticElement:
        """Classify an element by its spacing; chaotic regions always yield ChaoticElement."""
        assert np.shape(spacing) == (3,2), "I need a spacing of shape (3,2) to represent an element in a regions."
        assert all([0 <= spacing[i][0] < spacing[i][1] <= 1 for i in range(3)]), f"spacing={spacing} is wrong."
        return ChaoticElement()

    def ___CLASSIFY_TRACE_ELEMENT_of_spacing___(self, trace_spacing: tuple) -> ChaoticTraceElement:
        """
        :param trace_spacing: the trace_spacing representing a trace element.
        :return:
        """
        return ChaoticTraceElement()
|
##############################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is #
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #
# KIND, express or implied. See the License for the specific language #
# governing permissions and limitations under the License. #
##############################################################################
import yorm
from yorm.types import String
from yorm.types import List, AttributeDictionary
@yorm.attr(name=String)
@yorm.attr(value=String)
class SSM(AttributeDictionary):
def __init__(self, name, value):
super().__init__()
self.name = name
self.value = value
@yorm.attr(all=SSM)
class SSMList(List):
def __init__(self):
super().__init__()
@yorm.attr(all=String)
class RegionsList(List):
def __init__(self):
super().__init__()
@yorm.attr(all=String)
class AccountList(List):
def __init__(self):
super().__init__()
@yorm.attr(all=String)
class OUList(List):
def __init__(self):
super().__init__()
@yorm.attr(all=String)
class ApplyToOUList(List):
def __init__(self):
super().__init__()
@yorm.attr(name=String)
@yorm.attr(template_file=String)
@yorm.attr(parameter_file=String)
@yorm.attr(deploy_method=String)
@yorm.attr(ssm_parameters=SSMList)
@yorm.attr(regions=RegionsList)
@yorm.attr(deploy_to_account=AccountList)
@yorm.attr(deploy_to_ou=OUList)
class Resource(AttributeDictionary):
def __init__(self, name, template_file, parameter_file, deploy_method,
deploy_to_account, deploy_to_ou):
super().__init__()
self.name = name
self.template_file = template_file
self.parameter_file = parameter_file
self.deploy_method = deploy_method
self.deploy_to_account = []
self.deploy_to_ou = []
self.regions = []
self.ssm_parameters = []
@yorm.attr(all=Resource)
class ResourcesList(List):
def __init__(self):
super().__init__()
@yorm.attr(name=String)
@yorm.attr(policy_file=String)
@yorm.attr(description=String)
@yorm.attr(apply_to_accounts_in_ou=ApplyToOUList)
class Policy(AttributeDictionary):
    """An organization policy definition referencing a policy file on disk."""

    def __init__(self, name, policy_file, description,
                 apply_to_accounts_in_ou):
        super().__init__()
        self.name = name
        self.description = description
        self.policy_file = policy_file
        self.apply_to_accounts_in_ou = apply_to_accounts_in_ou
@yorm.attr(all=Policy)
class PolicyList(List):
    """YAML-mapped list of Policy entries."""

    def __init__(self):
        super().__init__()
@yorm.attr(region=String)
@yorm.attr(version=String)
@yorm.attr(cloudformation_resources=ResourcesList)
@yorm.attr(organization_policies=PolicyList)
@yorm.sync("{self.manifest_file}", auto_create=False)
class Manifest:
    """Root manifest document, kept in sync with manifest_file by yorm.

    auto_create=False means the manifest file must already exist on disk.
    """

    def __init__(self, manifest_file):
        self.manifest_file = manifest_file
        self.organization_policies = []
        self.cloudformation_resources = []
|
import argparse
import os
from multiprocessing import Pool
import logging
import random
import copy
import tensorflow as tf
from generic.data_provider.iterator import BasicIterator
from generic.tf_utils.evaluator import Evaluator
from generic.data_provider.image_loader import get_img_builder
from guesswhat.models.oracle.oracle_network import OracleNetwork
from guesswhat.models.qgen.qgen_lstm_network import QGenNetworkLSTM
from guesswhat.models.guesser.guesser_network import GuesserNetwork
from guesswhat.models.looper.basic_looper import BasicLooper
from guesswhat.models.qgen.qgen_wrapper import QGenWrapper, QGenUserWrapper
from guesswhat.models.oracle.oracle_wrapper import OracleWrapper, OracleUserWrapper
from guesswhat.models.guesser.guesser_wrapper import GuesserWrapper, GuesserUserWrapper
from guesswhat.data_provider.guesswhat_dataset import Dataset
from guesswhat.data_provider.looper_batchifier import LooperBatchifier
from guesswhat.data_provider.guesswhat_tokenizer import GWTokenizer
from generic.utils.config import load_config, get_config_from_xp
if __name__ == '__main__':

    # Interactive GuessWhat?! game loop. Each of the three agents (Oracle,
    # QGen, Guesser) is either a pretrained network restored from
    # networks_dir or, when its identifier is omitted, the human user.
    parser = argparse.ArgumentParser('Question generator (policy gradient baseline))')

    parser.add_argument("-data_dir", type=str, required=True, help="Directory with data")
    parser.add_argument("-img_dir", type=str, help='Directory with images to feed networks')
    parser.add_argument("-img_raw_dir", type=str, help='Directory with images to display')
    parser.add_argument("-crop_dir", type=str, help='Directory with crops')
    parser.add_argument("-exp_dir", type=str, required=False, help="Directory to output dialogue")
    parser.add_argument("-config", type=str, required=True, help='Config file')
    parser.add_argument("-dict_file", type=str, default="dict.json", help="Dictionary file name")
    parser.add_argument("-networks_dir", type=str, help="Directory with pretrained networks")
    parser.add_argument("-oracle_identifier", type=str, default="156cb3d352b97ba12ffd6cf547281ae2", required=False , help='Oracle identifier - if none: user must be the oracle') # Use checkpoint id instead?
    parser.add_argument("-qgen_identifier", type=str, default="7b24d8b68f94bde9774cd9555584fd93", required=False, help='Qgen identifier - if none: user must be the Qgen')
    parser.add_argument("-guesser_identifier", type=str, required=False, help='Guesser identifier - if none: user must be the guesser')
    parser.add_argument("-gpu_ratio", type=float, default=0.95, help="How many GPU ram is required? (ratio)")

    args = parser.parse_args()

    eval_config, exp_identifier, save_path = load_config(args.config, args.exp_dir)

    # Load all networks configs
    logger = logging.getLogger()

    ###############################
    #  LOAD DATA
    #############################

    # Load image
    logger.info('Loading images..')
    image_builder = get_img_builder(eval_config['image'], args.img_dir)
    crop_builder = None
    if eval_config.get('crop', False):
        logger.info('Loading crops..')
        crop_builder = get_img_builder(eval_config['crop'], args.crop_dir, is_crop=True)

    # Load data
    logger.info('Loading data..')
    trainset = Dataset(args.data_dir, "train", image_builder, crop_builder)
    validset = Dataset(args.data_dir, "valid", image_builder, crop_builder)
    testset = Dataset(args.data_dir, "test", image_builder, crop_builder)

    # NOTE: 'dataset' aliases trainset and 'dummy_dataset' aliases validset,
    # so the assignments below also mutate those underlying objects.
    dataset, dummy_dataset = trainset, validset
    dataset.games = trainset.games + validset.games + testset.games
    dummy_dataset.games = []

    # hack dataset to only keep one game by image
    image_id_set = {}  # dict used as a set: image id -> 1
    games = []
    for game in dataset.games:
        if game.image.id not in image_id_set:
            games.append(game)
            image_id_set[game.image.id] = 1
    dataset.games = games

    # Load dictionary
    logger.info('Loading dictionary..')
    tokenizer = GWTokenizer(os.path.join(args.data_dir, args.dict_file))

    ###############################
    #  START TRAINING
    #############################

    # CPU/GPU option
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        ###############################
        #  LOAD NETWORKS
        #############################

        # Oracle: restore a pretrained network or fall back to user input.
        if args.oracle_identifier is not None:
            oracle_config = get_config_from_xp(os.path.join(args.networks_dir, "oracle"), args.oracle_identifier)
            oracle_network = OracleNetwork(oracle_config, num_words=tokenizer.no_words)
            # Restore only the variables that belong to the oracle scope.
            oracle_var = [v for v in tf.global_variables() if "oracle" in v.name]
            oracle_saver = tf.train.Saver(var_list=oracle_var)
            oracle_saver.restore(sess, os.path.join(args.networks_dir, 'oracle', args.oracle_identifier, 'params.ckpt'))
            oracle_wrapper = OracleWrapper(oracle_network, tokenizer)
        else:
            oracle_wrapper = OracleUserWrapper(tokenizer)
            logger.info("No Oracle was registered >>> use user input")

        # Guesser: restore a pretrained network or fall back to user input.
        if args.guesser_identifier is not None:
            guesser_config = get_config_from_xp(os.path.join(args.networks_dir, "guesser"), args.guesser_identifier)
            guesser_network = GuesserNetwork(guesser_config["model"], num_words=tokenizer.no_words)
            guesser_var = [v for v in tf.global_variables() if "guesser" in v.name]
            guesser_saver = tf.train.Saver(var_list=guesser_var)
            guesser_saver.restore(sess, os.path.join(args.networks_dir, 'guesser', args.guesser_identifier, 'params.ckpt'))
            guesser_wrapper = GuesserWrapper(guesser_network)
        else:
            guesser_wrapper = GuesserUserWrapper(tokenizer, img_raw_dir=args.img_raw_dir)
            logger.info("No Guesser was registered >>> use user input")

        # QGen: restore a pretrained network or fall back to user input.
        if args.qgen_identifier is not None:
            qgen_config = get_config_from_xp(os.path.join(args.networks_dir, "qgen"), args.qgen_identifier)
            qgen_network = QGenNetworkLSTM(qgen_config["model"], num_words=tokenizer.no_words, policy_gradient=False)
            qgen_var = [v for v in tf.global_variables() if "qgen" in v.name] # and 'rl_baseline' not in v.name
            qgen_saver = tf.train.Saver(var_list=qgen_var)
            qgen_saver.restore(sess, os.path.join(args.networks_dir, 'qgen', args.qgen_identifier, 'params.ckpt'))
            qgen_network.build_sampling_graph(qgen_config["model"], tokenizer=tokenizer, max_length=eval_config['loop']['max_depth'])
            qgen_wrapper = QGenWrapper(qgen_network, tokenizer,
                                       max_length=eval_config['loop']['max_depth'],
                                       k_best=eval_config['loop']['beam_k_best'])
        else:
            qgen_wrapper = QGenUserWrapper(tokenizer)
            logger.info("No QGen was registered >>> use user input")

        looper_evaluator = BasicLooper(eval_config,
                                       oracle_wrapper=oracle_wrapper,
                                       guesser_wrapper=guesser_wrapper,
                                       qgen_wrapper=qgen_wrapper,
                                       tokenizer=tokenizer,
                                       batch_size=1)

        logs = []

        # Start training
        final_val_score = 0.
        batchifier = LooperBatchifier(tokenizer, generate_new_games=False)

        # Main interactive loop: one iteration per game played.
        while True:

            # Start new game
            while True:
                id_str = input('Do you want to play a new game? (Yes/No) --> ').lower()
                if id_str == "y" or id_str == "yes": break
                elif id_str == "n" or id_str == "no": exit(0)

            # Pick id image
            image_id = 0
            while True:
                # NOTE(review): non-numeric input raises ValueError here.
                id_str = int(input('What is the image id you want to select? (-1 for random id) --> '))
                if id_str in image_id_set:
                    image_id = id_str
                    break
                elif id_str == -1:
                    image_id = random.choice(list(image_id_set.keys()))
                    break
                else:
                    print("Could not find the following image id: {}".format(id_str))

            # Deep-copy so per-game mutation below does not leak into dataset.
            game = [g for g in dataset.games if g.image.id == image_id][0]
            game = copy.deepcopy(game)
            print("Selecting image {}".format(game.image.filename))

            # Pick id object
            print("Available objects")
            for i, obj in enumerate(game.objects):
                print(" -", i, ":", obj.category, "\t", obj.bbox)
            print("Type '(S)how' to display the image with the object")

            while True:
                id_str = input('Which object id do you want to select? (-1 for random id) --> ')
                if id_str == "S" or id_str.lower() == "show":
                    game.show(img_raw_dir=args.img_raw_dir, display_index=True)
                    continue
                # NOTE(review): non-numeric input other than show raises ValueError.
                id_str = int(id_str)
                if 0 <= id_str < len(game.objects):
                    object_index = id_str
                    object_id = game.objects[object_index].id
                    break
                elif id_str == -1:
                    object_id = random.choice(game.objects).id
                    break
                else:
                    print("Could not find the following object index: {}".format(id_str))

            game.object_id = object_id

            # Run this single game through the looper (greedy decoding).
            dummy_dataset.games = [game]
            iterator = BasicIterator(dummy_dataset, batch_size=1, batchifier=batchifier)
            success = looper_evaluator.process(sess, iterator, mode="greedy")
|
import torch
# from pytorch_lightning.metrics import Metric
class MatchPrecision():
    """Soft precision of keypoint matches: sum(scores*gt) / sum(scores)."""

    def __init__(self, name=None):
        # Optional display name for logging/reporting.
        self.name = name

    def __call__(self, scores, gt_scores, match_thresh=0.2):
        '''
        scores: (n_batch, n_obs_kpt, n_model_kpt)
        gt_scores: (n_batch, n_obs_kpt, n_model_kpt)
        The dustbins should be already removed from the input
        '''
        assert scores.shape == gt_scores.shape
        # NOTE: match_thresh is currently unused (hard thresholding disabled).
        predicted_mass = scores.sum()
        # No predicted matches at all -> precision defined as 0.
        if predicted_mass == 0:
            return torch.tensor(0, device=scores.device, dtype=scores.dtype)
        true_positive_mass = (scores * gt_scores).sum()
        return true_positive_mass / predicted_mass
class MatchRecall():
    """Soft recall of keypoint matches: sum(scores*gt) / sum(gt)."""

    def __init__(self, name=None):
        # Optional display name for logging/reporting.
        self.name = name

    def __call__(self, scores, gt_scores, match_thresh=0.2):
        '''
        scores: (n_batch, n_obs_kpt, n_model_kpt)
        gt_scores: (n_batch, n_obs_kpt, n_model_kpt)
        The dustbins should be already removed from the input
        '''
        assert scores.shape == gt_scores.shape
        # NOTE: match_thresh is currently unused (hard thresholding disabled).
        gt_mass = gt_scores.sum()
        # Nothing to recall: perfect if we also predicted nothing, else zero.
        if gt_mass == 0:
            value = 1 if scores.sum() == 0 else 0
            return torch.tensor(value, device=scores.device, dtype=scores.dtype)
        return (scores * gt_scores).sum() / gt_mass
class ObsSegIoU():
    """IoU between predicted and ground-truth keypoint 'segmentations'.

    A keypoint is considered segmented when its best match score over the
    model keypoints reaches match_thresh.
    """

    def __init__(self, name=None):
        # Optional display name for logging/reporting.
        self.name = name

    def __call__(self, scores, gt_scores, match_thresh=0.1):
        '''
        scores: (n_batch, n_obs_kpt, n_model_kpt)
        gt_scores: (n_batch, n_obs_kpt, n_model_kpt)
        The dustbins should be already removed from the input
        '''
        assert scores.shape == gt_scores.shape
        threshold = torch.tensor([match_thresh], device=scores.device, dtype=scores.dtype)
        # Binarize by thresholding the best match score per observed keypoint.
        gt_mask = gt_scores.max(-1)[0] >= threshold
        pred_mask = scores.max(-1)[0] >= threshold
        intersection = torch.logical_and(gt_mask, pred_mask).sum().float()
        union = torch.logical_or(gt_mask, pred_mask).sum().float()
        # Empty union -> define IoU as 0 to avoid division by zero.
        if union == 0:
            return torch.tensor(0, device=scores.device, dtype=scores.dtype)
        return intersection / union
|
"""Module with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias` and `put_to_history`
methods.
"""
from collections import defaultdict
from subprocess import Popen, PIPE
from time import time
import os
from psutil import Process
from .utils import DEVNULL
class Generic(object):
    """Fallback shell used when the user's shell is not recognised.

    Subclasses override _get_aliases, app_alias, _get_history_file_name and
    _get_history_line to provide shell-specific behaviour.
    """

    def _get_aliases(self):
        # The generic shell knows about no aliases.
        return {}

    def _expand_aliases(self, command_script):
        aliases = self._get_aliases()
        binary = command_script.split(' ')[0]
        if binary not in aliases:
            return command_script
        # Replace only the first occurrence — the command name itself.
        return command_script.replace(binary, aliases[binary], 1)

    def from_shell(self, command_script):
        """Prepares command before running in app."""
        return self._expand_aliases(command_script)

    def to_shell(self, command_script):
        """Prepares command for running in shell."""
        return command_script

    def app_alias(self):
        return "\nalias fuck='eval $(thefuck $(fc -ln -1))'\n"

    def _get_history_file_name(self):
        # The generic shell has no known history file.
        return ''

    def _get_history_line(self, command_script):
        return ''

    def put_to_history(self, command_script):
        """Puts command script to shell history."""
        history_path = self._get_history_file_name()
        if not os.path.isfile(history_path):
            return
        with open(history_path, 'a') as history:
            history.write(self._get_history_line(command_script))

    def and_(self, *commands):
        return ' && '.join(commands)
class Bash(Generic):
    """Bash-specific alias expansion and history handling."""

    def app_alias(self):
        return "\nalias fuck='eval $(thefuck $(fc -ln -1)); history -r'\n"

    def _parse_alias(self, alias):
        # Bash prints aliases as: alias name='value'
        name, value = alias.replace('alias ', '', 1).split('=', 1)
        # Strip one matching pair of surrounding quotes, if present.
        if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
            value = value[1:-1]
        return name, value

    def _get_aliases(self):
        proc = Popen('bash -ic alias', stdout=PIPE, stderr=DEVNULL, shell=True)
        raw = proc.stdout.read().decode('utf-8')
        aliases = {}
        for line in raw.split('\n'):
            if line and '=' in line:
                key, value = self._parse_alias(line)
                aliases[key] = value
        return aliases

    def _get_history_file_name(self):
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.bash_history'))

    def _get_history_line(self, command_script):
        return u'{}\n'.format(command_script)
class Zsh(Generic):
    """Zsh-specific alias expansion and history handling."""

    def app_alias(self):
        return "\nalias fuck='eval $(thefuck $(fc -ln -1 | tail -n 1)); fc -R'\n"

    def _parse_alias(self, alias):
        # Zsh prints aliases as: name='value'
        name, value = alias.split('=', 1)
        # Strip one matching pair of surrounding quotes, if present.
        if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
            value = value[1:-1]
        return name, value

    def _get_aliases(self):
        proc = Popen('zsh -ic alias', stdout=PIPE, stderr=DEVNULL, shell=True)
        raw = proc.stdout.read().decode('utf-8')
        aliases = {}
        for line in raw.split('\n'):
            if line and '=' in line:
                key, value = self._parse_alias(line)
                aliases[key] = value
        return aliases

    def _get_history_file_name(self):
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.zsh_history'))

    def _get_history_line(self, command_script):
        # Extended zsh history format: ': <timestamp>:<duration>;<command>'
        return u': {}:0;{}\n'.format(int(time()), command_script)
class Tcsh(Generic):
    """Tcsh-specific alias expansion and history handling."""

    def app_alias(self):
        return "\nalias fuck 'set fucked_cmd=`history -h 2 | head -n 1` && eval `thefuck ${fucked_cmd}`'\n"

    def _parse_alias(self, alias):
        # Tcsh prints aliases as: name<TAB>value
        name, value = alias.split("\t", 1)
        return name, value

    def _get_aliases(self):
        proc = Popen('tcsh -ic alias', stdout=PIPE, stderr=DEVNULL, shell=True)
        raw = proc.stdout.read().decode('utf-8')
        aliases = {}
        for line in raw.split('\n'):
            if line and '\t' in line:
                key, value = self._parse_alias(line)
                aliases[key] = value
        return aliases

    def _get_history_file_name(self):
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.history'))

    def _get_history_line(self, command_script):
        # Tcsh history format: '#+<timestamp>' line followed by the command.
        return u'#+{}\n{}\n'.format(int(time()), command_script)
# Map from the parent process's command name to its shell handler; any
# unknown shell falls back to a fresh Generic instance via the defaultdict.
shells = defaultdict(lambda: Generic(), {
    'bash': Bash(),
    'zsh': Zsh(),
    '-csh': Tcsh(),  # login shells appear with a leading '-' in the process name
    'tcsh': Tcsh()})
def _get_shell():
    """Return the shell handler for the process that launched us."""
    try:
        # Current psutil API: parent() and cmdline() are methods.
        shell = Process(os.getpid()).parent().cmdline()[0]
    except TypeError:
        # Fallback — presumably an older psutil where parent/cmdline were
        # plain attributes, making the call above raise TypeError.
        shell = Process(os.getpid()).parent.cmdline[0]
    return shells[shell]
def from_shell(command):
    """Prepare a command read from the current shell for use inside the app."""
    return _get_shell().from_shell(command)
def to_shell(command):
    """Prepare a command for execution in the current shell."""
    return _get_shell().to_shell(command)
def app_alias():
    """Print the shell alias snippet that wires up the 'fuck' command."""
    print(_get_shell().app_alias())
def put_to_history(command):
    """Append a command to the current shell's history file."""
    return _get_shell().put_to_history(command)
def and_(*commands):
    """Join commands with the current shell's AND operator."""
    return _get_shell().and_(*commands)
|
# Lab 1
# 12 October 2020
# Created by YongHua
# Python version 3.8
import math
def average(a, b):
    """Return the arithmetic mean of a and b."""
    total = a + b
    return total * 0.5
# Demo output for average().
print(average(10, 20))
print(average(10, 4))
def distance(a, b):
    """Return the absolute difference between a and b.

    Idiom: abs() replaces the manual greater-than branching while producing
    the same result for all inputs.
    """
    return abs(a - b)
# Demo output for distance().
print(distance(3, 4))
print(distance(3, 1))
def geometric_mean(a, b):
    """Return the geometric mean sqrt(a*b).

    Raises ValueError (from math.sqrt) when the product is negative.
    """
    product = a * b
    return math.sqrt(product)
# Demo output for geometric_mean().
print(geometric_mean(2, 2))
print(geometric_mean(2, 8))
print(geometric_mean(2, 1))
def pyramid_volume(A, h):
    """Return the volume of a pyramid with base area A and height h."""
    one_third = 1 / 3
    return one_third * A * h
print(pyramid_volume(1, 2)) |
#!/usr/bin/env python
# author: me@itzo.org
# version: 2.2
# description: Get current data from Bittrex for market exchange rates and amounts in order book
# Store the data in a sqlite database for future use
import urllib
import json
import datetime
import sys
import getopt
import sqlite3 as db
import os.path
# print usage
# print usage
def usage():
    """Print command-line usage information to stdout (Python 2 prints)."""
    print "usage: ./bittrex.py [-iph] -m market"
    print " eg: ./bittrex.py -m BTC-ETH"
    print
    print " -i [--init] initializes a new sqlite database 'market.db'"
    print " -p [--print] prints out history for given market"
    print " -m [--market] specifies the market to use"
    print " -h [--help] prints this menu"
# initialize the market.db if requested
# initialize the market.db if requested
def create_db():
    """(Re)create the 'history' table in market.db.

    Drops any existing history table first, so all previous data is lost.
    Exits the process with status 1 on a sqlite error.
    """
    try:
        con = db.connect('market.db')
        cur = con.cursor()
        cur.execute('DROP TABLE IF EXISTS history')
        cur.executescript("""
            CREATE TABLE history( market TEXT,
            date TIMESTAMP,
            buyq INT,
            sellq INT,
            bid REAL,
            ask REAL,
            buy_orders INT,
            sell_orders INT,
            buy_min REAL,
            sell_max REAL);""")
        con.commit()
    # Python 2 exception syntax.
    # NOTE(review): if db.connect() itself raises, 'con' is unbound and the
    # handler/finally below raise NameError instead of reporting the error.
    except db.Error, e:
        if con:
            con.rollback()
        print "Error %s:" % e.args[0]
        sys.exit(1)
    finally:
        if con:
            con.close()
# update the db with the latest info
# update the db with the latest info
def db_insert(market,buyq,sellq,bid,ask,total_buy_orders,total_sell_orders,buy_min,sell_max):
    """Insert one snapshot row into the history table of market.db.

    Exits with status 2 if the database file does not exist yet.
    """
    if os.path.isfile('market.db'):
        # Unix epoch seconds for the 'date' column.
        timestamp = int(datetime.datetime.now().strftime("%s"))
        con = db.connect('market.db')
        cur = con.cursor()
        cur.execute('INSERT INTO history VALUES(?,?,?,?,?,?,?,?,?,?)', \
            (market,timestamp,int(buyq),int(sellq),bid,ask,total_buy_orders,total_sell_orders,buy_min,sell_max));
        con.commit()
    else:
        print "Can't find the database. Please specify the -i flag to create it.\n"
        usage()
        sys.exit(2)
# get the market data
# get the market data
def get_data(market):
    """Fetch the Bittrex order book for `market` and store a snapshot row.

    Sums the quantities of the top `percent` % of the buy and sell books and
    persists the result via db_insert().
    """
    # load JSON data from Bittrex API
    url = 'https://bittrex.com/api/v1.1/public/getorderbook?market='+market+'&type=both&depth=50'
    json_obj = urllib.urlopen(url)
    data = json.load(json_obj)
    # get the sum of all orders that we care about
    buyq = 0
    sellq = 0
    # only top 'percent' % of each orderbook considered relevant data
    percent = 25
    bid = data['result']['buy'][0]['Rate']
    total_buy_orders = len(data['result']['buy'])
    # Python 2 integer division yields an integer list index.
    buy_index = percent * total_buy_orders/100
    buy_min = data['result']['buy'][buy_index]['Rate']
    ask = data['result']['sell'][0]['Rate']
    total_sell_orders = len(data['result']['sell'])
    sell_index = percent * total_sell_orders/100
    sell_max = data['result']['sell'][sell_index]['Rate']
    # Accumulate the quantity within the relevant slice of each book.
    for item in data['result']['buy']:
        if item['Rate'] > buy_min:
            buyq += item['Quantity']
    for item in data['result']['sell']:
        if item['Rate'] < sell_max:
            sellq += item['Quantity']
    db_insert(market,buyq,sellq,bid,ask,total_buy_orders,total_sell_orders,buy_min,sell_max)
    #print "----------------------------------------"
    #print "buy orders:\t\t"+str(total_buy_orders)
    #print "buy_index:\t\t"+str(buy_index)
    #print "MIN buy allowed:\t"+str(buy_min)
    #print "----------------------------------------"
    #print "sell orders:\t\t"+str(total_sell_orders)
    #print "sell_index:\t\t"+str(sell_index)
    #print "MAX sell allowed:\t"+str(sell_max)
    #print "----------------------------------------"
# print market history
# print market history
def print_history(market):
    """Print all stored history rows for the given market as a table."""
    con = db.connect('market.db')
    with con:
        cur = con.cursor()
        cur.execute('SELECT * from history')
        # NOTE(review): commit after a SELECT is a no-op; harmless but unneeded.
        con.commit()
        rows = cur.fetchall()
        # Header row.
        print "%-8s %-10s %+8s %+8s %-10s %-10s %+5s %+5s %+10s %+10s" % \
            ('Market', 'Time', 'Buyq', 'Sellq', 'Bid', 'Ask', 'Buy#', 'Sell#', 'Buy min', 'Sell max')
        for row in rows:
            # Filter in Python rather than in SQL.
            if row[0] == market:
                print "%-8s %s %+8s %+8s %0.8f %0.8f %+5s %+5s %0.8f %0.8f" % \
                    (row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9])
        #market,timestamp,int(buyq),int(sellq),bid,ask,total_buy_orders,total_sell_orders,buy_min,sell_max
# get cmd line options a.k.a. main... ;\
# get cmd line options a.k.a. main... ;\
required_m = False
try:
    opts, args = getopt.getopt(sys.argv[1:], 'ipm:h', ['init', 'print', 'market=', 'help'])
except getopt.GetoptError:
    usage()
    sys.exit(2)
# Options are handled in command-line order.
for opt, arg in opts:
    if opt in ('-h', '--help'):
        usage()
        sys.exit(2)
    elif opt in ('-m', '--market'):
        market = arg
        get_data(market)
        required_m = True
    elif opt in ('-i', '--init'):
        create_db()
    elif opt in ('-p', '--print'):
        # NOTE(review): relies on -m having appeared EARLIER on the command
        # line; otherwise 'market' is unbound here and this raises NameError.
        print_history(market)
    else:
        usage()
        sys.exit(2)
# -m is mandatory; bail out if it was never seen.
if required_m == False:
    usage()
    sys.exit(2)
|
# dupFinder.py
import os, sys
import hashlib
import stat
from pytube import YouTube
# duplicates file
# duplicates file
def findDup(parentFolder):
    """Walk parentFolder recursively and group files by MD5 content hash.

    Returns a dict {hash: [paths]}; any value list with more than one path
    is a set of duplicate files.
    """
    # Dups in format {hash:[names]}
    dups = {}
    count = 0
    for dirName, subdirs, fileList in os.walk(parentFolder):
        print('Scanning %s...' % dirName)
        for filename in fileList:
            # Get the path to the file
            path = os.path.join(dirName, filename)
            # Calculate hash
            file_hash = hashfile(path)
            count = count + 1
            # Add or append the file path
            if file_hash in dups:
                dups[file_hash].append(path)
            else:
                dups[file_hash] = [path]
    # Bug fix: '%d' was never interpolated (count was passed as a second
    # print argument). Also removed the dead hard-coded Windows test-path
    # os.listdir() call, which crashed on any machine without that path.
    print('>>>>>>> Count : %d' % count)
    return dups
# Joins two dictionaries
def joinDicts(dict1, dict2):
for key in dict2.keys():
if key in dict1:
dict1[key] = dict1[key] + dict2[key]
else:
dict1[key] = dict2[key]
def hashfile(path, blocksize=65536):
    """Return the MD5 hex digest of the file at path, read in blocksize chunks.

    The with-statement guarantees the file handle is closed even if a read
    raises (the original leaked the handle on error).
    """
    hasher = hashlib.md5()
    with open(path, 'rb') as afile:
        # iter(callable, sentinel) yields chunks until read() returns b''.
        for buf in iter(lambda: afile.read(blocksize), b''):
            hasher.update(buf)
    return hasher.hexdigest()
if __name__ == '__main__':
    # Each command-line argument is a folder to scan for duplicate files.
    if len(sys.argv) > 1:
        dups = {}
        folders = sys.argv[1:]
        for i in folders:
            # Iterate the folders given
            if os.path.exists(i):
                # Find the duplicated files and append them to the dups
                joinDicts(dups, findDup(i))
            else:
                print('%s is not a valid path, please verify' % i)
                sys.exit()
        # printResults(dups)
    else:
        print('Usage: python dupFinder.py folder or python dupFinder.py folder1 folder2 folder3')
# import os
# import sys
#
#
# class testing_dir:
#
# def __init__(self):
# self.base_path = 'W:\Work\PythianTask\TestingDir'
# self.access_rights = 0o755
#
# def creating_dir(self, i):
# try:
# final_path = os.path.join(self.base_path, str(i))
# os.mkdir(final_path, self.access_rights)
# except OSError:
# print("Creation of the directory %s failed" % self.access_rights)
# else:
# print("Successfully created the directory %s" % self.access_rights)
#
# def bulk_dir(self, number_of_dir):
# for i in range(0, number_of_dir):
# self.creating_dir(i + 1)
# print(self.base_path)
# print(i)
#
# self.base_path = os.path.join(self.base_path, str(i + 1))
# print(self.base_path)
# for j in range(0, number_of_dir):
# self.creating_dir(j + 1)
# print(j)
#
# def walking_through_dir(self):
#
# return sum([len(dirs) for dirs in os.walk(self.base_path)])
# # for filename in os.listdir(self.base_path):
# # print(filename)
# # self.base_path = os.path.join(self.base_path, filename)
# # return self.walking_through_dir(self.base_path)
#
#
#
# # os.listdir(self.base_path)
#
# # return [os.path.join(d, f) for f in os.listdir(d)]
#
#
# # for filename in os.listdir(self.base_path):
# # print(filename)
#
#
# # print(os.listdir(self.base_path))
# # for subdir, dirs, files in os.walk(self.base_path):
#
#
#
# # print(1)
# # for subdir in dirs:
# # print('here')
# # print(os.path.join(dirs, subdir))
# # print('-----------------------------')
#
#
# if __name__ == '__main__':
# test = testing_dir()
# # test.creating_dir('tapan')
# print(test.walking_through_dir())
# # test.bulk_dir(5)
# # test.creating_dir('tapan')
# # test.creating_dir(1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for :mod:`~utilipy.decorators.func_io.dtype_decorator`."""
##############################################################################
# IMPORTS
# THIRD PARTY
import numpy as np
import pytest
# PROJECT-SPECIFIC
from utilipy.decorators.func_io import dtypeDecorator
##############################################################################
# PARAMETERS
# Sample values of each basic type exercised by the tests below.
x = 1  # int
y = 2.2  # float
z = [3, 4.4]  # list
u, uu = "5", "5.5"  # int-like and float-like strings
v = False  # bool
w = np.array([7, 8.8])  # numpy array
##############################################################################
# dtypeDecorator
def test_dtypeDecorator_blank():
    """Test no-conversion mode of dtypeDecorator."""
    # defining function
    @dtypeDecorator()
    def func(x):
        return x

    # /def

    # With no conversion specs the decorator must pass every value through
    # unchanged, whatever its type.
    assert func(x) == x
    assert func(y) == y
    assert func(z) == z
    assert func(u) == u
    assert func(uu) == uu
    assert func(v) == v
    # numpy comparison is elementwise; require all elements equal.
    assert (func(w) == w).all()

    return

# /def
def test_dtypeDecorator_python_scalars():
    """Test standard use of dtypeDecorator."""
    # defining function: first argument converted to int on the way in,
    # return value converted to float on the way out.
    @dtypeDecorator(in_dtype=[(0, int)], out_dtype=[(0, float)])
    def func(x):
        assert isinstance(x, int)
        return x

    # /def

    assert isinstance(func(x), float)
    assert isinstance(func(y), float)

    # should fail
    # int(list) is a TypeError.
    with pytest.raises(TypeError):
        assert isinstance(func(z), float)

    # int-like string converts fine...
    assert isinstance(func(u), float)
    # ...but a float-like string is not a valid int literal.
    with pytest.raises(ValueError):
        assert isinstance(func(uu), float)

    # bool is an int subclass, so conversion succeeds.
    assert isinstance(func(v), float)

    # int(ndarray with >1 element) is a TypeError.
    with pytest.raises(TypeError):
        assert isinstance(func(w), float)

    return

# /def
# def test_dtypeDecorator_numpy_arrays():
# """test standard use of dtypeDecorator
# """
# # TODO
# # /def
# def test_dtypeDecorator_single_arg():
# """
# """
#
# @dtypeDecorator(inargs=(0, int))
# def func(x):
# return x
#
# # TODO test
#
# @dtypeDecorator(outargs=(0, int))
# def func(x):
# return x
#
# # TODO test
#
# @dtypeDecorator(inargs=(0, int), outargs=(1, int))
# def func(x, y):
# return x, y
#
# # TODO test
#
# return
# # /def
# def test_dtypeDecorator_string_arg():
# """
# """
#
# @dtypeDecorator(inargs=('all', int))
# def func(x):
# return x
#
# # TODO test
#
# @dtypeDecorator(outargs=('all', int))
# def func(x):
# return x
#
# # TODO test
#
# @dtypeDecorator(inargs=('all', int), outargs=('all', float))
# def func(x, y):
# return x, y
#
# # TODO test
#
# return
# # /def
# --------------------------------------------------------------------------
##############################################################################
# END
|
# Write a program that asks for the number of km driven by a rented car
# and the number of days it was rented for. Compute the price to pay,
# given that the car costs R$ 60 per day plus R$ 0.15 per km driven.
km = float(input('\nQuantos km rodados? '))
dias = int(input('Quantos dias alugados? '))

valor_km = km * 0.15    # cost of the distance driven
valor_dias = dias * 60  # cost of the rental days
total = valor_km + valor_dias

print(f'\nForam percorridos {km:.0f}km ao custo de R$0.15 por km, o custo é de R${valor_km:.2f}.')
print(f'O carro ficou alugado por {dias:.0f} dias, ao custo de R$ 60 por dia, custará R$ {valor_dias:.2f}.')
print(f'\nO valor total a ser pago é de R$ {total:.2f}!\n')
class Address:
    """Simple value object for a postal address."""

    def __init__(self, address1='', address2='', city='', state='', zip='', country=''):
        # NOTE: 'zip' shadows the builtin; kept for caller compatibility.
        self.address1 = address1
        self.address2 = address2
        self.city = city
        self.state = state
        self.zip = zip
        self.country = country

    @staticmethod
    def deserialize(address_dict):
        """Build an Address from a dict with keys address1..country.

        Raises KeyError if any expected key is missing.

        Bug fix: this was previously a plain function in the class body
        (no self, no @staticmethod), so calling it on an *instance* passed
        the instance as address_dict and failed. @staticmethod keeps
        Address.deserialize(d) working and makes instance access correct too.
        """
        return Address(address_dict['address1'], address_dict['address2'],
                       address_dict['city'], address_dict['state'],
                       address_dict['zip'], address_dict['country'])
from setuptools import setup
from setuptools import find_packages
# Minimal packaging config: the package contents are generated protobuf
# bindings living under gen/py, exposed as the 'layer_preview_proto' package.
setup(name="layer_preview_proto",
      version="0.0.1",
      description="tf-layer-preview protobuffer definition library",
      url="https://github.com/recogni/tf-layer-preview",
      author="sabhiram",
      install_requires=[
      ],
      packages=["layer_preview_proto"],
      package_dir={"layer_preview_proto": "gen/py"})
|
from typing import Union
import strawberry
from strawberry.types import Info
from reddit.comments.types import CommentType
__all__ = ("comment_create",)
# GraphQL input payload for comment_create. Plain comments are used instead
# of a docstring because strawberry surfaces docstrings as schema descriptions.
@strawberry.input
class CommentCreateInput:
    content: str  # comment body text
    subreddit_id: strawberry.ID
    post_id: strawberry.ID
# Success branch of the comment_create result union.
@strawberry.type
class CommentCreateSuccess:
    comment: CommentType
# Error branch of the comment_create result union.
@strawberry.type
class CommentCreateError:
    error: str


# Union resolved by strawberry into the GraphQL result type.
CommentCreateResult = Union[CommentCreateSuccess, CommentCreateError]
@strawberry.field(description="Creates a new comment on a post.")
async def comment_create(info: Info, input: CommentCreateInput) -> CommentCreateResult:
    # TODO(review): unimplemented stub — currently returns None, which does
    # not satisfy the declared CommentCreateResult union.
    pass
|
from django.db import models
from django.utils.text import slugify
class Category(models.Model):
    # Human-readable category label.
    category_name = models.CharField(max_length=50)

    def __str__(self):
        return self.category_name

    class Meta:
        verbose_name = 'Kateqoriya'
class Post(models.Model):
    """Blog post; the slug is regenerated from the title on every save."""

    category = models.ManyToManyField(Category)
    titles = models.CharField(max_length=120, blank=False, verbose_name="Bashliq")
    # Auto-generated from `titles` in save(); not editable in the admin.
    slugfy = models.SlugField(max_length=123, editable=False, null=False, unique=True)
    content = models.TextField(blank=False, verbose_name="Mezmun")
    image = models.ImageField(blank=True, verbose_name='məqalə üçün şəkil')
    draft = models.BooleanField(default=False, verbose_name="Qaralama olaraq yadda saxlanilsin?")
    create_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.titles

    def get_image_or_none(self):
        """Return the image URL, or None when no image is attached."""
        if self.image and hasattr(self.image, 'url'):
            return self.image.url
        return None

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        # Regenerate the slug from the title.
        # NOTE(review): slugify() alone does not guarantee uniqueness even
        # though the field is unique=True — two posts with the same title
        # will raise an IntegrityError.
        self.slugfy = slugify(self.titles)
        # Bug fix: the save() arguments were previously dropped
        # (super().save() was called with no arguments), silently ignoring
        # callers' force_insert / force_update / using / update_fields.
        super(Post, self).save(force_insert=force_insert,
                               force_update=force_update,
                               using=using,
                               update_fields=update_fields)

    class Meta:
        verbose_name = "Paylaşım"
        verbose_name_plural = "Paylaşımlar"
        ordering = ['-create_time']
class Drafts(models.Model):
    # Collection of posts saved as drafts.
    drafts = models.ManyToManyField(Post)
|
import unittest
from bc211 import dtos, exceptions
from common.testhelpers.random_test_values import a_string, a_phone_number
class TestOrganization(unittest.TestCase):
    """Validation tests for the Organization DTO's required fields."""

    def test_throws_on_missing_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Organization(name='name')

    def test_throws_on_missing_name(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Organization(id='id')
class TestLocation(unittest.TestCase):
    """Validation tests for the Location DTO's required and typed fields."""

    def test_throws_on_missing_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Location(name='name', organization_id='organization_id')

    def test_throws_on_missing_name(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Location(id='id', organization_id='organization_id')

    def test_throws_on_missing_organization_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Location(id='id', name='name')

    def test_can_create_with_none_value_for_services(self):
        services = None
        location = dtos.Location(id=a_string(), name=a_string(),
                                 organization_id=a_string(), services=services)
        self.assertEqual(location.services, services)

    def test_can_create_with_list_of_service_dtos_for_services(self):
        service = dtos.Service(id=a_string(), name=a_string(),
                               organization_id=a_string(), site_id=a_string())
        services = [service]
        location = dtos.Location(id=a_string(), name=a_string(),
                                 organization_id=a_string(), services=services)
        self.assertEqual(location.services, services)

    def test_throws_on_single_service_for_services(self):
        service = dtos.Service(id=a_string(), name=a_string(),
                               organization_id=a_string(), site_id=a_string())
        with self.assertRaises(exceptions.InvalidTypeXmlParseException):
            dtos.Location(id=a_string(), name=a_string(),
                          organization_id=a_string(), services=service)

    def test_throws_on_list_of_wrong_type_for_services(self):
        services = [a_string()]
        with self.assertRaises(exceptions.InvalidTypeXmlParseException):
            dtos.Location(id=a_string(), name=a_string(),
                          organization_id=a_string(), services=services)

    def test_can_create_with_none_value_for_phone_numbers(self):
        phone_numbers = None
        location = dtos.Location(id=a_string(), name=a_string(),
                                 organization_id=a_string(), phone_numbers=phone_numbers)
        # Bug fix: this previously asserted on location.services (copy-paste
        # from the services tests), which only passed because both were None.
        self.assertEqual(location.phone_numbers, phone_numbers)

    def test_can_create_with_list_of_phone_at_location_dtos_for_phone_numbers(self):
        phone_at_location = dtos.PhoneAtLocation(location_id=a_string(),
                                                 phone_number_type_id=a_string(),
                                                 phone_number=a_phone_number())
        phones_at_location = [phone_at_location]
        location = dtos.Location(id=a_string(), name=a_string(),
                                 organization_id=a_string(), phone_numbers=phones_at_location)
        self.assertEqual(location.phone_numbers, phones_at_location)

    def test_throws_on_single_phone_at_location_for_phone_numbers(self):
        phone_at_location = dtos.PhoneAtLocation(location_id=a_string(),
                                                 phone_number_type_id=a_string(),
                                                 phone_number=a_phone_number())
        with self.assertRaises(exceptions.InvalidTypeXmlParseException):
            dtos.Location(id=a_string(), name=a_string(),
                          organization_id=a_string(), phone_numbers=phone_at_location)

    def test_throws_on_list_of_wrong_type_for_phone_numbers(self):
        phone_numbers = [a_string()]
        with self.assertRaises(exceptions.InvalidTypeXmlParseException):
            dtos.Location(id=a_string(), name=a_string(),
                          organization_id=a_string(), phone_numbers=phone_numbers)
class TestSpatialLocation(unittest.TestCase):
    """Tests for parsing latitude/longitude strings into floats."""

    def test_can_create(self):
        location = dtos.SpatialLocation(latitude='123.456', longitude='-23.456')
        self.assertAlmostEqual(location.latitude, 123.456)
        self.assertAlmostEqual(location.longitude, -23.456)

    def test_throws_on_latitude_not_a_number(self):
        with self.assertRaises(exceptions.InvalidFloatXmlParseException):
            dtos.SpatialLocation(latitude='foo', longitude='-23.456')

    def test_throws_on_longitude_not_a_number(self):
        with self.assertRaises(exceptions.InvalidFloatXmlParseException):
            dtos.SpatialLocation(latitude='123.456', longitude='foo')
class TestService(unittest.TestCase):
    """Validation tests for the Service DTO."""
    def test_can_create(self):
        service = dtos.Service(id='id', name='name', organization_id='organization_id', site_id='site_id', description='description')
        self.assertEqual(service.id, 'id')
        self.assertEqual(service.name, 'name')
        self.assertEqual(service.organization_id, 'organization_id')
        self.assertEqual(service.site_id, 'site_id')
        self.assertEqual(service.description, 'description')
    # BUG FIX: the four "throws" tests below previously constructed
    # dtos.TaxonomyTerm (copy/paste error), so Service's required-field
    # validation was never actually exercised.
    def test_throws_on_missing_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Service(name='name', organization_id='organization_id',
                         site_id='site_id', description='description')
    def test_throws_on_missing_name(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Service(id='id', organization_id='organization_id',
                         site_id='site_id', description='description')
    def test_throws_on_missing_organization_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Service(id='id', name='name', site_id='site_id', description='description')
    def test_throws_on_missing_site_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Service(id='id', name='name',
                         organization_id='organization_id', description='description')
class TestTaxonomyTerm(unittest.TestCase):
    """Validation tests for the TaxonomyTerm DTO."""
    def test_can_create(self):
        term = dtos.TaxonomyTerm(taxonomy_id='taxonomy_id', name='name')
        self.assertEqual(term.taxonomy_id, 'taxonomy_id')
        self.assertEqual(term.name, 'name')
    def test_throws_on_missing_taxonomy_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.TaxonomyTerm(name='name')
    def test_throws_on_missing_name(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.TaxonomyTerm(taxonomy_id='taxonomy_id')
class PhoneAtLocation(unittest.TestCase):
    """Validation tests for the PhoneAtLocation DTO."""
    def test_can_create(self):
        expected_location_id = a_string()
        expected_type_id = a_string()
        expected_number = a_phone_number()
        dto = dtos.PhoneAtLocation(location_id=expected_location_id,
                                   phone_number_type_id=expected_type_id,
                                   phone_number=expected_number)
        self.assertEqual(dto.location_id, expected_location_id)
        self.assertEqual(dto.phone_number_type_id, expected_type_id)
        self.assertEqual(dto.phone_number, expected_number)
    def test_throws_on_missing_location_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.PhoneAtLocation(phone_number_type_id=a_string(), phone_number=a_phone_number())
    def test_throws_on_missing_phone_number_type_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.PhoneAtLocation(location_id=a_string(), phone_number=a_phone_number())
    def test_throws_on_missing_phone_number(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.PhoneAtLocation(location_id=a_string(), phone_number_type_id=a_string())
class Address(unittest.TestCase):
    """Validation tests for the Address DTO: required fields raise, optional
    fields default to None."""
    def test_can_create(self):
        dto = dtos.Address(location_id='location_id', address_type_id='address_type_id',
                           city='city', country='country', address_lines='address_lines',
                           state_province='state_province', postal_code='postal_code')
        self.assertEqual(dto.location_id, 'location_id')
        self.assertEqual(dto.address_type_id, 'address_type_id')
        self.assertEqual(dto.city, 'city')
        self.assertEqual(dto.country, 'country')
        self.assertEqual(dto.address_lines, 'address_lines')
        self.assertEqual(dto.state_province, 'state_province')
        self.assertEqual(dto.postal_code, 'postal_code')
    def test_throws_on_missing_location_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Address(address_type_id=a_string(), city=a_string(), country=a_string())
    def test_throws_on_missing_address_type_id(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Address(location_id=a_string(), city=a_string(), country=a_string())
    def test_throws_on_missing_city(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Address(location_id=a_string(), address_type_id=a_string(), country=a_string())
    def test_throws_on_missing_country(self):
        with self.assertRaises(exceptions.MissingRequiredFieldXmlParseException):
            dtos.Address(location_id=a_string(), address_type_id=a_string(), city=a_string())
    def test_can_create_with_no_address_lines(self):
        dto = dtos.Address(address_type_id=a_string(), location_id=a_string(), city=a_string(),
                           country=a_string(), state_province=a_string(), postal_code=a_string())
        self.assertIsNone(dto.address_lines)
    def test_can_create_with_no_state_province(self):
        dto = dtos.Address(address_type_id=a_string(), location_id=a_string(), city=a_string(),
                           country=a_string(), address_lines=a_string(), postal_code=a_string())
        self.assertIsNone(dto.state_province)
    def test_can_create_with_no_postal_code(self):
        dto = dtos.Address(address_type_id=a_string(), location_id=a_string(), city=a_string(),
                           country=a_string(), address_lines=a_string(), state_province=a_string())
        self.assertIsNone(dto.postal_code)
|
import os
import time
import urllib.request, urllib.parse, urllib.error
import sys
import datetime
from django.core.management.base import BaseCommand, CommandError
from nadine.utils.backup import BackupManager
class Command(BaseCommand):
    """Management command that creates a full site backup via BackupManager."""
    help = "Creates a backup containing an SQL dump and the media files."
    # NOTE(review): boolean requires_system_checks is deprecated in Django 4.1+
    # (use a list, e.g. []); confirm the project's Django version before changing.
    requires_system_checks = False

    def handle(self, *args, **options):
        """Run the backup and report the result returned by make_backup()."""
        manager = BackupManager()
        # self.stdout (not print) is the Django convention for command output
        # and makes the command testable via call_command(stdout=...).
        self.stdout.write(str(manager.make_backup()))
# Copyright 2018 Trevor F. Smith (http://trevor.smith.name/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
from ..ext.database import db
class Perfil(db.Model):
    """Profile model: a display name plus its refresh rate."""
    __tablename__ = "Perfil"

    id_perfil = db.Column(db.Integer, primary_key=True)   # surrogate PK
    refresh_rate = db.Column(db.Integer)                  # presumably seconds — TODO confirm
    name = db.Column(db.String(30))

    def __init__(self, name, refresh_rate):
        self.name = name
        self.refresh_rate = refresh_rate
        # BUG FIX: removed db.create_all() — recreating the whole schema on
        # every model instantiation is wrong; run it once at app start-up.

    def dump(self, _indent=0):
        """Return repr(self) indented by `_indent` spaces, newline-terminated."""
        return " " * _indent + repr(self) + "\n"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#*
#* CreateID_pass.py
#*
#* Copyright (c) 2020 IWATA Daiki
#*
#* This software is released under the MIT License.
#* see http://opensource.org/licenses/mit-license
#*
import sys
import hashlib
import sqlite3
# Prompt for the credentials to register.
userID = input("Input userID: ")
passwd = input("Input password: ")

# Ask for confirmation before touching the database.
while True:
    check = input("Are you sure to register this userID and password[y/n]?: ")
    if check == "y":
        break
    elif check == "n":
        sys.exit()
    else:
        print("Please input [y] or [n].")

# NOTE(review): an unsalted SHA-256 is weak for password storage; prefer a
# salted KDF (hashlib.pbkdf2_hmac / scrypt). Changing it alters the stored
# format, so existing rows would need migration — flagged, not changed.
pwhash = hashlib.sha256(passwd.encode('utf-8')).hexdigest()
# print(pwhash)

# Parameterized insert (safe from SQL injection); always close the connection.
conn = sqlite3.connect("testdb")
try:
    conn.execute("insert into users values( ?,? )", [userID, pwhash])
    conn.commit()
finally:
    conn.close()
|
import os
import sys
from game import Game
from client import Client
from botmanager import BotManager
from constants import Constants as Cts
from flask import Flask, render_template
from flask_sockets import Sockets
# Python 2 only: re-expose sys.setdefaultencoding (hidden after interpreter
# start-up) so UTF-8 strings don't trip implicit-decode errors.
reload(sys)
sys.setdefaultencoding('utf8')
# Build the game world and the bot pool before accepting connections.
game = Game(Cts.LINES, Cts.COLUMNS, Cts.SLEEP_TIME)
bot_manager = BotManager(game, Cts.MAX_BOTS, Cts.SLEEP_TIME * 5)
game.init(bot_manager)
bot_manager.addBots(0)
bot_manager.start()
game.start()
# init app
app = Flask(__name__)
app.debug = 'DEBUG' in os.environ
# init websocket
sockets = Sockets(app)
@sockets.route('/')
def handle(ws):
    """Per-connection websocket handler: registers the client, relays its
    move commands to the game, and cleans up on disconnect."""
    # new client connected
    client = Client(ws)
    client.sendMessage("".join([Cts.MESSAGE_PLAYERS_SIZE, chr(len(game.clients))]), binary=True)
    # FIXME: find a way to check here if client is still present
    game.sendMap(client)
    # wait for snake's data; first frame looks like "<nickname>,<id>"
    data = ws.receive()
    if data is None:  # BUG FIX (idiom): compare with `is None`, not `== None`
        ws.close()
        return
    data_split = data.split(",")
    # Cap nicknames at 10 characters; [:10] is a no-op for shorter names.
    client.setNickname(data_split[0][:10])
    nickname = client.nickname
    print(nickname, "connected")
    game.addClient(client, int(data_split[1]))
    game.sendClientData(client)
    while not ws.closed:
        # handle incoming messages; a frame starting with '1' carries a move,
        # with the direction character at index 2 (after "1,")
        data = ws.receive()
        if data and data[0] == '1':
            game.moveSnake(client, data[2])
    # terminate connection
    game.removeClient(client)
    print(nickname, "disconnected")
|
"""
Components for adding syntax highlighting to a `markyp-html` webpage using Highlight.js.
See https://www.npmjs.com/package/highlight.js.
"""
from typing import Optional, Tuple
from markyp import PropertyValue
from markyp_html import link, script
from markyp_html.block import pre
from markyp_html.inline import code as html_code
__author__ = "Peter Volf"
__copyright__ = "Copyright 2019, Peter Volf"
__email__ = "do.volfp@gmail.com"
__license__ = "MIT"
__url__ = "https://github.com/volfpeter/markyp-highlightjs"
__version__ = "0.1910.0"
__all__ = (
"CDN", "js", "themes", "highlight",
"bash", "css", "html", "javascript", "json", "markdown", "python", "sql", "xml"
)
class CDN(object):
    """
    Cloudflare CDN locations for the Highlight.js assets.
    """

    __slots__ = ()

    CDN_URL: str = "https://cdnjs.cloudflare.com/ajax/libs/highlight.js"
    VERSION: str = "9.15.6"

    @classmethod
    def cdn_url_with_version(cls) -> str:
        """
        Return the CDN base URL joined with the pinned Highlight.js version.
        """
        return "/".join((cls.CDN_URL, cls.VERSION))

    @classmethod
    def url_for_js(cls) -> str:
        """
        Return the URL of the minified Highlight.js script on the CDN.
        """
        return f"{cls.cdn_url_with_version()}/highlight.min.js"

    @classmethod
    def url_for_style(cls, style_name: str) -> str:
        """
        Return the URL of the minified stylesheet for the given theme.

        Arguments:
            style_name: The name of the Highlight.js style to get the
                        CDN URL for.
        """
        versioned = cls.cdn_url_with_version()
        return f"{versioned}/styles/{style_name}.min.css"
class __JavaScript(object):
    """
    Accessors for the Highlight.js JavaScript `script` elements.
    """

    __slots__ = ()

    @property
    def js_import(self) -> script:
        """
        `script` element that loads Highlight.js from the CDN.
        """
        return script.ref(CDN.url_for_js())

    @property
    def js_init(self) -> script:
        """
        `script` element that activates highlighting; it must come _after_
        the import element (`js_import`) in the markup.
        """
        return script("hljs.initHighlightingOnLoad();")

    @property
    def js(self) -> Tuple[script, script]:
        """
        The import element followed by the initializer element.
        """
        return self.js_import, self.js_init
class __Themes(object):
    """
    Highlight.js theme stylesheets, each exposed as a CSS `link` element.
    """

    __slots__ = ()

    @staticmethod
    def _css(name: str) -> link:
        """Build the CSS `link` element for the theme called `name`."""
        return link.css(CDN.url_for_style(name))

    @property
    def atom_one_dark(self) -> link:
        """Atom One Dark theme CSS link."""
        return self._css("atom-one-dark")

    @property
    def atom_one_light(self) -> link:
        """Atom One Light theme CSS link."""
        return self._css("atom-one-light")

    @property
    def darcula(self) -> link:
        """Darcula theme CSS link."""
        return self._css("darcula")

    @property
    def default(self) -> link:
        """Default theme CSS link."""
        return self._css("default")

    @property
    def github(self) -> link:
        """GitHub theme CSS link."""
        return self._css("github")

    @property
    def github_gist(self) -> link:
        """GitHub gist theme CSS link."""
        return self._css("github-gist")

    @property
    def idea(self) -> link:
        """Idea theme CSS link."""
        return self._css("idea")

    @property
    def monokai(self) -> link:
        """Monokai theme CSS link."""
        return self._css("monokai")

    @property
    def solarized_dark(self) -> link:
        """Solarized Dark theme CSS link."""
        return self._css("solarized-dark")

    @property
    def solarized_light(self) -> link:
        """Solarized Light theme CSS link."""
        return self._css("solarized-light")
# Shared singleton exposing the Highlight.js <script> elements (import + init).
js: __JavaScript = __JavaScript()
"""
JavaScript `script` elements of Highlight.js.
"""
# Shared singleton exposing the theme CSS <link> factories.
themes: __Themes = __Themes()
"""
Highlight.js themes for syntax highlighting.
"""
# -- Generic highligher methods
# -----------------------------------------------------------------------------
def highlight(code: str, *, language: str, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Higher order component producing a `<pre><code class="{language}">{code}</code></pre>`
    segment that Highlight.js highlights automatically on page load.

    Keyword arguments not listed below become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        language: The name of the programming language, see https://highlightjs.org/static/demo/.
        class_: CSS classes to add to the created `pre` element.
    """
    inner = html_code(code, class_=language)
    return pre(inner, class_=class_, **kwargs)
# -- Language-specific highligher methods
# -----------------------------------------------------------------------------
def bash(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for Bash code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="bash", **kwargs)
def css(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for CSS code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="css", **kwargs)
def html(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for HTML code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="html", **kwargs)
def javascript(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for JavaScript code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="javascript", **kwargs)
def json(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for JSON code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="json", **kwargs)
def markdown(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for MarkDown code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="markdown", **kwargs)
def python(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for Python code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="python", **kwargs)
def sql(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for SQL code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="sql", **kwargs)
def xml(code: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> pre:
    """
    Create a highlighted `pre` element for XML code.

    Extra keyword arguments become attributes of the created `pre` element.

    Arguments:
        code: The actual code to highlight.
        class_: CSS classes to add to the created `pre` element.
    """
    return highlight(code, class_=class_, language="xml", **kwargs)
|
import os
import shutil

import yaml

import check
def 练习(number):
    """Show question <number>.yaml, grade the typed answer, and copy the
    question file into the 错题本 (wrong-answer notebook) on a wrong answer.
    The question file is always removed afterwards."""
    # Close the YAML file deterministically instead of leaking the handle.
    with open(str(number) + '.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    科目 = config['必需项']['科目']
    题目 = config['必需项']['题目']
    答案 = config['必需项']['答案']
    选项 = config['非必需项']['选项']
    解析 = config['非必需项']['解析']
    出题者 = config['非必需项']['出题者']
    os.system("clear")
    print("所属科目:\033[36m" + 科目 + "\033[0m\n")
    print("题目:\n")
    print(题目)
    # A value of 0 marks "no choices provided" in the YAML files.
    if 选项 != 0:
        print("\n")
        print(选项)
    answer = input("\n请输入答案:")
    check.all()
    print("\n")
    print("你的答案:" + answer)
    print("\n\033[36m[正确答案]\033[0m")
    print(答案)
    if answer == 答案:
        print("\n\033[32m回答正确!\033[0m")
        check.pure()
        check.right()
        print("虽然回答正确了,但还是看看解析吧!\n")
        print("\033[36m[解析]\033[0m\n" + 解析)
    else:
        print("\n\033[31m回答错误!\033[0m")
        check.wrong()
        print("不要灰心,看看解析吧!\n")
        print("\033[36m[解析]\033[0m\n" + 解析)
        # BUG FIX (robustness): use os/shutil instead of os.system shell
        # strings — no quoting pitfalls and real exceptions on failure.
        os.makedirs("错题本", exist_ok=True)
        shutil.copy(str(number) + '.yaml', "错题本")
    if 出题者 != 0:
        print("\n本题提供者:\033[36m" + 出题者 + "\033[0m\n")
    # "rm -rf" tolerated a missing file; keep that tolerance.
    try:
        os.remove(str(number) + ".yaml")
    except FileNotFoundError:
        pass
def wrong(选择):
    """Re-ask question <选择>.yaml from the 错题本 directory and remove it from
    the notebook once it is finally answered correctly."""
    # Close the YAML file deterministically instead of leaking the handle.
    with open("错题本/" + 选择 + '.yaml') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    科目 = config['必需项']['科目']
    题目 = config['必需项']['题目']
    答案 = config['必需项']['答案']
    选项 = config['非必需项']['选项']
    解析 = config['非必需项']['解析']
    出题者 = config['非必需项']['出题者']
    os.system("clear")
    print("所属科目:\033[36m" + 科目 + "\033[0m\n")
    print("题目:\n")
    print(题目)
    # A value of 0 marks "no choices provided" in the YAML files.
    if 选项 != 0:
        print("\n")
        print(选项)
    answer = input("\n请输入答案:")
    check.all()
    print("\n")
    print("你的答案:" + answer)
    print("\n\033[36m[正确答案]\033[0m")
    print(答案)
    if answer == 答案:
        print("\n\033[32m回答正确!\033[0m")
        check.pure()
        check.right()
        print("虽然回答正确了,但还是看看解析吧!\n")
        print("\033[36m[解析]\033[0m\n" + 解析)
        # BUG FIX (robustness): os.remove replaces the "rm -rf" shell call;
        # keep the shell command's tolerance for a missing file.
        try:
            os.remove("错题本/" + 选择 + ".yaml")
        except FileNotFoundError:
            pass
    else:
        print("\n\033[31m回答错误!\033[0m")
        check.wrong()
        print("又做错了,仔细看看解析吧!\n")
        print("\033[36m[解析]\033[0m\n" + 解析)
    if 出题者 != 0:
        print("\n本题提供者:\033[36m" + 出题者 + "\033[0m\n")
|
class LRUCache:
    """Least-recently-used cache backed by an OrderedDict: the front of the
    dict is the eviction candidate, the back is most recently used."""

    # @param capacity, an integer
    def __init__(self, capacity):
        # BUG FIX: this file never imported collections, so OrderedDict was a
        # NameError at runtime; a function-scope import keeps the module
        # dependency-free at import time.
        from collections import OrderedDict
        self.capacity = capacity
        self.cache = OrderedDict()

    # @return an integer
    def get(self, key):
        """Return key's value and mark it most recently used, or -1 if absent."""
        if key not in self.cache:
            return -1
        # move_to_end is the idiomatic (and O(1)) way to refresh recency.
        self.cache.move_to_end(key)
        return self.cache[key]

    # @param key, an integer
    # @param value, an integer
    # @return nothing
    def set(self, key, value):
        """Insert/update key; evict the least recently used entry when full."""
        if key in self.cache:
            self.cache.pop(key)
        elif len(self.cache) == self.capacity:
            self.cache.popitem(last=False)  # evict the oldest entry
        self.cache[key] = value
### Import the ONNX model to Tensorflow ###
import onnx
from onnx_tf.backend import prepare
import torch
# Load the ONNX file
model = onnx.load('DA2_FMNIST/output/mnist.onnx')
# Import the ONNX model to Tensorflow
# (strict=False relaxes op-conversion checks — presumably required for this
# export; confirm against the onnx-tf version in use)
tf_rep = prepare(model, strict=False)
# Input nodes to the model
print('inputs:', tf_rep.inputs)
# Output nodes from the model
print('outputs:', tf_rep.outputs)
# All nodes in the model
print('tensor_dict:')
print(tf_rep.tensor_dict)
#############
### To know the onnx output node names ###
# graph.input includes weight initializers; subtracting them leaves the
# actual feed inputs of the network.
output_node =[node.name for node in model.graph.output]
input_all = [node.name for node in model.graph.input]
input_initializer = [node.name for node in model.graph.initializer]
net_feed_input = list(set(input_all) - set(input_initializer))
print('Inputs: ', net_feed_input)
print('Outputs: ', output_node)
#########################
def output_label(label):
output_mapping = {
0: "T-shirt/Top",
1: "Trouser",
2: "Pullover",
3: "Dress",
4: "Coat",
5: "Sandal",
6: "Shirt",
7: "Sneaker",
8: "Bag",
9: "Ankle Boot"
}
input = (label.item() if type(label) == torch.Tensor else label)
return output_mapping[input]
#################################
#### Run the model in Tensorflow ####
import numpy as np
from IPython.display import display
from PIL import Image
print('Image 1:')
# Grayscale 28x28 input; the two np.newaxis insertions shape it to
# (1, 1, 28, 28) = (batch, channel, H, W)
img = Image.open('DA2_FMNIST/assets/Bag.png').resize((28, 28)).convert('L')
display(img)
output = tf_rep.run(np.asarray(img, dtype=np.float32)[np.newaxis, np.newaxis, :, :])
print('The image is classified as ', output_label(np.argmax(output)))
print('Image 2:')
img = Image.open('DA2_FMNIST/assets/Pullover.png').resize((28, 28)).convert('L')
display(img)
output = tf_rep.run(np.asarray(img, dtype=np.float32)[np.newaxis, np.newaxis, :, :])
print('The image is classified as ', output_label(np.argmax(output)))
#################################
### Save the Tensorflow model into a file ###
tf_rep.export_graph('DA2_FMNIST/output/mnist.pb')
from django.conf.urls import url
from .views import BaseView
urlpatterns = [
    # Routes the site root to BaseView.
    # NOTE(review): django.conf.urls.url() is deprecated (removed in Django 4);
    # migrate to django.urls.re_path/path when the project's Django allows.
    url(r"^$", BaseView.as_view(), name="base-view-2"),
]
|
from django.db import models
from django.urls import reverse
class Message(models.Model):
    """A short message addressed to a user."""

    recipient = models.ForeignKey(
        "fuauth.User", related_name="messages", on_delete=models.CASCADE
    )
    sender_name = models.CharField(max_length=35)
    message_text = models.TextField(max_length=125)
    # BUG FIX: auto_now updates the timestamp on *every* save; a creation
    # timestamp must use auto_now_add so it is set once at insert time.
    # (Requires a migration.)
    created_date = models.DateTimeField(auto_now_add=True)
    message_sent = models.BooleanField(default=False)

    def __str__(self):
        """ Gets a readable string of sender name and created_date
        to refer to the message by """
        return "_".join([self.sender_name, str(self.created_date)])

    def get_absolute_url(self):
        """Success redirect"""
        return reverse("add-message-success")
|
# Desafio 027: print the first and last name of a full name.
print("Desafio 027")
nome = input("Digite seu nome completo:").strip()  # input() already returns str
nome1 = nome.split()
print("Seu primeiro nome é {}.".format(nome1[0]))
# Negative index is the idiomatic way to take the last element.
print("Seu último nome é {}.".format(nome1[-1]))
import pathlib
from setuptools import setup, find_packages
import os
# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file; decode explicitly as UTF-8 so builds don't
# depend on the machine's locale.
README = (HERE / "README.md").read_text(encoding="utf-8")

# Get the code version without importing the (possibly not-yet-built) package.
# pathlib is used consistently instead of mixing in os.path.join.
version = {}
with open(HERE / "key_driver_analysis" / "version.py") as fp:
    exec(fp.read(), version)
__version__ = version["__version__"]

setup(
    name="key-driver-analysis",
    version=__version__,
    description="Key Driver Analysis",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/bnriiitb/key-driver-analysis",
    author="Nagaraju Budigam",
    author_email="nagaraju.iith@gmail.com",
    license="MIT",
    packages=find_packages(),
    install_requires=["numpy>=1.21.3", "pandas>=1.3.4", "scikit_learn>=1.0.1", "setuptools>=58.0.4"],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ]
)
|
#!/usr/bin/python3
# coding=utf-8
# pylint: disable=I0011,E0401
# Copyright 2021 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Events
"""
from dusty.tools import log
from arbiter.eventnode import MockEventNode
class EventManager:
    """ Events — thin wrapper around a MockEventNode adding debug logging """
    def __init__(self):
        self.node = MockEventNode()
    def subscribe(self, event, callback):
        """ Subscribe to event """
        # BUG FIX: docstrings used four quotes (\"\"\"\"), leaving a stray quote
        # at the start of the docstring text.
        log.debug("Adding event subscription: event=%s, callback=%s", event, callback)
        self.node.subscribe(event, callback)
    def unsubscribe(self, event, callback):
        """ Unsubscribe from event """
        log.debug("Removing event subscription: event=%s, callback=%s", event, callback)
        self.node.unsubscribe(event, callback)
    def emit(self, event, data=None):
        """ Emit event with data """
        log.debug("Emitting event: event=%s, data=%s", event, data)
        self.node.emit(event, data)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
About: Configuration for Python built-in logging
"""
import logging
logger = logging.getLogger('nc_coder')
logger.propagate = False

# Accept both lower- and upper-case level names.
LEVELS = {
    name: getattr(logging, base.upper())
    for base in ('debug', 'info', 'warning', 'error', 'critical')
    for name in (base, base.upper())
}

_DEFAULT_FMT = '%(asctime)s [NC_CODER] %(message)s'
_DEBUG_FMT = '%(asctime)s %(levelname)-8s %(module)s %(threadName)s %(lineno)d [NC_CODER] %(message)s'
_INFO_FMT = '%(asctime)s %(levelname)-8s %(module)s [NC_CODER] %(message)s'

# Per-level log formats; 'default' is used when a level has no entry.
FORMAT = {
    'default': _DEFAULT_FMT,
    'DEFAULT': _DEFAULT_FMT,
    'debug': _DEBUG_FMT,
    'DEBUG': _DEBUG_FMT,
    'info': _INFO_FMT,
    'INFO': _INFO_FMT,
}
def conf_logger(level):
    """Set the 'nc_coder' logger's level and (re)attach a stream handler.

    `level` must be a key of LEVELS (e.g. 'debug'/'DEBUG'; raises KeyError
    otherwise); the matching FORMAT entry is used when present, falling back
    to the default format.
    """
    logger.setLevel(LEVELS[level])
    handler = logging.StreamHandler()
    formatter = logging.Formatter(FORMAT.get(level, FORMAT['default']))
    handler.setFormatter(formatter)
    # BUG FIX: replace existing handlers instead of appending, so calling
    # conf_logger() more than once no longer duplicates every log line.
    logger.handlers.clear()
    logger.addHandler(handler)
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
from nailgun import consts
from nailgun.test import base
from nailgun.utils import reverse
class TestComponentHandler(base.BaseIntegrationTest):
    # Integration tests for the ComponentCollectionHandler REST endpoint.
    def setUp(self):
        # Fixture: one release and one version-compatible plugin, each
        # contributing a component through the env's default components
        # metadata (which also supplies the compatible/incompatible lists
        # asserted below).
        super(TestComponentHandler, self).setUp()
        self.release = self.env.create_release(
            version='2015.1-8.0',
            operating_system='Ubuntu',
            modes=[consts.CLUSTER_MODES.ha_compact],
            components_metadata=self.env.get_default_components(
                name='hypervisor:test_component_1',
                bind='some_action_to_process'))
        self.plugin = self.env.create_plugin(
            name='compatible_plugin',
            fuel_version=['8.0'],
            releases=[{
                'repository_path': 'repositories/ubuntu',
                'version': '2015.1-8.0',
                'os': 'ubuntu',
                'mode': ['ha'],
                'deployment_scripts_path': 'deployment_scripts/'}],
            components_metadata=self.env.get_default_components(
                name='storage:test_component_2',
                bind='some_action_to_process'))
    def test_get_components(self):
        # GET must return the release's and the plugin's components merged,
        # and must not mutate the release's stored metadata.
        original_components = deepcopy(self.release.components_metadata)
        resp = self.app.get(
            reverse(
                'ComponentCollectionHandler',
                kwargs={'release_id': self.release.id}),
            headers=self.default_headers
        )
        self.assertEqual(200, resp.status_code)
        self.assertItemsEqual(resp.json_body, [
            {
                'name': 'hypervisor:test_component_1',
                'compatible': [
                    {'name': 'hypervisor:*'},
                    {'name': 'storage:object:block:swift'},
                    {'name': 'storage:test_component_2'}],
                'incompatible': [
                    {'name': 'network:*'},
                    {'name': 'additional_service:*'}]},
            {
                'name': 'storage:test_component_2',
                'compatible': [
                    {'name': 'hypervisor:*'},
                    {'name': 'storage:object:block:swift'}],
                'incompatible': [
                    {'name': 'network:*'},
                    {'name': 'additional_service:*'}]}])
        # The handler must not have modified the stored metadata in place.
        self.assertItemsEqual(self.release.components_metadata,
                              original_components)
    def test_404_for_get_components_with_none_release_id(self):
        # An unresolvable release id yields 404, not a server error.
        resp = self.app.get(
            reverse(
                'ComponentCollectionHandler',
                kwargs={'release_id': None}),
            headers=self.default_headers,
            expect_errors=True
        )
        self.assertEqual(404, resp.status_code)
    def test_post_components_not_allowed(self):
        # The collection is read-only: POST must return 405.
        resp = self.app.post(
            reverse(
                'ComponentCollectionHandler',
                kwargs={'release_id': self.release.id}),
            headers=self.default_headers,
            expect_errors=True
        )
        self.assertEqual(405, resp.status_code)
|
import codecs
# Exploration of codecs.EncodedFile: data handed to the wrapper is expected
# in data_enc and is transcoded to file_enc on the file side.
file_path = 'test_files/EncodedFile.txt'
data_enc = 'shift-jis'
file_enc = 'euc-jp'
# NOTE(review): EncodedFile's first argument should be a *file object*, not a
# path string. encode()/decode() below still work because they never touch the
# wrapped object; the commented-out write()/read() attempts fail for exactly
# that reason ('str' object has no attribute 'write'/'read').
ef = codecs.EncodedFile(file_path, data_enc, file_enc)
#ef = codecs.EncodedFile(file_path, codecs.lookup(data_enc), codecs.lookup(file_enc))#TypeError: lookup() argument must be str, not CodecInfo
print(type(ef))
data = '日本語'
print(data)
print('EncodedFile.encode():', ef.encode(data))
print('EncodedFile.decode():', ef.decode(ef.encode(data)[0]))
#ef.write(data)#TypeError: a bytes-like object is required, not 'str'
#ef.write(ef.encode(data)[0])#AttributeError: 'str' object has no attribute 'write'
#ef.write(ef.decode(ef.encode(data)[0])[0])#TypeError: a bytes-like object is required, not 'str'
#ef.write(b'\x93\xfa\x96{\x8c\xea')#AttributeError: 'str' object has no attribute 'write'
#print('EncodedFile.read():', ef.read(data))#TypeError: arg 1 must be an integer
#print('EncodedFile.read():', ef.read())#AttributeError: 'str' object has no attribute 'read'
|
import time
import uuid
import logging
import numpy as np
import pandas as pd
__all__ = [
"generateCombinations",
"sortLinkages",
"identifySubsetLinkages",
"mergeLinkages",
"removeDuplicateLinkages",
"removeDuplicateObservations",
"calcDeltas"
]
logger = logging.getLogger(__name__)
def generateCombinations(
    x,
    idx=None,
    ct=None,
    reps=None
):
    """Recursively yield one index array per combination of representatives
    of the duplicated values in x.

    Magic from the wizard himself: Mario Juric.

    Parameters
    ----------
    x : array-like or None
        Values to combine. Pass None on recursive calls (internal use).
    idx, ct, reps : `~numpy.ndarray`, internal recursion state
        First indices of the unique values, their repeat counts, and the
        positions (into idx/ct) that have repeats.

    Yields
    ------
    `~numpy.ndarray`
        An index array for each combination.
    """
    if x is not None:
        # initialization; find indices of where the repetitions are
        # (np.unique returns (values, first_indices, counts) in that order
        # when both return_index and return_counts are requested)
        _, idx, ct = np.unique(x, return_counts=True, return_index=True)
        reps = np.nonzero(ct > 1)[0]
    if len(reps) == 0:
        # BUG FIX: yield a copy — the recursion increments `idx` in place
        # *after* yielding, so handing out the live array let later mutations
        # retroactively corrupt results a consumer had already collected
        # (e.g. list(generateCombinations(x)) held N references to one array).
        yield idx.copy()
        return
    i = reps[0]
    idx = idx.copy()
    for _ in range(ct[i]):
        yield from generateCombinations(None, idx, ct, reps[1:])
        idx[i] += 1
def sortLinkages(
    linkages,
    linkage_members,
    observations,
    linkage_id_col="orbit_id"
):
    """
    Check that linkages and linkage_members have their linkage IDs in the same order. If not,
    sort both by linkage ID. Second, check that linkage_members is additionally sorted by
    mjd_utc. If linkage_members does not contain the mjd_utc column, then observations will be merged
    to retrieve the observation time.
    Parameters
    ----------
    linkages : `~pandas.DataFrame`
        DataFrame containing at least a linkage ID column (linkage_id_col). Each unique linkage should
        be only present once.
    linkage_members : `~pandas.DataFrame`
        DataFrame containing at least a linkage ID column (linkage_id_col) and an observation ID column ('obs_id'). The observation ID
        column is used to merge on observations so that the observation time can be retrieved.
    observations : `~pandas.DataFrame`
        DataFrame containing observations with at least an observation ID column ('obs_id') and a observation time
        column ('mjd_utc').
    linkage_id_col : str
        Name of the linkage ID column.
    Returns
    -------
    linkages : `~pandas.DataFrame`
        Linkages sorted by linkage IDs.
    linkage_members : `~pandas.DataFrame`
        Linkages sorted by linkage IDs and observation times.
    """
    time_start = time.time()
    logger.debug("Verifying linkages...")
    # Work on copies so the caller's DataFrames are never mutated.
    linkages_verified = linkages.copy()
    linkage_members_verified = linkage_members.copy()
    reset_index = False
    # Fast check: linkage IDs in `linkages` should match the order of first
    # occurrences in `linkage_members`.
    # NOTE(review): this assumes both frames cover the same set of linkage
    # IDs; with different lengths the elementwise == collapses to False and
    # triggers a (harmless) re-sort — confirm upstream guarantees.
    id_sorted = np.all(linkages_verified[linkage_id_col].values == linkage_members_verified[linkage_id_col].unique())
    if not id_sorted:
        logger.debug(f"Linkages and linkage_members dataframes are not equally sorted by {linkage_id_col}. Sorting...")
        # Sort by linkage_id
        sort_start = time.time()
        linkages_verified.sort_values(
            by=[linkage_id_col],
            inplace=True
        )
        linkage_members_verified.sort_values(
            by=[linkage_id_col],
            inplace=True
        )
        sort_end = time.time()
        duration = sort_end - sort_start
        logger.debug(f"Sorting completed in {duration:.3f}s.")
        reset_index = True
    # Track whether mjd_utc was already present; if it had to be merged in
    # it is dropped again before returning.
    time_present = True
    if "mjd_utc" not in linkage_members_verified.columns:
        logger.debug("Observation time column ('mjd_utc') is not in linkage_members, merging with observations...")
        # Merge with observations to get the observation time for each observation in linkage_members
        merge_start = time.time()
        linkage_members_verified = linkage_members_verified.merge(observations[["obs_id", "mjd_utc"]],
            on="obs_id",
            how="left"
        )
        merge_end = time.time()
        duration = merge_end - merge_start
        logger.debug(f"Merging completed in {duration:.3f}s.")
        time_present = False
    # Compare against a fully sorted copy to decide whether an in-place
    # time sort is actually needed.
    linkage_members_verified_ = linkage_members_verified.sort_values(by=[linkage_id_col, "mjd_utc"])
    time_sorted = np.all(linkage_members_verified_[[linkage_id_col, "obs_id"]].values == linkage_members_verified[[linkage_id_col, "obs_id"]].values)
    if not time_sorted:
        logger.debug(f"Linkage_members is not sorted by {linkage_id_col} and mjd_utc. Sorting...")
        # Sort by linkage_id, mjd_utc, and finally obs_id
        sort_start = time.time()
        linkage_members_verified.sort_values(
            by=[linkage_id_col, "mjd_utc", "obs_id"],
            inplace=True
        )
        sort_end = time.time()
        duration = sort_end - sort_start
        logger.debug(f"Sorting completed in {duration:.3f}s.")
        reset_index = True
    if reset_index:
        # Re-sorting scrambled the integer index; restore 0..N-1.
        for df in [linkages_verified, linkage_members_verified]:
            df.reset_index(
                inplace=True,
                drop=True
            )
    if not time_present:
        # mjd_utc was only merged in for sorting; remove it again.
        linkage_members_verified.drop(
            columns=["mjd_utc"],
            inplace=True
        )
    time_end = time.time()
    duration = time_end - time_start
    logger.debug(f"Linkages verified in {duration:.3f}s.")
    return linkages_verified, linkage_members_verified
def identifySubsetLinkages(
    all_linkages,
    linkage_members,
    linkage_id_col="orbit_id"
):
    """
    Identify each linkage that is a subset of a larger linkage.
    Parameters
    ----------
    all_linkages : `~pandas.DataFrame`
        DataFrame containing at least a linkage ID column (linkage_id_col).
    linkage_members : `~pandas.DataFrame`
        DataFrame containing a linkage ID column (linkage_id_col) and an
        observation ID column ('obs_id'), one observation per row.
    linkage_id_col : str
        Name of the linkage ID column.
    Returns
    -------
    all_linkages : `~pandas.DataFrame`
        Copy of all_linkages with a 'subset_of' column: the ID of the
        superset linkage, or None if the linkage is not a subset.
    linkage_members : `~pandas.DataFrame`
        Unmodified copy of linkage_members.
    """
    linkage_members_merged = linkage_members.copy()
    all_linkages_merged = all_linkages.copy()
    all_linkages_merged["subset_of"] = None
    # Only observations shared by more than one linkage can reveal subsets.
    counts = linkage_members["obs_id"].value_counts()
    duplicate_obs_ids = counts.index[counts.values > 1].values
    subset_linkages = []
    obs_ids_analyzed = set()
    # Bug fix: the original looped `while len(obs_ids_analyzed) !=
    # len(duplicate_obs_ids)`, but obs_ids_analyzed also collects
    # NON-duplicated obs IDs from subset linkages, so the counts could leap
    # past equality and `duplicate_obs_ids[i]` then raised IndexError.
    # Iterating the duplicates directly is bounded and skips already-handled
    # IDs exactly as before.
    for obs_id in duplicate_obs_ids:
        if obs_id not in obs_ids_analyzed:
            # Find all linkages that contain this observation (that have not already been identified as a subset)
            linkage_ids = linkage_members_merged[linkage_members_merged["obs_id"].isin([obs_id])][linkage_id_col].values
            # Count the occurences of these linkages (the number of observations in each linkage);
            # value_counts() sorts descending, so larger linkages are tested as supersets first.
            linkage_id_counts = linkage_members_merged[(
                linkage_members_merged[linkage_id_col].isin(linkage_ids)
                & (~linkage_members_merged[linkage_id_col].isin(subset_linkages))
            )][linkage_id_col].value_counts()
            linkage_ids = linkage_id_counts.index.values
            for linkage_id_i in linkage_ids:
                # Has linkage i already been identified as a subset? If not, see if it has any subset linkages
                is_subset_i = all_linkages_merged[all_linkages_merged[linkage_id_col].isin([linkage_id_i])]["subset_of"].values[0]
                if not is_subset_i:
                    # Grab linkage i's observation IDs
                    obs_ids_i = linkage_members_merged[linkage_members_merged[linkage_id_col].isin([linkage_id_i])]["obs_id"].values
                    for linkage_id_j in linkage_ids[np.where(linkage_ids != linkage_id_i)]:
                        # If this linkage has not already been marked as a subset of another, check to see
                        # if it is a subset
                        is_subset_j = all_linkages_merged[all_linkages_merged[linkage_id_col].isin([linkage_id_j])]["subset_of"].values[0]
                        if not is_subset_j:
                            # Grab linkage j's observation IDs
                            obs_ids_j = linkage_members_merged[linkage_members_merged[linkage_id_col].isin([linkage_id_j])]["obs_id"].values
                            # If linkage j is a subset of linkage i, flag it as such
                            if set(obs_ids_j).issubset(set(obs_ids_i)):
                                all_linkages_merged.loc[all_linkages_merged[linkage_id_col].isin([linkage_id_j]), "subset_of"] = linkage_id_i
                                subset_linkages.append(linkage_id_j)
                                for j in obs_ids_j:
                                    obs_ids_analyzed.add(j)
            obs_ids_analyzed.add(obs_id)
    return all_linkages_merged, linkage_members_merged
def mergeLinkages(
    linkages,
    linkage_members,
    observations,
    linkage_id_col="orbit_id",
    filter_cols=["num_obs", "arc_length"],
    ascending=[False, False]
):
    """
    Merge any linkages that share observations into one larger linkage. The larger
    linkage will be given the linkage properties of the linkage that when sorted using
    filter_cols is first. Linkages that when merged may have different observations occur at the same
    time will be split into every possible combination of unique observation IDs and observation times.
    Parameters
    ----------
    linkages : `~pandas.DataFrame`
        DataFrame containing at least the linkage ID.
    linkage_members : `~pandas.DataFrame`
        Dataframe containing the linkage ID and the observation ID for each of the linkage's
        constituent observations. Each observation ID should be in a single row.
    observations : `~pandas.DataFrame`
        Observations DataFrame containing at least and observation ID column and a observation time
        column ('mjd_utc').
    linkage_id_col : str, optional
        Linkage ID column name (must be the same in both DataFrames).
    filter_cols : list, optional
        List of column names to use to sort the linkages.
    ascending : list, optional
        Sort the filter_cols in ascending or descending order.
    Returns
    -------
    linkages : `~pandas.DataFrame`
        DataFrame with merged linkages added.
    linkage_members : `~pandas.DataFrame`
        DataFrame with merged linkages added.
    merged_from : `~pandas.DataFrame`
        DataFrame with column of newly created linkages, and a column
        with their constituent linkages.
    """
    # Observation times are merged in below, so the input must not already
    # carry an mjd_utc column.
    assert "mjd_utc" not in linkage_members.columns
    # Observations appearing in more than one linkage connect those linkages.
    obs_id_occurences = linkage_members["obs_id"].value_counts()
    duplicate_obs_ids = obs_id_occurences.index.values[obs_id_occurences.values > 1]
    linkage_members_ = linkage_members.merge(observations[["obs_id", "mjd_utc"]], on="obs_id")
    # Output schema depends on the linkage flavor (orbits vs clusters).
    if linkage_id_col == "orbit_id":
        columns = ["orbit_id", "epoch", "x", "y", "z", "vx", "vy", "vz"]
    else:
        columns = ["cluster_id", "vtheta_x_deg", "vtheta_y_deg"]
    merged_linkages = []
    merged_linkage_members = []
    merged_from = []
    # Process one connected group per iteration: all linkages that share the
    # first still-unprocessed duplicated observation.
    while len(duplicate_obs_ids) > 0:
        duplicate_obs_id = duplicate_obs_ids[0]
        linkage_ids_i = linkage_members_[linkage_members_["obs_id"].isin([duplicate_obs_id])][linkage_id_col].unique()
        obs_ids = linkage_members_[linkage_members_[linkage_id_col].isin(linkage_ids_i)]["obs_id"].unique()
        times = linkage_members_[linkage_members_["obs_id"].isin(obs_ids)].drop_duplicates(subset=["obs_id"])["mjd_utc"].values
        # Sort the group's observations chronologically.
        obs_ids = obs_ids[np.argsort(times)]
        times = times[np.argsort(times)]
        # When several observations fall on the same time, enumerate every
        # way of picking one observation per instant; each choice becomes
        # one candidate merged linkage.
        for combination in generateCombinations(times):
            # The best-ranked constituent linkage donates its properties.
            new_possible_linkages = linkages[linkages[linkage_id_col].isin(linkage_ids_i)].copy()
            new_linkage = new_possible_linkages.sort_values(
                by=filter_cols,
                ascending=ascending
            )[:1]
            new_linkage_id = str(uuid.uuid4().hex)
            new_linkage[linkage_id_col] = new_linkage_id
            new_linkage_members = {
                linkage_id_col : [new_linkage_id for i in range(len(obs_ids[combination]))],
                "obs_id" : obs_ids[combination],
                "mjd_utc" : times[combination]
            }
            # Record which original linkages fed this merged linkage.
            merged_from_i = {
                linkage_id_col : [new_linkage_id for i in range(len(linkage_ids_i))],
                "merged_from" : linkage_ids_i
            }
            merged_linkages.append(new_linkage)
            merged_linkage_members.append(pd.DataFrame(new_linkage_members))
            merged_from.append(pd.DataFrame(merged_from_i))
        # Remove this group's observations from the work list (boolean-mask
        # form of np.delete).
        duplicate_obs_ids = np.delete(duplicate_obs_ids, np.isin(duplicate_obs_ids, obs_ids))
    if len(merged_linkages) > 0:
        merged_linkages = pd.concat(merged_linkages)
        merged_linkage_members = pd.concat(merged_linkage_members)
        merged_from = pd.concat(merged_from)
        merged_linkages.sort_values(
            by=[linkage_id_col],
            inplace=True
        )
        merged_linkage_members.sort_values(
            by=[linkage_id_col, "mjd_utc"],
            inplace=True
        )
        merged_from.sort_values(
            by=[linkage_id_col],
            inplace=True
        )
        for df in [merged_linkages, merged_linkage_members, merged_from]:
            df.reset_index(
                inplace=True,
                drop=True
            )
    else:
        # No shared observations: return empty frames with the right schema.
        merged_linkages = pd.DataFrame(
            columns=columns
        )
        merged_linkage_members = pd.DataFrame(
            columns=[linkage_id_col, "obs_id"]
        )
        merged_from = pd.DataFrame(
            columns=[linkage_id_col, "merged_from"]
        )
    return merged_linkages[columns], merged_linkage_members[[linkage_id_col, "obs_id"]], merged_from
def removeDuplicateLinkages(
    linkages,
    linkage_members,
    linkage_id_col="orbit_id"
):
    """
    Drop linkages whose observation set is identical to another linkage's.
    Linkage quality is not considered: the first occurrence is kept.
    Parameters
    ----------
    linkages : `~pandas.DataFrame`
        DataFrame containing at least the linkage ID.
    linkage_members : `~pandas.DataFrame`
        Dataframe containing the linkage ID and the observation ID for each
        of the linkage's constituent observations, one row per observation.
    linkage_id_col : str, optional
        Linkage ID column name (must be the same in both DataFrames).
    Returns
    -------
    linkages : `~pandas.DataFrame`
        DataFrame with duplicate linkages removed.
    linkage_members : `~pandas.DataFrame`
        DataFrame with duplicate linkages removed.
    """
    kept_linkages = linkages.copy()
    kept_members = linkage_members.copy()

    # Pivot each linkage's observation IDs into one row of columns so that
    # drop_duplicates can compare whole linkages at once.
    obs_lists = (
        kept_members[[linkage_id_col, "obs_id"]]
        .groupby(by=[linkage_id_col])["obs_id"]
        .apply(list)
        .to_frame(name="obs_ids")
    )
    obs_table = obs_lists["obs_ids"].apply(pd.Series)

    unique_ids = obs_table.drop_duplicates().index.values
    kept_linkages = kept_linkages[kept_linkages[linkage_id_col].isin(unique_ids)]
    kept_members = kept_members[kept_members[linkage_id_col].isin(unique_ids)]

    for df in (kept_linkages, kept_members):
        df.reset_index(
            inplace=True,
            drop=True
        )
    return kept_linkages, kept_members
def removeDuplicateObservations(
    linkages,
    linkage_members,
    min_obs=5,
    linkage_id_col="orbit_id",
    filter_cols=["num_obs", "arc_length"],
    ascending=[False, False]
):
    """
    Give each observation to only the highest-ranked linkage that contains
    it. Linkages are ranked by sorting on filter_cols; the first linkage to
    claim an observation keeps it, all later claims are dropped. Linkages
    that fall below min_obs observations afterwards are removed entirely.
    Parameters
    ----------
    linkages : `~pandas.DataFrame`
        DataFrame containing at least the linkage ID.
    linkage_members : `~pandas.DataFrame`
        Dataframe containing the linkage ID and the observation ID for each
        of the linkage's constituent observations, one row per observation.
    min_obs : int, optional
        Minimum number of observations for a linkage to be viable.
    linkage_id_col : str, optional
        Linkage ID column name (must be the same in both DataFrames).
    filter_cols : list, optional
        List of column names to use to sort the linkages.
    ascending : list, optional
        Sort the filter_cols in ascending or descending order.
    Returns
    -------
    linkages : `~pandas.DataFrame`
        DataFrame with duplicate observations removed.
    linkage_members : `~pandas.DataFrame`
        DataFrame with duplicate observations removed.
    """
    ranked = linkages.copy()
    members = linkage_members.copy()

    # Rank linkages; the best-ranked linkage gets first claim on each obs.
    ranked.sort_values(
        by=filter_cols,
        ascending=ascending,
        inplace=True,
        ignore_index=True
    )

    # Reorder linkage_members to follow the linkage ranking.
    ranked.set_index(linkage_id_col, inplace=True)
    members.set_index(linkage_id_col, inplace=True)
    members = members.loc[ranked.index.values]
    members.reset_index(inplace=True)

    # First occurrence of each observation wins.
    members = members.drop_duplicates(subset=["obs_id"], keep="first")

    # Remove linkages that dropped below the minimum observation count.
    occurences = members[linkage_id_col].value_counts()
    viable = occurences.index.values[occurences.values >= min_obs]
    ranked = ranked[ranked.index.isin(viable)]
    members = members[members[linkage_id_col].isin(viable)]

    ranked.reset_index(inplace=True)
    members.reset_index(
        inplace=True,
        drop=True
    )
    return ranked, members
def calcDeltas(
    linkage_members,
    observations,
    groupby_cols=["orbit_id", "night_id"],
    delta_cols=["mjd_utc", "RA_deg", "Dec_deg", "mag"]
):
    """
    Calculate per-group differences for the desired columns. With the
    default grouping (orbit_id, then night_id) this yields, e.g., the
    nightly time difference between a linkage's observations or the amount
    of on-sky motion between them. Each delta column c is added as 'dc'.
    Parameters
    ----------
    linkage_members : `~pandas.DataFrame`
        DataFrame containing at least a linkage ID column (linkage_id_col) and an observation ID column ('obs_id'). The observation ID
        column is used to merge on observations so that the columns from the observations dataframe can be retrieved if necessary.
    observations : `~pandas.DataFrame`
        DataFrame containing observations with at least an observation ID column ('obs_id').
    groupby_cols : list
        Columns by which to group the linkages and calculate deltas.
    delta_cols : list
        Columns for which to calculate deltas.
    Returns
    -------
    linkage_members : `~pandas.DataFrame`
        Copy of the linkage_members dataframe with the delta columns added.
    """
    members = linkage_members.copy()

    # Any column needed for grouping or deltas that linkage_members lacks
    # must be pulled in from observations (joined on obs_id).
    missing = []
    for col in delta_cols + groupby_cols:
        if col in members.columns:
            continue
        if col not in observations.columns:
            raise ValueError(
                f"{col} could not be found in either linkage_members or observations."
            )
        missing.append(col)

    if missing:
        members = members.merge(
            observations[["obs_id"] + missing],
            on="obs_id",
            how="left"
        )

    # Difference each delta column within each group; the first row of a
    # group is NaN by construction of diff().
    grouped = members.groupby(
        by=groupby_cols
    )
    deltas = grouped[delta_cols].diff()
    deltas.rename(
        columns={c: f"d{c}" for c in delta_cols},
        inplace=True
    )
    return members.join(deltas)
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random as rnd
import math
def runRandomWalk(steps, not_backfire):
    """Attempt one self-avoiding random walk on the integer lattice.

    Parameters
    ----------
    steps : int
        Number of steps to take.
    not_backfire : bool
        If True, the walk never immediately reverses direction (the
        "improved" variant); otherwise each new direction is uniform.

    Returns
    -------
    (x_vals, y_vals, indices) on success, or None as soon as the walk
    revisits any previously visited lattice point (including the origin).
    """
    x_vals = []
    y_vals = []
    x = 0
    y = 0
    indices = []
    # Bug fix: the original never recorded the starting point, so a walk
    # returning to the origin was not detected as self-intersecting.
    # Using a set also makes the membership test O(1) instead of O(n).
    visited = {(0, 0)}
    direction = math.floor(rnd.uniform(0, 4))

    def step():
        # Move one unit in the current direction (0:+x, 1:+y, 2:-x, 3:-y).
        nonlocal x, y
        if direction == 0:
            x += 1
        elif direction == 1:
            y += 1
        elif direction == 2:
            x -= 1
        elif direction == 3:
            y -= 1

    for i in range(steps):
        step()
        if (x, y) in visited:
            return None
        if not_backfire:
            # Turn -1, 0 or +1 quarter-turns; a full reverse is impossible.
            newDir = math.ceil(rnd.uniform(0, 3))
            direction = (direction - 2 + newDir) % 4
        else:
            direction = math.floor(rnd.uniform(0, 4))
        visited.add((x, y))
        x_vals.append(x)
        y_vals.append(y)
        indices.append(i)
    return x_vals, y_vals, np.array(indices)
def runUntilWorks(steps, not_backfire):
    """Repeat runRandomWalk until it yields a non-intersecting walk.

    Returns
    -------
    ((x_vals, y_vals, indices), attempts) where attempts counts how many
    walks were started (>= 1).
    """
    stepsRun = 1
    while True:
        val = runRandomWalk(steps, not_backfire)
        # Idiom fix: compare with None using `is not`, not `!=` — a
        # successful result is a tuple and `!=` would compare by value.
        if val is not None:
            return val, stepsRun
        stepsRun += 1
# Plot one successful self-avoiding walk for each length 5, 10, ..., 50.
for i in range(5, 55, 5):
    plt.figure()
    plt.title("{0} non-intersecting steps".format(i))
    # stepsRun (attempts needed) is unused here; only the path is drawn.
    (X, Y, indices), stepsRun = runUntilWorks(i, True)
    plt.plot(X, Y)
    plt.show()
def means(start, end, step, not_backfire):
    """For each walk length in range(start, end, step), estimate the
    success rate and the mean number of attempts per successful
    non-intersecting walk (50 trials each).

    Returns (step_counts, success_rates, mean_attempts).
    """
    step_counts = []
    success_rates = []
    mean_attempts = []
    for n_steps in range(start, end, step):
        trials = 50
        total_attempts = 0
        for _ in range(trials):
            _, attempts = runUntilWorks(n_steps, not_backfire)
            total_attempts += attempts
        step_counts.append(n_steps)
        success_rates.append(trials / total_attempts)
        mean_attempts.append(total_attempts / trials)
    return step_counts, success_rates, mean_attempts
# Compare the no-backfire variant against the plain random walk.
indices1, means1, counts1 = means(1, 50, 1, True)
indices2, means2, counts2 = means(1, 25, 1, False)
# Success ratio (successful walks per attempt) vs walk length.
plt.figure()
plt.title("Success rate")
plt.plot(indices1, means1, 'tab:red')
plt.plot(indices2, means2, 'tab:orange')
plt.figlegend(("Improved variant", "Original"))
plt.xlabel("N (Steps)", fontsize = 12)
plt.ylabel("Success ratio", fontsize = 12)
plt.show()
# Mean attempts needed per successful walk vs walk length.
plt.figure()
plt.title("Attempts per success")
plt.plot(indices1, counts1, 'tab:blue')
plt.plot(indices2, counts2, 'tab:cyan')
plt.figlegend(("Improved variant", "Original"))
plt.xlabel("N (Steps)", fontsize = 12)
plt.ylabel("Attempts/success", fontsize = 12)
plt.show()
|
import random

''' Movie Generator '''

# Movie lists, one per genre.
action =['The Matrix', 'Pirates of the Caribbean', 'The Terminator', 'Mr & Mrs Smith', 'Rush Hour', 'Pearl Harbor']
comedy = ['Johnny English', 'Cheaper by the Dozen', 'The Pink Panther', 'Miss Congeniality', 'Bride Wars', 'Meet the Parents']
drama = ['A Walk to Remember', 'Me Before You', 'Braveheart', 'Karate Kid', 'Dear John', 'Midnight Sun']
fantasy = ['Charlie and the Chocolate Factory', 'Jumanji', 'Edward Scissorhands', 'Maleficent', 'Journey to the Center of the Earth', 'Alice in Wonderland']
horror = ['Final Destination', 'The Conjuring', 'Annabelle', 'A Quiet Place', 'Hush', 'Bird Box']
# NOTE(review): 'Now You See Me' appears twice — possibly meant
# 'Now You See Me 2'; confirm with the author.
mystery = ['Gone Girl', 'Sherlock Holmes', 'Now You See Me', 'Now You See Me']
romance = ['Twilight', 'The Vow', 'The Proposal', 'Two Weeks Notice', '50 First Dates', 'Everything, Everything']
thriller = ['Speed', 'Non-Stop', 'Greenland', 'Skyscraper', 'San Andreas', 'Tidal Wave']

# Improvement: a genre -> list mapping replaces the long if/elif chain;
# adding a genre is now a one-line change.
genres = {
    'action': action,
    'comedy': comedy,
    'drama': drama,
    'fantasy': fantasy,
    'horror': horror,
    'mystery': mystery,
    'romance': romance,
    'thriller': thriller,
}

movie_genre = input('Enter a movie genre you currently prefer: ')

if movie_genre in genres:
    # Print an arbitrary movie from the chosen genre's list.
    print(random.choice(genres[movie_genre]))
else:
    # Genre with no associated list: report an error.
    print('Error')
# Copyright 2019 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from typing import Dict, Any, List
from dragonchain import logger
from dragonchain import exceptions
from dragonchain.lib import authorization
from dragonchain.lib.interfaces import secrets
from dragonchain.lib.interfaces import storage
FOLDER = "KEYS"
_log = logger.get_logger()
def get_api_key_list_v1() -> Dict[str, List[Dict[str, Any]]]:
    """
    Gets the list of api key IDs

    Returns:
        List of API keys
    """
    # Reserved key classes (webserver, smart-contract, interchain) are
    # never exposed through this endpoint.
    reserved_prefixes = ("KEYS/WEB_", "KEYS/SC_", "KEYS/INTERCHAIN")
    returned_keys = []
    for key in storage.list_objects(prefix=FOLDER):
        if key.startswith(reserved_prefixes):
            continue
        record = storage.get_json_from_object(key)
        returned_keys.append(
            {"id": str(record["id"]), "registration_time": int(record["registration_time"]), "nickname": str(record.get("nickname") or "")}
        )
    return {"keys": returned_keys}
def create_api_key_v1(nickname: str = "") -> Dict[str, Any]:
    """
    Create a new api key

    Args:
        nickname: optional display name for the new key

    Returns:
        Newly created API key data (including the secret key itself)
    """
    key = authorization.register_new_auth_key(nickname=nickname)
    return {
        "key": str(key["key"]),
        "id": str(key["id"]),
        "registration_time": int(key["registration_time"]),
        # Bug fix: match get_api_key_v1/get_api_key_list_v1 which return ""
        # for a missing nickname; str(key.get("nickname")) would have
        # produced the literal string "None".
        "nickname": str(key.get("nickname") or ""),
    }
def delete_api_key_v1(key_id: str) -> None:
    """Delete api key by key ID ONLY if it is not the last key on the chain

    Args:
        key_id: ID of api key to delete

    Raises:
        exceptions.ActionForbidden: if the key is reserved or is the root key
        RuntimeError: if the key could not be removed from storage
    """
    # Don't allow removal of reserved keys (startswith accepts a tuple).
    if key_id.startswith(("SC_", "INTERCHAIN", "WEB_")):
        raise exceptions.ActionForbidden("cannot delete reserved API keys")
    # Don't allow removal of root keys
    root_key_id = secrets.get_dc_secret("hmac-id")
    if root_key_id == key_id:
        raise exceptions.ActionForbidden("Cannot remove root API key")
    # Delete the actual key after previous checks pass
    if not authorization.remove_auth_key(auth_key_id=key_id, interchain=False):
        # Typo fix in the error message: "Unkown" -> "Unknown".
        raise RuntimeError("Unknown error deleting key from storage")
def get_api_key_v1(key_id: str) -> Dict[str, Any]:
    """Returns the api key information (without the actual key itself) for a key id

    Args:
        key_id: ID of api key to get

    Returns:
        API key ID, registration timestamp and nickname (if any)

    Raises:
        exceptions.NotFound: if the key id belongs to a reserved key class
    """
    # Doc fix: removed the stale 'hide_key' argument from the docstring —
    # no such parameter exists. Reserved keys are reported as not found.
    if key_id.startswith(("SC_", "WEB_", "INTERCHAIN")):
        raise exceptions.NotFound(f"api key with ID {key_id} not found")
    key = storage.get_json_from_object(f"KEYS/{key_id}")
    return {"id": str(key["id"]), "registration_time": int(key["registration_time"]), "nickname": str(key.get("nickname") or "")}
def update_api_key_v1(key_id: str, nickname: str) -> None:
    """Updates the nickname for an existing key

    Args:
        key_id: ID of api key to update
        nickname: new nickname for the given key
    """
    # NOTE(review): no reserved-key or existence check here, unlike the
    # sibling endpoints; a bad key_id surfaces from get_json_from_object.
    # Presumably the route layer validates first — confirm.
    key = storage.get_json_from_object(f"KEYS/{key_id}")
    key["nickname"] = nickname
    storage.put_object_as_json(f"KEYS/{key_id}", key)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Tony <stayblank@gmail.com>
# Time: 2019/5/31 00:34
import unittest
from onion_decorator.override import override
import test_somepackage
class SuperClass(object):
    """Base fixture class for the @override tests."""
    def some_method(self):
        """Super Class Docs"""
        return 'super'
class SubClass(SuperClass):
    """Fixture: overrides some_method WITHOUT a docstring of its own."""
    @override
    def some_method(self):
        # Deliberately undocumented: the tests check that the superclass
        # docstring is carried over onto this override.
        return 'sub'
class Subber(SuperClass):
    """Fixture: overrides some_method WITH its own docstring, which the
    tests expect to be preserved."""
    @override
    def some_method(self):
        """Subber"""
        return 1
class Sub2(test_somepackage.SomeClass, SuperClass):
    """Fixture: overrides methods from two bases, one of which lives in a
    different package (test_somepackage)."""
    @override
    def somewhat_fun_method(self):
        # Overrides a method from test_somepackage.SomeClass.
        return 'foo'
    @override
    def some_method(self):
        # Overrides SuperClass.some_method.
        pass
class SubclassOfInt(int):
    """Fixture: @override applied to a builtin method (int.__str__)."""
    @override
    def __str__(self):
        return "subclass of int"
class overrideTests(unittest.TestCase):
    """Behavioral tests for the @override decorator."""

    def test_override_passes_for_same_package_superclass(self):
        """An undocumented override inherits the superclass docstring."""
        sub = SubClass()
        self.assertEqual(sub.some_method(), 'sub')
        self.assertEqual(sub.some_method.__doc__, 'Super Class Docs')

    def test_override_does_not_override_method_doc(self):
        """An override's own docstring is preserved."""
        sub = Subber()
        self.assertEqual(sub.some_method(), 1)
        self.assertEqual(sub.some_method.__doc__, 'Subber')

    def test_override_passes_for_superclass_in_another_package(self):
        """@override resolves superclasses imported from other packages."""
        sub2 = Sub2()
        self.assertEqual(sub2.somewhat_fun_method(), 'foo')
        self.assertEqual(sub2.somewhat_fun_method.__doc__, 'LULZ')

    def test_assertion_error_is_thrown_when_method_not_in_superclass(self):
        """Decorating a method that matches no superclass method fails at
        class-definition time."""
        # Idiom fix: use assertRaises instead of a manual try/except with a
        # sentinel RuntimeError.
        with self.assertRaises(AssertionError):
            class ShouldFail(SuperClass):
                @override
                def somo_method(self):
                    pass

    def test_can_override_builtin(self):
        """@override works on methods inherited from builtins."""
        x = SubclassOfInt(10)
        self.assertEqual(str(x), 'subclass of int')
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate
from frappe.model.document import Document
class Vehicle(Document):
    """Frappe document controller for the Vehicle doctype."""
    def validate(self):
        # Frappe calls validate() before save; reject records whose
        # insurance period is inverted (start after end).
        if getdate(self.start_date) > getdate(self.end_date):
            frappe.throw(_("Insurance Start date should be less than Insurance End date"))
from random import randint
def random_base(RNAflag = False):
    """Return one random nucleotide letter (RNA alphabet if RNAflag, else DNA)."""
    alphabet = "UCAG" if RNAflag else "TCAG"
    return alphabet[randint(0, 3)]


def random_codon(RNAflag = True):
    """Return a random three-letter codon built from three random_base calls."""
    return "".join(random_base(RNAflag) for _ in range(3))


print(random_codon())
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from infraform.cli.run import cli as run_cli
def add_run_parser(subparsers):
    """The parser for sub command 'run'.

    Registers the 'run' sub-parser on the given argparse subparsers object
    and wires it to run_cli.main via set_defaults(func=...).
    """
    run_parser = subparsers.add_parser("run")
    run_parser.set_defaults(func=run_cli.main)
    run_parser.add_argument('--scenario', '-s',
                            dest="scenario",
                            help='Predefined scenario to use for execution')
    # NOTE(review): the backslash continuations below embed the source
    # indentation inside the help strings — confirm this is intentional.
    run_parser.add_argument('--platform', dest="platform",
                            help="The platform to use \
(podman, docker, terraform, shell, python)")
    run_parser.add_argument('--vars', dest="vars",
                            default="",
                            help="extra variables")
    run_parser.add_argument('--skip-check', dest="skip_check",
                            action="store_true",
                            help="Skip requirements check")
    run_parser.add_argument('--hosts', dest="hosts",
                            default="", nargs='*',
                            help="host(s) to execute the scenario/command on \
by specifying host name or user@host")
    run_parser.add_argument('--commands', dest="commands",
                            default="", nargs='*',
                            help="Command(s) to execute instead of a scenario")
    run_parser.add_argument('--debug', dest="debug",
                            action="store_true",
                            help="Enable debug level logging")
|
# -*- coding: utf-8 -*-
# The encoding declaration above enables accented characters (ç ^ ~ ´ `).
print("Olá mundo!")
# The triple-quoted string below is a no-op expression statement, used here
# as a multi-line comment.
"""
Comentario
de
Multiplas
linhas
"""
import math  # needed for the floor/ceil demonstrations below
exNum1 = -5
exNum2 = 5
print(abs(exNum1))  # abs(): absolute value
if abs(exNum1) == exNum2:
    print('these are the same')
exList = [1,2,3,4,5,6,7,8,9,10,11]
print(max(exList))  # largest element
print(min(exList))  # smallest element
intMe = '55'  # deliberately a string, to demonstrate conversions
print(intMe)
print(int(intMe))    # str -> int
print(float(intMe))  # str -> float
strMe = 77
print(str(strMe))  # int -> str
x = 5.622
print(round(x))       # round to nearest integer
print(math.floor(x))  # round down
print(math.ceil(x))   # round up
|
import sys
from unittest.mock import patch
from conda_diff import cli
def test_parse_args():
    """parse_args maps the two positional CLI arguments onto the namespace."""
    fake_argv = ["conda-diff", "env_a", "env_b"]
    with patch.object(sys, "argv", fake_argv):
        parsed = cli.parse_args()
    assert parsed.environment_a == "env_a"
    assert parsed.environment_b == "env_b"
|
name = 'Michael'
age = 43
# Goal: create the string "Hi, I'm Michael and I'm 43 years old."
# crash: print("Hi, I'm " + name + " and I'm " + age + " years old.")
# (TypeError: str + int is not allowed)
# works, but not pythonic: manual str() conversion and concatenation
print("Hi, I'm " + name + " and I'm " + str(age) + " years old.")
# somewhat pythonic: old-style %-formatting
print("Hi, I'm %s and I'm %d years old." % (name, age))
# pythonic: str.format with positional auto-numbering
print("Hi, I'm {} and I'm {} years old.".format(name, age))
# explicit indices allow reordering and reuse of arguments
print("Hi, I'm {1} years old and my name is {0}, yeah {1}.".format(name, age))
data = {'day': 'Saturday', 'office': 'Home office', 'other': 'UNUSED'}
# named fields + **mapping: unused keys ('other') are simply ignored
# prints: On Saturday I was working in my Home office!
print("On {day} I was working in my {office}!".format(**data))
# keyword arguments by name
print("Hi, I'm {name} and I'm {age} years old.".format(name=name, age=age))
# Python 3.6+ f-string equivalent:
# print(f"Hi, I'm {name} and I'm {age} years old.")
|
import os
from flask import Flask, request, jsonify, g, url_for, abort
from flask_httpauth import HTTPBasicAuth
from models import db, PlantType, Plant, PlantData, User
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
auth = HTTPBasicAuth()
@app.before_first_request
def init_admin_account():
    """Ensure the configured admin user exists before serving requests."""
    username = app.config['ADMIN_USERNAME']
    admin = db.session.query(User).filter_by(username=username).first()
    if admin is not None:
        return
    # First boot: create the admin account from the app configuration.
    admin = User(username=username, role='admin')
    admin.hash_password(app.config['ADMIN_PASSWORD'])
    db.session.add(admin)
    db.session.commit()
@auth.get_user_roles
def get_roles(auth):
    """Return the role string for the authenticated user.

    Invoked by flask_httpauth after verify_password succeeds, so the user
    row is expected to exist; a missing row would raise AttributeError.
    """
    # TODO reduce the number of db queries
    user = db.session.query(User).filter_by(username=auth.username).first()
    return user.role
@auth.verify_password
def verify_password(username_or_token, password):
    """Authenticate with either an auth token or a username/password pair.

    On success the user object is stashed on flask.g for later handlers.
    """
    # Token authentication first.
    user = User.verify_auth_token(username_or_token)
    if user:
        g.user = user
        return True
    # Fall back to treating the value as a plain username.
    user = db.session.query(User).filter_by(username=username_or_token).first()
    if not user or not user.verify_password(password):
        return False
    g.user = user
    return True
@app.route('/')
def hello():
    """Root endpoint: a simple identification/liveness payload."""
    payload = {'app': 'plantinum api'}
    return payload
@app.route('/users', methods=['POST'])
@auth.login_required(role='admin')
def add_user():
    """Create a new user (admin only).

    Expects JSON with 'username', 'password' and 'role'; responds 201 with
    a Location header pointing at the new user resource, or 400 on an
    incomplete payload, unknown role, or duplicate username.
    """
    # TODO implement JSON validation with decorators
    # TODO implement logging with decorators
    payload = request.json
    username = payload.get('username')
    password = payload.get('password')
    role = payload.get('role')

    # Reject incomplete payloads and unknown roles.
    if None in (username, password, role):
        abort(400)
    if role not in User.types.values():
        abort(400)

    # Reject duplicate usernames.
    existing = (
        db.session
        .query(User)
        .filter_by(username=username)
        .first()
    )
    if existing is not None:
        abort(400)

    new_user = User(username=username, role=role)
    new_user.hash_password(password)
    db.session.add(new_user)
    db.session.commit()

    location = url_for(
        'retrieve_user',
        username=new_user.username,
        _external=True
    )
    return jsonify({'username': new_user.username}), 201, {'Location': location}
@app.route('/users/<username>', methods=['GET'])
@auth.login_required(role='admin')
def retrieve_user(username):
    """Return id, username and role for one user (admin only).

    Responds 404 when no such user exists.
    """
    user = db.session.query(User).filter_by(username=username).first()
    if not user:
        # Bug fix: a missing resource is 404 Not Found, not 400 Bad Request.
        abort(404)
    return jsonify({
        'id': user.id,
        'username': user.username,
        'role': user.role
    })
@app.route('/users', methods=['GET'])
@auth.login_required(role='admin')
def retrieve_user_list():
    """List users with offset/limit paging; '?role=' filters by role name
    (an unknown or absent role means all roles)."""
    offset = request.args.get('offset', 0)
    limit = request.args.get('limit', 5)
    role = request.args.get('role', '')

    # Translate the role name into its stored type code.
    role_code = next(
        (tcode for tcode, tname in User.types.items() if tname == role), None
    )
    if role_code is None:
        role_codes = tuple(User.types.keys())
    else:
        role_codes = (role_code,)

    user_list = (
        db.session.query(User)
        .filter(User.usertype.in_(role_codes))
        .order_by(User.id.asc())
        .offset(offset)
        .limit(limit)
        .all()
    )
    return jsonify([
        {'id': user.id, 'username': user.username, 'role': user.role}
        for user in user_list
    ])
@app.route('/token', methods=['GET'])
@auth.login_required
def make_auth_token():
    """Issue an auth token for the authenticated user.

    720 is passed to generate_auth_token and echoed as 'duration';
    presumably the lifetime in seconds — confirm against
    User.generate_auth_token.
    """
    token = g.user.generate_auth_token(720)
    # token is a bytes object, hence the decode before JSON serialization.
    return jsonify({'token': token.decode('ascii'), 'duration': 720})
@app.route('/plants/<int:plant_id>', methods=['GET'])
@auth.login_required
def fetch_plant(plant_id):
    """Return one plant by id; 404 when it does not exist.

    Fix: an unknown id used to raise AttributeError on None, surfacing
    as an HTTP 500 instead of a clean 404.
    """
    plant = (
        db.session.query(Plant)
        .filter_by(id=plant_id)
        .first()
    )
    if plant is None:
        abort(404)
    return jsonify({
        "id": plant.id,
        "name": plant.name,
        "date_added": plant.date_added
    })
@app.route('/plants', methods=['GET'])
@auth.login_required
def fetch_plant_list():
    """List plants (newest first), optionally filtered by type name.

    Query param `type` defaults to 'all' (no filter). Fix: the original
    duplicated the entire query in both branches; the type filter is now
    applied conditionally to one shared query, which also removes the
    commented-out `PlantType.name in type_names` FIXME code.
    """
    type_name = request.args.get('type', 'all')
    query = (
        db.session.query(Plant, PlantType)
        .filter(Plant.type_id == PlantType.id)
    )
    if type_name != 'all':
        query = query.filter(PlantType.name == type_name)
    results = query.order_by(Plant.id.desc()).all()
    plant_list = [
        {
            'id': plant.id,
            'name': plant.name,
            'type': plant_type.name,
            'date_added': plant.date_added
        }
        for plant, plant_type in results
    ]
    return jsonify(plant_list)
@app.route('/plants', methods=['POST'])
@auth.login_required(role=['admin', 'machine'])
def add_new_plant():
    """Create a plant from a JSON payload containing `name` and `type`.

    Returns 201 plus a Location header on success. Fixes two defects:
    the error branch returned HTTP 200 (now 400, consistent with
    receive_sensor_data), and a missing/unknown `name`/`type` key raised
    KeyError (HTTP 500) instead of a clean 400.
    """
    error_body = {
        "error": ("The request payload is not in JSON format"
                  " or the data is not complete")
    }
    if not request.is_json:
        return error_body, 400
    plant_info = request.get_json()
    # NOTE HARDCODED DATA — should really come from the PlantType table.
    type_codes = {
        'flower': 1,
        'succulent': 2,
        'foliageplant': 3,
        'palmplant': 4
    }
    name = plant_info.get('name')
    type_id = type_codes.get(plant_info.get('type'))
    if name is None or type_id is None:
        return error_body, 400
    new_plant = Plant(
        name=name,
        type_id=type_id,
    )
    db.session.add(new_plant)
    db.session.commit()
    return (
        jsonify({'plant_id': new_plant.id}),
        201,
        {'Location': url_for(
            'fetch_plant',
            plant_id=new_plant.id,
            _external=True
        )}
    )
# TODO change API endpoints: sensor_data to plant_data
@app.route('/plants/<int:plant_id>/sensor_data', methods=['GET'])
@auth.login_required
def fetch_data_list(plant_id):
    """Return every sensor reading for a plant, newest first, with all
    values rendered as strings."""
    # TODO Implement query param to fetch a number of data rows
    rows = (
        db.session.query(PlantData)
        .filter_by(plant_id=plant_id)
        .order_by(PlantData.id.desc())
        .all()
    )
    return jsonify([
        {
            'temperature': f'{row.temp}',
            'humidity': f'{row.humidity}',
            'moisture': f'{row.moisture}',
            'light_intensity': f'{row.light_intensity}',
            'img_url': f'{row.img_url}'
        }
        for row in rows
    ])
@app.route('/plants/<int:plant_id>/sensor_data', methods=['POST'])
@auth.login_required(role=['admin', 'machine'])
def receive_sensor_data(plant_id):
    """Store one sensor reading for a plant.

    Expects a JSON body with temperature, humidity, moisture,
    light_intensity, img_url and state; responds 400 when the payload is
    not JSON. NOTE(review): a missing key raises KeyError (HTTP 500) —
    presumably the trusted `machine` role always sends every field;
    confirm and add validation if not.
    """
    if request.is_json:
        data = request.get_json()
        ss_data = PlantData(
            plant_id=plant_id,
            temp=data['temperature'],  # change `temp` to `temperature`
            humidity=data['humidity'],
            moisture=data['moisture'],
            light_intensity=data['light_intensity'],
            img_url=data['img_url'],
            state=data['state']
        )
        db.session.add(ss_data)
        db.session.commit()
        return {
            "message": (f"Sensor data of plant with id {ss_data.plant_id} "
                        "has been inserted successfully")
        }
    else:
        return (
            {"error": ("The request payload is not in JSON format"
                       " or the data is incomplete")},
            400
        )
@app.route('/plants/<int:plant_id>/sensor_data/latest', methods=['GET'])
@auth.login_required
def retrieve_latest(plant_id):
    """Return the most recent sensor reading for a plant.

    Fix (the block's own TODO): when the plant has no readings the query
    returns None and every attribute access raised AttributeError
    (HTTP 500); respond 404 instead.
    """
    latest_ss_data = (
        db.session.query(PlantData)
        .filter_by(plant_id=plant_id)
        .order_by(PlantData.id.desc())
        .first()
    )
    if latest_ss_data is None:
        abort(404)
    return jsonify({
        "temperature": latest_ss_data.temp,
        "humidity": latest_ss_data.humidity,
        "moisture": latest_ss_data.moisture,
        "light_intensity": latest_ss_data.light_intensity,
        "img_url": latest_ss_data.img_url,
        "state": latest_ss_data.state,
        "time_recorded": latest_ss_data.time_recorded
    })
@app.route('/plants_with_data', methods=['GET'])
@auth.login_required
def fetch_plants_with_data():
    """Return every plant together with its latest sensor reading.

    Plants with no readings get an empty `latest_data` dict. NOTE: this
    issues one query per plant (N+1, see the TODO below) — fine for a
    handful of plants, a candidate for a join/subquery beyond that.
    """
    results = (
        db.session.query(Plant, PlantType)
        .filter(Plant.type_id == PlantType.id)
        # .filter(PlantType.name in type_names)
        .order_by(Plant.id.desc())
        .all()
    )
    plant_list = [
        {
            'id': plant.id,
            'name': plant.name,
            'type': plant_type.name,
            'date_added': plant.date_added
        }
        for plant, plant_type in results
    ]
    plants_with_data = []
    for plant in plant_list: #! TODO fix querying in loop
        latest_ss_data = (
            db.session.query(PlantData)
            .filter_by(plant_id=plant['id'])
            .order_by(PlantData.id.desc())
            .first()
        )
        if latest_ss_data is None:
            latest_plant_data = {}
        else:
            latest_plant_data = {
                "temperature": latest_ss_data.temp,
                "humidity": latest_ss_data.humidity,
                "moisture": latest_ss_data.moisture,
                "light_intensity": latest_ss_data.light_intensity,
                "img_url": latest_ss_data.img_url,
                "state": latest_ss_data.state,
                "time_recorded": latest_ss_data.time_recorded
            }
        plants_with_data.append({
            'id': plant['id'],
            'name': plant['name'],
            'type': plant['type'],
            'date_added': plant['date_added'],
            'latest_data': latest_plant_data
        })
    return jsonify(plants_with_data)
if __name__ == '__main__':
    # Development server only; debug=True must never be used in production.
    app.run(debug=True)
|
from datetime import datetime
from decimal import Decimal
from typing import List
from pytest import raises, mark, approx
from fastapi import HTTPException
from santaka.stock.utils import (
YahooMarket,
calculate_commission,
calculate_sell_tax,
get_active_markets,
validate_stock_transaction,
)
from santaka.stock.models import NewStockTransaction, TransactionType
from santaka.account import Bank
class FakeRecord:
    """Stand-in for a stock-transaction database row used by the tests."""

    def __init__(
        self,
        transaction_type,
        quantity,
        tax,
        commission,
        date,
        transaction_note,
        stock_id,
        price,
        transaction_ex_rate,
    ):
        # Mirror every constructor argument as a same-named attribute,
        # matching the columns the production record exposes.
        for field, value in (
            ("transaction_type", transaction_type),
            ("quantity", quantity),
            ("tax", tax),
            ("commission", commission),
            ("date", date),
            ("transaction_note", transaction_note),
            ("transaction_ex_rate", transaction_ex_rate),
            ("stock_id", stock_id),
            ("price", price),
        ):
            setattr(self, field, value)
def test_first_transaction_not_buy():
    """A sell with no prior history must raise HTTPException: you cannot
    sell a stock you never bought."""
    with raises(HTTPException):
        validate_stock_transaction(
            [],
            NewStockTransaction(
                price=1,
                quantity=1,
                date=datetime.now(),
                transaction_type=TransactionType.sell,
                stock_id=1,
            ),
        )
def test_total_quantity_greater_than_sell():
    """Selling more shares (2) than were previously bought (1) must raise
    HTTPException."""
    with raises(HTTPException):
        validate_stock_transaction(
            [FakeRecord(TransactionType.buy, 1, 1, 1, datetime.today(), "", 1, 1, 1)],
            NewStockTransaction(
                price=1,
                quantity=2,
                date=datetime.now(),
                transaction_type=TransactionType.sell,
                stock_id=1,
            ),
        )
@mark.parametrize(
["bank", "market", "price", "quantity", "expected", "financial_currency"],
[
[
Bank.FINECOBANK.value,
YahooMarket.ITALY.value,
Decimal("200"),
10,
Decimal("3.8"),
"",
],
[
Bank.FINECOBANK.value,
YahooMarket.ITALY.value,
Decimal("2150"),
10,
Decimal("19"),
"",
],
[
Bank.FINECOBANK.value,
YahooMarket.ITALY.value,
Decimal("23"),
10,
Decimal("2.95"),
"",
],
[
Bank.FINECOBANK.value,
YahooMarket.EU.value,
Decimal("99.98"),
3,
Decimal("2.95"),
"",
],
[
Bank.FINECOBANK.value,
YahooMarket.UK.value,
Decimal("16.935"),
60,
Decimal("20.0305"),
"",
],
[
Bank.FINECOBANK.value,
YahooMarket.USA_NYSE.value,
Decimal("216.5"),
60,
Decimal("12.95"),
"",
],
[
Bank.BG_SAXO.value,
YahooMarket.ITALY.value,
Decimal("44"),
10,
Decimal("2.5"),
"",
],
[
Bank.BG_SAXO.value,
YahooMarket.ITALY.value,
Decimal("100"),
50,
Decimal("8.5"),
"",
],
[
Bank.BG_SAXO.value,
YahooMarket.ITALY.value,
Decimal("440"),
100,
Decimal("17.5"),
"",
],
[
Bank.BG_SAXO.value,
YahooMarket.EU.value,
Decimal("44"),
100,
Decimal("11"),
"",
],
[
Bank.BG_SAXO.value,
YahooMarket.UK.value,
Decimal("16.935"),
60,
Decimal("16.0805"),
"",
],
[
Bank.BG_SAXO.value,
YahooMarket.USA_NASDAQ.value,
Decimal("44"),
100,
Decimal("11"),
"",
],
[
Bank.BG_SAXO.value,
YahooMarket.CANADA.value,
Decimal("124"),
30,
Decimal("29.6"),
"",
],
[
Bank.BANCA_GENERALI.value,
YahooMarket.ITALY.value,
Decimal("230"),
10,
Decimal("8"),
"",
],
[
Bank.BANCA_GENERALI.value,
YahooMarket.ITALY.value,
Decimal("630"),
10,
Decimal("9.45"),
"",
],
[
Bank.BANCA_GENERALI.value,
YahooMarket.ITALY.value,
Decimal("1630"),
10,
Decimal("20"),
"",
],
[
Bank.CHE_BANCA.value,
YahooMarket.ITALY.value,
Decimal("11.1"),
50,
Decimal("6"),
"",
],
[
Bank.CHE_BANCA.value,
YahooMarket.ITALY.value,
Decimal("75"),
100,
Decimal("13.5"),
"",
],
[
Bank.CHE_BANCA.value,
YahooMarket.ITALY.value,
Decimal("150"),
100,
Decimal("25"),
"",
],
[
Bank.CHE_BANCA.value,
YahooMarket.EU.value,
Decimal("11.1"),
60,
Decimal("12"),
"",
],
[
Bank.CHE_BANCA.value,
YahooMarket.EU.value,
Decimal("75"),
100,
Decimal("13.5"),
"",
],
[
Bank.CHE_BANCA.value,
YahooMarket.EU.value,
Decimal("200"),
100,
Decimal("35"),
"",
],
[None, YahooMarket.ITALY.value, Decimal("0"), 100, Decimal("0"), ""],
],
)
def test_calculate_commission(
    bank: str,
    market: str,
    price: Decimal,
    quantity: int,
    expected: Decimal,
    financial_currency: str,
):
    """Commission matches each bank's per-market fee schedule exactly
    (parametrized table above; an unknown bank yields Decimal('0'))."""
    commission = calculate_commission(bank, market, price, quantity, financial_currency)
    assert commission == expected
@mark.parametrize(
["market", "fiscal_price", "last_price", "quantity", "expected"],
[
[
YahooMarket.ITALY.value,
Decimal("13.4387"),
Decimal("10.582"),
75,
Decimal("0"),
],
[
YahooMarket.ITALY.value,
Decimal("24.7"),
Decimal("39.57"),
20,
Decimal("77.324"),
],
[
YahooMarket.USA_NYSE.value,
Decimal("118.59034"),
Decimal("127.35"),
5,
Decimal("16.25"),
],
[
YahooMarket.CANADA.value,
Decimal("42.19657"),
Decimal("57.42"),
140,
Decimal("790.7297"),
],
[
YahooMarket.EU.value,
Decimal("100.9633"),
Decimal("233.3"),
3,
Decimal("179.6073"),
],
],
)
def test_calculate_sell_tax(
    market: str,
    fiscal_price: Decimal,
    last_price: Decimal,
    quantity: int,
    expected: Decimal,
):
    """Sell tax is a fixed fraction (26% per the table's Italy case) of
    the realised gain, and 0 when selling at a loss; compared with a
    small absolute tolerance for Decimal rounding."""
    tax = calculate_sell_tax(market, fiscal_price, last_price, quantity)
    assert approx(tax, Decimal("0.001")) == expected
@mark.parametrize(
["dt", "expected_markets"],
[
[datetime(2021, 6, 11, 9, 0, 0, 0), ["LSE", "XETRA", "Milan"]],
[datetime(2021, 6, 16, 19, 0, 0, 0), ["NasdaqGS", "NYSE", "Toronto"]],
[datetime(2021, 7, 4, 19, 0, 0, 0), []],
[
datetime(2021, 7, 6, 15, 0, 0, 0),
["NasdaqGS", "NYSE", "LSE", "XETRA", "Milan", "Toronto"],
],
],
) # TODO add some test cases
def test_get_active_markets(dt: datetime, expected_markets: List[str]):
    """Markets reported open at `dt` depend on hour and weekday — e.g.
    2021-07-04 (a Sunday) yields an empty list."""
    active_market = get_active_markets(dt)
    assert active_market == expected_markets
|
import abc, methodtools
currentmodule = None
class UnitsError(Exception): pass  # domain error for unit/scale handling; no raise sites visible in this chunk — TODO confirm usage
class Distance:
    """Late-binding proxy that delegates construction to the Distance
    class of whichever implementation module `currentmodule` points at.

    `currentmodule` starts as None and is presumably assigned elsewhere
    before the first Distance(...) call — TODO confirm where.
    """
    def __new__(cls, *args, **kwargs):
        # Fix: __new__ receives the class, not an instance; the original
        # misleadingly named this parameter `self`.
        return currentmodule.Distance(*args, **kwargs)
def onepixel(pscale):
    """Return a Distance of exactly one pixel at the given pscale."""
    return Distance(pscale=pscale, pixels=1)
def onemicron(pscale):
    """Return a Distance of exactly one micron at the given pscale."""
    return Distance(pscale=pscale, microns=1)
class ThingWithPscale(abc.ABC):
    """Mixin giving a subclass a `pscale` plus cached one-pixel and
    one-micron Distances at that scale.

    The getter is abstract yet carries a default body that reads the
    name-mangled attribute written by the setter, so subclasses may
    either override `pscale` or rely on the inherited setter.
    """
    @property
    @abc.abstractmethod
    def pscale(self): return self.__pscale
    # object.__setattr__ with the explicit mangled name keeps the setter
    # working even on subclasses that override/freeze __setattr__.
    @pscale.setter
    def pscale(self, pscale): object.__setattr__(self, "_ThingWithPscale__pscale", pscale)
    # methodtools.lru_cache on a property memoises the Distance per
    # instance (presumably without pinning instances alive — confirm
    # against methodtools docs).
    @methodtools.lru_cache()
    @property
    def onepixel(self):
        return onepixel(pscale=self.pscale)
    @methodtools.lru_cache()
    @property
    def onemicron(self):
        return onemicron(pscale=self.pscale)
class ThingWithQpscale(abc.ABC):
    """Mixin providing `qpscale` plus cached one-qp-pixel/one-qp-micron
    Distances; structurally identical to ThingWithPscale."""
    @property
    @abc.abstractmethod
    def qpscale(self): return self.__qpscale
    # Mangled-name write via object.__setattr__ — see ThingWithPscale.
    @qpscale.setter
    def qpscale(self, qpscale): object.__setattr__(self, "_ThingWithQpscale__qpscale", qpscale)
    @methodtools.lru_cache()
    @property
    def oneqppixel(self):
        return onepixel(pscale=self.qpscale)
    @methodtools.lru_cache()
    @property
    def oneqpmicron(self):
        return onemicron(pscale=self.qpscale)
class ThingWithApscale(abc.ABC):
    """Mixin providing `apscale` plus cached one-ap-pixel/one-ap-micron
    Distances; structurally identical to ThingWithPscale."""
    @property
    @abc.abstractmethod
    def apscale(self): return self.__apscale
    # Mangled-name write via object.__setattr__ — see ThingWithPscale.
    @apscale.setter
    def apscale(self, apscale): object.__setattr__(self, "_ThingWithApscale__apscale", apscale)
    @methodtools.lru_cache()
    @property
    def oneappixel(self):
        return onepixel(pscale=self.apscale)
    @methodtools.lru_cache()
    @property
    def oneapmicron(self):
        return onemicron(pscale=self.apscale)
class ThingWithImscale(abc.ABC):
    """Mixin providing `imscale` and one-im-pixel/one-im-micron Distances.

    NOTE(review): unlike the sibling mixins above, the getter has no
    default body reading the mangled attribute the setter writes, and
    the Distance properties are not lru-cached. Possibly deliberate
    (imscale may be derived per access) — confirm before "fixing" the
    inconsistency.
    """
    @property
    @abc.abstractmethod
    def imscale(self): pass
    @imscale.setter
    def imscale(self, imscale): object.__setattr__(self, "_ThingWithImscale__imscale", imscale)
    @property
    def oneimpixel(self): return onepixel(pscale=self.imscale)
    @property
    def oneimmicron(self): return onemicron(pscale=self.imscale)
|
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
class AverageMeter(object):
    """Tracks the most recent value and a count-weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class RecordLoss():
    """Aggregates per-iteration training diagnostics (loss terms) and
    collects sample batches from q(x), p(x) and p(y)."""

    def __init__(self):
        self.n_iter = 0
        self.losses = AverageMeter()
        self.nlls = AverageMeter()
        self.point_kls = AverageMeter()
        self.logprob_pyxs = AverageMeter()
        self.entropys = AverageMeter()
        self.qx_samples = []
        self.px_samples = []
        self.py_samples = []
        # Fixed order: update() pairs these positionally with its arguments.
        self.measures = (
            self.losses,
            self.nlls,
            self.point_kls,
            self.logprob_pyxs,
            self.entropys,
        )

    def reset(self):
        """Return to a pristine state (meters, samples, iteration count)."""
        self.__init__()

    def add_samples_qx(self, samples):
        self.qx_samples.append(samples)

    def add_samples_py(self, samples):
        self.py_samples.append(samples)

    def add_samples_px(self, samples):
        self.px_samples.append(samples)

    def reset_samples(self):
        """Drop collected samples but keep the loss meters."""
        self.qx_samples = []
        self.px_samples = []
        self.py_samples = []

    def update(self, loss=None, nll=None, point_kl=None, logprob_pyx=None, entropy=None):
        """Feed (value, weight) pairs into the matching meters; None terms
        are skipped. Always advances the iteration counter."""
        terms = (loss, nll, point_kl, logprob_pyx, entropy)
        for meter, term in zip(self.measures, terms):
            if term is not None:
                meter.update(term[0], term[1])
        self.n_iter += 1
def batch_seqs(seqs):
    """Pad variable-length integer sequences into a (max_len, batch)
    LongTensor, zero-padded at the end of each column.

    seqs: iterable of integer sequences (one per batch element).
    Returns a torch.int64 tensor of shape (max_len, len(seqs)).
    """
    max_len = max(len(s) for s in seqs)
    # Allocate int64 up front: the original built a float64 array and let
    # torch.LongTensor truncate it, a pointless float round-trip.
    data = np.zeros((max_len, len(seqs)), dtype=np.int64)
    for i, s in enumerate(seqs):
        data[:len(s), i] = s
    return torch.LongTensor(data)
def weight_top_p(vec, p):
    """Nucleus (top-p) re-weighting of a probability vector.

    Walks entries in descending order, keeping mass until the cumulative
    total reaches `p` (the boundary entry is clipped), then renormalises
    the kept mass to sum to 1.
    """
    order = np.argsort(-vec)
    out = np.zeros_like(vec)
    total = 0
    for idx in order:
        overflow = max(0, total + vec[idx] - p)
        kept = vec[idx] - overflow
        out[idx] = kept
        total += kept
        if overflow > 0:
            break
    out /= out.sum()
    return out
def trim(L, obj):
    """Return L truncated just after the first occurrence of obj, or L
    unchanged when obj is absent."""
    try:
        return L[:L.index(obj) + 1]
    except ValueError:
        return L
"""Noam Scheduler."""
from torch.optim import lr_scheduler
class NoamLR(lr_scheduler._LRScheduler):
    r"""Noam learning-rate schedule ("Attention Is All You Need").

    The rate ramps up linearly over the first `warmup_steps` optimiser
    steps, then decays with the inverse square root of the step count,
    with the whole curve scaled by ``model_size ** -0.5``:

        scale(step) = model_size^-0.5 * min(step^-0.5,
                                            step * warmup_steps^-1.5)

    Parameters
    ----------
    optimizer : torch.optim.Optimizer
        Optimiser whose learning rate this schedule drives.
    model_size : int
        Model hidden dimension (d_model in the paper).
    warmup_steps : int
        Number of steps in the linear warm-up phase (default 4000).
    """

    def __init__(self, optimizer, model_size, warmup_steps=4000):
        self.warmup_steps = warmup_steps
        self.model_size = model_size
        super().__init__(optimizer)

    def scale(self, step):
        """Multiplier applied to each base LR at the given step (>= 1)."""
        warmup_term = step * self.warmup_steps ** (-1.5)
        decay_term = step ** (-0.5)
        return self.model_size ** (-0.5) * min(decay_term, warmup_term)

    def get_lr(self):
        # _step_count starts at 0; clamp to 1 so scale() never sees 0.
        factor = self.scale(max(1, self._step_count))
        return [base_lr * factor for base_lr in self.base_lrs]
|
# -*- coding: utf-8 -*-
import unittest
import time
import math
from timer.timer import Timer
from timer.timer_task_entry import TimerTaskEntry
class TimerTest(unittest.TestCase):
    """Integration test for the timing-wheel Timer."""

    # Wall-clock reference captured at class-definition time; _print
    # measures each task's actual firing delay against it.
    now = time.time()

    def _print(self, key):
        t = time.time() - self.now
        print('time: %s -- key: %s' % (round(t), key))
        # Each task must fire within 1 second of its requested delay.
        self.assertLess(math.fabs(key - t), 1, 'error')

    def test_add(self):
        """Task-addition test (translated from the Chinese original):
        schedule tasks at many delays and check each fires on time.

        NOTE(review): sleeps for the largest delay (30s of wall time) —
        slow by design; keep out of fast CI lanes.
        :return:
        """
        timer = Timer(wheel_size=5)
        # Delays in seconds, converted to ms for Timer below.
        keys = [0, 0.1, 0.3, 0.8, 1, 2, 3, 4, 5, 8, 9, 10, 18, 24, 26, 30]
        for key in keys:
            timer.add(TimerTaskEntry(delay=key * 1000, task=self._print, key=key))
        time.sleep(keys[-1])
        timer.shutdown()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import shutil
from docuploader import log, shell, tar
from docuploader.protos import metadata_pb2
from google.cloud import storage
from google.oauth2 import service_account
from google.protobuf import text_format
DOCFX_PREFIX = "docfx-"
DOCFX_JSON_TEMPLATE = """
{{
"build": {{
"content": [
{{
"files": ["**/*.yml", "**/*.md"],
"src": "obj/api",
"dest": "api"
}}
],
"globalMetadata": {{
"_appTitle": "{package}",
"_disableContribution": true,
"_appFooter": " ",
"_disableNavbar": true,
"_disableBreadcrumb": true,
"_enableSearch": false,
"_disableToc": true,
"_disableSideFilter": true,
"_disableAffix": true,
"_disableFooter": true,
"_rootPath": "{path}",
"_projectPath": "{project_path}"
}},
"template": [
"default",
"devsite_template"
],
"overwrite": [
"obj/snippets/*.md"
],
"dest": "site"
}}
}}
"""
def clone_templates(dir):
    """Shallow-clone googleapis/doc-templates into `dir` (quietly)."""
    command = [
        "git",
        "clone",
        "--depth=1",
        "https://github.com/googleapis/doc-templates.git",
        ".",
    ]
    shell.run(command, cwd=dir, hide_output=True)
def process_blob(blob, credentials, devsite_template):
    """Download one docfx- tarball, build its HTML with docfx, and
    re-upload the result to the same staging bucket.

    Works inside a throwaway ./tmp directory removed at the end.
    NOTE(review): tmp is not cleaned up on failure — presumably fine
    because each run recreates it with exist_ok; confirm.
    """
    log.info(f"Processing {blob.name}...")
    tmp_path = pathlib.Path("tmp")
    api_path = tmp_path.joinpath("obj/api")
    output_path = tmp_path.joinpath("site/api")
    api_path.mkdir(parents=True, exist_ok=True)
    # Fetch and unpack the documentation archive.
    tar_filename = tmp_path.joinpath(blob.name)
    tar_filename.parent.mkdir(parents=True, exist_ok=True)
    blob.download_to_filename(tar_filename)
    log.info(f"Downloaded gs://{blob.bucket.name}/{blob.name} to {tar_filename}")
    tar.decompress(tar_filename, api_path)
    log.info(f"Decompressed {blob.name} in {api_path}")
    # docs.metadata (protobuf text format) supplies package name/language.
    metadata_path = api_path.joinpath("docs.metadata")
    metadata = metadata_pb2.Metadata()
    text_format.Merge(metadata_path.read_text(), metadata)
    pkg = metadata.name
    # Generate the docfx build configuration for this package.
    with open(tmp_path.joinpath("docfx.json"), "w") as f:
        f.write(
            DOCFX_JSON_TEMPLATE.format(
                **{
                    "package": pkg,
                    "path": f"/{metadata.language}/docs/reference/{pkg}/latest",
                    "project_path": f"/{metadata.language}/",
                }
            )
        )
    log.info("Wrote docfx.json")
    # TODO: remove this once _toc.yaml is no longer created.
    if pathlib.Path(api_path.joinpath("_toc.yaml")).is_file():
        shutil.move(api_path.joinpath("_toc.yaml"), api_path.joinpath("toc.yml"))
    log.info(f"Running `docfx build` for {blob.name}...")
    shell.run(
        ["docfx", "build", "-t", f"default,{devsite_template.absolute()}"],
        cwd=tmp_path,
        hide_output=False,
    )
    # Rename the output TOC file to be _toc.yaml to match the expected
    # format.
    shutil.move(output_path.joinpath("toc.html"), output_path.joinpath("_toc.yaml"))
    log.success(f"Done building HTML for {blob.name}. Starting upload...")
    # Reuse the same docs.metadata file. The original docfx- prefix is an
    # command line option when uploading, not part of docs.metadata.
    shutil.copyfile(
        api_path.joinpath("docs.metadata"), output_path.joinpath("docs.metadata")
    )
    shell.run(
        [
            "docuploader",
            "upload",
            ".",
            f"--credentials={credentials}",
            f"--staging-bucket={blob.bucket.name}",
        ],
        cwd=output_path,
        hide_output=False,
    )
    shutil.rmtree(tmp_path)
    log.success(f"Done with {blob.name}!")
def build_blobs(blobs, credentials):
    """Clone the doc templates once, then build every archive in `blobs`.

    Per-blob failures are collected and re-raised together at the end so
    one bad archive does not stop the batch.
    """
    num = len(blobs)
    if num == 0:
        log.success("No blobs to process!")
        return
    log.info("Let's build some docs!")
    blobs_str = "\n".join(map(lambda blob: blob.name, blobs))
    log.info(f"Processing {num} blob{'' if num == 1 else 's'}:\n{blobs_str}")
    # Fresh clone of the shared doc templates for this run.
    templates_dir = pathlib.Path("doc-templates")
    if templates_dir.is_dir():
        shutil.rmtree(templates_dir)
    templates_dir.mkdir(parents=True, exist_ok=True)
    log.info(f"Cloning templates into {templates_dir.absolute()}")
    clone_templates(templates_dir)
    log.info(f"Got the templates ({templates_dir.absolute()})!")
    devsite_template = templates_dir.joinpath("third_party/docfx/templates/devsite")
    failures = []
    for blob in blobs:
        try:
            process_blob(blob, credentials, devsite_template)
        except Exception as e:
            # Keep processing the other files if an error occurs.
            log.error(f"Error processing {blob.name}:\n\n{e}")
            failures.append(blob.name)
    shutil.rmtree(templates_dir)
    if len(failures) > 0:
        failure_str = "\n".join(failures)
        raise Exception(
            f"Got errors while processing the following archives:\n{failure_str}"
        )
    log.success("Done!")
def storage_client(credentials):
    """Build a Cloud Storage client from a service-account key file path."""
    creds = service_account.Credentials.from_service_account_file(credentials)
    return storage.Client(project=creds.project_id, credentials=creds)
def build_all_docs(bucket_name, credentials):
    """Rebuild docs for every docfx- archive in the bucket."""
    blobs = storage_client(credentials).list_blobs(bucket_name)
    docfx = [b for b in blobs if b.name.startswith(DOCFX_PREFIX)]
    build_blobs(docfx, credentials)
def build_one_doc(bucket_name, object_name, credentials):
    """Rebuild docs for a single named archive; raise if it is missing."""
    client = storage_client(credentials)
    blob = client.bucket(bucket_name).get_blob(object_name)
    if blob is None:
        raise Exception(f"Could not find gs://{bucket_name}/{object_name}!")
    build_blobs([blob], credentials)
def build_new_docs(bucket_name, credentials):
    """Build only the docfx- archives with no already-processed twin.

    A docfx- blob is "new" when stripping the prefix yields a name that
    does not exist elsewhere in the bucket.
    """
    all_blobs = list(storage_client(credentials).list_blobs(bucket_name))
    docfx_blobs = [b for b in all_blobs if b.name.startswith(DOCFX_PREFIX)]
    processed = {b.name for b in all_blobs if not b.name.startswith(DOCFX_PREFIX)}
    pending = [
        b for b in docfx_blobs
        if b.name[len(DOCFX_PREFIX):] not in processed
    ]
    build_blobs(pending, credentials)
|
from app.service.storage.collection_crud import CollectionCrud
class Sources(list):
    """A list of source records with a bulk-persistence hook."""

    # Persistence
    def bulk(self) -> CollectionCrud:
        """Return a CollectionCrud bound to the "source" collection and
        this list's contents; presumably the caller triggers the actual
        write — TODO confirm CollectionCrud's contract."""
        return CollectionCrud("source", self)
|
from matplotlib import pyplot as plt
import math
# This program plots the side lengths of a right triangle and the derivative
# of a side length in the "falling ladder" related-rates calculus problem.
constant = 5  # length of the hypotenuse (the ladder)
dt = 0.0001  # unit of time. More accurate results with a smaller dt
b_speed = -1  # per unit of time
a_values = [3]
b_values = [4]
t = [0]
d = []
# Slide the ladder until side a reaches the hypotenuse length or side b hits 0.
while a_values[-1] < constant and b_values[-1] > 0:
    b_values.append(b_values[-1] + dt * b_speed)
    a_values.append(math.sqrt(constant ** 2 - b_values[-1] ** 2))
    t.append(t[-1] + dt)
    d.append((a_values[-1] - a_values[-2]) / dt)
d.append((a_values[-1] - a_values[-2]) / dt)  # repeat the last derivative so d has the same length as t; the more accurate alternative is seeding d with the analytic derivative at t=0
plt.plot(t, a_values, label='len of side a')
plt.plot(t, b_values, label="len of side b")
plt.plot(t, d, label="d of the len of side a")
plt.legend()
plt.show()
# print(a_values)
# print(b_values)
# print(t)
# coding: utf-8
# Team : uyplayer team
# Author: uyplayer
# Date :2019/11/20 下午4:22
# Tool :PyCharm
'''
https://blog.csdn.net/c9Yv2cf9I06K2A9E/article/details/79739287
https://msd.misuland.com/pd/13340603045208861
'''
class AttnDecoderRNN(nn.Module):
    """Attention decoder for seq2seq translation (per the tutorials
    linked above): attends over a fixed max_length window of encoder
    outputs each step.

    NOTE(review): relies on module-level `nn`, `F`, `torch`, `device`
    and `MAX_LENGTH`, none of which are imported in this file —
    presumably provided elsewhere; confirm before running.
    """
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size  # vocabulary size of the target language
        self.dropout_p = dropout_p
        self.max_length = max_length
        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        # Projects [embedded; hidden] down to max_length attention logits.
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)
    def forward(self, input, hidden, encoder_outputs):  # arguments are the decoder's inputs
        # `input` is a target-language token: either the ground truth
        # (teacher forcing) or the previous step's most probable output.
        # The initial `hidden` is the encoder's final hidden state.
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        # Concatenate the embedded token with the hidden state, project to
        # max_length attention logits, and softmax them into weights.
        attn_weight = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1
        )
        # torch.cat joins along dim=1; torch.bmm batch-multiplies the
        # attention weights with the encoder outputs:
        # (1,1,max_len) x (1,max_len,hidden) -> attention context vector.
        # unsqueeze inserts the required batch dimension.
        attn_applied = torch.bmm(attn_weight.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))
        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weight
    def initHidden(self):
        # Fresh all-zero hidden state on the configured device.
        return torch.zeros(1, 1, self.hidden_size, device=device)
"""Treadmill test skip windows.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
# Raising SkipTest at import time makes unittest/pytest mark every test
# in this module as skipped on Windows.
if os.name == 'nt':
    raise unittest.SkipTest("Test not applicable to Windows.")
|
{
"targets": [
{
"target_name": "manatee",
"sources": [ "src/manatee.cc" ],
"libraries": [ "-lBarcodeScanner" ]
}
]
}
|
import unittest
class FixturesTest(unittest.TestCase):
    """Demonstrates unittest fixture behaviour when setUp/tearDown fail.

    NOTE(review): the `1/0` lines make setUp and tearDown raise
    ZeroDivisionError, so every test errors out before its body runs —
    presumably deliberate, to show how fixture errors are reported;
    confirm, otherwise remove them.
    """
    def setUp(self):
        print('In setUp()')
        r=1/0  # raises ZeroDivisionError (see class docstring)
        self.fixture = range(1, 10)
    def tearDown(self):
        print('In tearDown()')
        r=1/0  # raises ZeroDivisionError (see class docstring)
        del self.fixture
    def test_fixture1(self):
        print('in test1()')
        self.assertEqual(self.fixture, range(1, 10))
    def test_fixture2(self):
        print('in test2()')
        # Intentionally unequal to test_fixture1's expectation.
        self.assertEqual(self.fixture, range(2, 10))
if __name__ == '__main__':
unittest.main()
|
"""
BSD 3-Clause License
Copyright (c) 2021, Netskope OSS
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""ArcSight Plugin SSL Log Handler."""
import os
import codecs
import logging
import logging.handlers
import ssl
import socket
from tempfile import NamedTemporaryFile
class SSLArcSightHandler(logging.handlers.SysLogHandler):
    """SSL ArcSightHandler Class.

    A SysLogHandler variant that ships formatted records to ArcSight over
    a TLS-wrapped TCP socket instead of plain UDP.

    Fixes in this revision:
    * ``ssl.wrap_socket()`` (deprecated 3.7, removed in Python 3.12) is
      replaced with an ``ssl.SSLContext`` — same verification behaviour.
    * ``emit()`` contained ``if type(msg) == "unicode":`` — comparing a
      type object against a string is always False, so that Python-2-era
      BOM/UTF-8 branch was dead code; it is removed (not "enabled") so
      the wire format is unchanged.
    """

    # We need to paste all this in because __init__ complains otherwise
    # This all comes from logging.handlers.SysLogHandler
    LOG_EMERG = 0  # system is unusable
    LOG_ALERT = 1  # action must be taken immediately
    LOG_CRIT = 2  # critical conditions
    LOG_ERR = 3  # error conditions
    LOG_WARNING = 4  # warning conditions
    LOG_NOTICE = 5  # normal but significant condition
    LOG_INFO = 6  # informational
    LOG_DEBUG = 7  # debug-level messages
    # facility codes
    LOG_KERN = 0  # kernel messages
    LOG_USER = 1  # random user-level messages
    LOG_MAIL = 2  # mail system
    LOG_DAEMON = 3  # system daemons
    LOG_AUTH = 4  # security/authorization messages
    LOG_SYSLOG = 5  # messages generated internally by syslogd
    LOG_LPR = 6  # line printer subsystem
    LOG_NEWS = 7  # network news subsystem
    LOG_UUCP = 8  # UUCP subsystem
    LOG_CRON = 9  # clock daemon
    LOG_AUTHPRIV = 10  # security/authorization messages (private)
    LOG_FTP = 11  # FTP daemon
    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16  # reserved for local use
    LOG_LOCAL1 = 17  # reserved for local use
    LOG_LOCAL2 = 18  # reserved for local use
    LOG_LOCAL3 = 19  # reserved for local use
    LOG_LOCAL4 = 20  # reserved for local use
    LOG_LOCAL5 = 21  # reserved for local use
    LOG_LOCAL6 = 22  # reserved for local use
    LOG_LOCAL7 = 23  # reserved for local use
    priority_names = {
        "alert": LOG_ALERT,
        "crit": LOG_CRIT,
        "critical": LOG_CRIT,
        "debug": LOG_DEBUG,
        "emerg": LOG_EMERG,
        "err": LOG_ERR,
        "error": LOG_ERR,  # DEPRECATED
        "info": LOG_INFO,
        "notice": LOG_NOTICE,
        "panic": LOG_EMERG,  # DEPRECATED
        "warn": LOG_WARNING,  # DEPRECATED
        "warning": LOG_WARNING,
    }
    facility_names = {
        "auth": LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron": LOG_CRON,
        "daemon": LOG_DAEMON,
        "ftp": LOG_FTP,
        "kern": LOG_KERN,
        "lpr": LOG_LPR,
        "mail": LOG_MAIL,
        "news": LOG_NEWS,
        "security": LOG_AUTH,  # DEPRECATED
        "syslog": LOG_SYSLOG,
        "user": LOG_USER,
        "uucp": LOG_UUCP,
        "local0": LOG_LOCAL0,
        "local1": LOG_LOCAL1,
        "local2": LOG_LOCAL2,
        "local3": LOG_LOCAL3,
        "local4": LOG_LOCAL4,
        "local5": LOG_LOCAL5,
        "local6": LOG_LOCAL6,
        "local7": LOG_LOCAL7,
    }
    # The map below appears to be trivially lowercase the key. However,
    # there's more to it than meets the eye - in some locales, lowercase
    # gives unexpected results. See SF #1524081: in the Turkish locale,
    # "INFO".lower() != "info"
    priority_map = {
        "DEBUG": "debug",
        "INFO": "info",
        "WARNING": "warning",
        "ERROR": "error",
        "CRITICAL": "critical",
    }

    def __init__(self, address, certs=None, facility=LOG_USER):
        """Open a TLS connection to `address`.

        address: (host, port) of the ArcSight receiver.
        certs: optional CA bundle as a PEM string; when given the server
            certificate is verified against it, otherwise verification
            is disabled (matching the original behaviour).
        facility: syslog facility code used by encodePriority().
        """
        logging.Handler.__init__(self)
        self.address = address
        self.facility = facility
        self.unixsocket = 0
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # check_hostname stays off in both branches because the original
        # ssl.wrap_socket() call never performed hostname verification.
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context.check_hostname = False
        if certs:
            # load_verify_locations needs a file, so round-trip the PEM
            # string through a temporary file.
            cert = NamedTemporaryFile(delete=False)
            cert.write(str.encode(certs))
            cert.flush()
            context.verify_mode = ssl.CERT_REQUIRED
            context.load_verify_locations(cafile=cert.name)
            self.socket = context.wrap_socket(s)
            cert.close()
            os.unlink(cert.name)
        else:
            context.verify_mode = ssl.CERT_NONE
            self.socket = context.wrap_socket(s)
        self.socket.connect(address)

    def close(self):
        """Close the TLS socket, then run the base handler teardown."""
        self.socket.close()
        logging.Handler.close(self)

    def emit(self, record):
        """Format `record`, prepend the syslog priority, and send it."""
        msg = self.format(record) + "\n"
        prio = "<%d>" % self.encodePriority(
            self.facility, self.mapPriority(record.levelname)
        )
        msg = prio + msg
        try:
            self.socket.write(str.encode(msg))
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            self.handleError(record)
|
import os
import io
from google.cloud import vision
from google.cloud.vision import types
import cv2
def detect_cloth_image(path):
    """Run Vision API object localisation on the image at `path` and
    return the normalised bounding polygons of "Dress"/"Top" objects.

    NOTE(review): uses the module-level `client` created in the
    __main__ block, so this only works when run as a script — confirm
    before importing this module elsewhere.
    """
    with open(path, 'rb') as image_file:
        content = image_file.read()
    print("AAA", type(content))
    image = vision.types.Image(content=content)
    objects = client.object_localization(
        image=image).localized_object_annotations
    print('Number of objects found: {}'.format(len(objects)))
    # Only garment classes we know how to crop downstream.
    object_name_list = ["Dress", "Top"]
    coordinate_list = []
    for object_ in objects:
        print('\n{} (confidence: {})'.format(object_.name, object_.score))
        print('Normalized bounding polygon vertices: ')
        if object_.name in object_name_list:
            coordinate_list.append(object_.bounding_poly.normalized_vertices)
        for vertex in object_.bounding_poly.normalized_vertices:
            print(' - ({}, {})'.format(vertex.x, vertex.y))
    return coordinate_list
def define_main_color():
    """Return the most dominant color of ./result.png per the Vision API.

    Fixes: the API error was checked only AFTER indexing into the
    dominant-colors payload, so a failed request surfaced as a confusing
    IndexError/empty payload instead of the real error message; an empty
    color list now raises explicitly instead of IndexError.
    """
    with io.open("./result.png", 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)
    response = client.image_properties(image=image)
    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
    props = response.image_properties_annotation
    print('Properties:')
    if not props.dominant_colors.colors:
        raise Exception('No dominant colors returned for ./result.png')
    # Colors are returned ordered; the first is the most dominant.
    dominant_color = props.dominant_colors.colors[0]
    return dominant_color
def crop_cloth_image(coordinate_list, path):
    """Crop each detected garment region out of the image at *path*.

    ``coordinate_list`` holds normalized-vertex polygons (0..1 range) as
    returned by ``detect_cloth_image``; they are rescaled to pixel
    coordinates here.

    NOTE(review): every loop iteration overwrites "result.png", so only the
    LAST region survives — confirm whether one output file per region was
    intended. ``define_main_color`` currently reads that single file.
    """
    img = cv2.imread(path)
    y = img.shape[0]  # image height in pixels
    x = img.shape[1]  # image width in pixels
    for coord in coordinate_list:
        # Assumes vertex 0 is top-left, 1 top-right, 3 bottom-left of the
        # bounding polygon — TODO confirm against the Vision API response.
        w = int((coord[1].x - coord[0].x)*x)
        h = int((coord[3].y - coord[0].y)*y)
        x0 = int(coord[0].x * x)
        y0 = int(coord[0].y * y)
        print(f'{x} {y} {w} {h}')
        crop_image = img[y0:y0+h, x0:x0+w]
        print(crop_image)
        cv2.imwrite("result.png", crop_image)
if __name__ == "__main__":
    # Point the Vision client at the local service-account credentials file.
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'config.json'
    # Module-level client: the functions above read this global at call time.
    client = vision.ImageAnnotatorClient()
    path = "./test1.jpg"
    # Detect garment bounding boxes, crop them to result.png, then sample
    # the dominant color of the cropped region.
    coordinate_list = detect_cloth_image(path)
    crop_cloth_image(coordinate_list, path)
    dominant_color = define_main_color()
    print("dominant_color", dominant_color)
|
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class CheckerBoardFilterInputSpec(CommandLineInputSpec):
    """Input traits for the Slicer CheckerBoardFilter CLI (autogenerated spec)."""
    checkerPattern = InputMultiPath(traits.Int, desc="The pattern of input 1 and input 2 in the output image. The user can specify the number of checkers in each dimension. A checkerPattern of 2,2,1 means that images will alternate in every other checker in the first two dimensions. The same pattern will be used in the 3rd dimension.", sep=",", argstr="--checkerPattern %s")
    inputVolume1 = File(position=-3, desc="First Input volume", exists=True, argstr="%s")
    inputVolume2 = File(position=-2, desc="Second Input volume", exists=True, argstr="%s")
    outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s")
class CheckerBoardFilterOutputSpec(TraitedSpec):
    """Output traits for CheckerBoardFilter: the composed checkerboard volume."""
    outputVolume = File(position=-1, desc="Output filtered", exists=True)
class CheckerBoardFilter(SEMLikeCommandLine):
    """title: CheckerBoard Filter
    category: Filtering
    description: Create a checkerboard volume of two volumes. The output volume will show the two inputs alternating according to the user supplied checkerPattern. This filter is often used to compare the results of image registration. Note that the second input is resampled to the same origin, spacing and direction before it is composed with the first input. The scalar type of the output volume will be the same as the input image scalar type.
    version: 0.1.0.$Revision: 19608 $(alpha)
    documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CheckerBoard
    contributor: Bill Lorensen (GE)
    acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
    """
    # Wire the traits specs and the Slicer CLI executable name (trailing
    # space in _cmd is part of the autogenerated command template).
    input_spec = CheckerBoardFilterInputSpec
    output_spec = CheckerBoardFilterOutputSpec
    _cmd = "CheckerBoardFilter "
    _outputs_filenames = {'outputVolume':'outputVolume.nii'}
|
from django.core.management.base import BaseCommand
from places.models import Place, Image
import requests
from django.core.files.base import ContentFile
class Command(BaseCommand):
    help = 'Загружаем локации и картинки в БД'

    def add_arguments(self, parser):
        """Register the JSON-source URL as a positional argument."""
        parser.add_argument('url', type=str)

    def handle(self, *args, **options):
        """Fetch a place description (JSON) from *url* and store it with images.

        Fixes: the inner image request no longer reassigns ``response``
        (which shadowed the place payload dict being iterated), and the
        leftover ``print(created)`` debug output is removed.
        """
        url = options['url']
        response = requests.get(url)
        response.raise_for_status()
        place_payload = response.json()
        place, created = Place.objects.get_or_create(
            title=place_payload['title'],
            defaults={
                "short_description": place_payload['description_short'],
                "long_description": place_payload['description_long'],
                "longitude": place_payload['coordinates']['lng'],
                "latitude": place_payload['coordinates']['lat'],
                "slug": place_payload['title'],
            },
        )
        # NOTE(review): images are loaded only when the place ALREADY existed
        # (`not created`), which re-adds images on every run and never loads
        # them for a brand-new place; confirm whether `if created:` was meant.
        if not created:
            for image_link in place_payload['imgs']:
                image_response = requests.get(image_link)
                image_response.raise_for_status()
                imagefile = ContentFile(image_response.content)
                filename = image_link.split('/')[-1]
                image = Image.objects.create(place=place)
                # Ordering mirrors insertion order via the autogenerated id.
                image.position = image.id
                image.image.save(filename, imagefile, save=True)
|
import sys
import numpy as np
def preprocess(text):
    """Tokenize *text* into a word-ID corpus.

    Lowercases the text, separates the final period into its own token,
    and assigns IDs in first-seen order.

    Returns:
        (corpus, word_to_id, id_to_word) where corpus is a 1-D np.ndarray
        of word IDs and the two dicts map between words and IDs.
    """
    tokens = text.lower().replace('.', ' .').split(' ')
    word_to_id = {}
    id_to_word = {}
    for token in tokens:
        if token not in word_to_id:
            # Next unused ID is simply the current vocabulary size.
            word_to_id[token] = len(word_to_id)
            id_to_word[len(id_to_word)] = token
    corpus = np.array([word_to_id[token] for token in tokens])
    return corpus, word_to_id, id_to_word
def create_contexts_target(corpus, window_size=1):
    """Build (contexts, target) training pairs from a word-ID corpus.

    For every position that has a full window on both sides, the target is
    the center word and the context is the surrounding window_size words on
    each side (center excluded).
    """
    targets = corpus[window_size:-window_size]
    contexts = [
        [corpus[pos + off]
         for off in range(-window_size, window_size + 1) if off != 0]
        for pos in range(window_size, len(corpus) - window_size)
    ]
    return np.array(contexts), np.array(targets)
def convert_one_hot(corpus, vocab_size):
    """Convert word IDs to one-hot vectors.

    :param corpus: 1-D or 2-D NumPy array of word IDs
    :param vocab_size: vocabulary size (length of each one-hot vector)
    :return: int32 one-hot array, 2-D for 1-D input, 3-D for 2-D input
    """
    if corpus.ndim == 1:
        one_hot = np.zeros((corpus.shape[0], vocab_size), dtype=np.int32)
        # Each row gets a single 1 at its word's ID.
        for row, word_id in zip(one_hot, corpus):
            row[word_id] = 1
    elif corpus.ndim == 2:
        rows, cols = corpus.shape
        one_hot = np.zeros((rows, cols, vocab_size), dtype=np.int32)
        for i in range(rows):
            for j in range(cols):
                one_hot[i, j, corpus[i, j]] = 1
    return one_hot
def clip_grads(grads, max_norm):
    """Rescale *grads* in place so their global L2 norm is at most ~max_norm.

    The scale factor uses a small epsilon in the denominator, so the
    resulting norm is marginally below max_norm when clipping occurs.
    No-op when the global norm is already within the limit.
    """
    global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    scale = max_norm / (global_norm + 1e-6)
    if scale < 1:
        for g in grads:
            g *= scale
def cos_similarity(x, y, eps=1e-8):
    """Compute the cosine similarity between vectors *x* and *y*.

    :param x: vector
    :param y: vector
    :param eps: tiny value guarding against division by zero
    :return: dot product of the two (near-)unit vectors
    """
    x_unit = x / (np.sqrt(np.sum(x ** 2)) + eps)
    y_unit = y / (np.sqrt(np.sum(y ** 2)) + eps)
    return np.dot(x_unit, y_unit)
def most_similar(query, word_to_id, id_to_word, word_matrix, top=5):
    """Print the words most similar to *query* by cosine similarity.

    :param query: query word (text)
    :param word_to_id: dict mapping word -> word ID
    :param id_to_word: dict mapping word ID -> word
    :param word_matrix: matrix whose rows are the word vectors
    :param top: how many results to print
    """
    # Guard clause: unknown queries are reported, not an error.
    if query not in word_to_id:
        print('%s is not found' % query)
        return
    print('\n[query] ' + query)
    query_vec = word_matrix[word_to_id[query]]
    vocab_size = len(id_to_word)
    similarity = np.zeros(vocab_size)
    for idx in range(vocab_size):
        similarity[idx] = cos_similarity(word_matrix[idx], query_vec)
    # Walk IDs from most to least similar, skipping the query itself.
    shown = 0
    for idx in (-1 * similarity).argsort():
        if id_to_word[idx] == query:
            continue
        print(' %s: %s' % (id_to_word[idx], similarity[idx]))
        shown += 1
        if shown >= top:
            return
def eval_perplexity(model, corpus, batch_size=10, time_size=35):
    """Evaluate the perplexity of *model* over *corpus*.

    Batches are taken at evenly spaced offsets through the corpus; targets
    are the inputs shifted by one position (with wrap-around). Models whose
    forward() does not accept ``train_flg`` are called without it.
    """
    print('evaluating perplexity ...')
    corpus_size = len(corpus)
    max_iters = (corpus_size - 1) // (batch_size * time_size)
    jump = (corpus_size - 1) // batch_size
    total_loss = 0
    for it in range(max_iters):
        xs = np.zeros((batch_size, time_size), dtype=np.int32)
        ts = np.zeros((batch_size, time_size), dtype=np.int32)
        base = it * time_size
        for row in range(batch_size):
            start = base + row * jump
            for t in range(time_size):
                xs[row, t] = corpus[(start + t) % corpus_size]
                ts[row, t] = corpus[(start + t + 1) % corpus_size]
        try:
            loss = model.forward(xs, ts, train_flg=False)
        except TypeError:
            # Model without a train_flg keyword.
            loss = model.forward(xs, ts)
        total_loss += loss
        sys.stdout.write('\r%d / %d' % (it, max_iters))
        sys.stdout.flush()
    print('')
    return np.exp(total_loss / max_iters)
from django.test import TestCase, Client
from django.urls import reverse
class TestViews(TestCase):
    """Smoke tests for the user registration view."""

    def setUp(self):
        # Fresh test client and the resolved URL of the register view.
        self.client = Client()
        self.register_url = reverse('register')

    def test_register_request(self):
        """GET /register returns 200 and renders the registration template."""
        response = self.client.get(self.register_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'users/register.html')
|
'''OpenGL extension APPLE.texture_2D_limited_npot
This module customises the behaviour of the
OpenGL.raw.GLES1.APPLE.texture_2D_limited_npot to provide a more
Python-friendly API
Overview (from the spec)
Conventional OpenGL ES 1.X texturing is limited to images with
power-of-two (POT) dimensions. APPLE_texture_2D_limited_npot extension
relaxes these size restrictions for 2D textures. The restrictions remain
in place for cube map and 3D textures, if supported.
There is no additional procedural or enumerant API introduced by this
extension except that an implementation which exports the extension string
will allow an application to pass in 2D texture dimensions that may or may
not be a power of two.
In the absence of OES_texture_npot, which lifts these restrictions, neither
mipmapping nor wrap modes other than CLAMP_TO_EDGE are supported in
conjunction with NPOT 2D textures. A NPOT 2D texture with a wrap mode that
is not CLAMP_TO_EDGE or a minfilter that is not NEAREST or LINEAR is
considered incomplete. If such a texture is bound to a texture unit, it is
as if texture mapping were disabled for that texture unit.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/APPLE/texture_2D_limited_npot.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.APPLE.texture_2D_limited_npot import *
from OpenGL.raw.GLES1.APPLE.texture_2D_limited_npot import _EXTENSION_NAME
def glInitTexture2DLimitedNpotAPPLE():
    '''Return boolean indicating whether this extension is available'''
    # Local import mirrors the PyOpenGL autogenerated template; the
    # extension registry is consulted at call time (a current GL context
    # is required for a meaningful answer).
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
from typing import Callable
import pytest
from web3 import Web3
from web3.contract import Contract
from raiden_synapse_modules.presence_router.blockchain_support import (
read_initial_services_addresses,
setup_contract_from_address,
install_filters,
)
from conftest import register_service
@pytest.mark.parametrize("number_of_services", [2])
def test_service_registry(
    web3: Web3, service_registry_with_deposits: Contract, number_of_services: int
) -> None:
    """The deposited registry reports all services, and the initial read sees them."""
    registry = service_registry_with_deposits
    assert registry is not None
    deposit_count = registry.functions.everMadeDepositsLen().call()
    assert deposit_count == number_of_services
    services = read_initial_services_addresses(registry, "latest")
    assert len(services) == number_of_services
@pytest.mark.parametrize("number_of_services", [1])
def test_setup_contract_from_address(
    service_registry_with_deposits: Contract, number_of_services: int
) -> None:
    """Rebuilding the contract proxy from its address yields a usable registry."""
    registry = setup_contract_from_address(
        service_registry_with_deposits.address,
        service_registry_with_deposits.web3,  # type: ignore
    )
    assert registry is not None
    assert registry.functions.everMadeDepositsLen().call() == number_of_services
@pytest.mark.parametrize("number_of_services", [0])
def test_install_filters(
    service_registry_with_deposits: Contract, custom_token: Contract, get_accounts: Callable
) -> None:
    """Fresh filters start empty and pick up a service registration."""
    block_filter, event_filter = install_filters(service_registry_with_deposits)
    # No activity yet: both filters must report nothing.
    assert block_filter.get_all_entries() == []
    assert event_filter.get_all_entries() == []
    account = get_accounts(1)[0]
    register_service(service_registry_with_deposits, custom_token, account)
    # A registration produces four blocks and exactly one registry event.
    assert len(block_filter.get_all_entries()) == 4
    events = event_filter.get_all_entries()
    assert len(events) == 1
    assert events[0].args.service.lower() == account.lower()  # type: ignore
|
"""
Copyright (C) 2019-2021, Monash University, Geoscience Australia
Copyright (C) 2018, Stuart Walsh
Bluecap is released under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The project uses third party components which may have different licenses.
Please refer to individual components for more details.
"""
# IO
from IO.XML import NewXMLTree,GetXMLTreeRoot,SaveXMLFile,LoadXMLFile
from IO.XML import HasChild,GetChild,GetChildren,AddChild
from IO.XML import GetAttributeString,GetAttributeFileString
from IO.XML import HasAttribute,GetAttributeStringOrDefault,GetAttributeValueOrDefault
from IO.XML import SetAttributeString
from IO.XML import GetXMLTag
from IO.XML import PrettyXMLFormat,AddNote
from Common.Common import BluecapError
class ActionManager(object):
    """Manager controlling which actions are run in the problem manager."""

    def __init__(self):
        """ Create an empty ActionManager object and default variables. """
        self.actions = []      # ordered list of Action objects to execute
        self.activeIndex = 0   # index of the next action to run

    def HasCompleted(self):
        """Returns true if all actions in this action manager have been completed."""
        rv = len(self.actions) <= self.activeIndex
        return rv

    def RunNextAction(self, problemManager):
        """Run the next actions in the list of actions."""
        if(self.activeIndex < len(self.actions)):
            self.actions[self.activeIndex].Run(problemManager)
            self.activeIndex += 1

    def RunUntil(self, finalAction, problemManager):
        """Run actions until the final action is reached.

        NOTE(review): as written this executes at most ONE action per call
        (an ``if``, not a ``while``); callers must invoke it repeatedly to
        reach finalAction — confirm whether a loop was intended.
        """
        if(self.activeIndex < len(self.actions) and self.activeIndex <= finalAction):
            self.actions[self.activeIndex].Run(problemManager)
            self.activeIndex += 1

    def ParseXMLNode(self, acManagerNode, problemManager):
        """Generate actions from the action manager xml tree node."""
        for child in GetChildren(acManagerNode):
            type = GetXMLTag(child)
            # "note" children are documentation, not actions.
            if(type != "note"):
                self.actions.append( ActionFactory.CreateFromXML(type,child,problemManager) )

    def WriteXMLNode(self, node):
        """Write actions to xml node."""
        # functions
        for action in self.actions:
            type = action.typeStr
            funcNode = AddChild(node,type)
            action.WriteXMLNode(funcNode)
        return node
####################
## Action factory
class ActionFactory:
    """Registry of factories that build Action objects from XML nodes."""

    factories = {}

    @staticmethod
    def AddFactory(id, factory):
        """Register *factory* under the key *id*."""
        ActionFactory.factories[id] = factory

    @staticmethod
    def CreateFromXML(id, xmlNode, problemManager):
        """Look up the factory registered for *id* and delegate construction.

        Raises BluecapError when no factory was registered for *id*.
        """
        if id not in ActionFactory.factories:
            raise BluecapError("Error: Could not find " + id + " in ActionFactory")
        factory = ActionFactory.factories[id]
        return factory.CreateFromXML(xmlNode, problemManager)
|
import numpy as np
import matplotlib.pyplot as plt
from ODEmethods.methods import rk_methods
from ODEmethods.rungekutta import RKMethod
# Chemical reactions: Iodine clock - Persulfate variation
def iodine_clock(t, y, par):
    """Right-hand side of the iodine-clock ODE system (persulfate variation).

    y holds the eight species concentrations and par the four rate
    constants; returns d(y)/dt as a list of eight floats. *t* is unused
    (autonomous system) but kept for the ODE-solver call signature.
    """
    # One reaction rate per rate constant.
    r1 = par[0] * y[0] * y[1]
    r2 = par[1] * y[1] * y[2]
    r3 = par[2] * y[3] * y[5]
    r4 = par[3] * y[5] * y[6]
    return [
        -r1,
        -r1 - r2 + r3 + r4,
        r1 - r2,
        r2 - r3,
        2 * r2,
        -r3 - r4,
        r3 - r4,
        r4,
    ]
# Rate constants k1..k4 for the four reactions.
par = [1, 10, 100, 1000]
# RK methods
stepnum = 10000
stepsize = 0.001
problem_rk = RKMethod(rk_methods["original_rk"], iodine_clock, par)
rk = problem_rk.run(x0=0, xf=stepnum*stepsize, y0=[1., 0., 0.25, .09, 0., 1., 0., 0.], init_step=0.001)
# Script for plotting graphs.
# Improvement: the eight duplicated plot/semilogx calls and the duplicated
# legend tuple are replaced by a loop over species and one shared tuple —
# identical figure, no copy-paste.
species_labels = ('$S_2 O^{2-}_8$', '$I^-$', '$IS_2 O^{2-}_8$', '$I_2$', '$SO^{2-}_4$', '$S_2 O^{2-}_3$', '$IS_2 O^{-}_3$', '$S_4 O^{2-}_6$')
fig = plt.figure()
fig.set_size_inches(13, 5)
# Left panel: linear time axis.
plt.subplot(1, 2, 1)
for species in range(len(species_labels)):
    plt.plot(rk[0], rk[1][:, species])
plt.xlabel(r'$t$')
plt.ylabel(r'$N(t)$')
plt.legend(species_labels, loc='upper right')
# Right panel: logarithmic time axis.
plt.subplot(1, 2, 2)
for species in range(len(species_labels)):
    plt.semilogx(rk[0], rk[1][:, species])
plt.xlabel(r'$t$')
plt.ylabel(r'$N(t)$')
plt.suptitle("Iodine clock: Persulfate variation")
plt.legend(species_labels, loc='upper left')
plt.subplots_adjust(left=0.1, right=0.95, bottom=0.1, top=0.9, wspace=0.25, hspace=0.3)
plt.savefig("iodine_clock.png")
plt.show()
import ast
from kanren import var
from tests.helpers import EvaloTestCase
class TestStatements(EvaloTestCase):
    """Tests for (reverse-)interpreting Python statements with kanren."""

    def test_expression_doesnt_change_env(self):
        # A bare expression statement must leave the environment empty.
        ret, _ = self.run_stmt(ast_expr=ast.Expr(value=ast.Num(n=1)), value=var())
        self.assertEqual(ret[0], [])

    def test_assignment_adds_variable_to_env(self):
        ret, _ = self.run_stmt(
            ast_expr=ast.Assign(
                targets=[ast.Name(id="a", ctx=ast.Store())],
                value=ast.Num(n=1),
            ),
            value=var(),
            env=[],
        )
        # After `a = 1`, the environment binds "a" to 1.
        self.assertEqual(ret[0], [["a", 1]])

    def test_reverse_interpret_assignment(self):
        # Running backwards: given the resulting env, recover the assignment
        # whose right-hand side produced it.
        ret, _ = self.run_stmt(
            ast_expr=ast.Assign(
                targets=[ast.Name(id="a", ctx=ast.Store())],
                value=var(),
            ),
            value=[["a", []]],
            env=[],
            eval_expr=True,
        )
        self.assertIsInstance(ret[0], ast.Assign)
        self.assertEqual(ast.literal_eval(ret[0].value), [])
|
# import the GPIO module
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering and silence re-run warnings.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

# The light_* pin numbers are assigned further below; Python resolves the
# globals at call time, so the helpers work as long as they are called
# after those assignments.
def red_light_on():
    # turn the light on
    GPIO.output(light_red, True)

def red_light_off():
    # turn the light off
    GPIO.output(light_red, False)

def yellow_light_on():
    # turn the light on
    GPIO.output(light_yellow, True)

def yellow_light_off():
    # turn the light off
    GPIO.output(light_yellow, False)

def green_light_on():
    # turn the light on
    GPIO.output(light_green, True)

def green_light_off():
    # turn the light off
    GPIO.output(light_green, False)
# set which pin number we used for the lights (BCM numbering)
light_red = 26
light_yellow = 19
light_green = 13
# setup the pins as an output
GPIO.setup(light_red, GPIO.OUT)
GPIO.setup(light_yellow, GPIO.OUT)
GPIO.setup(light_green, GPIO.OUT)
# Traffic-light sequence: green (go) -> amber -> red (stop).
# turn on the green light for go
red_light_off()
yellow_light_off()
green_light_on()
# wait for 3 seconds
time.sleep(3)
# switch to amber light
red_light_off()
yellow_light_on()
green_light_off()
# wait for 3 seconds
time.sleep(3)
# switch to red light for stop
red_light_on()
yellow_light_off()
green_light_off()
# wait for 5 seconds
time.sleep(5)
|
from types import FunctionType
from unittest.mock import patch
from django.test import TestCase
from django.core.exceptions import ValidationError
from vimage.core.validator_types import ValidationRuleDimensions
from vimage.core.exceptions import InvalidValueError
from .test_validation_rule_base import ValidatorTestCase
from .const import dotted_path
class ValidationRuleDimensionsTestCase(TestCase):
    """Tests for ValidationRuleDimensions rule parsing and humanization."""

    def test_init(self):
        # Dimension rules always report their unit as pixels.
        vr = ValidationRuleDimensions('DIMENSIONS', 100)
        self.assertEqual(vr.unit, 'px')

    def test_humanize_rule(self):
        # Tuple, two-item list and three-item list phrasings.
        vr = ValidationRuleDimensions('DIMENSIONS', (400, 400))
        self.assertEqual(vr.humanize_rule(), 'equal to 400 x 400px')
        vr = ValidationRuleDimensions('DIMENSIONS', [(40, 40), (50, 50)])
        self.assertEqual(
            vr.humanize_rule(),
            'equal to one of the following dimensions 40 x 40px or 50 x 50px'
        )
        vr = ValidationRuleDimensions('DIMENSIONS', [(4, 4), (5, 5), (6, 6)])
        self.assertEqual(
            vr.humanize_rule(),
            'equal to one of the following dimensions 4 x 4px, 5 x 5px '
            'or 6 x 6px'
        )

    def test_humanize_rule_dict(self):
        # Width+height operator dicts, each alone, and whole-size operators.
        vr = ValidationRuleDimensions('DIMENSIONS', {
            'w': {
                'gte': 1000,
                'lte': 1500,
            },
            'h': {
                'gt': 500,
                'lt': 600,
            }
        })
        self.assertEqual(
            vr.humanize_rule(),
            'Width greater than or equal to 1000px and less than or equal to '
            '1500px. Height greater than 500px and less than 600px'
        )
        vr = ValidationRuleDimensions('DIMENSIONS', {
            'w': {
                'gte': 1000,
                'lte': 1500,
            },
        })
        self.assertEqual(
            vr.humanize_rule(),
            'Width greater than or equal to 1000px and less than or equal to '
            '1500px'
        )
        vr = ValidationRuleDimensions('DIMENSIONS', {
            'h': {
                'gt': 500,
                'lt': 600,
            },
        })
        self.assertEqual(
            vr.humanize_rule(),
            'Height greater than 500px and less than 600px'
        )
        vr = ValidationRuleDimensions('DIMENSIONS', {
            'gt': (500, 500),
            'lt': (600, 600),
        })
        self.assertEqual(
            vr.humanize_rule(),
            'greater than 500 x 500px and less than 600 x 600px'
        )

    def test_prettify_list(self):
        vr = ValidationRuleDimensions('DIMENSIONS', [(3, 3), (4, 4)])
        self.assertListEqual(
            vr.prettify_list([(3, 3), (4, 4)]),
            ['3 x 3px', '4 x 4px']
        )

    def test_prettify_value(self):
        vr = ValidationRuleDimensions('DIMENSIONS', (3, 3))
        self.assertEqual(vr.prettify_value((3, 3)), '3 x 3px')
        # Custom separator character is honored.
        self.assertEqual(vr.prettify_value((3, 3), 'Χ'), '3 Χ 3px')

    def test_has_width_height_keys(self):
        # Rule value must be a dict
        vr = ValidationRuleDimensions('DIMENSIONS', 100)
        with self.assertRaises(AttributeError):
            vr.has_width_height_keys()
        width_height_rules = [
            ValidationRuleDimensions('DIMENSIONS', {'w': 1}),
            ValidationRuleDimensions('DIMENSIONS', {'h': 1}),
            ValidationRuleDimensions('DIMENSIONS', {'w': 1, 'h': 1}),
        ]
        for rule in width_height_rules:
            with self.subTest(rule=rule):
                self.assertTrue(rule.has_width_height_keys())
        no_width_height_rules = [
            ValidationRuleDimensions('DIMENSIONS', {}),
            ValidationRuleDimensions('DIMENSIONS', {'gte': 100}),
        ]
        for rule in no_width_height_rules:
            with self.subTest(rule=rule):
                self.assertFalse(rule.has_width_height_keys())

    @patch(dotted_path('validator_types', 'ValidationRuleDimensions',
                       'validate_operators'))
    def test_valid_dict_rule_width_height(self, patch_method):
        # With w/h keys, operators must be validated against int values.
        vr = ValidationRuleDimensions('DIMENSIONS', {'w': {}})
        vr.valid_dict_rule()
        args, kwargs = patch_method.call_args
        self.assertTrue(patch_method.called)
        self.assertEqual(args, ({}, int))
        self.assertEqual(kwargs, {})

    @patch(dotted_path('validator_types', 'ValidationRuleDimensions',
                       'validate_operators'))
    def test_valid_dict_rule_wo_width_height(self, patch_method):
        # Without w/h keys, operators apply to whole (w, h) tuples.
        vr = ValidationRuleDimensions('DIMENSIONS', {})
        vr.valid_dict_rule()
        args, kwargs = patch_method.call_args
        self.assertTrue(patch_method.called)
        self.assertEqual(args, ({}, tuple))
        self.assertEqual(kwargs, {})

    def test_is_valid__dimensions_rule_type(self):
        """
        "rule" should be either a tuple, a list or a dict filled with proper
        key-value validation rules.
        """
        vr = ValidationRuleDimensions('DIMENSIONS', '')
        err = f'The value of the rule "DIMENSIONS", "", ' \
              f'should be either a tuple, a list or a dict.'
        with self.assertRaisesMessage(InvalidValueError, err):
            vr.is_valid()
        vr = ValidationRuleDimensions('DIMENSIONS', 12)
        err = f'The value of the rule "DIMENSIONS", "12", ' \
              f'should be either a tuple, a list or a dict.'
        with self.assertRaisesMessage(InvalidValueError, err):
            vr.is_valid()

    def test_is_valid__dimensions_rule_tuple(self):
        # Tuples must hold exactly two positive integers.
        invalid_vrs = [
            ValidationRuleDimensions('DIMENSIONS', ()),
            ValidationRuleDimensions('DIMENSIONS', (10,)),
            ValidationRuleDimensions('DIMENSIONS', (-10, 10)),
            ValidationRuleDimensions('DIMENSIONS', (-10, -10)),
            ValidationRuleDimensions('DIMENSIONS', (10, 10, 10)),
        ]
        for vr in invalid_vrs:
            with self.subTest(vr=vr):
                err = f'The value of the rule "DIMENSIONS", "{vr.rule}", ' \
                      f'should consist of two positive integers.'
                with self.assertRaisesMessage(InvalidValueError, err):
                    vr.is_valid()
        vr = ValidationRuleDimensions('DIMENSIONS', (10, 10))
        self.assertIsNone(vr.is_valid())

    def test_is_valid__dimensions_rule_list(self):
        # Lists must hold tuples, each of two positive integers.
        invalid_vrs = [
            ValidationRuleDimensions('DIMENSIONS', []),
            ValidationRuleDimensions('DIMENSIONS', [(10,)]),
            ValidationRuleDimensions('DIMENSIONS', [(10, -10)]),
            ValidationRuleDimensions('DIMENSIONS', [(10, 10), (-10, 10)]),
        ]
        for vr in invalid_vrs:
            with self.subTest(vr=vr):
                err = f'The value of the rule "DIMENSIONS", "{vr.rule}", ' \
                      f'should consist of tuples with two positive ' \
                      f'integers, each.'
                with self.assertRaisesMessage(InvalidValueError, err):
                    vr.is_valid()
        vr = ValidationRuleDimensions('DIMENSIONS', [(10, 10), (5, 5)])
        self.assertIsNone(vr.is_valid())

    def test_is_valid__dimensions_rule_dict(self):
        vr = ValidationRuleDimensions('DIMENSIONS', {})
        err = f'The value of the rule "DIMENSIONS", "{{}}", ' \
              f'should be a non-empty dict.'
        with self.assertRaisesMessage(InvalidValueError, err):
            vr.is_valid()
        with patch(dotted_path('validator_types', 'ValidationRuleDimensions',
                               'valid_dict_rule')) as m:
            vr = ValidationRuleDimensions('DIMENSIONS', {'gte': 100})
            vr.is_valid()
            # if dict is non-empty check that "valid_dict_rule" is called.
            self.assertTrue(m.called)
class ValidationRuleDimensionsValidatorTestCase(ValidatorTestCase):
    """Tests for the validator callables generated from dimension rules.

    The fixture image ``self.img`` (from ValidatorTestCase) is implied by
    the assertions to be 500 x 498 px — confirm in test_validation_rule_base.
    """

    def test_generator_is_function(self):
        vr = ValidationRuleDimensions('a', 1)
        validator = vr.generate_validator()
        self.assertIsInstance(validator, FunctionType)

    def test_generator_docstring(self):
        # The generated validator documents the rule it enforces.
        vr = ValidationRuleDimensions('a', 1)
        validator = vr.generate_validator()
        self.assertEqual(validator.__doc__, 'a: 1')

    def test_generate_validator_tuple_valid(self):
        vr = ValidationRuleDimensions('DIMENSIONS', (500, 498))
        validator = vr.generate_validator()
        self.assertIsNone(validator(self.img))

    def test_generate_validator_tuple_invalid(self):
        vr = ValidationRuleDimensions('DIMENSIONS', (500, 500))
        validator = vr.generate_validator()
        with self.assertRaises(ValidationError):
            validator(self.img)

    def test_generate_validator_list_valid(self):
        # Any entry in the list matching the image dimensions is enough.
        vr = ValidationRuleDimensions('DIMENSIONS', [(500, 500), (500, 498)])
        validator = vr.generate_validator()
        self.assertIsNone(validator(self.img))

    def test_generate_validator_list_invalid(self):
        vr = ValidationRuleDimensions('DIMENSIONS', [(500, 500), (100, 100)])
        validator = vr.generate_validator()
        with self.assertRaises(ValidationError):
            validator(self.img)

    def test_generate_validator_dict_valid(self):
        # Operator dicts on whole dimensions and per-axis (w/h) variants.
        valid_rules = [
            ValidationRuleDimensions('DIMENSIONS', {
                'gte': (500, 498),
                'lte': (500, 498),
            }),
            ValidationRuleDimensions('DIMENSIONS', {'lt': (600, 600)}),
            ValidationRuleDimensions('DIMENSIONS', {'lte': (500, 498)}),
            ValidationRuleDimensions('DIMENSIONS', {'gte': (500, 498)}),
            ValidationRuleDimensions('DIMENSIONS', {
                'gte': (500, 498),
                'lte': (500, 498),
                'ne': (100, 100),
            }),
            ValidationRuleDimensions('DIMENSIONS', {
                'gte': (500, 498),
                'lte': (500, 498),
                'ne': (100, 100),
                'err': 'dimensions error message',
            }),
            ValidationRuleDimensions('DIMENSIONS', {'ne': (100, 100)}),
            ValidationRuleDimensions('DIMENSIONS', {'eq': (500, 498)}),
            ValidationRuleDimensions('DIMENSIONS', {
                'w': {
                    'gt': 100,
                },
                'h': {
                    'eq': 498,
                }
            }),
            ValidationRuleDimensions('DIMENSIONS', {
                'w': {
                    'eq': 500,
                },
            }),
            ValidationRuleDimensions('DIMENSIONS', {
                'h': {
                    'eq': 498,
                    'err': 'width error message',
                }
            }),
            ValidationRuleDimensions('DIMENSIONS', {
                'w': {
                    'gt': 100,
                    'err': 'width error message',
                },
                'h': {
                    'eq': 498,
                    'err': 'height error message',
                }
            }),
        ]
        for vr in valid_rules:
            with self.subTest(vr=vr):
                validator = vr.generate_validator()
                self.assertIsNone(validator(self.img))

    def test_generate_validator_dict_invalid(self):
        # Each rule here conflicts with the 500 x 498 fixture image.
        invalid_rules = [
            ValidationRuleDimensions('DIMENSIONS', {'gte': (150, 1000)}),
            ValidationRuleDimensions('DIMENSIONS', {'lt': (500, 498)}),
            ValidationRuleDimensions('DIMENSIONS', {'lte': (300, 400)}),
            ValidationRuleDimensions('DIMENSIONS', {
                'gte': (150, 700),
                'lte': (100, 100),
            }),
            ValidationRuleDimensions('DIMENSIONS', {
                'gte': (600, 100),
                'lt': (1000, 1000),
                'ne': (450, 450),
            }),
            ValidationRuleDimensions('DIMENSIONS', {'ne': (500, 498)}),
            ValidationRuleDimensions('DIMENSIONS', {'eq': (50, 498)}),
            ValidationRuleDimensions('DIMENSIONS', {
                'w': {
                    'lt': 100,
                },
                'h': {
                    'eq': 498,
                }
            }),
            ValidationRuleDimensions('DIMENSIONS', {
                'h': {
                    'ne': 498,
                }
            }),
            ValidationRuleDimensions('DIMENSIONS', {
                'w': {
                    'lte': 499,
                },
            }),
        ]
        for vr in invalid_rules:
            with self.subTest(vr=vr):
                validator = vr.generate_validator()
                with self.assertRaises(ValidationError):
                    validator(self.img)

    def test_generate_validator_custom_error(self):
        # testing custom width/height error messages
        vr = ValidationRuleDimensions('DIMENSIONS', {
            'w': {
                'gt': 500,
                'err': 'width error message.',
            },
            'h': {
                'eq': 100,
                'err': 'height error message.',
            }
        })
        validator = vr.generate_validator()
        err = 'width error message. height error message'
        with self.assertRaisesMessage(ValidationError, err):
            validator(self.img)
        vr = ValidationRuleDimensions('DIMENSIONS', {
            'w': {
                'gt': 500,
                'err': 'width error message.',
            },
            'h': {
                'eq': 100,
            }
        })
        validator = vr.generate_validator()
        err = 'width error message.'
        with self.assertRaisesMessage(ValidationError, err):
            validator(self.img)
        vr = ValidationRuleDimensions('DIMENSIONS', {
            'w': {
                'gt': 500,
                'err': 'width error message.',
            }
        })
        validator = vr.generate_validator()
        err = 'width error message.'
        with self.assertRaisesMessage(ValidationError, err):
            validator(self.img)
        vr = ValidationRuleDimensions('DIMENSIONS', {
            'lt': (50, 50),
            'err': 'error here!',
        })
        validator = vr.generate_validator()
        err = 'error here!'
        with self.assertRaisesMessage(ValidationError, err):
            validator(self.img)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 12:58:27 2019
@author: TempestGuerra
"""
import numpy as np
def computeTemperatureProfileOnGrid(PHYS, REFS, Z_in, T_in, isSmooth, isUniform):
    """Evaluate temperature T(z) and its first/second derivatives on the grid.

    PHYS: list of physical constants (indices 0, 2 and 7 are read here —
        presumably g, cp and N; TODO confirm against the caller).
    REFS: reference data; REFS[1] is the 1D z grid and REFS[5] the 2D
        terrain-following height array ZTL (levels x columns).
    Z_in, T_in: sounding heights and temperatures for interpolation.
    isSmooth: blend the tropopause region with a cubic polynomial.
    isUniform: use the analytic profile for uniform buoyancy frequency.
    Returns (TZ, DTDZ, D2TDZ2), each shaped like ZTL.
    """
    # Get REFS data
    z = REFS[1]
    ZTL = REFS[5]
    NC = len(ZTL[0,:])
    TZ = np.zeros(ZTL.shape)
    DTDZ = np.zeros(ZTL.shape)
    D2TDZ2 = np.zeros(ZTL.shape)
    if isUniform:
        # Loop over each column and evaluate temperature for uniform N
        T0 = T_in[0]
        A = PHYS[7]**2 / PHYS[0]
        C = PHYS[0] / PHYS[2]
        for cc in range(NC):
            zcol = ZTL[:,cc]
            EXPF = np.exp(A * zcol)
            # Analytic solution and its exact first/second derivatives.
            TZ[:,cc] = T0 * EXPF + (C / A) * (1.0 - EXPF)
            DTDZ[:,cc] = (A * T0 - C) * EXPF
            D2TDZ2[:,cc] = A * (A * T0 - C) * EXPF
    else:
        if isSmooth:
            ZTP = Z_in[1] # tropopause height
            ZTM = Z_in[2] # top of stratospheric mixed layer
            ZH = Z_in[3] # top of the model atmosphere
            TS = T_in[0] # Surface temperature
            TTP = T_in[1] # Temperature at tropopause
            TTM = T_in[2] # Temperature at top of mixed layer
            TH = T_in[3] # Temperature at model top
            # Lapse rates below the tropopause and above the mixed layer;
            # the cubic matches values AND slopes at ZTP and ZTM.
            DTS = (TTP - TS) / (ZTP - Z_in[0])
            DTH = (TH - TTM) / (ZH - ZTM)
            # 3rd order polynomial fit coefficient matrix
            VandermondeM = np.array([[1.0, ZTP, ZTP**2, ZTP**3], \
                                     [1.0, ZTM, ZTM**2, ZTM**3], \
                                     [0.0, 1.0, 2*ZTP, 3*ZTP**2], \
                                     [0.0, 1.0, 2*ZTM, 3*ZTM**2]])
            # 3rd order polynomial fit RHS (values then slopes at ZTP, ZTM)
            VRHS = [TTP, \
                    TTM, \
                    DTS, \
                    DTH]
            '''
            # 5th order polynomial fit coefficient matrix
            VandermondeM = np.array([[ZTP**2, ZTP**3, ZTP**4, ZTP**5], \
                                     [ZTM**2, ZTM**3, ZTM**4, ZTM**5], \
                                     [ZH**2, ZH**3, ZH**4, ZH**5], \
                                     [2*ZH, 3*ZH**2, 4*ZH**3, 5*ZH**4]])
            # 5th order polynomial fit RHS
            VRHS = [TTP - TS - ZTP*DTS, \
                    TTM - TS - ZTM*DTS, \
                    TH - TS - ZH*DTS, \
                    DTH - DTS]
            '''
            coeffs = np.linalg.solve(VandermondeM, VRHS)
            # Loop over each column and evaluate interpolant
            for cc in range(NC):
                zcol = ZTL[:,cc]
                # Get the 1D linear interpolation for this sounding
                TZ[:,cc] = np.interp(zcol, Z_in, T_in)
                # Get piece-wise derivatives, loop over layers
                for pp in range(len(Z_in) - 1):
                    # Local lapse rate
                    LR = (T_in[pp+1] - T_in[pp]) / (Z_in[pp+1] - Z_in[pp])
                    # Loop over the layer
                    for kk in range(len(zcol)):
                        if (z[kk] >= Z_in[pp]) and (z[kk] <= Z_in[pp+1]):
                            DTDZ[kk,cc] = LR
                            D2TDZ2[kk,cc] = 0.0
                # Adjust the tropopause to smooth the profile
                tpDex = [kk for kk in range(len(zcol)) if ZTP <= zcol[kk] <= ZTM]
                # Evaluate the polynomial and derivative (lapse rates)
                TZ[tpDex,cc] = coeffs[0] + coeffs[1] * zcol[tpDex] + \
                    coeffs[2] * np.power(zcol[tpDex],2) + \
                    coeffs[3] * np.power(zcol[tpDex],3)
                DTDZ[tpDex,cc] = coeffs[1] + 2 * coeffs[2] * zcol[tpDex] + \
                    3 * coeffs[3] * np.power(zcol[tpDex],2)
                D2TDZ2[tpDex,cc] = 2 * coeffs[2] + 6 * coeffs[3] * zcol[tpDex]
            '''
            # Loop over each column and evaluate interpolant
            TZ = np.zeros(ZTL.shape)
            DTDZ = np.zeros(ZTL.shape)
            for cc in range(NC):
                zcol = ZTL[:,cc]
                # Evaluate the polynomial and derivative (lapse rates)
                TZ[:,cc] = TS + DTS * zcol + coeffs[0] * np.power(zcol,2) \
                    + coeffs[1] * np.power(zcol,3) \
                    + coeffs[2] * np.power(zcol,4) \
                    + coeffs[3] * np.power(zcol,5)
                DTDZ[:,cc] = DTS + (2 * coeffs[0] * zcol) \
                    + (3 * coeffs[1] * np.power(zcol,2)) \
                    + (4 * coeffs[2] * np.power(zcol,3)) \
                    + (5 * coeffs[3] * np.power(zcol,4))
            '''
        else:
            # Loop over each column and evaluate interpolant
            TZ = np.zeros(ZTL.shape)
            DTDZ = np.zeros(ZTL.shape)
            for cc in range(NC):
                zcol = ZTL[:,cc]
                # Get the 1D linear interpolation for this sounding
                TZ[:,cc] = np.interp(zcol, Z_in, T_in)
                # Get piece-wise derivatives, loop over layers
                for pp in range(len(Z_in) - 1):
                    # Local lapse rate
                    LR = (T_in[pp+1] - T_in[pp]) / (Z_in[pp+1] - Z_in[pp])
                    # Loop over the layer
                    for kk in range(len(zcol)):
                        if (z[kk] >= Z_in[pp]) and (z[kk] <= Z_in[pp+1]):
                            DTDZ[kk,cc] = LR
                            D2TDZ2[kk,cc] = 0.0
    return TZ, DTDZ, D2TDZ2
"""Test google directory service module functionality."""
from config import PATHS
from mock import MagicMock
from mock import patch
import sys
import unittest
# Need to mock the call to get an XSRF token at function definition time, i.e.
# when the module is loaded. http://stackoverflow.com/a/7667621/2830207
def MockToken():
  """Stand-in XSRF token generator; always yields the empty string."""
  token = ''
  return token
# Install a mocked xsrf module BEFORE importing application code, so the
# decorator evaluated at module-import time sees MockToken instead of the
# real XSRF generator.
MOCK_XSRF = MagicMock()
MOCK_XSRF.XSRFToken = MockToken
sys.modules['xsrf'] = MOCK_XSRF
from appengine_config import JINJA_ENVIRONMENT
import google_directory_service
from google_directory_service import GoogleDirectoryService
from google_directory_service import MY_CUSTOMER_ALIAS
from google_directory_service import NUM_RETRIES
from google_directory_service import VALID_WATCH_EVENTS
def MockHttpFunction():
  """Stand-in for the oauth decorator's http(); hands back the shared mock."""
  http_object = MOCK_HTTP
  return http_object
# Shared mocks handed to GoogleDirectoryService in every test.
MOCK_HTTP = MagicMock()
MOCK_OAUTH_DECORATOR = MagicMock()
MOCK_OAUTH_DECORATOR.http = MockHttpFunction
MOCK_SERVICE = MagicMock()
# Fixture data: two users (one admin), group members (two users plus a
# nested group entry), and paging tokens for the paged-response tests.
FAKE_EMAIL_1 = 'foo@mybusiness.com'
FAKE_EMAIL_2 = 'bar@mybusiness.com'
FAKE_USER_1 = {}
FAKE_USER_1['primaryEmail'] = FAKE_EMAIL_1
FAKE_USER_1['isAdmin'] = True
FAKE_USER_2 = {}
FAKE_USER_2['primaryEmail'] = FAKE_EMAIL_2
FAKE_USER_2['isAdmin'] = False
FAKE_USERS = [FAKE_USER_1, FAKE_USER_2]
FAKE_ID_1 = 'some id 1'  # Also doubles as a user key
FAKE_ID_2 = 'some id 2'
FAKE_GROUP_MEMBER_USER_1 = {}
FAKE_GROUP_MEMBER_USER_1['type'] = 'USER'
FAKE_GROUP_MEMBER_USER_1['id'] = FAKE_ID_1
FAKE_GROUP_MEMBER_USER_2 = {}
FAKE_GROUP_MEMBER_USER_2['type'] = 'USER'
FAKE_GROUP_MEMBER_USER_2['id'] = FAKE_ID_2
FAKE_GROUP_MEMBER_GROUP = {}
FAKE_GROUP_MEMBER_GROUP['type'] = 'GROUP'
FAKE_GROUP = [FAKE_GROUP_MEMBER_USER_1, FAKE_GROUP_MEMBER_USER_2,
              FAKE_GROUP_MEMBER_GROUP]
FAKE_PAGE_TOKEN = 'I am a fake page token'
FAKE_GROUP_KEY = 'my_group@mybusiness.com'
class GoogleDirectoryServiceTest(unittest.TestCase):
  """Test google directory service class functionality."""
  # Tests wire the mock chain service -> resource() -> request() -> execute()
  # explicitly so the call signature of every level can be asserted.
  @patch('google_directory_service.build')
  def setUp(self, mock_build):
    """Setup test object on which to call methods later on."""
    # pylint: disable=arguments-differ
    mock_build.return_value = MOCK_SERVICE
    self.directory_service = GoogleDirectoryService(MOCK_OAUTH_DECORATOR)
  @patch('google_directory_service.build')
  def testInit(self, mock_build):
    """Test that init passes the correct parameters and creates an object."""
    fake_service = MagicMock()
    mock_build.return_value = fake_service
    google_directory_service = GoogleDirectoryService(MOCK_OAUTH_DECORATOR)
    mock_build.assert_called_once_with(serviceName='admin',
                                       version='directory_v1',
                                       http=MOCK_HTTP)
    self.assertEqual(google_directory_service.service, fake_service)
  def testConstantDefinitions(self):
    """Test the constants set in GoogleDirectoryService are as expected."""
    self.assertEqual(MY_CUSTOMER_ALIAS, 'my_customer')
    self.assertEqual(NUM_RETRIES, 3)
    allowed_watch_events = ['add', 'delete', 'makeAdmin', 'undelete', 'update']
    self.assertEqual(VALID_WATCH_EVENTS, allowed_watch_events)
  @patch.object(MOCK_SERVICE.users.list, 'execute')
  @patch.object(MOCK_SERVICE.users, 'list')
  @patch.object(MOCK_SERVICE, 'users')
  def testGetUsers(self, mock_users, mock_list, mock_execute):
    """Test the get users request handles a valid response correctly."""
    fake_dictionary = {}
    fake_dictionary['users'] = FAKE_USERS
    mock_execute.return_value = fake_dictionary
    mock_list.return_value.execute = mock_execute
    mock_users.return_value.list = mock_list
    self.directory_service.users = mock_users
    users_returned = self.directory_service.GetUsers()
    mock_users.assert_called_once_with()
    mock_list.assert_called_once_with(customer=MY_CUSTOMER_ALIAS,
                                      maxResults=500, pageToken='',
                                      projection='full', orderBy='email')
    mock_execute.assert_called_once_with(num_retries=NUM_RETRIES)
    self.assertEqual(users_returned, FAKE_USERS)
  @patch.object(MOCK_SERVICE.users.list, 'execute')
  @patch.object(MOCK_SERVICE.users, 'list')
  @patch.object(MOCK_SERVICE, 'users')
  def testGetUsersPaged(self, mock_users, mock_list, mock_execute):
    """Test the get users request handles a long valid response correctly."""
    # First page carries nextPageToken; second page ends the pagination.
    fake_dictionary_1 = {}
    fake_dictionary_1['users'] = FAKE_USERS
    fake_dictionary_1['nextPageToken'] = FAKE_PAGE_TOKEN
    fake_extra_user = MagicMock()
    fake_dictionary_2 = {}
    fake_dictionary_2['users'] = [fake_extra_user]
    expected_list = []
    expected_list += FAKE_USERS
    expected_list += [fake_extra_user]
    def SideEffect(customer, maxResults, pageToken, projection, orderBy):
      """Mock list function to return different mock execute calls."""
      # pylint: disable=unused-argument
      # pylint: disable=invalid-name
      if pageToken == '':
        mock_execute.return_value = fake_dictionary_1
      else:
        mock_execute.return_value = fake_dictionary_2
      some_object = MagicMock()
      some_object.execute = mock_execute
      return some_object
    mock_list.side_effect = SideEffect
    mock_users.return_value.list = mock_list
    self.directory_service.users = mock_users
    users_returned = self.directory_service.GetUsers()
    mock_users.assert_any_call()
    mock_list.assert_any_call(customer=MY_CUSTOMER_ALIAS,
                              maxResults=500, pageToken='',
                              projection='full', orderBy='email')
    mock_list.assert_any_call(customer=MY_CUSTOMER_ALIAS,
                              maxResults=500, pageToken=FAKE_PAGE_TOKEN,
                              projection='full', orderBy='email')
    mock_execute.assert_any_call(num_retries=NUM_RETRIES)
    self.assertEqual(users_returned, expected_list)
  @patch.object(GoogleDirectoryService, 'GetUser')
  @patch.object(MOCK_SERVICE.members.list, 'execute')
  @patch.object(MOCK_SERVICE.members, 'list')
  @patch.object(MOCK_SERVICE, 'members')
  def testGetUsersByGroupKey(self, mock_members, mock_list, mock_execute,
                             mock_get_user):
    """Test get users by group key handles a valid response correctly."""
    fake_dictionary = {}
    fake_dictionary['members'] = FAKE_GROUP
    mock_execute.return_value = fake_dictionary
    mock_list.return_value.execute = mock_execute
    mock_members.return_value.list = mock_list
    # NOTE(review): the members mock is assigned to `.users`, not `.members`;
    # presumably GetUsersByGroupKey goes through self.service — confirm.
    self.directory_service.users = mock_members
    # Nested GROUP members are expected to be skipped; only USERs resolved.
    expected_list = [FAKE_GROUP_MEMBER_USER_1, FAKE_GROUP_MEMBER_USER_2]
    def SideEffect(user_key):
      """Mock get user function to return different users after group get."""
      if user_key == FAKE_ID_1:
        return FAKE_GROUP_MEMBER_USER_1
      else:
        return FAKE_GROUP_MEMBER_USER_2
    mock_get_user.side_effect = SideEffect
    users_returned = self.directory_service.GetUsersByGroupKey(FAKE_GROUP_KEY)
    mock_members.assert_called_once_with()
    mock_list.assert_called_once_with(groupKey=FAKE_GROUP_KEY)
    mock_execute.assert_called_once_with(num_retries=NUM_RETRIES)
    self.assertEqual(users_returned, expected_list)
  @patch.object(GoogleDirectoryService, 'GetUser')
  @patch.object(MOCK_SERVICE.members.list, 'execute')
  @patch.object(MOCK_SERVICE.members, 'list')
  @patch.object(MOCK_SERVICE, 'members')
  def testGetUsersByGroupKeyPaged(self, mock_members, mock_list, mock_execute,
                                  mock_get_user):
    """Test get users by group key handles a long valid response correctly."""
    fake_dictionary_1 = {}
    fake_dictionary_1['members'] = FAKE_GROUP
    fake_dictionary_1['nextPageToken'] = FAKE_PAGE_TOKEN
    fake_dictionary_2 = {}
    fake_dictionary_2['members'] = FAKE_GROUP
    expected_list = [FAKE_GROUP_MEMBER_USER_1, FAKE_GROUP_MEMBER_USER_2,
                     FAKE_GROUP_MEMBER_USER_1, FAKE_GROUP_MEMBER_USER_2]
    def SideEffect1(groupKey, pageToken=''):
      """Mock list function to return different mock execute calls."""
      # pylint: disable=unused-argument
      # pylint: disable=invalid-name
      if pageToken == '':
        mock_execute.return_value = fake_dictionary_1
      else:
        mock_execute.return_value = fake_dictionary_2
      some_object = MagicMock()
      some_object.execute = mock_execute
      return some_object
    mock_list.side_effect = SideEffect1
    mock_members.return_value.list = mock_list
    self.directory_service.users = mock_members
    def SideEffect2(user_key):
      """Mock get user function to return different users after group get."""
      if user_key == FAKE_ID_1:
        return FAKE_GROUP_MEMBER_USER_1
      else:
        return FAKE_GROUP_MEMBER_USER_2
    mock_get_user.side_effect = SideEffect2
    users_returned = self.directory_service.GetUsersByGroupKey(FAKE_GROUP_KEY)
    mock_members.assert_any_call()
    mock_list.assert_any_call(groupKey=FAKE_GROUP_KEY)
    mock_list.assert_any_call(groupKey=FAKE_GROUP_KEY,
                              pageToken=FAKE_PAGE_TOKEN)
    mock_execute.assert_any_call(num_retries=NUM_RETRIES)
    self.assertEqual(users_returned, expected_list)
  @patch.object(MOCK_SERVICE.users.get, 'execute')
  @patch.object(MOCK_SERVICE.users, 'get')
  @patch.object(MOCK_SERVICE, 'users')
  def testGetUser(self, mock_users, mock_get, mock_execute):
    """Test the get user request handles a valid response correctly."""
    mock_execute.return_value = FAKE_USER_1
    mock_get.return_value.execute = mock_execute
    mock_users.return_value.get = mock_get
    self.directory_service.users = mock_users
    user_returned = self.directory_service.GetUser(FAKE_ID_1)
    mock_users.assert_called_once_with()
    mock_get.assert_called_once_with(userKey=FAKE_ID_1, projection='full')
    mock_execute.assert_called_once_with(num_retries=NUM_RETRIES)
    self.assertEqual(user_returned, FAKE_USER_1)
  @patch.object(GoogleDirectoryService, 'GetUser')
  def testGetUserAsList(self, mock_get_user):
    """Test get user as list turns a valid response into a list."""
    mock_get_user.return_value = FAKE_USER_1
    user_list_returned = self.directory_service.GetUserAsList(FAKE_ID_1)
    mock_get_user.assert_called_once_with(FAKE_ID_1)
    self.assertEqual(user_list_returned, [FAKE_USER_1])
  @patch.object(GoogleDirectoryService, 'GetUser')
  def testIsAdminUser(self, mock_get_user):
    """Test is admin user returns whether a user is an admin."""
    def SideEffect(user_key):
      """Mock get user function to return different users based on key."""
      if user_key == FAKE_ID_1:
        return FAKE_USER_1
      else:
        return FAKE_USER_2
    mock_get_user.side_effect = SideEffect
    boolean_returned = self.directory_service.IsAdminUser(FAKE_ID_1)
    mock_get_user.assert_called_with(FAKE_ID_1)
    self.assertEqual(boolean_returned, True)
    boolean_returned = self.directory_service.IsAdminUser(FAKE_ID_2)
    mock_get_user.assert_called_with(FAKE_ID_2)
    self.assertEqual(boolean_returned, False)
  @patch('datastore.NotificationChannel.Insert')
  @patch.object(MOCK_SERVICE.users.watch, 'execute')
  @patch.object(MOCK_SERVICE.users, 'watch')
  @patch.object(MOCK_SERVICE, 'users')
  @patch('google_directory_service.time')
  @patch('datastore.NotificationChannel.GetAll')
  def testWatchUsers(self, mock_get_all, mock_time, mock_users, mock_watch,
                     mock_execute, mock_insert):
    """Test watch users requests a channel then inserts into the datastore."""
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-arguments
    fake_resource_id = 'some resource id'
    fake_watch_result = {}
    fake_watch_result['resourceId'] = fake_resource_id
    mock_execute.return_value = fake_watch_result
    mock_watch.return_value.execute = mock_execute
    mock_users.return_value.watch = mock_watch
    self.directory_service.users = mock_users
    # time() is mocked so the generated channel id is deterministic.
    fake_time = 1.001
    fake_time_in_millis = '1001'
    mock_time.return_value = fake_time
    invalid_event = 'foo'
    fake_event_already_watched = 'add'
    fake_event_not_watched = 'delete'
    fake_channel = MagicMock(event=fake_event_already_watched)
    mock_get_all.return_value = [fake_channel]
    # Test with an invalid event, which should just return.
    self.directory_service.WatchUsers(invalid_event)
    mock_get_all.assert_not_called()
    mock_time.assert_not_called()
    mock_users.assert_not_called()
    mock_watch.assert_not_called()
    mock_execute.assert_not_called()
    mock_insert.assert_not_called()
    # Test with an event already in the datastore, which should just return.
    self.directory_service.WatchUsers(fake_event_already_watched)
    mock_get_all.assert_any_call()
    mock_time.assert_not_called()
    mock_users.assert_not_called()
    mock_watch.assert_not_called()
    mock_execute.assert_not_called()
    mock_insert.assert_not_called()
    # Test with an event not in the datastore, which should flow through.
    self.directory_service.WatchUsers(fake_event_not_watched)
    fake_body = {}
    fake_body['id'] = (MY_CUSTOMER_ALIAS + '_' + fake_event_not_watched + '_' +
                       fake_time_in_millis)
    fake_body['type'] = 'web_hook'
    fake_address = (JINJA_ENVIRONMENT.globals['BASE_URL'] +
                    PATHS['receive_push_notifications'])
    fake_body['address'] = fake_address
    mock_get_all.assert_any_call()
    mock_time.assert_called_once_with()
    mock_users.assert_called_once_with()
    mock_watch.assert_called_once_with(customer=MY_CUSTOMER_ALIAS,
                                       event=fake_event_not_watched,
                                       projection='full', orderBy='email',
                                       body=fake_body)
    mock_execute.assert_called_once_with(num_retries=NUM_RETRIES)
    mock_insert.assert_called_once_with(event=fake_event_not_watched,
                                        channel_id=fake_body['id'],
                                        resource_id=fake_resource_id)
  @patch('datastore.NotificationChannel.Delete')
  @patch.object(MOCK_SERVICE.channels.stop, 'execute')
  @patch.object(MOCK_SERVICE.channels, 'stop')
  @patch.object(MOCK_SERVICE, 'channels')
  def testStopNotifications(self, mock_channels, mock_stop, mock_execute,
                            mock_delete):
    """Test stop notifications requests with stop then deletes the channel."""
    fake_id = 'foobarbaz'
    fake_key = MagicMock(id=fake_id)
    fake_channel_id = 'some channel id'
    fake_resource_id = 'some resource id'
    fake_notification_channel = MagicMock(channel_id=fake_channel_id,
                                          resource_id=fake_resource_id,
                                          key=fake_key)
    fake_body = {}
    fake_body['id'] = fake_channel_id
    fake_body['resourceId'] = fake_resource_id
    mock_stop.return_value.execute = mock_execute
    mock_channels.return_value.stop = mock_stop
    self.directory_service.channels = mock_channels
    self.directory_service.StopNotifications(fake_notification_channel)
    mock_channels.assert_called_once_with()
    mock_stop.assert_called_once_with(body=fake_body)
    mock_execute.assert_called_once_with(num_retries=NUM_RETRIES)
    mock_delete.assert_called_once_with(fake_id)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import inspect
from xml.etree import ElementTree as ET
from ..extern import six
# OrderedDict compat (Python 2.7+ and 3.1+)
try:
from collections import OrderedDict
except ImportError:
from ..extern.ordered_dict import OrderedDict
# ArgParse compat (Python 2.7+ and 3.2+)
try:
import argparse
except ImportError:
from ..extern import argparse
# ElementTree compat (Python 2.7+ and 3.3+)
# Old ElementTree builds expose Element as a factory function; fall back to
# the private _Element class when Element is not a real class.
ET_Element = ET.Element if isinstance(ET.Element, six.class_types) else ET._Element
# iter() replaced the deprecated getiterator() in Python 2.7/3.2; prefer it.
ET_Tree_iter = ET.ElementTree.iter if hasattr(ET.ElementTree, 'iter') else ET.ElementTree.getiterator
ET_Element_iter = ET_Element.iter if hasattr(ET_Element, 'iter') else ET_Element.getiterator
# ET.ParseError exists on Python 2.7+/3.3+; None signals "not available".
ET_ParseError = ET.ParseError if hasattr(ET, 'ParseError') else None
# signature.bind(...).arguments compat (Python 3.3+)
def bind_arguments(func, *args, **kwargs):
    """Map *args/**kwargs onto func's parameter names.

    Returns a mapping of explicitly-bound argument names to values
    (parameters left at their defaults are omitted). Uses
    inspect.signature on Python 3.3+, falling back to
    inspect.getcallargs on older interpreters.
    """
    try:
        # Python 3.3+: Signature.bind already omits unbound defaults.
        signature = inspect.signature(func)
        arguments = signature.bind(*args, **kwargs).arguments
    except AttributeError:
        # Python 2.7+
        try:
            arguments = inspect.getcallargs(func, *args, **kwargs)
            # NOTE(review): this "defaults" probe binds the empty tuple () to
            # every parameter rather than the real default values, so the
            # pruning below only drops arguments whose value equals () —
            # verify against the py2 call sites before relying on it.
            defaults = inspect.getcallargs(func, (), ())
            # Iterate a snapshot of the keys: deleting from a dict while
            # iterating its live key view raises RuntimeError on Python 3.
            for arg in list(arguments.keys()):
                if (defaults[arg] == arguments[arg]) and (arg not in kwargs):
                    del arguments[arg]
        except AttributeError:
            # inspect.getcallargs unavailable: best-effort, keywords only.
            arguments = kwargs
    return arguments
|
import os
import errno
class MakeDirectory:
    """Compose an output directory name '<prefix>_<suffix...>/' and create it."""

    def __init__(self, prefix, suffix):
        self.prefix = prefix      # base path/name the suffix parts are appended to
        self.suffix = suffix      # single value, or tuple of values, each appended as '_<value>'
        self.folder_out = None    # cached result of get_dir_name()

    def get_dir_name(self, print_dir):
        """Build the directory name, optionally echo it, cache and return it.

        Bug fix: the non-tuple branch previously *replaced* the prefix
        (`self.prefix = "_" + ...`) instead of appending to it, silently
        discarding the configured parent path. Tuples of any length
        (including one element) now go through the same append loop.
        """
        if isinstance(self.suffix, tuple):
            for part in self.suffix:
                self.prefix += "_" + str(part)
        else:
            self.prefix += "_" + str(self.suffix)
        folder_out = self.prefix + "/"
        if print_dir is True:
            print("============folder out:============\n{}"
                  "\n===================================".format(folder_out))
        self.folder_out = folder_out
        return folder_out

    def make_dir(self):
        """Create folder_out; ignore 'already exists', re-raise other OS errors."""
        try:
            os.makedirs(self.folder_out)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
# Example usage: build '<parent>/hw3_q2_taxi_54/' and create it on disk.
# NOTE(review): hard-coded Windows-style absolute path; the mkdir side effect
# runs at import time — confirm this module is only ever run as a script.
PARENT_PATH = 'D:/OH YEAH/'
LAYERS = 54
dir_manager = MakeDirectory(prefix=PARENT_PATH+'hw3_q2_taxi', suffix=LAYERS)
FOLDER_OUT = dir_manager.get_dir_name(print_dir=True)
dir_manager.make_dir()
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Leon Jacobs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from os import path
import click
def generate_template(destination):
    """
    Write a copy of the GNU Radio template to
    a new file.

    :param destination: path of the file to create
    :return: None
    """
    template_store = '/../share'
    template_file = '/template.grc'
    # Resolve the path to the source template relative to this module.
    source_template = path.realpath(
        path.abspath(path.dirname(__file__) + template_store)) + template_file
    click.secho('Writing a GNU Radio template XML to: {}'.format(destination), fg='green')
    # Open the template.
    with open(source_template, 'r') as s:
        # And write it to the new destination.
        with open(destination, 'w') as d:
            # Stream line by line; iterating the file object directly avoids
            # materializing the whole template in memory as readlines() did.
            for line in s:
                d.write(line)
    click.secho('Done.')
|
import torch
from retriever import UnifiedRetriever
from data_retriever import ZeshelDataset, transform_entities, load_data, \
get_all_entity_hiddens, get_hard_negative, \
Data
from util import Logger
import argparse
import numpy as np
import os
import random
import torch.nn as nn
from transformers import BertTokenizer, BertModel, AdamW, \
get_linear_schedule_with_warmup, get_constant_schedule, RobertaTokenizer, \
RobertaModel
from torch.utils.data import DataLoader
from tqdm import tqdm
def set_seeds(args):
    """Seed every RNG used in training (python, numpy, torch) from args.seed."""
    seed_value = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed_value)
def configure_optimizer(args, model, num_train_examples):
    """Build AdamW with decoupled weight decay plus a linear warmup schedule.

    Bias and LayerNorm weights are excluded from weight decay, following
    https://github.com/google-research/bert/blob/master/optimization.py#L25
    Returns (optimizer, scheduler, num_train_steps, num_warmup_steps).
    """
    skip_decay = ['bias', 'LayerNorm.weight']
    decay_params, no_decay_params = [], []
    for name, param in model.named_parameters():
        if any(marker in name for marker in skip_decay):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    grouped_parameters = [
        {'params': decay_params, 'weight_decay': args.weight_decay},
        {'params': no_decay_params, 'weight_decay': 0.0},
    ]
    optimizer = AdamW(grouped_parameters, lr=args.lr, eps=args.adam_epsilon)
    steps_per_run = num_train_examples / args.B / args.gradient_accumulation_steps
    num_train_steps = int(steps_per_run * args.epochs)
    num_warmup_steps = int(num_train_steps * args.warmup_proportion)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=num_warmup_steps,
        num_training_steps=num_train_steps)
    return optimizer, scheduler, num_train_steps, num_warmup_steps
def configure_optimizer_simple(args, model, num_train_examples):
    """Plain Adam with a constant LR schedule and zero warmup steps.

    Returns (optimizer, scheduler, num_train_steps, num_warmup_steps).
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    steps_per_run = num_train_examples / args.B / args.gradient_accumulation_steps
    num_train_steps = int(steps_per_run * args.epochs)
    num_warmup_steps = 0
    scheduler = get_constant_schedule(optimizer)
    return optimizer, scheduler, num_train_steps, num_warmup_steps
def evaluate(mention_loader, model, all_candidates_embeds, k, device,
             len_en_loader,
             too_large=False, en_hidden_path=None):
    """Score every mention batch against the candidate entities.

    When too_large is False the full candidate-embedding tensor is attached
    to the model once; otherwise pre-computed entity hidden states are
    streamed from en_hidden_path in len_en_loader chunks and the per-chunk
    scores concatenated. Returns (recall@k, accuracy). The model's
    evaluate_on/candidates_embeds state is cleared before returning.
    """
    model.eval()
    if not too_large:
        # Attach candidate embeddings once; unwrap DataParallel if present.
        if hasattr(model, 'module'):
            model.module.evaluate_on = True
            model.module.candidates_embeds = all_candidates_embeds
        else:
            model.evaluate_on = True
            model.candidates_embeds = all_candidates_embeds
    nb_samples = 0
    r_k = 0
    acc = 0
    with torch.no_grad():
        for i, batch in tqdm(enumerate(mention_loader)):
            if not too_large:
                scores = model(batch[0], batch[1], None, None)
            else:
                # Stream entity hidden states chunk by chunk from disk.
                scores = []
                for j in range(len_en_loader):
                    file_path = os.path.join(en_hidden_path,
                                             'en_hiddens_%s.pt' % j)
                    en_embeds = torch.load(file_path)
                    if hasattr(model, 'module'):
                        model.module.evaluate_on = True
                        model.module.candidates_embeds = en_embeds
                    else:
                        model.evaluate_on = True
                        model.candidates_embeds = en_embeds
                    score = model(batch[0], batch[1], None,
                                  None).detach()
                    scores.append(score)
                scores = torch.cat(scores, dim=1)
            labels = batch[2].to(device)
            top_k = scores.topk(k, dim=1)[1]
            # Top-1 prediction for accuracy; full top-k for recall@k.
            preds = top_k[:, 0]
            r_k += (top_k == labels.to(device)).sum().item()
            nb_samples += scores.size(0)
            acc += (preds == labels.squeeze(1).to(device)).sum().item()
    r_k /= nb_samples
    acc /= nb_samples
    # Restore the model to training-time scoring mode.
    if hasattr(model, 'module'):
        model.module.evaluate_on = False
        model.module.candidates_embeds = None
    else:
        model.evaluate_on = False
        model.candidates_embeds = None
    return r_k, acc
def count_parameters(model):
    """Return the total element count of all trainable (requires_grad) parameters."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def main(args):
    """Train the UnifiedRetriever on Zeshel-style data, validate per epoch,
    checkpoint the best model, then evaluate the checkpoint on the test set."""
    set_seeds(args)
    # configure logger
    logger = Logger(args.model + '.log', True)
    logger.log(str(args))
    # load data and initialize model and dataset
    # NOTE(review): 'heldout_train_unseen_doc' appears twice in this unpack;
    # the second binding shadows the first — verify against load_data's order.
    samples_train, samples_heldout_train_seen, \
        samples_heldout_train_unseen, samples_val, samples_test, \
        train_doc, heldout_train_doc, heldout_train_unseen_doc, \
        heldout_train_unseen_doc, val_doc, test_doc = load_data(
        args.data_dir)
    # Split the candidate budget between random and hard negatives.
    num_rands = int(args.num_cands * args.cands_ratio)
    num_hards = args.num_cands - num_rands
    # get model and tokenizer
    if args.pre_model == 'Bert':
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        encoder = BertModel.from_pretrained('bert-base-uncased')
    elif args.pre_model == 'Roberta':
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        encoder = RobertaModel.from_pretrained('roberta-base')
    else:
        raise ValueError('wrong encoder type')
    # encoder=MLPEncoder(args.max_len)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Map the model type onto attention style and vector counts.
    if args.type_model == 'poly':
        attention_type = 'soft_attention'
    else:
        attention_type = 'hard_attention'
    if args.type_model == 'dual':
        num_mention_vecs = 1
        num_entity_vecs = 1
    elif args.type_model == 'multi_vector':
        num_mention_vecs = 1
        num_entity_vecs = args.num_entity_vecs
    else:
        num_mention_vecs = args.num_mention_vecs
        num_entity_vecs = args.num_entity_vecs
    model = UnifiedRetriever(encoder, device, num_mention_vecs, num_entity_vecs,
                             args.mention_use_codes, args.entity_use_codes,
                             attention_type, None, False)
    if args.resume_training:
        cpt = torch.load(args.model) if device.type == 'cuda' \
            else torch.load(args.model, map_location=torch.device('cpu'))
        model.load_state_dict(cpt['sd'])
    dp = torch.cuda.device_count() > 1
    if dp:
        logger.log('Data parallel across {:d} GPUs {:s}'
                   ''.format(len(args.gpus.split(',')), args.gpus))
        model = nn.DataParallel(model)
    model.to(device)
    # Pre-tokenize all entity descriptions once per split.
    logger.log('transform train entities')
    all_train_entity_token_ids, all_train_masks = transform_entities(train_doc,
                                                                     args.max_len,
                                                                     tokenizer)
    logger.log('transform valid and test entities')
    all_val_entity_token_ids, all_val_masks = transform_entities(val_doc,
                                                                 args.max_len,
                                                                 tokenizer)
    all_test_entity_token_ids, all_test_masks = transform_entities(test_doc,
                                                                   args.max_len,
                                                                   tokenizer)
    data = Data(train_doc, val_doc, test_doc, tokenizer,
                all_train_entity_token_ids, all_train_masks,
                all_val_entity_token_ids, all_val_masks,
                all_test_entity_token_ids, all_test_masks, args.max_len,
                samples_train, samples_val, samples_test)
    train_en_loader, val_en_loader, test_en_loader, train_men_loader, \
        val_men_loader, test_men_loader = data.get_loaders(args.mention_bsz,
                                                           args.entity_bsz)
    model.train()
    # configure optimizer
    num_train_samples = len(samples_train)
    if args.simpleoptim:
        optimizer, scheduler, num_train_steps, num_warmup_steps \
            = configure_optimizer_simple(args, model, num_train_samples)
    else:
        optimizer, scheduler, num_train_steps, num_warmup_steps \
            = configure_optimizer(args, model, num_train_samples)
    if args.resume_training:
        optimizer.load_state_dict(cpt['opt_sd'])
        scheduler.load_state_dict(cpt['scheduler_sd'])
    # train
    logger.log('***** train *****')
    logger.log('# train samples: {:d}'.format(num_train_samples))
    logger.log('# epochs: {:d}'.format(args.epochs))
    logger.log(' batch size: {:d}'.format(args.B))
    logger.log(' gradient accumulation steps {:d}'
               ''.format(args.gradient_accumulation_steps))
    logger.log(
        ' effective training batch size with accumulation: {:d}'
        ''.format(args.B * args.gradient_accumulation_steps))
    logger.log(' # training steps: {:d}'.format(num_train_steps))
    logger.log(' # warmup steps: {:d}'.format(num_warmup_steps))
    logger.log(' learning rate: {:g}'.format(args.lr))
    logger.log(' # parameters: {:d}'.format(count_parameters(model)))
    # start_time = datetime.now()
    step_num = 0
    tr_loss, logging_loss = 0.0, 0.0
    if args.resume_training:
        step_num = cpt['step_num']
        tr_loss, logging_loss = cpt['tr_loss'], cpt['logging_loss']
    model.zero_grad()
    best_val_perf = float('-inf')
    start_epoch = 1
    if args.resume_training:
        start_epoch = cpt['epoch'] + 1
    for epoch in range(start_epoch, args.epochs + 1):
        logger.log('\nEpoch {:d}'.format(epoch))
        # Decide how negatives are mined this epoch based on candidate type.
        if args.type_cands == 'hard_adjusted_negative':
            distribution_sampling = True
            adjust_logits = True
            num_cands = args.num_cands
        elif args.type_cands == 'hard_negative':
            distribution_sampling = True
            adjust_logits = False
            num_cands = args.num_cands
        elif args.type_cands == 'mixed_negative':
            distribution_sampling = False
            adjust_logits = False
            num_cands = num_hards
        else:
            raise ValueError('type candidates wrong')
        if args.type_cands == 'mixed_negative' and num_hards == 0:
            candidates = None
        else:
            # Refresh entity embeddings and mine hard negatives each epoch.
            all_train_cands_embeds = get_all_entity_hiddens(train_en_loader,
                                                            model,
                                                            args.store_en_hiddens,
                                                            args.en_hidden_path)
            candidates = get_hard_negative(train_men_loader, model, num_cands,
                                           len(train_en_loader), device,
                                           distribution_sampling,
                                           args.exclude_golds,
                                           args.store_en_hiddens,
                                           all_train_cands_embeds,
                                           args.en_hidden_path,
                                           adjust_logits, args.smoothing_value)
        train_set = ZeshelDataset(tokenizer, samples_train, train_doc,
                                  args.max_len,
                                  candidates, device, num_rands,
                                  args.type_cands,
                                  all_train_entity_token_ids,
                                  all_train_masks)
        train_loader = DataLoader(train_set, args.B, shuffle=True,
                                  drop_last=False)
        for step, batch in enumerate(train_loader):
            model.train()
            loss = model(*batch)[0]
            if len(args.gpus) > 1:
                loss = loss.mean()
            loss.backward()
            tr_loss += loss.item()
            # print('loss is %f' % loss.item())
            # Step the optimizer only every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                step_num += 1
                if step_num % args.logging_steps == 0:
                    avg_loss = (tr_loss - logging_loss) / args.logging_steps
                    logger.log('Step {:10d}/{:d} | Epoch {:3d} | '
                               'Batch {:5d}/{:5d} | '
                               'Average Loss {:8.4f}'
                               ''.format(step_num, num_train_steps,
                                         epoch, step + 1,
                                         len(train_loader), avg_loss))
                    logging_loss = tr_loss
        # eval_train_result = evaluate(train_loader, model, args.k,device)[0]
        all_val_cands_embeds = get_all_entity_hiddens(val_en_loader, model,
                                                      args.store_en_hiddens,
                                                      args.en_hidden_path)
        eval_result = evaluate(val_men_loader, model, all_val_cands_embeds,
                               args.k, device, len(val_en_loader),
                               args.store_en_hiddens, args.en_hidden_path)
        logger.log('Done with epoch {:3d} | train loss {:8.4f} | '
                   'validation recall {:8.4f}'
                   '|validation accuracy {:8.4f}'.format(
            epoch,
            tr_loss / step_num,
            eval_result[0],
            eval_result[1]
        ))
        # NOTE(review): best_val_perf is always updated with eval_result[0]
        # (recall) even when eval_criterion == 'accuracy' — confirm intent.
        save_model = (eval_result[0] >= best_val_perf) if args.eval_criterion \
                                                          == 'recall' else \
            (eval_result[1] >= best_val_perf)
        if save_model:
            logger.log('------- new best val perf: {:g} --> {:g} '
                       ''.format(best_val_perf, eval_result[0]))
            best_val_perf = eval_result[0]
            torch.save({'opt': args,
                        'sd': model.module.state_dict() if dp else model.state_dict(),
                        'perf': best_val_perf, 'epoch': epoch,
                        'opt_sd': optimizer.state_dict(),
                        'scheduler_sd': scheduler.state_dict(),
                        'tr_loss': tr_loss, 'step_num': step_num,
                        'logging_loss': logging_loss},
                       args.model)
        else:
            logger.log('')
        # update dataset and dataloader
        # torch.cuda.empty_cache()
    # torch.cuda.empty_cache()
    # test model on test dataset
    # Reload the best checkpoint into a freshly constructed model.
    package = torch.load(args.model) if device.type == 'cuda' \
        else torch.load(args.model, map_location=torch.device('cpu'))
    new_state_dict = package['sd']
    # encoder=MLPEncoder(args.max_len)
    if args.pre_model == 'Bert':
        encoder = BertModel.from_pretrained('bert-base-uncased')
    elif args.pre_model == 'Roberta':
        encoder = RobertaModel.from_pretrained('roberta-base')
    else:
        raise ValueError('wrong encoder type')
    model = UnifiedRetriever(encoder, device, num_mention_vecs, num_entity_vecs,
                             args.mention_use_codes, args.entity_use_codes,
                             attention_type, None, False)
    model.load_state_dict(new_state_dict)
    if dp:
        logger.log('Data parallel across {:d} GPUs {:s}'
                   ''.format(len(args.gpus.split(',')), args.gpus))
        model = nn.DataParallel(model)
    model.to(device)
    model.eval()
    all_test_cands_embeds = get_all_entity_hiddens(test_en_loader, model,
                                                   args.store_en_hiddens,
                                                   args.en_hidden_path)
    test_result = evaluate(test_men_loader, model, all_test_cands_embeds,
                           args.k, device, len(test_en_loader),
                           args.store_en_hiddens, args.en_hidden_path)
    logger.log(' test recall@{:d} : {:8.4f}'
               '| test accuracy : {:8.4f}'.format(args.k, test_result[0],
                                                  test_result[1]))
    # test_train_result = evaluate(train_loader, model,args)
    # logger.log('test train acc {:f}'.format(test_train_result))
# Command-line entry point: parse training hyper-parameters and dispatch.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str,
                        help='model path')
    parser.add_argument('--resume_training', action='store_true',
                        help='resume training from checkpoint?')
    parser.add_argument('--max_len', type=int, default=128,
                        help='max length of the mention input '
                             'and the entity input')
    parser.add_argument('--num_hards', type=int, default=10,
                        help='the number of the nearest neighbors we use to '
                             'construct hard negatives')
    parser.add_argument('--type_cands', type=str,
                        default='mixed_negative',
                        choices=['mixed_negative',
                                 'hard_negative',
                                 'hard_adjusted_negative'],
                        help='the type of negative we use during training')
    parser.add_argument('--data_dir', type=str,
                        help='the data directory')
    parser.add_argument('--B', type=int, default=128,
                        help='the batch size')
    parser.add_argument('--lr', type=float, default=2e-5,
                        choices=[5e-6, 1e-5, 2e-5, 5e-5, 2e-4, 5e-4, 0.002,
                                 0.001],
                        help='the learning rate')
    parser.add_argument('--epochs', type=int, default=3,
                        help='the number of training epochs')
    parser.add_argument('--k', type=int, default=64,
                        help='recall@k when evaluate')
    parser.add_argument('--warmup_proportion', type=float, default=0.1,
                        help='proportion of training steps to perform linear '
                             'learning rate warmup for [%(default)g]')
    parser.add_argument('--weight_decay', type=float, default=0.01,
                        help='weight decay [%(default)g]')
    parser.add_argument('--adam_epsilon', type=float, default=1e-6,
                        help='epsilon for Adam optimizer [%(default)g]')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help='num gradient accumulation steps [%(default)d]')
    parser.add_argument('--seed', type=int, default=42,
                        help='random seed [%(default)d]')
    parser.add_argument('--num_workers', type=int, default=0,
                        help='num workers [%(default)d]')
    parser.add_argument('--simpleoptim', action='store_true',
                        help='simple optimizer (constant schedule, '
                             'no weight decay?')
    parser.add_argument('--clip', type=float, default=1,
                        help='gradient clipping [%(default)g]')
    parser.add_argument('--logging_steps', type=int, default=1000,
                        help='num logging steps [%(default)d]')
    parser.add_argument('--gpus', default='', type=str,
                        help='GPUs separated by comma [%(default)s]')
    parser.add_argument('--eval_criterion', type=str, default='recall',
                        choices=['recall', 'accuracy'],
                        help='the criterion for selecting model')
    parser.add_argument('--pre_model', default='Bert',
                        choices=['Bert', 'Roberta'],
                        type=str, help='the encoder for train')
    parser.add_argument('--cands_ratio', default=1.0, type=float,
                        help='the ratio between random candidates and hard '
                             'candidates')
    parser.add_argument('--num_cands', default=128, type=int,
                        help='the total number of candidates')
    parser.add_argument('--smoothing_value', default=1, type=float,
                        help=' smoothing factor when sampling negatives '
                             'according to model distribution')
    parser.add_argument('--eval_batchsize', type=int, default=512,
                        help='the batch size')
    parser.add_argument('--mention_bsz', type=int, default=512,
                        help='the batch size')
    parser.add_argument('--entity_bsz', type=int, default=512,
                        help='the batch size')
    parser.add_argument('--exclude_golds', action='store_true',
                        help='exclude golds when sampling?')
    parser.add_argument('--type_model', type=str,
                        default='dual',
                        choices=['dual',
                                 'sum_max',
                                 'multi_vector',
                                 'poly'],
                        help='the type of model')
    parser.add_argument('--num_mention_vecs', type=int, default=8,
                        help='the number of mention vectors ')
    parser.add_argument('--num_entity_vecs', type=int, default=8,
                        help='the number of entity vectors ')
    parser.add_argument('--mention_use_codes', action='store_true',
                        help='use codes for mention embeddings?')
    parser.add_argument('--entity_use_codes', action='store_true',
                        help='use codes for entity embeddings?')
    parser.add_argument('--en_hidden_path', type=str,
                        help='all entity hidden states path')
    parser.add_argument('--store_en_hiddens', action='store_true',
                        help='store entity hiddens?')
    args = parser.parse_args()
    # Set environment variables before all else.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus  # Sets torch.cuda behavior
    main(args)
|
#!/usr/bin/env python3
import argparse
import random
import biotools as bt
# setup
parser = argparse.ArgumentParser(
    description='Generates random FASTA files.')
# required arguments
parser.add_argument('--count', required=True, type=int,
    metavar='<int>', help='number of sequences')
parser.add_argument('--min', required=True, type=int,
    metavar='<int>', help='minimum sequence length')
parser.add_argument('--max', required=True, type=int,
    metavar='<int>', help='maximum sequence length')
parser.add_argument('--gc', required=True, type=float,
    metavar='<float>', help='the gc composition')
# optional arguments
# BUG FIX: --prefix's help text was a copy-paste of --count's
# ('number of sequences'); it actually names the FASTA record prefix.
parser.add_argument('--prefix', required=False, type=str, default='seq',
    metavar='<str>', help='sequence name prefix')
parser.add_argument('--verbose', action='store_true',
    help='on/off switch')
# finalization
arg = parser.parse_args()
# Emit one FASTA record per requested sequence: ">prefix-i" then the sequence.
for i in range(arg.count):
    print(f'>{arg.prefix}-{i}')
    slen = random.randint(arg.min, arg.max)
    print(bt.randseq(slen, arg.gc))
|
"""
@name: PyHouse/src/Modules/Computer/Web/_test/test_web_schedules.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2016-2017 by D. Brian Kimmel
@license: MIT License
@note: Created on Nov 23, 2016
@summary: Test
"""
__updated__ = '2020-02-14'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
from twisted.internet.defer import succeed
from twisted.web import server
from twisted.web.test.test_web import DummyRequest
# Import PyMh files and modules.
from test.xml_data import XML_LONG, TESTING_PYHOUSE
from test.testing_mixin import SetupPyHouseObj
from Modules.Housing.test.xml_housing import \
TESTING_HOUSE_NAME, \
TESTING_HOUSE_ACTIVE, \
TESTING_HOUSE_KEY, \
TESTING_HOUSE_UUID
# from Modules.Core.Utilities.debug_tools import PrettyFormatAny
# JSON-ish schedule payload used by the web tests.
# NOTE(review): keys are deliberately unquoted, so this is not strict JSON —
# confirm the consumer tolerates that.
# BUG FIX: the entry after Name ended with a period ("Evening-05".) instead of
# the comma every other entry uses.
JSON = '{ Active : false, \
DayOfWeek : 127, \
Key : 5, \
Level : 50, \
LightName : "MBR Rope", \
LightUUID : "1efbce9e-4618-11e6-89e7-74da3859e09a", \
Name :"Evening-05", \
Rate : 0, \
RoomName : "Master Bed", \
RoomUUID : "1efc19d0-4618-11e6-89e7-74da3859e09a", \
ScheduleMode : "Home", \
ScheduleType : "Lighting", \
Time : "sunset + 00:15", \
UUID : null, \
_AddFlag : false, \
_DeleteFlag : false \
}'
# The same kind of schedule expressed as a parsed dict: every value is a
# string, exactly as posted by the browser form.
JSON2 = {"Add": "false",
         "Delete": "false",
         "Name": "Schedule 0",
         "Key": "0",
         "Active": "true",
         "ScheduleType": "Lighting",
         "Time": "13:34",
         "DayOfWeek": "127",
         "ScheduleMode": "Home",
         "Level": "100",
         "Rate": "0",
         "RoomName": "Master Bath",
         "LightName": "Light, Insteon (xml_lights)"}
class SetupMixin(object):
    """Shared scaffolding: builds the PyHouse object and XML fixture."""

    def setUp(self, p_root):
        builder = SetupPyHouseObj()
        self.m_pyhouse_obj = builder.BuildPyHouseObj(p_root)
        self.m_xml = SetupPyHouseObj().BuildXml(p_root)
class SmartDummyRequest(DummyRequest):
    """DummyRequest that also records the method, headers and GET/POST args."""

    def __init__(self, method, url, args=None, headers=None):
        DummyRequest.__init__(self, url.split('/'))
        self.method = method
        self.headers.update(headers or {})
        # Register each supplied argument on the fake request.
        for key, val in (args or {}).items():
            self.addArg(key, val)

    def value(self):
        """Return everything written to this request as one string."""
        return "".join(self.written)
class DummySite(server.Site):
    """In-memory twisted Site: renders resources for fake GET/POST requests
    without opening a real network listener.
    """

    def get(self, url, args=None, headers=None):
        """Render *url* as a GET; returns a Deferred firing with the request."""
        return self._request("GET", url, args, headers)

    def post(self, url, args=None, headers=None):
        """Render *url* as a POST; returns a Deferred firing with the request."""
        return self._request("POST", url, args, headers)

    def _request(self, method, url, args, headers):
        # Build a fake request, resolve it against the resource tree, render.
        request = SmartDummyRequest(method, url, args, headers)
        resource = self.getResourceFor(request)
        result = resource.render(request)
        return self._resolveResult(request, result)

    def _resolveResult(self, request, result):
        # Synchronous render: the body was returned directly.
        if isinstance(result, str):
            request.write(result)
            request.finish()
            return succeed(request)
        elif result is server.NOT_DONE_YET:
            # Asynchronous render: wait until the resource finishes the request.
            if request.finished:
                return succeed(request)
            else:
                return request.notifyFinish().addCallback(lambda _: request)
        else:
            raise ValueError("Unexpected return value: %r" % (result,))
class A0(unittest.TestCase):
    """Smoke test that only identifies this test module in the output."""

    def setUp(self):
        pass

    def test_00_Print(self):
        # Printed so the test runner log shows which module is executing.
        print('Id: test_web_schedules')
class A1_Setup(SetupMixin, unittest.TestCase):
    """Sanity checks that the XML fixture parses into the expected sections."""

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_Tags(self):
        """ Be sure that the XML contains the right stuff.
        """
        self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)
        self.assertEqual(self.m_xml.computer_div.tag, 'ComputerDivision')
        self.assertEqual(self.m_xml.web_sect.tag, 'WebSection')
        # print(PrettyFormatAny.form(self.m_xml.web_sect, 'XML'))
class A2_XML(SetupMixin, unittest.TestCase):
    """ Now we _test that the xml_xxxxx have set up the XML_LONG tree properly.
    """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_HouseDiv(self):
        """ Test that the house division carries the expected attributes/UUID.
        """
        l_xml = self.m_xml.house_div
        # print(PrettyFormatAny.form(l_xml, 'A2-01-A - House'))
        self.assertEqual(l_xml.attrib['Name'], TESTING_HOUSE_NAME)
        self.assertEqual(l_xml.attrib['Active'], TESTING_HOUSE_ACTIVE)
        self.assertEqual(l_xml.attrib['Key'], TESTING_HOUSE_KEY)
        self.assertEqual(l_xml.find('UUID').text, TESTING_HOUSE_UUID)
class B01_JSON(SetupMixin, unittest.TestCase):
    """Placeholder for JSON round-trip tests (no assertions yet)."""

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_xxx(self):
        # Placeholder body; keeps the test discoverable until implemented.
        _l_dev = 1
class C02_XML(SetupMixin, unittest.TestCase):
    """Read/write round-trip tests for the web XML section.

    NOTE(review): `webXml` is referenced below but never imported in this
    module, so test_11/test_21 will fail with NameError — confirm the missing
    import (presumably the web XML module of this package).
    """

    def setUp(self):
        SetupMixin.setUp(self, ET.fromstring(XML_LONG))

    def test_01_find_xml(self):
        """ Be sure that the XML contains the right stuff.
        """

    def test_11_ReadXML(self):
        # Read the WebSection from the fixture and check the configured port.
        l_web = webXml.read_web_xml(self.m_pyhouse_obj)
        self.m_pyhouse_obj.Computer.Logs = l_web
        self.assertEqual(l_web.WebPort, 8580)

    def test_21_WriteXML(self):
        # Round-trip read then write; results are unchecked (smoke test).
        _l_web = webXml.read_web_xml(self.m_pyhouse_obj)
        _l_xml = webXml.write_web_xml(self.m_pyhouse_obj)
# ## END DBK
|
# rawmanifest.py
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Output (a subset of) the manifest as a bser serialisation
Outputs raw manifest data
The manifest can be updated with the current working copy state.
"""
from __future__ import absolute_import
from mercurial import (
cmdutil,
node,
)
from itertools import chain
# Command registration table consumed by Mercurial's extension loader.
cmdtable = {}
command = cmdutil.command(cmdtable)
# Declared compatible with the bundled (internal) Mercurial only.
testedwith = 'internal'


@command(
    'rawmanifest', [
        ['o', 'output', '', 'direct output to a file rather than STDOUT',
         'FILE'],
        ['d', 'deletions', None,
         'add deleted entries (hash set to nullid and flag set to "d")']],
    '[PREFIX]')
def rawmanifest(ui, repo, *args, **opts):
    """Output the raw manifest (optionally updated with working copy status)"""
    # Raw revlog payload of the manifest of the working directory's parent.
    manifestnode = repo['.'].manifestnode()
    rawmanifest = repo.manifestlog._revlog.revision(manifestnode)
    try:
        output = None
        if opts['output']:
            output = open(opts['output'], 'wb')
        else:
            # ui.write() goes to STDOUT.
            output = ui
        output.write(rawmanifest)
        if opts['deletions']:
            # Manifest entries look like "<path>\0<hex-hash><flags>\n";
            # removed/deleted files get the null hash plus the "d" flag.
            deletedline = '\x00{0}d\n'.format(node.hex(node.nullid))
            status = repo.status()
            for filename in chain(status.removed, status.deleted):
                output.write(filename)
                output.write(deletedline)
    finally:
        # Close only handles we opened ourselves (never the ui object).
        if opts['output'] and output is not None:
            output.close()
|
from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.util import format_datatables_data
from custom.up_nrhm.sql_data import ASHAFacilitatorsData
from django.utils.translation import ugettext as _, ugettext_noop
class ASHAFacilitatorsReport(GenericTabularReport, DatespanMixin, CustomProjectReport):
    # Report title shown in the UI (lazily translated).
    name = ugettext_noop("Format-2 Consolidation of the Functionality numbers")
    slug = "asha_facilitators_report"
    # Placeholder rendered when a cell has no data.
    no_value = '--'

    @property
    def report_config(self):
        """Filter config passed to the SQL data source.

        The end date is pushed to 23:59:59 so the whole last day is included.
        """
        return {
            'domain': self.domain,
            'startdate': self.datespan.startdate,
            'enddate': self.datespan.enddate.replace(hour=23, minute=59, second=59),
            'af': self.request.GET.get('hierarchy_af'),
            'is_checklist': 1
        }

    @property
    def model(self):
        """SQL data source backing this report."""
        return ASHAFacilitatorsData(config=self.report_config)

    @property
    def headers(self):
        """Static, unsortable column headers."""
        return DataTablesHeader(
            DataTablesColumn('', sortable=False),
            DataTablesColumn(_('Total no. of ASHAs functional'), sortable=False),
            DataTablesColumn(_('Total no. of ASHAs who did not report/not known'), sortable=False),
            DataTablesColumn(_('Remarks'), sortable=False),
        )

    @property
    def rows(self):
        """Return (row list, total, reporting).

        NOTE(review): column 0 is assumed to hold the ASHA total and column 1
        the number reporting; the remaining columns become the report rows —
        confirm against ASHAFacilitatorsData.
        """
        def format_val(val):
            # Substitute the placeholder for missing values.
            return self.no_value if val is None else val

        model = self.model
        model_data = model.data
        total = model.columns[0].get_raw_value(model_data)
        reporting = model.columns[1].get_raw_value(model_data)
        # `reporting` may be None; treat it as 0 when computing non-reporters.
        not_reporting = total - (reporting or 0)
        return ([[
            column.header,
            format_val(column.get_value(model_data)),
            format_datatables_data(not_reporting, not_reporting),
            ''
        ] for column in model.columns[2:]], format_val(total), format_val(reporting))
|
from tqdm import tqdm
import requests
import time
import click
import re
def download_video(url, index, filename):
    """
    Downloads a video and saves it by its name plus index for easy sorting.

    Args:
        url: Direct video URL to stream.
        index: Ordering prefix for the saved file name.
        filename: Human-readable name (printed only; NOTE(review): the file is
            saved under a placeholder name instead of this — confirm intended).
    """
    print("\n" + filename + "\n")
    maximum_retries = 5
    # BUG FIX: HTTPAdapter(max_retries=...) was constructed and immediately
    # discarded, so it never applied to any request; mount it on a session.
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(max_retries=maximum_retries)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    with open(f"{index}-(unknown).mp4", 'wb') as f:
        download_size = 0
        # Retry manually while the server omits Content-Length.
        while maximum_retries > 0:
            response = session.get(
                url, stream=True, headers={'Accept-Encoding': None, 'Content-Encoding': 'gzip'})
            download_size = response.headers.get('content-length')
            if download_size is None and maximum_retries > 0:
                maximum_retries -= 1
            else:
                break
        pbar = tqdm(
            # BUG FIX: int(None) crashed when every retry lacked
            # Content-Length; fall back to an unsized progress bar.
            total=int(download_size) if download_size else None,
            initial=0,
            unit='B',
            unit_scale=True,
            position=0,
            leave=True)
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                pbar.set_description("progress")
                pbar.update(len(chunk))
        pbar.close()
        print("\n")
def download_exercises(links):
    """
    Downloads exercise zip archives.

    Args:
        links: Iterable of URLs of the form .../exercises/<name>.zip

    NOTE(review): every archive is written to the same "(unknown).zip" path —
    `filename` is computed but unused; confirm whether f"{filename}.zip"
    was intended.
    """
    click.echo(
        click.style(f"Downloading exercise files..." + "\n", fg="green"))
    # BUG FIX: HTTPAdapter(max_retries=...) was constructed and discarded
    # inside the loop (a no-op); mount it on a session once instead.
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(max_retries=5)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    for link in links:
        # BUG FIX: the retry budget was shared across all links, so one flaky
        # download starved every later one; reset it per link.
        maximum_retries = 5
        filename = re.split("exercises/(.+).zip", link)[1]
        with open(f"(unknown).zip", 'wb') as f:
            download_size = 0
            while maximum_retries > 0:
                response = session.get(
                    link, stream=True, headers={'Accept-Encoding': None, 'Content-Encoding': 'gzip'})
                download_size = response.headers.get('content-length')
                if download_size is None and maximum_retries > 0:
                    maximum_retries -= 1
                else:
                    break
            pbar = tqdm(
                # BUG FIX: int(None) crashed when Content-Length never arrived.
                total=int(download_size) if download_size else None,
                initial=0,
                unit='B',
                unit_scale=True,
                position=0,
                leave=True)
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    pbar.set_description("progress")
                    pbar.update(len(chunk))
            pbar.close()
            print("\n")
|
from utils.search import BaiduEngine
from utils.constant import we_load_dict, load_ap
import csv
import multiprocessing
import time
import redis
import ujson
# Shared dictionaries / address parser used by every worker process.
key_set, company_suffix = we_load_dict()
ap = load_ap()
# Redis connection settings for the shared result cache.
redis_host = '172.18.18.173'
redis_db = 2
pool = redis.ConnectionPool(host=redis_host, port=6379,
                            decode_responses=True, db=redis_db)
def get_redis_hander():
    """Return a Redis client bound to the module-level connection pool."""
    return redis.Redis(connection_pool=pool)
def load_data():
    """Read at most `count` phone numbers (first CSV column) from aa.csv.

    Returns:
        List of stripped phone-number strings, in file order.
    """
    fn = "aa.csv"
    count = 2
    res = []
    tel_set = set()  # kept for parity with the original (dedupe bookkeeping)
    with open(fn, "r", encoding='utf8') as fh:
        for row_no, line in enumerate(csv.reader(fh), start=1):
            tel = line[0].strip()
            tel_set.add(tel)
            res.append(tel)
            if row_no >= count:
                break
    return res
def parse_one(iq):
    """Worker-process loop: pull phone numbers from queue *iq*, search via
    BaiduEngine, and cache the parsed result in redis keyed by the number.

    A ``None`` item on the queue is the shutdown sentinel.
    """
    global key_set, company_suffix, ap
    r = get_redis_hander()
    while True:
        try:
            tel = iq.get()
            if tel is None:
                break
            bd = BaiduEngine(tel, key_set=key_set, company_suffix=company_suffix, ap=ap)
            bd.parse()
            r.set(tel, ujson.dumps(bd.ret_res))
        except Exception as e:
            # Deliberate catch-all: one bad number must not kill the worker.
            print(e)
def run(data):
    """Fan *data* (phone numbers) out to worker processes, wait for them to
    finish, then dump the redis-cached results to out.csv.
    """
    st = time.time()
    r = get_redis_hander()
    r.flushdb()  # start from a clean result cache
    thread_size = 8
    q_list = [multiprocessing.Queue() for i in range(thread_size)]
    t_list = [multiprocessing.Process(target=parse_one, args=(q,)) for q in q_list]
    for t in t_list:
        t.start()
    i = 0
    c = 0
    for tel in data:
        # BUG FIX: was q_list[0].put(tel), which sent every item to the first
        # worker and left the other 7 idle; round-robin over all queues.
        q_list[i].put(tel)
        c += 1
        i = (i + 1) % thread_size
    print("put done")
    for q in q_list:
        q.put(None)  # one shutdown sentinel per worker
    print("terminate done")
    for t in t_list:
        t.join()
    print("join done")
    # Collect every cached result and flatten it into CSV rows.
    deal_ret = []
    all_key = r.keys('*')
    for k in all_key:
        v = r.get(k)
        ko = ujson.loads(v)
        tel = ko.get('i_tel')
        all_name = ko.get('all_find_companys', '')
        query_url = ko.get('query_url', '')
        time_cost = ko.get('time_cost')
        all_name = str(all_name)
        deal_ret.append((tel, query_url, all_name, time_cost))
    print("redis get done")
    with open("out.csv", 'w', encoding='utf8', newline='') as wh:
        cw = csv.writer(wh)
        cw.writerows(deal_ret)
    print("write file done")
    print(f"time cost:{time.time() - st}")
if __name__ == "__main__":
    # Entry point: load the input numbers and run the crawl pipeline.
    input_data = load_data()
    run(input_data)
from numpy import array, ndarray
from datagears.core.network import Depends, Maybe
def add(a: int, b: int = 10) -> int:
    """Return the sum of *a* and *b* (*b* defaults to 10)."""
    total = a + b
    return total
def reduce(c1: int, sum: Maybe[int] = Depends(add)) -> int:
    """Subtract *c1* from the upstream `add` result (injected via Depends)."""
    return sum - c1
def add_one() -> int:
    """Constant gear: always yields 1."""
    return 1
def my_out(reduced: Maybe[int] = Depends(reduce), add_one: Maybe[int] = Depends(add_one)) -> ndarray:
    """Collect the two upstream results into a 2-element numpy array."""
    return array([reduced, add_one])
|
from collections import Counter
from sys import *
from collections import defaultdict as dd
from math import *
# sys.stdin = open('input.txt', 'r')
# sys.stdout = open('output.txt', 'w')
#input functions
def inp():
    """Read one line from stdin as an int."""
    line = stdin.readline()
    return int(line.strip())
def vinp():
    """Read one line of whitespace-separated ints; returns a map iterator."""
    tokens = stdin.readline().strip().split()
    return map(int, tokens)
def linp():
    """Read one line of whitespace-separated ints as a list."""
    return [int(tok) for tok in stdin.readline().strip().split()]
def sinp(n=1):
    """Read one line: n==1 -> stripped str, n==2 -> list of chars,
    otherwise -> whitespace-split tokens."""
    line = stdin.readline()
    if n == 1:
        return line.strip()
    if n == 2:
        return list(line.strip())
    return list(line.split())
#output function
def pr(*x, end="\n"):
    """print() wrapper with a configurable line ending."""
    print(*x, end=end)
#others
def mod(f, val=1000000007):
    """Return f modulo val (default 1e9+7, the usual CP modulus)."""
    return f % val
def csort(c):
    """Return c's (key, count) pairs sorted by count, descending.

    BUG FIX: the sorted list was computed and discarded, so the function
    always returned None; it now returns the sorted pairs.
    """
    return sorted(c.items(), key=lambda pair: pair[1], reverse=True)
def indc(l, n):
    """Map each value in l[:n] to the list of its 1-based positions."""
    positions = {}
    for idx in range(n):
        positions.setdefault(l[idx], []).append(idx + 1)
    return positions
if __name__ == "__main__":
    # a: how many numbers, b: how many must share a remainder, c: the modulus.
    a, b, c = vinp()
    k = linp()
    l = [0 for i in range(c)]    # count of numbers per remainder bucket
    l2 = [[] for i in range(c)]  # indices of the numbers in each bucket
    p = True
    for i in range(a):
        n = k[i] % c
        # NOTE(review): remainder r is stored at index r-1 (r == 0 wraps to
        # the last slot); the mapping is a bijection over remainders, so the
        # bucketing stays consistent.
        l[n - 1] += 1
        l2[n - 1].append(i)
    for i in range(c):
        if l[i] >= b:
            # Found at least b numbers sharing the same remainder mod c.
            pr("Yes")
            p = False
            for j in range(b):
                pr(k[l2[i][j]], end=" ")
            break
    if p:
        pr("No")
    else:
        pr()
from functools import partial
import logging
import os
from threading import Thread
import time
from prometheus_client import start_http_server, Counter, Gauge, Histogram
# Note: code below contains additional imports called only when feature is enabled
# TODO metric declarations should be based on enabled sensors => not hardcoded, not global
# Gauges holding the most recent reading of each sensor.
TEMPERATURE = Gauge('enviro_temperature_celsius','Temperature')
PRESSURE = Gauge('enviro_pressure_pascals','Pressure')
HUMIDITY = Gauge('enviro_relative_humidity','Relative humidity')
LIGHT = Gauge('enviro_light_lux', 'Ambient light level')
PROXIMITY = Gauge('enviro_proximity_raw', 'Raw proximity value, with larger numbers being closer and vice versa')
# TODO don't report gas and PM on http prometheus exporter if we are in Enviro device mode
GAS_RED = Gauge('enviro_gas_red_ohms', 'Gas RED sensor: CO, H2S, Ethanol, Hydrogen, Ammonia, Methane, Propane, Iso-butane')
GAS_OX = Gauge('enviro_gas_ox_ohms','Gas OX sensor: NO2, NO, Hydrogen')
GAS_NH3 = Gauge('enviro_gas_nh3_ohms', 'Gas NH3 sensor: Hydrogen, Ethanol, Amonia, Propane, Iso-butane')
PM1 = Gauge('enviro_pm_1u', 'Particulate Matter of diameter less than 1 micron. Measured in micrograms per cubic metre (ug/m3)')
PM25 = Gauge('enviro_pm_2u5', 'Particulate Matter of diameter less than 2.5 microns. Measured in micrograms per cubic metre (ug/m3)')
PM10 = Gauge('enviro_pm_10u', 'Particulate Matter of diameter less than 10 microns. Measured in micrograms per cubic metre (ug/m3)')
# Histograms of the same readings, with bucket ranges sized per sensor.
GAS_RED_HIST = Histogram('enviro_gas_red_hist_ohms', 'Histogram of gas RED measurements', buckets=tuple(range(100_000, 1_500_000 + 1, 100_000)))
GAS_OX_HIST = Histogram('enviro_gas_ox_hist_ohms', 'Histogram of gas OX measurements', buckets=tuple(range(5_000, 100_000 + 1, 5_000)))
GAS_NH3_HIST = Histogram('enviro_gas_nh3_hist_ohms', 'Histogram of gas NH3 measurements', buckets=tuple(range(100_000, 2_000_000 + 1, 100_000)))
PM1_HIST = Histogram('enviro_pm_1u_hist', 'Histogram of Particulate Matter of diameter less than 1 micron', buckets=tuple(range(5, 100 + 1, 5)))
PM25_HIST = Histogram('enviro_pm_2u5_hist', 'Histogram of Particulate Matter of diameter less than 2.5 microns', buckets=tuple(range(5, 100 + 1, 5)))
PM10_HIST = Histogram('enviro_pm_10u_hist', 'Histogram of Particulate Matter of diameter less than 10 microns', buckets=tuple(range(5, 100 + 1, 5)))
# Operational counters for the update loop itself.
LOOP_UPDATE_TIME = Counter('enviro_update_time_seconds', 'Cumulative time spent in sensor values update.')
ERROR_COUNTER = Counter('enviro_errors', 'Counter of processing errors. E.g. failed sensor value updates.')
def update_prometheus_metrics(enviro, values, sensor_error=False):
    """Push one reading of *values* into the module-level Prometheus metrics.

    Args:
        enviro: True when running on a plain Enviro board; gas and particulate
            metrics are skipped in that case.
        values: Mapping of sensor names ('temperature_celsius', 'pm_1u', ...)
            to numbers.
        sensor_error: When True, also bump the error counter.
    """
    # TODO update metric values atomically (report on http whole set, not mix old/new)
    # (create own registry serving one defined values dict, see https://github.com/prometheus/client_python#custom-collectors)
    if sensor_error:
        ERROR_COUNTER.inc()
    TEMPERATURE.set(values['temperature_celsius'])
    PRESSURE.set(values['pressure_pascals'])
    HUMIDITY.set(values['relative_humidity'])
    LIGHT.set(values['light_lux'])
    PROXIMITY.set(values['proximity_raw'])
    if not enviro:
        # Gas and particulate sensors exist only on the non-Enviro hardware.
        GAS_RED.set(values['gas_red_ohms'])
        GAS_OX.set(values['gas_ox_ohms'])
        GAS_NH3.set(values['gas_nh3_ohms'])
        GAS_RED_HIST.observe(values['gas_red_ohms'])
        GAS_OX_HIST.observe(values['gas_ox_ohms'])
        GAS_NH3_HIST.observe(values['gas_nh3_ohms'])
        PM1.set(values['pm_1u'])
        PM25.set(values['pm_2u5'])
        PM10.set(values['pm_10u'])
        PM1_HIST.observe(values['pm_1u'])
        PM25_HIST.observe(values['pm_2u5'])
        PM10_HIST.observe(values['pm_10u'])
    # TODO update LOOP_UPDATE_TIME ... for now it is updated externally in some hardcoded code
def _collect_all_data():
    """Snapshot the current value of every gauge as a plain name->value dict."""
    gauges = {
        'temperature': TEMPERATURE,
        'pressure': PRESSURE,
        'humidity': HUMIDITY,
        'light': LIGHT,
        'proximity': PROXIMITY,
        'gas_red': GAS_RED,
        'gas_ox': GAS_OX,
        'gas_nh3': GAS_NH3,
        'pm1': PM1,
        'pm25': PM25,
        'pm10': PM10,
    }
    return {
        name: gauge.collect()[0].samples[0].value
        for name, gauge in gauges.items()
    }
def _post_loop_to_influxdb(influxdb_api, create_point, time_between_posts, bucket):
    """Post all sensor data to InfluxDB (runs forever in its own thread).

    Args:
        influxdb_api: InfluxDB write API object.
        create_point: Zero-arg factory returning a pre-tagged Point.
        time_between_posts: Seconds to sleep between posts.
        bucket: Target InfluxDB bucket name.
    """
    while True:
        time.sleep(time_between_posts)
        # One point per metric, all carrying the factory's tags.
        data_points = [ create_point().field(name, value) for name, value in _collect_all_data().items() ]
        try:
            influxdb_api.write(bucket=bucket, record=data_points)
            logging.debug('InfluxDB response: OK')
        except Exception as exception:
            # Keep the loop alive across transient failures; just log.
            logging.error('Exception sending to InfluxDB: {}'.format(exception))
def _post_loop_to_luftdaten(sensor_uid, time_between_posts):
    """
    Post relevant sensor data to luftdaten.info
    Code from: https://github.com/sepulworld/balena-environ-plus

    Args:
        sensor_uid: Unique sensor id ('raspi-<serial>') sent as X-Sensor.
        time_between_posts: Seconds to sleep between posts.

    Runs forever in its own thread.
    """
    import requests

    def post_pin_values(pin, values):
        # Luftdaten groups values by "pin": 1 = particulates, 11 = climate.
        return requests.post('https://api.luftdaten.info/v1/push-sensor-data/',
            json={
                "software_version": "prometheus-enviro-exporter 0.0.1",
                "sensordatavalues": [{"value_type": key, "value": val} for key, val in values.items()]
            },
            headers={
                "X-PIN": pin,
                "X-Sensor": sensor_uid,
                "Content-Type": "application/json",
                "cache-control": "no-cache"
            }
        )

    while True:
        time.sleep(time_between_posts)
        try:
            sensor_data = _collect_all_data()
            response_pin_1 = post_pin_values("1", {
                "P2": sensor_data['pm25'],
                "P1": sensor_data['pm10']
            })
            # Humidity gauge holds a 0..1 ratio; Luftdaten expects percent.
            response_pin_11 = post_pin_values("11", {
                "temperature": "{:.2f}".format(sensor_data['temperature']),
                "pressure": "{:.2f}".format(sensor_data['pressure']),
                "humidity": "{:.2f}".format(sensor_data['humidity'] * 100)
            })
            if response_pin_1.ok and response_pin_11.ok:
                logging.debug('Luftdaten response: OK')
            else:
                logging.error('Luftdaten response: Failed')
        except Exception as exception:
            # Keep posting on the next cycle even after a failure.
            logging.error('Exception sending to Luftdaten: {}'.format(exception))
def _get_serial_number():
    """Get the Raspberry Pi serial number from /proc/cpuinfo.

    Returns:
        The serial as a string, or None when no 'Serial' line exists
        (e.g. on non-Pi hosts).
    """
    with open('/proc/cpuinfo', 'r') as f:
        for line in f:
            # Idiom fix: startswith() instead of slice comparison; the str()
            # wrapper was redundant (split already yields str).
            if line.startswith('Serial'):
                return line.split(":")[1].strip()
    return None
def _str_to_bool(value):
if value.lower() in {'false', 'f', '0', 'no', 'n'}:
return False
elif value.lower() in {'true', 't', '1', 'yes', 'y'}:
return True
raise ValueError('{} is not a valid boolean value'.format(value))
def add_exporter_arguments(parser):
parser.add_argument('--prometheus-port', metavar='PORT', default=9848, type=int,
help='Port of the Prometheus exporter HTTP server.')
parser.add_argument('--prometheus-ip', metavar='IP', default='0.0.0.0',
help='IP address where the Prometheus exporter HTTP server should be available. By default bind on all available network interfaces.')
parser.add_argument("-i", "--influxdb", metavar='INFLUXDB', type=_str_to_bool, default='false',
help="Post sensor data to InfluxDB")
parser.add_argument("-l", "--luftdaten", metavar='LUFTDATEN', type=_str_to_bool, default='false',
help="Post sensor data to Luftdaten")
def create_exporters(args, enviro=False):
    """
    Creates exporters from parsed arguments and starts exports.

    Args:
        args: Parsed CLI namespace (see add_exporter_arguments).
        enviro: True when running on a plain Enviro board without gas/PM
            sensors; forwarded to the metric updater.

    Returns:
        Function accepting mapping type with name and value pairs of sensor values.
    """
    # TODO replace InfluxDB code with standalone exporter not related to prometheus exporter or metrics
    if args.influxdb:
        logging.info("Starting InfluxDB client and posting loop")
        # Imported lazily so the dependency is only needed when enabled.
        from influxdb_client import InfluxDBClient, Point
        from influxdb_client.client.write_api import SYNCHRONOUS
        influxdb_client = InfluxDBClient(
            url=os.getenv('INFLUXDB_URL', ''),
            token=os.getenv('INFLUXDB_TOKEN', ''),  # You can generate an InfluxDB Token from the Tokens Tab in the InfluxDB Cloud UI
            org=os.getenv('INFLUXDB_ORG_ID', '')
        )
        influxdb_api = influxdb_client.write_api(write_options=SYNCHRONOUS)
        sensor_location = os.getenv('INFLUXDB_SENSOR_LOCATION', 'Adelaide')

        def create_point():
            # Every point carries the configured location tag.
            return Point('enviro').tag('location', sensor_location)

        influx_thread = Thread(target=_post_loop_to_influxdb, args=(
            influxdb_api, create_point,
            int(os.getenv('INFLUXDB_TIME_BETWEEN_POSTS', '5')),
            os.getenv('INFLUXDB_BUCKET', ''),
        ))
        influx_thread.start()
    # TODO replace Luftdaten code with standalone exporter not related to prometheus exporter or metrics
    if args.luftdaten:
        LUFTDATEN_TIME_BETWEEN_POSTS = int(os.getenv('LUFTDATEN_TIME_BETWEEN_POSTS', '30'))
        LUFTDATEN_SENSOR_UID = 'raspi-' + _get_serial_number()
        logging.info("Sensor data will be posted to Luftdaten every {} seconds for the UID {}".format(LUFTDATEN_TIME_BETWEEN_POSTS, LUFTDATEN_SENSOR_UID))
        luftdaten_thread = Thread(target=_post_loop_to_luftdaten, args=(LUFTDATEN_SENSOR_UID, LUFTDATEN_TIME_BETWEEN_POSTS))
        luftdaten_thread.start()
    logging.info("Prometheus exporter listening on http://{}:{}".format(args.prometheus_ip, args.prometheus_port))
    start_http_server(addr=args.prometheus_ip, port=args.prometheus_port)
    # Callers feed readings into the prometheus metrics through this partial.
    return partial(update_prometheus_metrics, enviro)
|
#!/usr/bin/env python
import re
import requests
import requests_html
import string
# Queries for the MediaWiki backend.
# Documentation here: https://www.mediawiki.org/wiki/API:Categorymembers
CATEGORY = "Category:Japanese_katakana"
# NOTE(review): 500 appears to be the per-request cap for anonymous API
# clients — confirm against the API docs.
LIMIT = 500
INITIAL_QUERY = f"https://en.wiktionary.org/w/api.php?action=query&format=json&list=categorymembers&cmtitle={CATEGORY}&cmlimit={LIMIT}"
# Template used to resume a listing from a continuation token.
CONTINUE_TEMPLATE = string.Template(INITIAL_QUERY + "&cmcontinue=$cmcontinue")
# Selects the content on the page.
PAGE_TEMPLATE = string.Template("https://en.wiktionary.org/wiki/$word")
# The romanized form is rendered in this <b> element on each entry page.
SELECTOR = 'b[class="Latn form-of lang-ja romanized-form-of"]'
def _print_data(data):
    """Print "katakana<TAB>romaji" pairs for one API result page.

    Filters out dash-affixed entries, entries containing digits, and multiword
    romanizations; scrapes each entry page for the romanized form.
    """
    session = requests_html.HTMLSession()
    for member in data["query"]["categorymembers"]:
        katakana = member["title"]
        # Skips examples starting or ending with a dash.
        if katakana.startswith("-") or katakana.endswith("-"):
            continue
        # Skips examples containing digits.
        if re.search(r"\d", katakana):
            continue
        query = PAGE_TEMPLATE.substitute(word=katakana)
        got = session.get(query).html.find(SELECTOR, first=True)
        if not got:
            continue
        romaji = got.text
        # Skips multiword examples.
        if " " in romaji:
            continue
        # Idiom fix: endswith() accepts a tuple — strips one trailing ")" or
        # "," left over from page markup.
        if romaji.endswith((")", ",")):
            romaji = romaji[:-1]
        # Skips examples starting or ending with a dash.
        if romaji.startswith("-") or romaji.endswith("-"):
            continue
        romaji = romaji.casefold()
        print(f"{katakana}\t{romaji}")
def main():
    """Walk every page of the category listing, printing pairs as we go."""
    data = requests.get(INITIAL_QUERY).json()
    _print_data(data)
    # BUG FIX: the first response was assumed to always carry a "continue"
    # token, which raised KeyError for a single-page category; check before
    # reading it, and loop until the API stops sending one.
    while "continue" in data:
        code = data["continue"]["cmcontinue"]
        next_query = CONTINUE_TEMPLATE.substitute(cmcontinue=code)
        data = requests.get(next_query).json()
        _print_data(data)
if __name__ == "__main__":
main()
|
"""Provide utilities"""
import ast
import inspect
import sys
import warnings
from contextlib import contextmanager
from enum import Enum, auto
from functools import lru_cache, singledispatch
from types import FrameType
from typing import Any, Callable, Generator, Mapping, Tuple
from diot import Diot
from executing import Source
from pure_eval import CannotEval, Evaluator
from .context import ContextAnnoType
# Name under which a DataEnv is recognised in a calling frame's locals.
DATA_CONTEXTVAR_NAME = "__pipda_context_data__"

# Global pipda options (attribute access provided by Diot).
options = Diot(
    # Warn about failure to get ast node
    warn_astnode_failure=True,
    # All piping mode:
    # - Assume all verbs are using PIPING_VERB env
    # - Assume all data functions are using PIPING env
    # - Assume all non-data functions are using PIPING verbs
    # This is useful when source code is not available.
    assume_all_piping=False,
)
@contextmanager
def options_context(**kwargs: Mapping[str, Any]) -> Generator:
    """A context manager to execute code with temporary options.

    BUG FIX: restoration now happens in a ``finally`` block, so the previous
    options are reinstated even when the managed block raises.
    """
    tmp_opts = options.copy()
    options.update(**kwargs)
    try:
        yield
    finally:
        options.update(tmp_opts)
class InaccessibleToNULLException(Exception):
    """Raised when code tries to interact with the NULL sentinel object."""
class NULLClass:
    """Sentinel distinguishing "no value given" from a legitimate None.

    Also used as data to fast-evaluate FastEvalFunction and FastEvalVerb
    objects; any interaction beyond repr() raises
    InaccessibleToNULLException.
    """

    def __repr__(self) -> str:
        """String representation"""
        return "NULL"

    def _refuse(self, *args: Any, **kwargs: Any) -> Any:
        # Shared handler: every blocked protocol funnels through here.
        raise InaccessibleToNULLException

    __bool__ = _refuse
    __len__ = _refuse
    __getitem__ = _refuse
    __getattr__ = _refuse
    # more ?


NULL = NULLClass()
class CallingEnvs(Enum):
    """Types of piping/calling envs"""

    # When a function works as an argument of a verb calling
    # data >> verb(func())
    #              ^^^^^^
    # Or
    # verb(data, func())
    #            ^^^^^^
    PIPING = auto()

    # When I am the verb in piping syntax
    # data >> verb(...)
    #         ^^^^^^^^^
    PIPING_VERB = auto()

    # # When I am an argument of any function not in a piping syntax
    # # func(x=func2())
    # #        ^^^^^^^
    # FUNC_ARG = auto()

    # Used to pass to the functions manually (bypasses source inspection).
    REGULAR = auto()
class DataEnv:
    """Attach a piece of data to the calling context so functions registered
    by `pipda.register_*` can pick it up without an explicit data argument.

    Args:
        data: The data to be attached to the context
        name: Identifier used to recognise the env object (defaults to the
            pipda context variable name)
    """

    def __init__(self, data: Any, name: str = DATA_CONTEXTVAR_NAME) -> None:
        self.name = name
        self.data = data

    def get(self) -> Any:
        """Return the attached data."""
        return self.data

    def set(self, data: Any) -> None:
        """Replace the attached data."""
        self.data = data

    def delete(self) -> None:
        """Detach the data by replacing it with the NULL sentinel."""
        self.set(NULL)
def get_env_data(frame: FrameType) -> Any:
    """Return the data attached to *frame* via a DataEnv local named `_`.

    Only a local named ``_`` holding a DataEnv with the expected context name
    counts; anything else yields the NULL sentinel.
    """
    candidate = frame.f_locals.get("_", None)
    if isinstance(candidate, DataEnv) and candidate.name == DATA_CONTEXTVAR_NAME:
        return candidate.get()
    return NULL
def calling_env(funtype: str) -> Any:
    """Checking how the function is called:

    1. PIPING_VERB: It is a verb that is piped directed. ie. data >> verb(...)
    2. PIPING: It is a function called as (part of) the argument
       of a piping verb. ie.:

       >>> data >> verb(func(...))

       Note that `func` here could also be a verb. When a function is called
       inside a lambda body, it should not be counted in this situation:

       >>> data >> verb(lambda: func(...))

       In this case, func should be called as normal function.
       This function should return `None`
    3. FUNC_ARG: It is an argument of any function call
    4. None: None of the above situation fits

    This function should be only called inside register_*.wrapper
    """
    if options.assume_all_piping:
        # Source-less mode: classify purely by the registered function type.
        return (
            CallingEnvs.PIPING_VERB
            if funtype == 'Verb'
            else CallingEnvs.PIPING
        )
    # frame 1: register_*.wrapper
    # frame 2: func(...)
    frame = sys._getframe(2)
    my_node = Source.executing(frame).node
    if not my_node and options.warn_astnode_failure:
        # NOTE(review): the early return sits inside this branch, so with
        # warnings disabled a missing node falls through to the AST checks
        # below with my_node falsy — confirm that is intended.
        warnings.warn(
            "Failed to fetch the node calling the function, "
            "call it with the original function."
        )
        return None
    piping_verb_node = _get_piping_verb_node(my_node)
    if piping_verb_node is my_node and piping_verb_node is not None:
        return CallingEnvs.PIPING_VERB
    if _is_piping_verb_argument_node(my_node, piping_verb_node):
        return CallingEnvs.PIPING
    parent_call_node = _argument_node_of(my_node)
    if parent_call_node is None:
        return None
    # check if parent call node is a function registered by
    # register_verb/register_func
    evaluator = Evaluator.from_frame(frame)
    try:
        func = evaluator[parent_call_node.func]
    except CannotEval:  # pragma: no cover
        return None
    if functype(func) != "plain":
        return CallingEnvs.PIPING
    return None
def evaluate_expr(expr: Any, data: Any, context: ContextAnnoType) -> Any:
    """Recursively evaluate a mixed expression against *data*.

    Containers (tuple/list/set/dict, including subclasses) and slices are
    rebuilt with their elements evaluated; anything else passes through.
    """
    if isinstance(context, Enum):
        context = context.value
    # Expression-like objects evaluate themselves (duck-typed via class attr,
    # so customized classes also qualify).
    if hasattr(expr.__class__, "_pipda_eval"):
        return expr._pipda_eval(data, context)
    if isinstance(expr, (tuple, list, set)):
        evaluated = (evaluate_expr(item, data, context) for item in expr)
        return expr.__class__(evaluated)
    if isinstance(expr, slice):
        parts = (expr.start, expr.stop, expr.step)
        return slice(*(evaluate_expr(part, data, context) for part in parts))
    if isinstance(expr, dict):
        return expr.__class__(
            {
                key: evaluate_expr(val, data, context)
                for key, val in expr.items()
            }
        )
    return expr
@singledispatch
def has_expr(expr: Any) -> bool:
    """Check whether *expr* contains any Expression object."""
    from .expression import Expression
    return isinstance(expr, Expression)


@has_expr.register(tuple)
@has_expr.register(list)
@has_expr.register(set)
def _(expr: Any) -> Any:
    # True as soon as any element contains an Expression.
    return any(map(has_expr, expr))


@has_expr.register(slice)
def _(expr: Any) -> Any:
    # A slice counts when any of its three components does.
    return has_expr((expr.start, expr.stop, expr.step))


@has_expr.register(dict)
def _(expr: Any) -> Any:
    # Only values are inspected; keys are assumed plain.
    return any(map(has_expr, expr.values()))
def functype(func: Callable) -> str:
    """Classify *func* by how it was registered with pipda.

    Returns:
        One of:
        - "verb": registered by `register_verb`
        - "func": registered by `register_func`, with data as the
          first argument
        - "plain-func": registered by `register_func`, without data as the
          first argument
        - "plain": an ordinary python function
    """
    kinds = {
        "Verb": "verb",
        "Function": "func",
        "PlainFunction": "plain-func",
    }
    return kinds.get(getattr(func, "__pipda__", None), "plain")
def bind_arguments(
    func: Callable,
    args: Tuple,
    kwargs: Mapping[str, Any],
) -> inspect.BoundArguments:
    """Bind *args*/*kwargs* to *func*'s signature without calling it, to see
    whether the arguments fit the function.

    Args:
        func: The function
        args: The positional arguments to bind to the function
        kwargs: The keyword arguments to bind to the function

    Raises:
        TypeError: When the arguments fail to bind; the message is prefixed
            with the function's qualified name for easier debugging.

    Returns:
        inspect.BoundArguments with defaults applied.
    """
    # NOTE: a disabled, commented-out type-check prototype (type_check /
    # ignore_first / ignore_types parameters) was removed from this body.
    signature = inspect.signature(func)
    try:
        bound = signature.bind(*args, **kwargs)
    except TypeError as terr:
        raise TypeError(f"[{func.__qualname__}] {terr}") from None
    bound.apply_defaults()
    return bound
# Helper functions -----------------------------
@lru_cache()
def _get_piping_verb_node(calling_node: ast.Call) -> ast.Call:
    """Walk up the AST from ``calling_node`` to find the call that sits
    directly on the right-hand side of the piping operator.

    Args:
        calling_node: Current Call node to start the search from.

    Returns:
        The verb call node if found, otherwise None.
    """
    from .register import PIPING_SIGNS
    from .verb import Verb

    token = PIPING_SIGNS[Verb.CURRENT_SIGN].token
    node = calling_node
    ancestor = getattr(node, "parent", None)
    while ancestor is not None:
        # data >> verb(...)
        on_binop_rhs = isinstance(ancestor, ast.BinOp) and ancestor.right is node
        # data >>= verb(...)
        on_augassign = isinstance(ancestor, ast.AugAssign) and ancestor.value is node
        if (on_binop_rhs or on_augassign) and isinstance(ancestor.op, token):
            return node
        node = ancestor
        ancestor = getattr(ancestor, "parent", None)
    return None
@lru_cache()
def _is_piping_verb_argument_node(
    sub_node: ast.Call, verb_node: ast.Call
) -> bool:
    """Tell whether ``sub_node`` (a ``func()`` call) appears as an argument
    of the piping verb call ``verb_node`` (i.e. ``verb(func())``)."""
    if not verb_node:
        return False
    node = sub_node
    while node:
        if isinstance(node, ast.Lambda):
            # A function inside a lambda is not in a piping environment.
            return False
        if isinstance(node, ast.Call) and (
            node is verb_node or _argument_node_of(node) is verb_node
        ):
            return True
        node = getattr(node, "parent", None)
    # When verb_node exists it is always reachable as an ancestor of
    # sub_node, so this fallthrough is unreachable in practice.
    return False  # pragma: no cover
@lru_cache()
def _argument_node_of(sub_node: ast.Call) -> ast.Call:
"""Get the Call node of a argument subnode"""
parent = getattr(sub_node, "parent", None)
while parent:
if isinstance(parent, ast.Call) and (
sub_node in parent.args or sub_node in parent.keywords
):
return parent
if isinstance(parent, ast.Lambda):
# function inside lambda is not in a piping environment
return None
sub_node = parent
parent = getattr(parent, "parent", None)
return None
|
from tqdm import tqdm
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import math
from dateutil.relativedelta import relativedelta
from datetime import datetime, date
import os, sys
import time
# Locate SUMO's bundled python tools via the SUMO_HOME environment variable.
if 'SUMO_HOME' in os.environ:
    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
    sys.path.append(tools)
else:
    sys.exit("please declare environment variable 'SUMO_HOME'")
# The note below says: "traci provides a real-time interaction interface".
'''
traci提供实时交互接口
'''
sys.path.append(r"F:/software two/sumo-1.8.0/tools/xml")
import traci
from sumolib import checkBinary
import xml2csv
# Plot defaults: large figures, ggplot style (original comment: 绘图图式)
plt.rcParams['figure.figsize']=(30,10)
plt.style.use('ggplot')
# Choose the SUMO binary; gui=True launches the graphical simulator.
gui = True
if gui == True:
    sumoBinary = r"F:\software two\sumo-1.8.0\bin/sumo-gui"
else:
    sumoBinary = r"F:\software two\sumo-1.8.0\bin/sumo"
# Command line used by traci.start(): config file plus tripinfo output.
sumoCmd = [sumoBinary, "-c", r"F:\software two\sumo-1.8.0/file2/beihong1.sumo.cfg",'--tripinfo-output',r'F:\software two\sumo-1.8.0/file2/tripinfo2.xml','--duration-log.statistics']
np.random.seed(2)  # reproducible
# Global state (original comment: 全局变量): per-lane mean-speed and
# occupancy histories, filled during the simulation loop.
simulation_time =1200
H_0_meanspeed_list =[]
H_1_meanspeed_list =[]
H_2_meanspeed_list =[]
H_3_meanspeed_list =[]
H_4_meanspeed_list =[]
get_OOC0_list = []
get_OOC1_list = []
get_OOC2_list = []
get_OOC3_list = []
get_OOC4_list = []
get_OOCall_list = []
H_0_car_speed = 0
H_1_car_speed = 0
H_2_car_speed = 0
H_3_car_speed = 0
H_4_car_speed = 0
Actions_move =['r','G']
# Q-learning hyper-parameters.
N_STATES = simulation_time   # the length of the 1 dimensional world
ACTIONS = ['r', 'G']     # available actions
EPSILON = 0.9   # greedy police
ALPHA = 0.1     # learning rate
GAMMA = 0.9    # discount factor
MAX_EPISODES = 1   # maximum episodes (original comment: 训练次数, number of training runs)
FRESH_TIME = 0    # fresh time for one move
#RL----Q-learning
def build_q_table(n_states, actions):
    """Create an all-zero Q-table: one row per state, one column per action."""
    zeros = np.zeros((n_states, len(actions)))
    return pd.DataFrame(zeros, columns=actions)
def choose_action(state, q_table):
    """Epsilon-greedy action selection for the given state row."""
    row = q_table.iloc[state, :]
    # Explore when the random draw exceeds EPSILON or the state has no
    # learned values yet; otherwise exploit the best known action.
    if (np.random.uniform() > EPSILON) or ((row == 0).all()):
        return np.random.choice(ACTIONS)
    # idxmax replaces Series.argmax, whose meaning changed in newer pandas.
    return row.idxmax()
def get_env_feedback(S, A, meanspeed):
    """Return (next_state, reward) for taking action A in state S.

    Reward is 1 when traffic flows freely (mean speed above 45 km/h) under
    a green ramp light, otherwise 0.1. The state itself never changes.
    """
    reward = 1 if (meanspeed > 45 and A == 'G') else 0.1
    return S, reward
def rl():
    """Main RL loop: train the Q-table for MAX_EPISODES episodes.

    Each episode runs one full SUMO simulation and dumps the resulting
    Q-table to an Excel file under the hard-coded doc directory.

    Returns:
        The trained Q-table (pandas DataFrame).
    """
    q_table = build_q_table(N_STATES, ACTIONS)
    for episode in range(MAX_EPISODES):
        # BUG FIX: the original called the undefined name `traci_control`;
        # the training/simulation function is `traci_control_env_update`.
        q_table_train = traci_control_env_update(N_STATES, q_table)
        q_table_train.to_excel(
            r'F:\software two\sumo-1.8.0/file2/doc/' + 'qtable' + str(episode) + '.xlsx',
            index=False)
        # (removed `episode += 1`: the for loop rebinds `episode` anyway)
    return q_table
#RL----Q-learning---------------------------------
#-----control-----------------------------
def build_qq_table(step, actions):
    """Create an all-zero action-log table with one row per simulation step."""
    return pd.DataFrame(np.zeros((step, len(actions))), columns=actions)
# Global log of which ramp action ('r' or 'G') was taken at each step.
qq_table = build_qq_table(simulation_time,Actions_move)
def RedYellowGreen_control():
    """Placeholder for a future red/yellow/green signal controller (unimplemented)."""
    pass
def trafficlight_control(time):
    """Fixed-cycle ramp signal: within each 20 s cycle, seconds 1-9 are
    red ('r') and the rest are green ('G')."""
    phase = time % 20
    return 'r' if 0 < phase < 10 else 'G'
def trafficlight_control2(time):
    """Feedback ramp controller: red when the mean speed of lanes H_1/H_2
    at this step drops below 33 km/h, green otherwise.

    Also records the chosen action in the global qq_table (column 0 for
    red, column 1 for green).
    """
    mean_speed = (H_1_meanspeed_list[time] + H_2_meanspeed_list[time]) / 2
    if mean_speed < 33:
        qq_table.iloc[time, 0] = 1
        return 'r'
    qq_table.iloc[time, 1] = 1
    return 'G'
#------plot------------------------------------
def print_lane_speed():
    """Placeholder: print the mean speed of lanes 0 and 1 (unimplemented).
    (Original comment: 输出0 1 道路的平均速度.)"""
    pass
def output_lane_speed():
    """Plot per-lane mean speeds (H_0..H_2) sampled every 20 steps.

    Saves two figures to the hard-coded img1 directory: one with each
    lane's speed curve and one with the H_0/H_1 average.
    (Original comment: 输出01234道路的平均速度 — output mean lane speeds.)
    """
    car_simple1 = []
    car_simple2 = []
    car_simple3 = []
    car_simple4 = []          # unused: lanes 3/4 are not plotted
    car_simple5 = []          # unused
    car_simple_0_1_mean = []
    car_index = []
    car_speed = {'H_0':H_0_meanspeed_list,
        'H_1':H_1_meanspeed_list,'H_2':H_2_meanspeed_list}
    car_speed = pd.DataFrame(data=car_speed)
    # Downsample: keep one observation every 20 simulation steps.
    for i in range(0,car_speed.count()[0]):
        if i % 20 == 0:
            car_simple1.append(car_speed['H_0'][i])
            car_simple2.append(car_speed['H_1'][i])
            car_simple3.append(car_speed['H_2'][i])
            car_simple_0_1_mean.append((car_speed['H_0'][i]+car_speed['H_1'][i])/2)
            car_index.append(i/60)  # x axis in minutes (steps are seconds)
    car_simple_speed = {'H_0':car_simple1,'H_1':car_simple2,'H_2':car_simple3 }
    car_simple_speed = pd.DataFrame(data = car_simple_speed,index = car_index)
    ax = car_simple_speed[['H_0', 'H_1','H_2']].plot(fontsize =30)
    plt.title('Lane car mean speed for RL control ',fontsize = 30)
    fig = ax.get_figure()
    plt.xlabel('time /min',fontsize = 30)
    plt.ylabel('speed km/h',fontsize = 30)
    plt.show()
    fig.savefig(r'F:\software two\sumo-1.8.0/file2/img1/' + 'car_speed_lane_RL.png')
    # Second figure: average of lanes H_0 and H_1 only.
    car_mean_speed = {'H_0_1_mean':car_simple_0_1_mean}
    car_mean_speed = pd.DataFrame(data = car_mean_speed,index = car_index)
    ax= car_mean_speed[['H_0_1_mean']].plot(fontsize =30)
    plt.title('car mean speed of H_1 and H_2 for RL control',fontsize = 25)
    fig = ax.get_figure()
    plt.xlabel('time /min',fontsize = 30)
    plt.ylabel('speed km/h',fontsize = 30)
    plt.show()
    fig.savefig(r'F:\software two\sumo-1.8.0/file2/img1/' + 'car_mean_speed12_RL.png')
def output_lane_OOC():
    """Plot per-lane occupancy (OCC, %) sampled every 20 steps and save
    the figures to the hard-coded img1 directory.
    (Original comment: 画图 — plot.)
    """
    get_OOC = {'H_0':get_OOC0_list,
        'H_1':get_OOC1_list,'H_2':get_OOC2_list}
    get_OOC = pd.DataFrame(data=get_OOC)
    car_OOC_simple1 =[]
    car_OOC_simple2 =[]
    car_OOC_simple3 =[]
    car_OOC_simple4 =[]      # unused: lanes 3/4 are not plotted
    car_OOC_simple5 =[]      # unused
    car_OOC_simpleall =[]
    car_OOC_index =[]
    # Downsample: keep one observation every 20 simulation steps.
    for i in range(0,get_OOC.count()[0]):
        if i % 20 == 0:
            car_OOC_simple1.append(get_OOC['H_0'][i])
            car_OOC_simple2.append(get_OOC['H_1'][i])
            car_OOC_simple3.append(get_OOC['H_2'][i])
            car_OOC_simpleall.append((get_OOC['H_0'][i]+get_OOC['H_1'][i])/2)
            car_OOC_index.append(i/60)  # x axis in minutes
    car_simple_OOC = {'H_0':car_OOC_simple1,'H_1':car_OOC_simple2,'H_2':car_OOC_simple3 ,'H_all':car_OOC_simpleall}
    car_simple_OOC = pd.DataFrame(data = car_simple_OOC,index = car_OOC_index)
    ax = car_simple_OOC[['H_0', 'H_1','H_2']].plot(fontsize =30)
    plt.title('Lane car OCC for RL control',fontsize = 30)
    fig = ax.get_figure()
    plt.xlabel('time /min',fontsize = 30)
    plt.ylabel('%',fontsize = 30)
    plt.show()
    fig.savefig(r'F:\software two\sumo-1.8.0/file2/img1/' + 'OCC_RL.png')
    # Second figure: mean occupancy of lanes H_0 and H_1.
    ax = car_simple_OOC[['H_all']].plot(fontsize =30)
    plt.title('Lane car OCC_mean for RL control ',fontsize = 30)
    fig = ax.get_figure()
    plt.xlabel('time /min',fontsize = 30)
    plt.ylabel('%',fontsize = 30)
    plt.show()
    fig.savefig(r'F:\software two\sumo-1.8.0/file2/img1/' + 'OCCmean_RL.png')
#traci控制
def traci_control_env_update(step_time,q_table):
    """Run one SUMO episode of `step_time` steps while training the Q-table.

    Each step records lane mean speeds (km/h) and occupancies (%) into the
    module-level history lists, performs one Q-learning update, and sets
    the ramp traffic light according to the (epsilon-greedy) policy.

    Args:
        step_time: number of simulation steps to run (== N_STATES).
        q_table: Q-table DataFrame, updated in place.

    Returns:
        The updated Q-table.
    """
    traci.start(sumoCmd)
    # Cap lane speeds: 27.78 m/s (~100 km/h) on H_0..H_2, then 20 m/s.
    for i in range(0,3):
        traci.lane.setMaxSpeed('H_'+str(i),27.78)
    traci.lane.setMaxSpeed('H_0',20)
    traci.lane.setMaxSpeed('H_1',20)
    traci.lane.setMaxSpeed('H_2',20)
    for step in range(0,step_time):
        # Record per-lane mean speed (m/s -> km/h) and occupancy (fraction -> %).
        H_0_meanspeed_list.append(traci.lane.getLastStepMeanSpeed('H_0')*3.6)
        H_1_meanspeed_list.append(traci.lane.getLastStepMeanSpeed('H_1')*3.6)
        H_2_meanspeed_list.append(traci.lane.getLastStepMeanSpeed('H_2')*3.6)
        H_01_meanspeed =(traci.lane.getLastStepMeanSpeed('H_0')*3.6+traci.lane.getLastStepMeanSpeed('H_1')*3.6)/2
        get_OOC0_list.append(traci.lane.getLastStepOccupancy('H_0')*100)
        get_OOC1_list.append(traci.lane.getLastStepOccupancy('H_1')*100)
        get_OOC2_list.append(traci.lane.getLastStepOccupancy('H_2')*100)
        get_OOCall_list.append((traci.lane.getLastStepOccupancy('H_0')+traci.lane.getLastStepOccupancy('H_1'))/4*100)
        # simulation delay (original comment: 仿真延迟)
        time.sleep(0.1)
        # RL --- Q-learning update for this step
        S = step
        A = choose_action(S, q_table)
        S_, R = get_env_feedback(S, A,H_01_meanspeed)  # take action & get next state and reward
        q_predict = q_table.loc[S, A]
        # NOTE(review): step_time == N_STATES at every call site, so this
        # condition is always False and q_target is always R — possibly
        # `step < step_time - 1` was intended; verify.
        if step_time < N_STATES:
            q_target = R + GAMMA * q_table.iloc[S_, :].max()   # next state is not terminal
        else:
            q_target = R     # next state is terminal
        q_table.loc[S, A] += ALPHA * (q_target - q_predict)  # update
        S = S_  # move to next state
        # traffic light control (original comment: 交通信号灯控制); a 'G' is
        # appended for the second signal head
        traci.trafficlight.setRedYellowGreenState(traci.trafficlight.getIDList()[0], choose_action(step, q_table)+'G') #trafficlight_control(step) trafficlight_control2(step)
        # advance the simulation one step (original comment: 步长控制)
        traci.simulationStep(step +1)
        # simulation_current_time = traci.simulation.getTime()
        # current simulation time (original comment: 目前时间)
        # print('simulation time is:',simulation_current_time)
        # get vehicle IDs (original comment: 获取车辆ID); value currently unused
        all_vehicle_id = traci.vehicle.getIDList()
        # get vehicle positions (original comment: 获取车辆位置)
        # all_vehicle_position = traci.vehicle.getPosition(step)
        # whether vehicles crossed the detector line (original comment: 获取车辆是否经过车线)
        try :  # screenshot hook (original comment: 获取截屏方法)
            pass
            # take a screenshot (original comment: 获取截屏)
            # traci.gui.screenshot('View #0',r'F:\software two\sumo-1.8.0/file1/img/img{}.jpg'.format(step),-1,-1)
            # try:
            #     if traci.inductionloop.getLastStepVehicleNumber() > 0:
            #         traci.trafficlight.setRedYellowGreenState("0", "GGGGG")
            # except:
            #     traci.close()
            #     break
        except :
            pass
        # print(H_0_meanspeed)
    traci.close(wait=True)
    return q_table
'''
trafficlight_ID_list = traci.trafficlight.getIDList()
RedYellowGreenState = traci.trafficlight.getRedYellowGreenState(trafficlight_ID_list[0])
# print(trafficlight_ID_list[0],RedYellowGreenState)
# Lane_car_ID = traci.lanearea.getIDList()
# print(Lane_car_ID)
lane_ID = traci.lane.getIDList()
'''
'''
主函数
'''
if __name__ == "__main__":
    # Run SUMO (original comment: 运行sumo)
    # traci.gui.setSchema('View #0','cus')  # switch GUI to realistic vehicles (original: 改变GUI为真实车辆)
    q_table = build_q_table(N_STATES, ACTIONS)
    for episode in tqdm(range(MAX_EPISODES)):
        # Reset the per-episode measurement histories.
        H_0_meanspeed_list =[]
        H_1_meanspeed_list =[]
        H_2_meanspeed_list =[]
        H_3_meanspeed_list =[]
        H_4_meanspeed_list =[]
        get_OOC0_list = []
        get_OOC1_list = []
        get_OOC2_list = []
        get_OOC3_list = []
        get_OOC4_list = []
        get_OOCall_list = []
        # Run one simulated episode, training q_table in place.
        q_table_train = traci_control_env_update(N_STATES,q_table)
        # if episode % 20 == 0:
        #     q_table_train.to_excel(r'F:\software two\sumo-1.8.0/file2/doc2/'+'qtable'+str(episode)+'.xlsx',index=False)
        episode +=1  # no effect: the for loop rebinds `episode` each iteration
        print('------------------------------------------------')
    # output_lane_speed()
    # output_lane_OOC()
    # ax= qq_table[['r']].plot(fontsize =30)
    # plt.title('qq_table ',fontsize = 30)
    # fig = ax.get_figure()
    # plt.xlabel('time',fontsize = 30)
    # plt.ylabel(' ',fontsize = 30)
    # plt.show()
    # fig.savefig(r'F:\software two\sumo-1.8.0/file1/img/' + 'qqtable.png')
|
from __future__ import annotations
import jax.numpy as np
from jax import vmap
from jax.tree_util import register_pytree_node_class, tree_flatten, tree_unflatten
from jax.flatten_util import ravel_pytree
import jax.scipy.optimize
from tensorflow_probability.substrates import jax as tfp
import ssm.distributions as ssmd
tfd = tfp.distributions
class Emissions:
    """
    Base class of emission distribution of an HMM
    .. math::
        p_t(x_t \mid z_t, u_t)
    where u_t are optional covariates.
    """
    def __init__(self, num_states: int) -> None:
        self._num_states = num_states
    @property
    def num_states(self) -> int:
        """Number of discrete latent states."""
        return self._num_states
    @property
    def emissions_shape(self):
        """Event shape of a single emission; defined by subclasses."""
        raise NotImplementedError
    def distribution(self, state, covariates=None, metadata=None):
        """
        Return the conditional distribution of emission x_t
        given state z_t and (optionally) covariates u_t.
        """
        raise NotImplementedError
    def log_likelihoods(self, data, covariates=None, metadata=None):
        """
        Compute log p(x_t | z_t=k) for all t and k.
        """
        # vmap over states k, then transpose so rows index time and
        # columns index states.
        inds = np.arange(self.num_states)
        return vmap(lambda k: self.distribution(k, covariates=covariates, metadata=metadata).log_prob(data))(inds).T
    # TODO: ensure_has_batched_dim?
    def m_step(self, data, posterior, covariates=None, metadata=None) -> Emissions:
        """By default, try to optimize the emission distribution via generic
        gradient-based optimization of the expected log likelihood.
        This function assumes that the Emissions subclass is a PyTree and
        that all of its leaf nodes are unconstrained parameters.
        Args:
            data (np.ndarray): the observed data
            posterior (HMMPosterior): the HMM posterior
            covariates (PyTree, optional): optional covariates with leaf shape (B, T, ...).
                Defaults to None.
            metadata (PyTree, optional): optional metadata with leaf shape (B, ...).
                Defaults to None.
        Returns:
            emissions (Emissions): updated emissions object
        """
        # Use ravel_pytree to convert the parameters (self) from a PyTree
        # to a flat array that the BFGS optimizer can handle; `unravel`
        # reconstructs an Emissions object from such an array.
        flat_self, unravel = ravel_pytree(self)
        def _objective(flat_emissions):
            # Negative expected log likelihood, normalized by data size.
            emissions = unravel(flat_emissions)
            f = lambda data, expected_states: \
                np.sum(emissions.log_likelihoods(data, covariates=covariates, metadata=metadata) * expected_states)
            lp = vmap(f)(data, posterior.expected_states).sum()
            return -lp / data.size
        results = jax.scipy.optimize.minimize(
            _objective,
            flat_self,
            method="bfgs",
            options=dict(maxiter=100))
        # Rebuild and return a new emissions object from the optimized flat params.
        return unravel(results.x)
class ExponentialFamilyEmissions(Emissions):
    # Subclasses set this to the ssmd exponential-family distribution class
    # used for the conjugate M-step.
    _emissions_distribution_class = None
    def __init__(self,
                 num_states: int,
                 emissions_distribution: ssmd.ExponentialFamilyDistribution=None,
                 emissions_distribution_prior: ssmd.ConjugatePrior=None) -> None:
        """Exponential Family Emissions for HMM.
        Can be initialized by passing in a pre-initialized
        ``emissions_distribution`` object (subclasses also accept raw
        parameters).
        Args:
            num_states (int): number of discrete states
            emissions_distribution (ssmd.ExponentialFamilyDistribution, optional): initialized
                emissions distribution. Defaults to None.
            emissions_distribution_prior (ssmd.ConjugatePrior, optional): initialized
                emissions distribution prior. Defaults to None.
        """
        super(ExponentialFamilyEmissions, self).__init__(num_states)
        self._distribution = emissions_distribution
        self._prior = emissions_distribution_prior
    def tree_flatten(self):
        # Distribution and prior are PyTree leaves; num_states is static aux data.
        children = (self._distribution, self._prior)
        aux_data = self.num_states
        return children, aux_data
    @classmethod
    def tree_unflatten(cls, aux_data, children):
        distribution, prior = children
        return cls(aux_data,
                   emissions_distribution=distribution,
                   emissions_distribution_prior=prior)
    @property
    def emissions_shape(self):
        """Event shape of a single emission."""
        return self._distribution.event_shape
    def distribution(self, state: int, covariates=None, metadata=None) -> tfd.Distribution:
        """Get the emissions distribution at the provided state.
        Args:
            state (int): discrete state
            covariates (PyTree, optional): optional covariates with leaf shape (B, T, ...).
                Defaults to None. (Unused here.)
            metadata (PyTree, optional): optional metadata with leaf shape (B, ...).
                Defaults to None. (Unused here.)
        Returns:
            emissions distribution (tfd.Distribution):
                emissions distribution at given state
        """
        return self._distribution[state]
    def m_step(self, dataset, posteriors, covariates=None, metadata=None) -> ExponentialFamilyEmissions:
        """Update the emissions distribution using a conjugate M-step.
        Operates over a batch of data (posterior must have the same batch dim).
        Args:
            dataset (np.ndarray): the observed dataset
            posteriors (HMMPosterior): the HMM posteriors
            covariates (PyTree, optional): optional covariates with leaf shape (B, T, ...).
                Defaults to None.
            metadata (PyTree, optional): optional metadata with leaf shape (B, ...).
                Defaults to None.
        Returns:
            emissions (ExponentialFamilyEmissions): updated emissions object
        """
        # Conjugate update: posterior over parameters given weighted data,
        # then set the distribution to the posterior mode (MAP estimate).
        conditional = self._emissions_distribution_class.compute_conditional(
            dataset, weights=posteriors.expected_states, prior=self._prior)
        self._distribution = self._emissions_distribution_class.from_params(
            conditional.mode())
        return self
@register_pytree_node_class
class BernoulliEmissions(ExponentialFamilyEmissions):
    _emissions_distribution_class = ssmd.IndependentBernoulli
    def __init__(self,
                 num_states: int,
                 probs: np.ndarray=None,
                 emissions_distribution: ssmd.IndependentBernoulli=None,
                 emissions_distribution_prior: ssmd.Beta=None) -> None:
        """Bernoulli Emissions for HMM.
        Can be initialized by specifying parameters or by passing in a pre-initialized
        ``emissions_distribution`` object.
        Args:
            num_states (int): number of discrete states
            probs (np.ndarray, optional): state-dependent emission probabilities. Defaults to None.
            emissions_distribution (ssmd.IndependentBernoulli, optional): initialized emissions distribution.
                Defaults to None.
            emissions_distribution_prior (ssmd.Beta, optional): initialized emissions distribution prior.
                Defaults to None.
        """
        assert probs is not None or emissions_distribution is not None
        if probs is not None:
            emissions_distribution = ssmd.IndependentBernoulli(probs=probs)
        if emissions_distribution_prior is None:
            # Weak Beta(1.1, 1.1) prior keeps probabilities off the 0/1 boundary.
            emissions_distribution_prior = ssmd.Beta(1.1, 1.1)
        super(BernoulliEmissions, self).__init__(num_states,
                                                 emissions_distribution,
                                                 emissions_distribution_prior)
@register_pytree_node_class
class GaussianEmissions(ExponentialFamilyEmissions):
    _emissions_distribution_class = ssmd.MultivariateNormalTriL
    def __init__(self,
                 num_states: int,
                 means: np.ndarray=None,
                 covariances: np.ndarray=None,
                 emissions_distribution: ssmd.MultivariateNormalTriL=None,
                 emissions_distribution_prior: ssmd.NormalInverseWishart=None) -> None:
        """Gaussian Emissions for HMM.
        Can be initialized by specifying parameters or by passing in a pre-initialized
        ``emissions_distribution`` object.
        Args:
            num_states (int): number of discrete states
            means (np.ndarray, optional): state-dependent emission means. Defaults to None.
            covariances (np.ndarray, optional): state-dependent emission covariances. Defaults to None.
            emissions_distribution (ssmd.MultivariateNormalTriL, optional): initialized emissions distribution.
                Defaults to None.
            emissions_distribution_prior (ssmd.NormalInverseWishart, optional): initialized emissions distribution prior.
                Defaults to None.
        """
        # Either both raw parameters or a ready-made distribution must be given.
        assert (means is not None and covariances is not None) \
            or emissions_distribution is not None
        if means is not None and covariances is not None:
            emissions_distribution = ssmd.MultivariateNormalTriL(means, covariances)
        super(GaussianEmissions, self).__init__(num_states,
                                                emissions_distribution,
                                                emissions_distribution_prior)
@register_pytree_node_class
class PoissonEmissions(ExponentialFamilyEmissions):
    _emissions_distribution_class = ssmd.IndependentPoisson
    def __init__(self,
                 num_states: int,
                 rates: np.ndarray=None,
                 emissions_distribution: ssmd.IndependentPoisson=None,
                 emissions_distribution_prior: ssmd.Gamma=None) -> None:
        """Poisson Emissions for HMM.
        Can be initialized by specifying parameters or by passing in a pre-initialized
        ``emissions_distribution`` object.
        Args:
            num_states (int): number of discrete states
            rates (np.ndarray, optional): state-dependent Poisson rates. Defaults to None.
            emissions_distribution (ssmd.IndependentPoisson, optional): pre-initialized emissions distribution.
                Defaults to None.
            emissions_distribution_prior (ssmd.Gamma, optional): pre-initialized emissions distribution prior.
                Defaults to None.
        """
        assert rates is not None or emissions_distribution is not None
        if rates is not None:
            emissions_distribution = ssmd.IndependentPoisson(rates)
        super(PoissonEmissions, self).__init__(num_states,
                                               emissions_distribution,
                                               emissions_distribution_prior)
|
"""Custom decoder definition for transducer model."""
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import torch
from espnet.nets.pytorch_backend.transducer.blocks import build_blocks
from espnet.nets.pytorch_backend.transducer.utils import check_batch_states
from espnet.nets.pytorch_backend.transducer.utils import check_state
from espnet.nets.pytorch_backend.transducer.utils import pad_sequence
from espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.transducer_decoder_interface import ExtendedHypothesis
from espnet.nets.transducer_decoder_interface import Hypothesis
from espnet.nets.transducer_decoder_interface import TransducerDecoderInterface
class CustomDecoder(TransducerDecoderInterface, torch.nn.Module):
    """Custom decoder module for transducer model.
    Args:
        odim: Output dimension.
        dec_arch: Decoder block architecture (type and parameters).
        input_layer: Input layer type.
        repeat_block: Number of times dec_arch is repeated.
        joint_activation_type: Joint network activation type.
        positional_encoding_type: Positional encoding type.
        positionwise_layer_type: Positionwise layer type.
        positionwise_activation_type: Positionwise activation type.
        dropout_rate_embed: Dropout rate for embedding layer.
        blank_id: Blank symbol ID.
    """
    def __init__(
        self,
        odim: int,
        dec_arch: List,
        input_layer: str = "embed",
        repeat_block: int = 0,
        joint_activation_type: str = "tanh",
        positional_encoding_type: str = "abs_pos",
        positionwise_layer_type: str = "linear",
        positionwise_activation_type: str = "relu",
        dropout_rate_embed: float = 0.0,
        blank_id: int = 0,
    ):
        """Construct a CustomDecoder object."""
        torch.nn.Module.__init__(self)
        self.embed, self.decoders, ddim, _ = build_blocks(
            "decoder",
            odim,
            input_layer,
            dec_arch,
            repeat_block=repeat_block,
            positional_encoding_type=positional_encoding_type,
            positionwise_layer_type=positionwise_layer_type,
            positionwise_activation_type=positionwise_activation_type,
            dropout_rate_embed=dropout_rate_embed,
            padding_idx=blank_id,
        )
        self.after_norm = LayerNorm(ddim)
        self.dlayers = len(self.decoders)
        self.dunits = ddim
        self.odim = odim
        self.blank_id = blank_id
    def set_device(self, device: torch.device):
        """Set GPU device to use.
        Args:
            device: Device ID.
        """
        self.device = device
    def init_state(
        self,
        batch_size: Optional[int] = None,
    ) -> List[Optional[torch.Tensor]]:
        """Initialize decoder states.
        Args:
            batch_size: Batch size. (Unused; kept for interface compatibility.)
        Returns:
            state: Initial decoder hidden states. [N x None]
        """
        state = [None] * self.dlayers
        return state
    def forward(
        self, dec_input: torch.Tensor, dec_mask: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode label ID sequences.
        Args:
            dec_input: Label ID sequences. (B, U)
            dec_mask: Label mask sequences. (B, U)
        Return:
            dec_output: Decoder output sequences. (B, U, D_dec)
            dec_output_mask: Mask of decoder output sequences. (B, U)
        """
        dec_input = self.embed(dec_input)
        dec_output, dec_mask = self.decoders(dec_input, dec_mask)
        dec_output = self.after_norm(dec_output)
        return dec_output, dec_mask
    def score(
        self, hyp: Hypothesis, cache: Dict[str, Any]
    ) -> Tuple[torch.Tensor, List[Optional[torch.Tensor]], torch.Tensor]:
        """One-step forward hypothesis.
        Args:
            hyp: Hypothesis.
            cache: Pairs of (dec_out, dec_state) for each label sequence. (key)
        Returns:
            dec_out: Decoder output sequence. (1, D_dec)
            dec_state: Decoder hidden states. [N x (1, U, D_dec)]
            lm_label: Label ID for LM. (1,)
        """
        labels = torch.tensor([hyp.yseq], device=self.device)
        lm_label = labels[:, -1]
        # The label-ID string is the cache key for this hypothesis.
        str_labels = "".join(list(map(str, hyp.yseq)))
        if str_labels in cache:
            dec_out, dec_state = cache[str_labels]
        else:
            dec_out_mask = subsequent_mask(len(hyp.yseq)).unsqueeze_(0)
            new_state = check_state(hyp.dec_state, (labels.size(1) - 1), self.blank_id)
            dec_out = self.embed(labels)
            dec_state = []
            for s, decoder in zip(new_state, self.decoders):
                dec_out, dec_out_mask = decoder(dec_out, dec_out_mask, cache=s)
                dec_state.append(dec_out)
            dec_out = self.after_norm(dec_out[:, -1])
            cache[str_labels] = (dec_out, dec_state)
        return dec_out[0], dec_state, lm_label
    def batch_score(
        self,
        hyps: Union[List[Hypothesis], List[ExtendedHypothesis]],
        dec_states: List[Optional[torch.Tensor]],
        cache: Dict[str, Any],
        use_lm: bool,
    ) -> Tuple[torch.Tensor, List[Optional[torch.Tensor]], torch.Tensor]:
        """One-step forward hypotheses.
        Args:
            hyps: Hypotheses.
            dec_states: Decoder hidden states. [N x (B, U, D_dec)]
            cache: Pairs of (h_dec, dec_states) for each label sequences. (keys)
            use_lm: Whether to compute label ID sequences for LM.
        Returns:
            dec_out: Decoder output sequences. (B, D_dec)
            dec_states: Decoder hidden states. [N x (B, U, D_dec)]
            lm_labels: Label ID sequences for LM. (B,)
        """
        final_batch = len(hyps)
        process = []
        done = [None] * final_batch
        # Split hypotheses into cached results and those still to compute.
        for i, hyp in enumerate(hyps):
            str_labels = "".join(list(map(str, hyp.yseq)))
            if str_labels in cache:
                done[i] = cache[str_labels]
            else:
                process.append((str_labels, hyp.yseq, hyp.dec_state))
        if process:
            labels = pad_sequence([p[1] for p in process], self.blank_id)
            # BUG FIX: the legacy `torch.LongTensor(data, device=...)`
            # constructor rejects non-CPU devices; build on CPU, then move.
            labels = torch.LongTensor(labels).to(self.device)
            p_dec_states = self.create_batch_states(
                self.init_state(),
                [p[2] for p in process],
                labels,
            )
            dec_out = self.embed(labels)
            dec_out_mask = (
                subsequent_mask(labels.size(-1))
                .unsqueeze_(0)
                .expand(len(process), -1, -1)
            )
            new_states = []
            for s, decoder in zip(p_dec_states, self.decoders):
                dec_out, dec_out_mask = decoder(dec_out, dec_out_mask, cache=s)
                new_states.append(dec_out)
            dec_out = self.after_norm(dec_out[:, -1])
        # Merge freshly computed results back into `done`, filling the cache.
        j = 0
        for i in range(final_batch):
            if done[i] is None:
                state = self.select_state(new_states, j)
                done[i] = (dec_out[j], state)
                cache[process[j][0]] = (dec_out[j], state)
                j += 1
        dec_out = torch.stack([d[0] for d in done])
        dec_states = self.create_batch_states(
            dec_states, [d[1] for d in done], [[0] + h.yseq for h in hyps]
        )
        if use_lm:
            # BUG FIX: same legacy-constructor device issue as above.
            lm_labels = torch.LongTensor([hyp.yseq[-1] for hyp in hyps]).to(
                self.device
            )
            return dec_out, dec_states, lm_labels
        return dec_out, dec_states, None
    def select_state(
        self, states: List[Optional[torch.Tensor]], idx: int
    ) -> List[Optional[torch.Tensor]]:
        """Get specified ID state from decoder hidden states.
        Args:
            states: Decoder hidden states. [N x (B, U, D_dec)]
            idx: State ID to extract.
        Returns:
            state_idx: Decoder hidden state for given ID. [N x (1, U, D_dec)]
        """
        if states[0] is None:
            return states
        state_idx = [states[layer][idx] for layer in range(self.dlayers)]
        return state_idx
    def create_batch_states(
        self,
        states: List[Optional[torch.Tensor]],
        new_states: List[Optional[torch.Tensor]],
        check_list: List[List[int]],
    ) -> List[Optional[torch.Tensor]]:
        """Create decoder hidden states sequences.
        Args:
            states: Decoder hidden states. [N x (B, U, D_dec)]
            new_states: Decoder hidden states. [B x [N x (1, U, D_dec)]]
            check_list: Label ID sequences.
        Returns:
            states: New decoder hidden states. [N x (B, U, D_dec)]
        """
        if new_states[0][0] is None:
            return states
        # Pad each per-hypothesis state to the longest label sequence length.
        max_len = max(len(elem) for elem in check_list) - 1
        for layer in range(self.dlayers):
            states[layer] = check_batch_states(
                [s[layer] for s in new_states], max_len, self.blank_id
            )
        return states
|
# C01P13 - Integer prime factorization
# Video - https://youtu.be/CR_19VUt0tI
def is_prime(n):
    """Return True if n is a prime number.

    Trial division up to sqrt(n): any composite n has a divisor no larger
    than its square root, so checking further is wasted work. Even numbers
    other than 2 are rejected up front so only odd divisors are tried.
    """
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3 are prime
    if n % 2 == 0:
        return False
    d = 3
    while d * d <= n:
        if n % d == 0:
            return False
        d += 2
    return True
def next_prime(n):
    """Return the smallest prime strictly greater than n."""
    candidate = n + 1
    while not is_prime(candidate):
        candidate += 1
    return candidate
def prime_factorization(n):
    """Return the prime factorization of n as (prime, exponent) pairs in
    increasing prime order.

    Trial divisors need not be prime: by the time a composite p is
    reached, all of its prime factors have already been divided out of n,
    so it can never divide n. Once p*p > n the remaining n (if > 1) is
    itself prime — this avoids the original's costly primality scans.
    """
    result = []
    p = 2
    while p * p <= n:
        exponent = 0
        while n % p == 0:
            exponent += 1
            n //= p
        if exponent:
            result.append((p, exponent))
        p += 1
    if n > 1:
        # Leftover factor greater than sqrt(original n) is prime.
        result.append((n, 1))
    return result
# (input, expected factorization) pairs used as a lightweight test suite.
tests = [
    (2, [(2, 1)]),
    (4, [(2, 2)]),
    (10, [(2, 1), (5, 1)]),  # This is 2^1 * 5^1
    (14, [(2, 1), (7, 1)]),
    (356, [(2, 2), (89, 1)]),
    (89, [(89, 1)]),  # 89 is a prime number
    (1000, [(2, 3), (5, 3)])
]
# Print True/False for each case, followed by the computed factorization.
for n, expected in tests:
    result = prime_factorization(n)
    print(result == expected, result)
|
"""
SensorPush for Home Assistant
See https://github.com/rsnodgrass/hass-sensorpush
"""
import logging
import time
from datetime import timedelta
import voluptuous as vol
from requests.exceptions import HTTPError, ConnectTimeout
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.dispatcher import dispatcher_send, async_dispatcher_connect
from homeassistant.helpers.event import track_time_interval
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.const import CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_SCAN_INTERVAL
LOG = logging.getLogger(__name__)
# hass.data keys and dispatcher signal names shared with platform code.
SENSORPUSH_DOMAIN = 'sensorpush'
SENSORPUSH_SERVICE = 'sensorpush_service'
SENSORPUSH_SAMPLES = 'sensorpush_samples'
SIGNAL_SENSORPUSH_UPDATED = 'sensorpush_updated'
NOTIFICATION_ID = 'sensorpush_notification'
NOTIFICATION_TITLE = 'SensorPush'
# Extra state attribute names exposed on each sensor entity.
ATTR_BATTERY_VOLTAGE = 'battery_voltage'
ATTR_DEVICE_ID = 'device_id'
ATTR_OBSERVED_TIME = 'observed_time'
CONF_UNIT_SYSTEM = 'unit_system'
CONF_MAXIMUM_AGE = 'maximum_age' # maximum age (in minutes) of observations before they expire
UNIT_SYSTEM_IMPERIAL = 'imperial'
UNIT_SYSTEM_METRIC = 'metric'
# Unit labels per unit system.
UNIT_SYSTEMS = {
    UNIT_SYSTEM_IMPERIAL: {
        'system': 'imperial',
        'temperature': '°F',
        'humidity': '%' # 'Rh'
    },
    UNIT_SYSTEM_METRIC: {
        'system': 'metric',
        'temperature': '°C',
        'humidity': '%' # 'Rh'
    }
}
# YAML configuration schema: credentials required; scan interval is in seconds.
CONFIG_SCHEMA = vol.Schema({
    SENSORPUSH_DOMAIN: vol.Schema({
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_SCAN_INTERVAL, default=60): cv.positive_int,
        vol.Optional(CONF_UNIT_SYSTEM, default='imperial'): cv.string,
        vol.Optional(CONF_MAXIMUM_AGE, default=30): cv.positive_int
    })
}, extra=vol.ALLOW_EXTRA
)
DATA_UPDATED = "sensorpush_data_updated"
def setup(hass, config):
    """Initialize the SensorPush integration.

    Connects to the SensorPush cloud API, stores the service handle and
    latest samples in ``hass.data``, registers an ``update`` service, and
    schedules periodic refreshes on the configured scan interval.

    Returns:
        True on success, False when the cloud connection fails.
    """
    conf = config[SENSORPUSH_DOMAIN]
    username = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASSWORD)
    try:
        from pysensorpush import PySensorPush
        sensorpush_service = PySensorPush(username, password)
        #if not sensorpush_service.is_connected:
        #    return False
        # FIXME: log warning if no sensors found?
        # share reference to the service with other components/platforms running within HASS
        hass.data[SENSORPUSH_SERVICE] = sensorpush_service
        hass.data[SENSORPUSH_SAMPLES] = sensorpush_service.samples
    except (ConnectTimeout, HTTPError) as ex:
        LOG.error("Unable to connect to SensorPush: %s", str(ex))
        hass.components.persistent_notification.create(
            f"Error: {ex}<br />You will need to restart Home Assistant after fixing.",
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID,
        )
        return False
    def refresh_sensorpush_data(event_time):
        """Call SensorPush service to refresh latest data"""
        LOG.debug("Updating data from SensorPush cloud API")
        # TODO: discovering new devices (and auto-configuring HASS sensors) is not supported
        #hass.data[SENSORPUSH_SERVICE].update(update_devices=True)
        # retrieve the latest samples from the SensorPush cloud service
        latest_samples = hass.data[SENSORPUSH_SERVICE].samples
        if latest_samples:
            hass.data[SENSORPUSH_SAMPLES] = latest_samples
            # notify all listeners (sensor entities) that they may have new data
            dispatcher_send(hass, SIGNAL_SENSORPUSH_UPDATED)
        else:
            # FIX: Logger.warn is a deprecated alias of Logger.warning
            LOG.warning("Unable to fetch latest samples from SensorPush cloud")
    # allow triggering a refresh manually via the sensorpush.update service
    hass.services.register(SENSORPUSH_DOMAIN, 'update', refresh_sensorpush_data)
    # automatically update SensorPush data (samples) on the scan interval
    scan_interval = timedelta(seconds = conf.get(CONF_SCAN_INTERVAL))
    track_time_interval(hass, refresh_sensorpush_data, scan_interval)
    return True
class SensorPushEntity(RestoreEntity):
    """Base Entity class for SensorPush devices.

    Subscribes to the SIGNAL_SENSORPUSH_UPDATED dispatcher signal and reads
    its state from the shared sample cache in ``hass.data[SENSORPUSH_SAMPLES]``.
    Restores its last known state across Home Assistant restarts.
    """

    def __init__(self, hass, config, name_suffix, sensor_info, unit_system, field_name):
        """Store sensor metadata and initialize state.

        :param name_suffix: human-readable suffix appended to the sensor name
        :param sensor_info: dict from the SensorPush API; 'id' and 'name' keys are read here
        :param field_name: key in the sample record this entity reports (e.g. a measurement field)
        """
        self._hass = hass
        self._sensor_info = sensor_info
        self._unit_system = unit_system
        self._device_id = sensor_info.get('id')
        self._field_name = field_name
        self._attrs = {}
        self._name = f"{sensor_info.get('name')} {name_suffix}"
        # Bug fix: ensure _state exists before the first dispatcher update or
        # state restore; otherwise reading the `state` property raises AttributeError.
        self._state = None

    @property
    def name(self):
        """Return the display name for this sensor"""
        return self._name

    @property
    def icon(self):
        return 'mdi:gauge'

    @property
    def state(self):
        return self._state

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs

    @callback
    def _update_callback(self):
        """Refresh state from the shared sample cache and notify HASS."""
        samples = self._hass.data[SENSORPUSH_SAMPLES]
        sensor_results = samples['sensors']
        sensor_data = sensor_results[self._device_id]
        # assumes samples are ordered newest-first — TODO confirm against pysensorpush
        latest_result = sensor_data[0]

        # FIXME: check data['observed'] time against config[CONF_MAXIMUM_AGE], ignoring stale entries
        self._state = float(latest_result.get(self._field_name))
        self._attrs.update({
            ATTR_OBSERVED_TIME : latest_result['observed'],
            ATTR_BATTERY_VOLTAGE : self._sensor_info.get('battery_voltage') # FIXME: not updated except on restarts of Home Assistant
        })
        # lazy %-formatting so the message is only built when debug logging is on;
        # unit_of_measurement is expected from a subclass/base — TODO confirm
        LOG.debug("Updated %s to %s %s : %s",
                  self._name, self._state, self.unit_of_measurement, latest_result)

        # let Home Assistant know that SensorPush data for this entity has been updated
        self.async_schedule_update_ha_state()

    async def async_added_to_hass(self) -> None:
        """Register update callbacks and restore the previous state, if any."""
        await super().async_added_to_hass()

        # register callback when cached SensorPush data has been updated
        async_dispatcher_connect(self.hass, SIGNAL_SENSORPUSH_UPDATED, self._update_callback)

        # on restart, attempt to restore previous state (see https://aarongodfrey.dev/programming/restoring-an-entity-in-home-assistant/)
        state = await self.async_get_last_state()
        if not state:
            return
        # NOTE(review): restored state.state is a string, while _update_callback
        # stores a float — callers comparing numerically should be aware.
        self._state = state.state

        # restore any attributes
        for attribute in [ATTR_OBSERVED_TIME, ATTR_BATTERY_VOLTAGE]:
            if attribute in state.attributes:
                self._attrs[attribute] = state.attributes[attribute]
        LOG.debug("Restored sensor %s previous state %s: %s", self._name, self._state, self._attrs)

        async_dispatcher_connect(
            self._hass, DATA_UPDATED, self._schedule_immediate_update
        )

    @callback
    def _schedule_immediate_update(self):
        self.async_schedule_update_ha_state(True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.