# Program 02d : Power series solution of second order ODE.
# See Example 8.
from sympy import dsolve, Function, pprint
from sympy.abc import t
x = Function('x')
ODE2 = x(t).diff(t, 2) + t**2*x(t).diff(t) - x(t)
pprint(dsolve(ODE2, hint='2nd_power_series_ordinary', n=6))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbdishMaterialInfo(object):
def __init__(self):
self._add_price = None
self._create_user = None
self._ext_info = None
self._material_id = None
self._material_img = None
self._material_name = None
self._material_type = None
self._max_num = None
self._merchant_id = None
self._public_id = None
self._update_user = None
@property
def add_price(self):
return self._add_price
@add_price.setter
def add_price(self, value):
self._add_price = value
@property
def create_user(self):
return self._create_user
@create_user.setter
def create_user(self, value):
self._create_user = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def material_id(self):
return self._material_id
@material_id.setter
def material_id(self, value):
self._material_id = value
@property
def material_img(self):
return self._material_img
@material_img.setter
def material_img(self, value):
self._material_img = value
@property
def material_name(self):
return self._material_name
@material_name.setter
def material_name(self, value):
self._material_name = value
@property
def material_type(self):
return self._material_type
@material_type.setter
def material_type(self, value):
self._material_type = value
@property
def max_num(self):
return self._max_num
@max_num.setter
def max_num(self, value):
self._max_num = value
@property
def merchant_id(self):
return self._merchant_id
@merchant_id.setter
def merchant_id(self, value):
self._merchant_id = value
@property
def public_id(self):
return self._public_id
@public_id.setter
def public_id(self, value):
self._public_id = value
@property
def update_user(self):
return self._update_user
@update_user.setter
def update_user(self, value):
self._update_user = value
def to_alipay_dict(self):
params = dict()
if self.add_price:
if hasattr(self.add_price, 'to_alipay_dict'):
params['add_price'] = self.add_price.to_alipay_dict()
else:
params['add_price'] = self.add_price
if self.create_user:
if hasattr(self.create_user, 'to_alipay_dict'):
params['create_user'] = self.create_user.to_alipay_dict()
else:
params['create_user'] = self.create_user
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.material_id:
if hasattr(self.material_id, 'to_alipay_dict'):
params['material_id'] = self.material_id.to_alipay_dict()
else:
params['material_id'] = self.material_id
if self.material_img:
if hasattr(self.material_img, 'to_alipay_dict'):
params['material_img'] = self.material_img.to_alipay_dict()
else:
params['material_img'] = self.material_img
if self.material_name:
if hasattr(self.material_name, 'to_alipay_dict'):
params['material_name'] = self.material_name.to_alipay_dict()
else:
params['material_name'] = self.material_name
if self.material_type:
if hasattr(self.material_type, 'to_alipay_dict'):
params['material_type'] = self.material_type.to_alipay_dict()
else:
params['material_type'] = self.material_type
if self.max_num:
if hasattr(self.max_num, 'to_alipay_dict'):
params['max_num'] = self.max_num.to_alipay_dict()
else:
params['max_num'] = self.max_num
if self.merchant_id:
if hasattr(self.merchant_id, 'to_alipay_dict'):
params['merchant_id'] = self.merchant_id.to_alipay_dict()
else:
params['merchant_id'] = self.merchant_id
if self.public_id:
if hasattr(self.public_id, 'to_alipay_dict'):
params['public_id'] = self.public_id.to_alipay_dict()
else:
params['public_id'] = self.public_id
if self.update_user:
if hasattr(self.update_user, 'to_alipay_dict'):
params['update_user'] = self.update_user.to_alipay_dict()
else:
params['update_user'] = self.update_user
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbdishMaterialInfo()
if 'add_price' in d:
o.add_price = d['add_price']
if 'create_user' in d:
o.create_user = d['create_user']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'material_id' in d:
o.material_id = d['material_id']
if 'material_img' in d:
o.material_img = d['material_img']
if 'material_name' in d:
o.material_name = d['material_name']
if 'material_type' in d:
o.material_type = d['material_type']
if 'max_num' in d:
o.max_num = d['max_num']
if 'merchant_id' in d:
o.merchant_id = d['merchant_id']
if 'public_id' in d:
o.public_id = d['public_id']
if 'update_user' in d:
o.update_user = d['update_user']
return o
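
# Hedged usage sketch (not part of the generated Alipay SDK model): round-trip a
# dish material through the helpers above. Field values are invented for
# illustration only.
if __name__ == '__main__':
    info = KbdishMaterialInfo()
    info.material_id = 'M001'
    info.material_name = 'Extra cheese'
    info.add_price = '2.00'
    info.max_num = 3
    as_dict = info.to_alipay_dict()
    print(json.dumps(as_dict))                      # uses the json import at the top of this file
    restored = KbdishMaterialInfo.from_alipay_dict(as_dict)
    print(restored.material_name)                   # -> 'Extra cheese'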
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 1 16:32:54 2021
@author: caear
"""
import random
import pylab
def getMeanAndStd(X):
mean = sum(X)/float(len(X))
tot = 0.0
for x in X:
tot += (x - mean)**2
std = (tot/len(X))**0.5
return mean, std
def plotMeans(numDice, numRolls, numBins, legend, color, style):
means = []
for i in range(numRolls//numDice):
vals = 0
for j in range(numDice):
vals += 5*random.random()
means.append(vals/float(numDice))
pylab.hist(means, numBins, color = color, label = legend,
weights = pylab.array(len(means)*[1])/len(means),
hatch = style)
return getMeanAndStd(means)
mean, std = plotMeans(1, 1000000, 19, '1 die', 'b', '*')
print('Mean of rolling 1 die =', str(mean) + ',', 'Std =', std)
mean, std = plotMeans(50, 1000000, 19, 'Mean of 50 dice', 'r', '//')
pylab.title('Rolling Continuous Dice')
pylab.xlabel('Value')
pylab.ylabel('Probability')
pylab.legend()
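
# Hedged check (not in the original script): by the central limit theorem the mean
# of numDice independent uniform(0, 5) rolls should have a standard deviation of
# (5 / sqrt(12)) / sqrt(numDice). The lines below print the theoretical values for
# comparison and display the figure, which the script above builds but never shows.
print('Theoretical std for 1 die   =', 5 / 12**0.5)
print('Theoretical std for 50 dice =', 5 / 12**0.5 / 50**0.5)
pylab.show()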
|
#
# Database access functions for the web forum.
#
import time
import psycopg2
# Database connection
def ConnectDB():
    '''Return a new connection to the forum database.'''
    try:
        DB = psycopg2.connect("dbname=forum")
    except psycopg2.Error as e:
        print("Connectivity Problem:", e)
        raise
    return DB
# Get posts from database.
def GetAllPosts():
'''Get all the posts from the database, sorted with the newest first.
Returns:
A list of dictionaries, where each dictionary has a 'content' key
pointing to the post content, and 'time' key pointing to the time
it was posted.
'''
DB = ConnectDB()
c = DB.cursor()
c.execute("SELECT time, content FROM posts ORDER BY time DESC")
posts = [{'content': str(row[1]), 'time': str(row[0])}
for row in c.fetchall()]
DB.close()
return posts
# Add a post to the database.
def AddPost(content):
'''Add a new post to the database.
Args:
content: The text content of the new post.
'''
DB = ConnectDB()
c = DB.cursor()
c.execute("INSERT INTO posts (content) VALUES (%s);", (content,))
c.execute("UPDATE posts SET content='cheese' WHERE content LIKE '%spam%';")
c.execute("DELETE FROM posts WHERE content='cheese';")
DB.commit()
DB.close()
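
# Hedged usage sketch (not part of the original module), assuming a local
# PostgreSQL database named "forum" with a "posts" table is available:
if __name__ == '__main__':
    AddPost("Hello, forum!")
    for post in GetAllPosts():
        print(post['time'], post['content'])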
|
"""
Class Method / Function decorators
**Copyright**::
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Originally Developed by Privex Inc. |
| License: X11 / MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| (+) Kale (@kryogenic) [Privex] |
| |
+===================================================+
Copyright 2019 Privex Inc. ( https://www.privex.io )
"""
import asyncio
import functools
import logging
from enum import Enum
from time import sleep
from typing import Any, Union, List
from privex.helpers.cache import cached, async_adapter_get
from privex.helpers.common import empty, is_true
from privex.helpers.asyncx import await_if_needed
DEF_RETRY_MSG = "Exception while running '%s', will retry %d more times."
DEF_FAIL_MSG = "Giving up after attempting to retry function '%s' %d times."
log = logging.getLogger(__name__)
try:
from privex.helpers.asyncx import async_sync
except ImportError:
log.debug('privex.helpers __init__ failed to import "asyncx", not loading async helpers')
pass
def retry_on_err(max_retries: int = 3, delay: Union[int, float] = 3, **retry_conf):
"""
Decorates a function or class method, wraps the function/method with a try/catch block, and will automatically
re-run the function with the same arguments up to `max_retries` time after any exception is raised, with a
``delay`` second delay between re-tries.
If it still throws an exception after ``max_retries`` retries, it will log the exception details with ``fail_msg``,
and then re-raise it.
Usage (retry up to 5 times, 1 second between retries, stop immediately if IOError is detected)::
>>> @retry_on_err(5, 1, fail_on=[IOError])
... def my_func(self, some=None, args=None):
... if some == 'io': raise IOError()
... raise FileExistsError()
This will be re-ran 5 times, 1 second apart after each exception is raised, before giving up::
>>> my_func()
Where-as this one will immediately re-raise the caught IOError on the first attempt, as it's passed in ``fail_on``::
>>> my_func('io')
.. Attention:: For safety reasons, by default ``max_ignore`` is set to ``100``. This means after 100 retries where an
exception was ignored, the decorator will give up and raise the last exception.
This is to prevent the risk of infinite loops hanging your application. If you are 100% certain that the
function you've wrapped, and/or the exceptions passed in ``ignore`` cannot cause an infinite retry loop, then
you can pass ``max_ignore=False`` to the decorator to disable failure after ``max_ignore`` ignored exceptions.
:param int max_retries: Maximum total retry attempts before giving up
:param float delay: Amount of time in seconds to sleep before re-trying the wrapped function
:param retry_conf: Less frequently used arguments, pass in as keyword args (see below)
:key list fail_on: A list() of Exception types that should result in immediate failure (don't retry, raise)
:key list ignore: A list() of Exception types that should be ignored (will retry, but without incrementing the failure counter)
:key int|bool max_ignore: (Default: ``100``) If an exception is raised while retrying, and more than this
many exceptions (listed in ``ignore``) have been ignored during retry attempts, then give up
and raise the last exception.
This feature is designed to prevent "ignored" exceptions causing an infinite retry loop. By
default ``max_ignore`` is set to ``100``, but you can increase/decrease this as needed.
You can also set it to ``False`` to disable raising when too many exceptions are ignored - however, it's
    strongly recommended not to disable ``max_ignore``, especially if you have ``instance_match=True``,
as it could cause an infinite retry loop which hangs your application.
:key bool instance_match: (Default: ``False``) If this is set to ``True``, then the exception type comparisons for ``fail_on``
and ``ignore`` will compare using ``isinstance(e, x)`` instead of ``type(e) is x``.
If this is enabled, then exceptions listed in ``fail_on`` and ``ignore`` will also **match sub-classes** of
the listed exceptions, instead of exact matches.
:key str retry_msg: Override the log message used for retry attempts. First message param %s is func name,
second message param %d is retry attempts remaining
:key str fail_msg: Override the log message used after all retry attempts are exhausted. First message param %s
    is func name, and second param %d is the number of times retried.
"""
retry_msg: str = retry_conf.get('retry_msg', DEF_RETRY_MSG)
fail_msg: str = retry_conf.get('fail_msg', DEF_FAIL_MSG)
instance_match: bool = is_true(retry_conf.get('instance_match', False))
fail_on: List[type] = list(retry_conf.get('fail_on', []))
ignore_ex: List[type] = list(retry_conf.get('ignore', []))
max_ignore: Union[bool, int] = retry_conf.get('max_ignore', 100)
def _decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
retries = int(kwargs.pop('retry_attempts', 0))
ignore_count = int(kwargs.pop('ignore_count', 0))
try:
return f(*args, **kwargs)
except Exception as e:
_fail = isinstance(e, tuple(fail_on)) if instance_match else type(e) in fail_on
if _fail:
log.warning('Giving up. Re-raising exception %s (as requested by `fail_on` arg)', type(e))
raise e
if max_ignore is not False and ignore_count > max_ignore:
log.warning('Giving up. Ignored too many exceptions (max_ignore: %d, ignore_count: %d). '
'Re-raising exception %s.', max_ignore, ignore_count, type(e))
raise e
if retries < max_retries:
log.info('%s - %s', type(e), str(e))
log.info(retry_msg, f.__name__, max_retries - retries)
sleep(delay)
# If 'instance_match' is enabled, we check if the exception was an instance of any of the passed exception types,
# otherwise we use exact exception type comparison against the list.
# _ignore is True if we should ignore this exception (don't increment retries), or False if we should increment.
_ignore = isinstance(e, tuple(ignore_ex)) if instance_match else type(e) in ignore_ex
if _ignore:
log.debug(
" >> (?) Ignoring exception '%s' as exception is in 'ignore' list. Ignore Count: %d // "
"Max Ignores: %d // Instance Match: %s", type(e), ignore_count, max_ignore, instance_match
)
kwargs['retry_attempts'] = retries if _ignore else retries + 1
kwargs['ignore_count'] = ignore_count + 1 if _ignore else ignore_count
return wrapper(*args, **kwargs)
log.exception(fail_msg, f.__name__, max_retries)
raise e
return wrapper
return _decorator
def async_retry(max_retries: int = 3, delay: Union[int, float] = 3, **retry_conf):
"""
AsyncIO coroutine compatible version of :func:`.retry_on_err` - for painless automatic retry-on-exception for async code.
Decorates an AsyncIO coroutine (``async def``) function or class method, wraps the function/method with a try/catch block, and
will automatically re-run the function with the same arguments up to `max_retries` time after any exception is raised, with a
``delay`` second delay between re-tries.
If it still throws an exception after ``max_retries`` retries, it will log the exception details with ``fail_msg``,
and then re-raise it.
Usage (retry up to 5 times, 1 second between retries, stop immediately if IOError is detected)::
>>> from privex.helpers import async_retry
>>>
>>> @async_retry(5, 1, fail_on=[IOError])
... async def my_func(some=None, args=None):
... if some == 'io': raise IOError()
... raise FileExistsError()
...
This will be re-ran 5 times, 1 second apart after each exception is raised, before giving up::
>>> await my_func()
Where-as this one will immediately re-raise the caught IOError on the first attempt, as it's passed in ``fail_on``::
>>> await my_func('io')
We can also use ``ignore_on`` to "ignore" certain exceptions. Ignored exceptions cause the function to be retried with a delay, as
normal, but without incrementing the total retries counter.
>>> from privex.helpers import async_retry
>>> import random
>>>
>>> @async_retry(5, 1, fail_on=[IOError], ignore=[ConnectionResetError])
... async def my_func(some=None, args=None):
... if random.randint(1,10) > 7: raise ConnectionResetError()
... if some == 'io': raise IOError()
... raise FileExistsError()
...
To show this at work, we've enabled debug logging for you to see::
>>> await my_func()
[INFO] <class 'ConnectionResetError'> -
[INFO] Exception while running 'my_func', will retry 5 more times.
[DEBUG] >> (?) Ignoring exception '<class 'ConnectionResetError'>' as exception is in 'ignore' list.
Ignore Count: 0 // Max Ignores: 100 // Instance Match: False
[INFO] <class 'FileExistsError'> -
[INFO] Exception while running 'my_func', will retry 5 more times.
[INFO] <class 'ConnectionResetError'> -
[INFO] Exception while running 'my_func', will retry 4 more times.
[DEBUG] >> (?) Ignoring exception '<class 'ConnectionResetError'>' as exception is in 'ignore' list.
Ignore Count: 1 // Max Ignores: 100 // Instance Match: False
[INFO] <class 'FileExistsError'> -
[INFO] Exception while running 'my_func', will retry 4 more times.
    As you can see above, when an ignored exception (``ConnectionResetError``) occurs, the remaining retry attempt count doesn't go down.
Instead, only the "Ignore Count" goes up.
.. Attention:: For safety reasons, by default ``max_ignore`` is set to ``100``. This means after 100 retries where an
exception was ignored, the decorator will give up and raise the last exception.
This is to prevent the risk of infinite loops hanging your application. If you are 100% certain that the
function you've wrapped, and/or the exceptions passed in ``ignore`` cannot cause an infinite retry loop, then
you can pass ``max_ignore=False`` to the decorator to disable failure after ``max_ignore`` ignored exceptions.
:param int max_retries: Maximum total retry attempts before giving up
:param float delay: Amount of time in seconds to sleep before re-trying the wrapped function
:param retry_conf: Less frequently used arguments, pass in as keyword args (see below)
:key list fail_on: A list() of Exception types that should result in immediate failure (don't retry, raise)
:key list ignore: A list() of Exception types that should be ignored (will retry, but without incrementing the failure counter)
:key int|bool max_ignore: (Default: ``100``) If an exception is raised while retrying, and more than this
many exceptions (listed in ``ignore``) have been ignored during retry attempts, then give up
and raise the last exception.
This feature is designed to prevent "ignored" exceptions causing an infinite retry loop. By
default ``max_ignore`` is set to ``100``, but you can increase/decrease this as needed.
You can also set it to ``False`` to disable raising when too many exceptions are ignored - however, it's
    strongly recommended not to disable ``max_ignore``, especially if you have ``instance_match=True``,
as it could cause an infinite retry loop which hangs your application.
:key bool instance_match: (Default: ``False``) If this is set to ``True``, then the exception type comparisons for ``fail_on``
and ``ignore`` will compare using ``isinstance(e, x)`` instead of ``type(e) is x``.
If this is enabled, then exceptions listed in ``fail_on`` and ``ignore`` will also **match sub-classes** of
the listed exceptions, instead of exact matches.
:key str retry_msg: Override the log message used for retry attempts. First message param %s is func name,
second message param %d is retry attempts remaining
:key str fail_msg: Override the log message used after all retry attempts are exhausted. First message param %s
    is func name, and second param %d is the number of times retried.
"""
retry_msg: str = retry_conf.get('retry_msg', DEF_RETRY_MSG)
fail_msg: str = retry_conf.get('fail_msg', DEF_FAIL_MSG)
instance_match: bool = is_true(retry_conf.get('instance_match', False))
fail_on: List[type] = list(retry_conf.get('fail_on', []))
ignore_ex: List[type] = list(retry_conf.get('ignore', []))
max_ignore: Union[bool, int] = retry_conf.get('max_ignore', 100)
def _decorator(f):
@functools.wraps(f)
async def wrapper(*args, **kwargs):
retries = int(kwargs.pop('retry_attempts', 0))
ignore_count = int(kwargs.pop('ignore_count', 0))
try:
return await f(*args, **kwargs)
except Exception as e:
# If instance_match is enabled, check exception type using isinstance, otherwise use exact type matches
_fail = isinstance(e, tuple(fail_on)) if instance_match else type(e) in fail_on
if _fail:
log.warning('Giving up. Re-raising exception %s (as requested by `fail_on` arg)', type(e))
raise e
if max_ignore is not False and ignore_count > max_ignore:
log.warning('Giving up. Ignored too many exceptions (max_ignore: %d, ignore_count: %d). '
'Re-raising exception %s.', max_ignore, ignore_count, type(e))
raise e
if retries < max_retries:
log.info('%s - %s', type(e), str(e))
log.info(retry_msg, f.__name__, max_retries - retries)
await asyncio.sleep(delay)
# If 'instance_match' is enabled, we check if the exception was an instance of any of the passed exception types,
# otherwise we use exact exception type comparison against the list.
# _ignore is True if we should ignore this exception (don't increment retries), or False if we should increment.
_ignore = isinstance(e, tuple(ignore_ex)) if instance_match else type(e) in ignore_ex
if _ignore:
log.debug(
" >> (?) Ignoring exception '%s' as exception is in 'ignore' list. Ignore Count: %d // "
"Max Ignores: %d // Instance Match: %s", type(e), ignore_count, max_ignore, instance_match
)
kwargs['retry_attempts'] = retries if _ignore else retries + 1
kwargs['ignore_count'] = ignore_count + 1 if _ignore else ignore_count
return await wrapper(*args, **kwargs)
log.exception(fail_msg, f.__name__, max_retries)
raise e
return wrapper
return _decorator
class FormatOpt(Enum):
"""
This enum represents various options available for :py:func:`.r_cache` 's ``format_opt`` parameter.
    To avoid bloating the PyDoc for ``r_cache`` too much, a description of each formatting option is available as a
short PyDoc comment under each enum option.
Usage:
>>> @r_cache('mykey', format_args=[0, 'x'], format_opt=FormatOpt.POS_AUTO)
"""
POS_AUTO = 'force_pos'
"""
First attempt to format using ``*args`` whitelisted in ``format_args``, if that causes a KeyError/IndexError,
then pass kwarg values in the order they're listed in ``format_args``
(only includes kwarg names listed in ``format_args``)
# def func(x, y)
func('a', 'b') # assuming 0 and 1 are in format_args, then it would use .format('a', 'b')
func(y='b', x='a') # assuming format_args = ``['x','y']``, then it would use .format('a', 'b')
"""
POS_ONLY = 'pos_only'
"""Only use positional args for formatting the cache key, kwargs will be ignored completely."""
KWARG_ONLY = 'kwarg'
"""Only use kwargs for formatting the cache key - requires named format placeholders, i.e. ``mykey:{x}``"""
MIX = 'mix'
"""
Use both ``*args`` and ``**kwargs`` to format the cache_key (assuming mixed placeholders e.g. ``mykey:{}:{y}``
"""
FO = FormatOpt
def _format_key(args, kwargs, cache_key: str, whitelist: bool = True, fmt_opt: FO = FO.POS_AUTO, fmt_args: list = None):
"""Internal function used by :func:`.r_cache` and :func:`.r_cache_async` for formatting a cache key e.g. ``pvx:{}:{}``"""
pos_args = args
kw_args = kwargs
if whitelist:
# Whitelisted positional arguments
pos_args = [args[i] for i in fmt_args if type(i) is int and len(args) > i]
# Whitelisted keyword args, as a dict
kw_args = {i: kwargs[i] for i in fmt_args if type(i) is str and i in kwargs}
if fmt_opt == FormatOpt.POS_AUTO:
log.debug('Format: POS_AUTO - Formatting with *args, fallback on err to positional **kwargs values')
try:
log.debug('Attempting to format with args: %s', pos_args)
rk = cache_key.format(*pos_args)
except (KeyError, IndexError):
pos_kwargs = [v for _, v in kw_args.items()]
log.debug('Failed to format with pos args, now trying positional kwargs: %s', pos_kwargs)
rk = cache_key.format(*pos_kwargs)
return rk
if fmt_opt == FormatOpt.KWARG_ONLY: # Only attempt to format cache_key using kwargs
log.debug('Format: KWARG_ONLY - Formatting using only **kwargs')
return cache_key.format(**kw_args)
if fmt_opt == FormatOpt.POS_ONLY: # Only attempt to format cache_key using positional args
log.debug('Format: POS_ONLY - Formatting using only *args')
return cache_key.format(*pos_args)
if fmt_opt == FormatOpt.MIX: # Format cache_key with both positional and kwargs as-is
log.debug('Format: MIX - Formatting using passthru *args and **kwargs')
return cache_key.format(*pos_args, **kw_args)
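
# Illustrative sketch (not in the original module) of how _format_key fills a key
# template, assuming whitelist=True and the default POS_AUTO format option:
#
#   _format_key(('alice', 30), {}, cache_key='user:{}:{}', fmt_args=[0, 1])
#   # -> 'user:alice:30'   (whitelisted positional args fill the placeholders)
#
#   _format_key((), {'name': 'alice'}, cache_key='user:{}', fmt_args=[0, 'name'])
#   # -> 'user:alice'      (no positional args, so the whitelisted kwarg value is
#   #                       substituted positionally after the IndexError fallback)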
def r_cache_async(cache_key: Union[str, callable], cache_time=300, format_args: list = None, format_opt: FO = FO.POS_AUTO, **opts) -> Any:
"""
Async function/method compatible version of :func:`.r_cache` - see docs for :func:`.r_cache`
You can bypass caching by passing ``r_cache=False`` to the wrapped function.
Basic usage::
>>> from privex.helpers import r_cache_async
>>> @r_cache_async('my_cache_key')
>>> async def some_func(some: int, args: int = 2):
... return some + args
>>> await some_func(5, 10)
15
>>> # If we await some_func a second time, we'll get '15' again because it was cached.
>>> await some_func(2, 3)
15
Async ``cache_key`` generation (you can also use normal synchronous functions/lambdas)::
>>> from privex.helpers import r_cache_async
>>>
>>> async def make_key(name, title):
... return f"mycache:{name}"
...
>>> @r_cache_async(make_key)
... async def who(name, title):
... return "Their name is {title} {name}"
...
:param FormatOpt format_opt: (default: :py:attr:`.FormatOpt.POS_AUTO`) "Format option" - how should args/kwargs be
used when filling placeholders in the ``cache_key`` (see comments on FormatOption)
:param list format_args: A list of positional arguments numbers (e.g. ``[0, 1, 2]``) and/or kwargs
``['x', 'y', 'z']`` that should be used to format the `cache_key`
:param str cache_key: The cache key to store the cached data into, e.g. `mydata`
:param int cache_time: The amount of time in seconds to cache the result for (default: 300 seconds)
:keyword bool whitelist: (default: ``True``) If True, only use specified arg positions / kwarg keys when formatting
``cache_key`` placeholders. Otherwise, trust whatever args/kwargs were passed to the func.
:return Any res: The return result, either from the wrapped function, or from the cache.
"""
fmt_args = [] if not format_args else format_args
# Using normal 'cached' often results in "event loop already running" errors due to the synchronous async wrapper
# in CacheWrapper. So to be safe, we get the adapter directly to avoid issues.
cache_adapter = async_adapter_get()
whitelist = opts.get('whitelist', True)
def _decorator(f):
@functools.wraps(f)
async def wrapper(*args, **kwargs):
# Extract r_cache and r_cache_key from the wrapped function's kwargs if they're specified,
# then remove them from the kwargs so they don't interfere with the wrapped function.
enable_cache, rk = kwargs.get('r_cache', True), kwargs.get('r_cache_key', cache_key)
if 'r_cache' in kwargs: del kwargs['r_cache']
if 'r_cache_key' in kwargs: del kwargs['r_cache_key']
if not isinstance(rk, str):
rk = await await_if_needed(rk, *args, **kwargs)
elif not empty(fmt_args, itr=True) or not whitelist:
# If the cache key contains a format placeholder, e.g. {somevar} - then attempt to replace the
# placeholders using the function's kwargs
log.debug('Format_args not empty (or whitelist=False), formatting cache_key "%s"', cache_key)
rk = _format_key(args, kwargs, cache_key=cache_key, whitelist=whitelist, fmt_opt=format_opt, fmt_args=format_args)
# To ensure no event loop / thread cache instance conflicts, we use the cache adapter as a context manager, which
# is supposed to disconnect + destroy the connection library instance, and re-create it in the current loop/thread.
async with cache_adapter as r:
# If using an async cache adapter, r.get might be async...
log.debug('Trying to load "%s" from cache', rk)
data = await await_if_needed(r.get, rk)
if empty(data) or not enable_cache:
log.debug('Not found in cache, or "r_cache" set to false. Calling wrapped async function.')
data = await await_if_needed(f, *args, **kwargs)
# If using an async cache adapter, r.get might be async...
await await_if_needed(r.set, rk, data, timeout=cache_time)
return data
return wrapper
return _decorator
def r_cache(cache_key: Union[str, callable], cache_time=300, format_args: list = None,
format_opt: FO = FO.POS_AUTO, **opts) -> Any:
"""
This is a decorator which caches the result of the wrapped function with the global cache adapter from
:py:mod:`privex.helpers.cache` using the key ``cache_key`` and with an expiry of ``cache_time`` seconds.
Future calls to the wrapped function would then load the data from cache until the cache expires, upon which it
will re-run the original code and re-cache it.
To bypass the cache, pass kwarg ``r_cache=False`` to the wrapped function. To override the cache key on demand,
pass ``r_cache_key='mykey'`` to the wrapped function.
**Example usage**:
>>> from privex.helpers import r_cache
>>>
>>> @r_cache('mydata', cache_time=600)
... def my_func(*args, **kwargs):
... time.sleep(60)
... return "done"
This will run the function and take 60 seconds to return while it sleeps
>>> my_func()
done
This will run instantly because "done" is now cached for 600 seconds
>>> my_func()
done
This will take another 60 seconds to run because ``r_cache`` is set to `False` (disables the cache)
>>> my_func(r_cache=False)
done
**Using a dynamic cache_key**:
**Simplest and most reliable - pass ``r_cache_key`` as an additional kwarg**
If you don't mind passing an additional kwarg to your function, then the most reliable method is to override
the cache key by passing ``r_cache_key`` to your wrapped function.
Don't worry, we remove both ``r_cache`` and ``r_cache_key`` from the kwargs that actually hit your function.
>>> my_func(r_cache_key='somekey') # Use the cache key 'somekey' when caching data for this function
**Option 2. Pass a callable which takes the same arguments as the wrapped function**
In the example below, ``who`` takes two arguments: ``name`` and ``title`` - we then pass the function
``make_key`` which takes the same arguments - ``r_cache`` will detect that the cache key is a function
and call it with the same ``(*args, **kwargs)`` passed to the wrapped function.
>>> from privex.helpers import r_cache
>>>
>>> def make_key(name, title):
... return f"mycache:{name}"
...
>>> @r_cache(make_key)
... def who(name, title):
... return "Their name is {title} {name}"
...
We can also obtain the same effect with a ``lambda`` callable defined directly inside of the cache_key.
>>> @r_cache(lambda name,title: f"mycache:{name}")
... def who(name, title):
... return "Their name is {title} {name}"
**Option 3. Can be finnicky - using ``format_args`` to integrate with existing code**
If you can't change how your existing function/method is called, then you can use the ``format_args`` feature.
**NOTE:** Unless you're forcing the usage of kwargs with a function/method, it's strongly recommended that you
keep ``force_pos`` enabled, and specify both the positional argument ID, and the kwarg name.
Basic Example:
>>> from privex.helpers import r_cache
>>> import time
>>>
>>> @r_cache('some_cache:{}:{}', cache_time=600, format_args=[0, 1, 'x', 'y'])
... def some_func(x=1, y=2):
... time.sleep(5)
... return 'x + y = {}'.format(x + y)
>>>
Using positional arguments, we can see from the debug log that it's formatting the ``{}:{}`` in the key
with ``x:y``
>>> some_func(1, 2)
2019-08-21 06:58:29,823 lg DEBUG Trying to load "some_cache:1:2" from cache
2019-08-21 06:58:29,826 lg DEBUG Not found in cache, or "r_cache" set to false. Calling wrapped function.
'x + y = 3'
>>> some_func(2, 3)
2019-08-21 06:58:34,831 lg DEBUG Trying to load "some_cache:2:3" from cache
2019-08-21 06:58:34,832 lg DEBUG Not found in cache, or "r_cache" set to false. Calling wrapped function.
'x + y = 5'
When we passed ``(1, 2)`` and ``(2, 3)`` it had to re-run the function for each. But once we re-call it for
the previously ran ``(1, 2)`` - it's able to retrieve the cached result just for those args.
>>> some_func(1, 2)
2019-08-21 06:58:41,752 lg DEBUG Trying to load "some_cache:1:2" from cache
'x + y = 3'
Be warned that the default format option ``POS_AUTO`` will make kwargs' values be specified in the same order as
they were listed in ``format_args``
>>> some_func(y=1, x=2) # ``format_args`` has the kwargs in the order ``['x', 'y']`` thus ``.format(x,y)``
2019-08-21 06:58:58,611 lg DEBUG Trying to load "some_cache:2:1" from cache
2019-08-21 06:58:58,611 lg DEBUG Not found in cache, or "r_cache" set to false. Calling wrapped function.
'x + y = 3'
:param FormatOpt format_opt: (default: :py:attr:`.FormatOpt.POS_AUTO`) "Format option" - how should args/kwargs be
used when filling placeholders in the ``cache_key`` (see comments on FormatOption)
:param list format_args: A list of positional arguments numbers (e.g. ``[0, 1, 2]``) and/or kwargs
``['x', 'y', 'z']`` that should be used to format the `cache_key`
:param str cache_key: The cache key to store the cached data into, e.g. `mydata`
:param int cache_time: The amount of time in seconds to cache the result for (default: 300 seconds)
:keyword bool whitelist: (default: ``True``) If True, only use specified arg positions / kwarg keys when formatting
``cache_key`` placeholders. Otherwise, trust whatever args/kwargs were passed to the func.
:return Any res: The return result, either from the wrapped function, or from the cache.
"""
fmt_args = [] if not format_args else format_args
r = cached
whitelist = opts.get('whitelist', True)
def _decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
# Extract r_cache and r_cache_key from the wrapped function's kwargs if they're specified,
# then remove them from the kwargs so they don't interfere with the wrapped function.
enable_cache, rk = kwargs.get('r_cache', True), kwargs.get('r_cache_key', cache_key)
if 'r_cache' in kwargs: del kwargs['r_cache']
if 'r_cache_key' in kwargs: del kwargs['r_cache_key']
if callable(rk):
rk = rk(*args, **kwargs)
elif not empty(fmt_args, itr=True) or not whitelist:
# If the cache key contains a format placeholder, e.g. {somevar} - then attempt to replace the
# placeholders using the function's kwargs
log.debug('Format_args not empty (or whitelist=False), formatting cache_key "%s"', cache_key)
rk = _format_key(args, kwargs, cache_key=cache_key, whitelist=whitelist, fmt_opt=format_opt, fmt_args=format_args)
log.debug('Trying to load "%s" from cache', rk)
data = r.get(rk)
if empty(data) or not enable_cache:
log.debug('Not found in cache, or "r_cache" set to false. Calling wrapped function.')
data = f(*args, **kwargs)
r.set(rk, data, timeout=cache_time)
return data
return wrapper
return _decorator
def mock_decorator(*dec_args, **dec_kwargs):
"""
This decorator is a pass-through decorator which does nothing other than be a decorator.
It's designed to be used with the :class:`privex.helpers.common.Mocker` class when mocking classes/modules,
allowing you to add fake decorators to the mock class/method which do nothing, other than act like a decorator
without breaking your functions/methods.
"""
def _decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
return _decorator
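
# Hedged usage sketch (not in the original module): mock_decorator can be applied
# like any parameterised decorator and simply passes calls through unchanged.
if __name__ == '__main__':
    @mock_decorator('any', args='ignored')
    def _demo(x):
        return x * 2
    print(_demo(21))  # -> 42; the decorator does not alter behaviour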
|
"""Update version numbers everywhere based on git tags."""
import os
import re
import json
import fileinput
import contextlib
import subprocess
from packaging import version
from py import path as py_path # pylint: disable=no-name-in-module,no-member
def subpath(*args):
return os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', *args))
@contextlib.contextmanager
def file_input(*args, **kwargs):
"""Context manager for a FileInput object."""
input_fo = fileinput.FileInput(*args, **kwargs)
try:
yield input_fo
finally:
input_fo.close()
class VersionUpdater():
"""
Version number synchronisation interface.
Updates the version information in
* setup.json
* src/t4me/__init__.py
to the current version number.
The current version number is either parsed from the output of ``git describe --tags --match v*.*.*``, or if the command fails for
    any reason, from setup.json. The current version number is decided on init; synchronization can be executed by calling ``.sync()``.
"""
    version_pat = re.compile(r'\d+\.\d+\.\d+')
init_version_pat = re.compile(r'(__version__ = )([\'"])(.*)([\'"])',
re.DOTALL | re.MULTILINE)
def __init__(self):
"""Initialize with documents that should be kept up to date and actual version."""
self.top_level_init = py_path.local(
subpath('src', 't4me', '__init__.py'))
self.setup_json = py_path.local(subpath('setup.json'))
self.conf_file = py_path.local(subpath('docs', 'conf.py'))
self.version = self.get_version()
def write_to_doc(self):
"""Write version to the docs."""
with open(self.conf_file.strpath, 'r') as conf_fo:
lines = conf_fo.readlines()
for index, line in enumerate(lines):
if 'version = ' in line:
lines[index] = "version = '" + str(self.version).rsplit(
'.', 1)[0] + "'\n"
if 'release = ' in line:
lines[index] = "release = '" + str(self.version) + "'\n"
with open(self.conf_file.strpath, 'w') as conf_fo:
conf_fo.writelines(lines)
def write_to_init(self):
"""Write version to init."""
init_content = self.top_level_init.read()
        self.top_level_init.write(
            re.sub(self.init_version_pat,
                   r'\1\g<2>{}\4'.format(str(self.version)), init_content))
def write_to_setup(self):
"""Write the updated version number to the setup file."""
setup = json.load(self.setup_json)
setup['version'] = str(self.version)
with open(self.setup_json.strpath, 'w') as setup_fo:
json.dump(setup, setup_fo, indent=4, sort_keys=True)
@property
def setup_version(self):
"""Fetch version from setup.json."""
return version.parse(json.load(self.setup_json)['version'])
@property
def doc_version(self):
"""Fetch version from docs."""
version_string = None
with open(self.conf_file.strpath, 'r') as conf_fo:
for line in conf_fo:
if 'release = ' in line:
version_string = line.split('=')[1].strip()
        if version_string is None:
            raise ValueError('Could not determine the doc version string')
return version.parse(version_string)
@property
def init_version(self):
"""Fetch version from the init file."""
match = re.search(self.init_version_pat, self.top_level_init.read())
if not match:
raise AttributeError(
'No __version__ found in top-level __init__.py')
return version.parse(match.groups()[2])
@property
def tag_version(self):
"""Get the current version number from ``git describe``, fall back to setup.json."""
try:
describe_byte_string = subprocess.check_output(
['git', 'describe', '--tags', '--match', 'v*.*.*'])
version_string = re.findall(
self.version_pat, describe_byte_string.decode('utf-8'))[0]
except subprocess.CalledProcessError:
with open(self.setup_json.strpath, 'r') as setup_fo:
setup = json.load(setup_fo)
version_string = setup['version']
return version.parse(version_string)
def get_version(self):
return max(self.setup_version, self.init_version, self.tag_version)
def sync(self):
"""Update respective versions."""
if self.version > self.init_version:
self.write_to_init()
if self.version > self.setup_version:
self.write_to_setup()
if __name__ == '__main__':
VERSION_UPDATER = VersionUpdater()
VERSION_UPDATER.sync()
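
# Hedged illustration (not part of the original script): version_pat extracts the
# numeric portion of a typical ``git describe`` output, e.g.
#   re.findall(r'\d+\.\d+\.\d+', 'v1.2.3-4-gabc1234')  ->  ['1.2.3']
# so tag_version would resolve to version.parse('1.2.3') in that case.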
|
#!/usr/bin/env python
# Copyright (c) 2015, Herman Bergwerf. All rights reserved.
# Use of this source code is governed by a MIT-style license
# that can be found in the LICENSE file.
text="Hello, World!"
if ( len(text) > 0 )
print( "Hello, World!" )
|
from scout.parse.omim import (parse_omim_line, parse_genemap2, parse_mim_titles,
parse_mim2gene, get_mim_phenotypes)
GENEMAP_LINES = [
"# Copyright (c) 1966-2016 Johns Hopkins University. Use of this"\
" file adheres to the terms specified at https://omim.org/help/agreement.\n",
"# Generated: 2017-02-02\n",
"# See end of file for additional documentation on specific fields\n",
"# Chromosome\tGenomic Position Start\tGenomic Position End\tCyto"\
" Location\tComputed Cyto Location\tMim Number\tGene Symbols\tGene Name"\
"\tApproved Symbol\tEntrez Gene ID\tEnsembl Gene ID\tComments\t"\
"Phenotypes\tMouse Gene Symbol/ID\n",
"chr1\t1232248\t1235040\t1p36.33\t\t615291\tB3GALT6, SEMDJL1, EDSP2\t"\
"UDP-Gal:beta-Gal beta-1,3-galactosyltransferase polypeptide 6\tB3GALT"\
"6\t126792\tENSG00000176022\t\tEhlers-Danlos syndrome, progeroid type,"\
" 2, 615349 (3), Autosomal recessive; Spondyloepimetaphyseal dysplasia"\
" with joint laxity, type 1, with or without fractures, 271640 (3),"\
" Autosomal recessive\tB3galt6 (MGI:2152819)\n",
]
MIM2GENE_LINES = [
"# Copyright (c) 1966-2016 Johns Hopkins University. Use of this file "\
"adheres to the terms specified at https://omim.org/help/agreement.\n",
"# Generated: 2017-02-02\n",
"# This file provides links between the genes in OMIM and other gene"\
" identifiers.\n",
"# THIS IS NOT A TABLE OF GENE-PHENOTYPE RELATIONSHIPS.\n"
"# MIM Number\tMIM Entry Type (see FAQ 1.3 at https://omim.org/help/faq)\t"\
"Entrez Gene ID (NCBI)\tApproved Gene Symbol (HGNC)\tEnsembl Gene ID (Ensembl)\n",
"615291\tgene\t126792\tB3GALT6\tENSG00000176022,ENST00000379198",
"615349\tphenotype",
"271640\tphenotype",
]
def test_parse_omim_line():
## GIVEN a header and a line
header = ['a', 'b', 'c']
line = '1\t2\t3'
## WHEN parsing the omim line
res = parse_omim_line(line, header)
## THEN assert a dict was built by the header and the line
assert res['a'] == '1'
assert res['b'] == '2'
assert res['c'] == '3'
def test_parse_genemap():
for res in parse_genemap2(GENEMAP_LINES):
assert res['Chromosome'] == 'chr1'
assert res['mim_number'] == 615291
assert res['hgnc_symbol'] == 'B3GALT6'
assert res['inheritance'] == set(['AR'])
for phenotype in res['phenotypes']:
assert phenotype['mim_number']
assert phenotype['inheritance']
def test_parse_genemap_file(genemap_handle):
for i,res in enumerate(parse_genemap2(genemap_handle)):
assert 'mim_number' in res
assert i > 0
def test_parse_mim2gene():
## GIVEN some lines from a mim2gene file
mim2gene_info = parse_mim2gene(MIM2GENE_LINES)
## WHEN parsing the lines
first_entry = next(mim2gene_info)
## ASSERT that they are correctly parsed
# First entry is a gene so it should have a hgnc symbol
assert first_entry['mim_number'] == 615291
assert first_entry['entry_type'] == 'gene'
assert first_entry['hgnc_symbol'] == 'B3GALT6'
def test_parse_mim2gene_file(mim2gene_handle):
# Just check that the file exists and that some result is given
for i,res in enumerate(parse_mim2gene(mim2gene_handle)):
assert 'mim_number' in res
assert i > 0
def test_get_mim_phenotypes():
## GIVEN a small testdata set
# This will return a dictionary with mim number as keys and
# phenotypes as values
## WHEN parsing the phenotypes
phenotypes = get_mim_phenotypes(genemap_lines=GENEMAP_LINES)
    ## THEN assert they were parsed in a correct way
    # Only one line in GENEMAP_LINES carries phenotypes, and it lists two of them,
    # so we expect two phenotype entries
assert len(phenotypes) == 2
term = phenotypes[615349]
assert term['inheritance'] == set(['AR'])
assert term['hgnc_symbols'] == set(['B3GALT6'])
def test_get_mim_phenotypes_file(genemap_handle):
phenotypes = get_mim_phenotypes(genemap_lines=genemap_handle)
for i, mim_nr in enumerate(phenotypes):
assert phenotypes[mim_nr]['mim_number']
assert i > 0
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from pathlib import Path
import yaml
import sqlite3
import subprocess
import sys
# user lib
import yaml_utils
import file_search
import db_utils
def parser():
# parser information setup
prog='parse_design'
    description = 'Parse verilog/systemverilog source files ' + \
        'to construct the design hierarchy used for database creation'
usage = 'usage: python3 {} ' .format(__file__)
usage += '[-o] [-c] [-i] [-d] [-p] [-t]'
parse = ArgumentParser(
prog=prog,
description=description,
usage=usage,
add_help=True
)
# Output Filename
parse.add_argument(
'-o',
'--output',
type=str,
action='store',
default='design.db',
help='Set output sqlite3 database file name (Default: design.db)'
)
# Directory parsing configuration file
parse.add_argument(
'-c',
'--config',
type=str,
action='store',
default='config.yaml',
help='Set directory parsing configuration (Default: config.yaml)'
)
# Directory structure configuration file
parse.add_argument(
'-i',
'--incdir',
type=str,
action='store',
default='incdir.yaml',
help='Set include directory structure configuration (Default: incdir.yaml)'
)
# Directory structure configuration file
parse.add_argument(
'-d',
'--dir',
type=str,
action='store',
default='dir.yaml',
help='Set directory structure configuration (Default: dir.yaml)'
)
# Dump target directory of module dependency files (yaml)
parse.add_argument(
'-t',
'--target',
type=str,
action='store',
default='design',
help='Set target directory to output module dependency yaml files (Default:design)'
)
return parse.parse_args()
def v_check(file_list, target, inc_conf_file) :
# Arguments
# file_list: list of verilog files to be parsed
# target: output directory of yaml
# inc_conf_file: yaml file for include directory structures (incdir.yml)
proc_list = []
for f in file_list :
path = str(f.absolute())
try:
print('parsing ' + path)
# Execute perl scripts to extract submodules in each design and output yaml.
# perl ./scripts/v_check.pl -t ./yaml/design \
# -d design.sv -i ./yaml/incdir.yml
cmd = ['perl', './scripts/v_check.pl']
cmd += ['-t', target]
cmd += ['-i', inc_conf_file]
cmd += ['-d', path]
proc_list.append(subprocess.Popen(cmd))
        except (OSError, subprocess.SubprocessError):
            # Popen raises OSError (e.g. FileNotFoundError) if perl cannot be started
            print('Failed execution of v_check.pl', file=sys.stderr)
            sys.exit(1)
# wait all of subprocess finishes
for p in proc_list :
p.wait()
if __name__ == '__main__' :
# options analysis
options = parser()
conf_file = options.config
dir_conf_file = options.dir
inc_conf_file = options.incdir
out_file = options.output
target = options.target
# file parse configuration
config = yaml_utils.read_yaml(conf_file)
dir_config = yaml_utils.read_yaml(dir_conf_file)
inc_config = yaml_utils.read_yaml(inc_conf_file)
# parse files to create source file and include directory list
top_dir = Path('..').resolve()
rtl_ext = config['verilog-src'] + config['systemverilog-src']
file_list = file_search.listup_files(top_dir, rtl_ext, dir_config)
inc_list = file_search.listup_dirs(top_dir, inc_config)
# Extract submodules in each design
v_check(file_list, target, inc_conf_file)
    # Register each module and its dependencies in an sqlite3-based database.
    # Each table is named after a module; a table entry has two columns:
    #   mod  : submodule name
    #   inst : instance name of the submodule
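    # Illustrative sketch (assumes db_utils.create_table / register_db map onto
    # plain SQL): for a module "top" containing an instance "u_cpu" of "cpu", the
    # calls below are roughly equivalent to
    #   CREATE TABLE top (inst varchar(32), mod varchar(32));
    #   INSERT INTO top (mod, inst) VALUES ('cpu', 'u_cpu');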
# create sqlite3 cursor
db = sqlite3.connect(out_file)
c = db.cursor()
# create table for each module with submodules
for f in file_search.get_files(Path(target),".yml") :
design_name = f.stem
design_hier = yaml_utils.read_yaml(str(f.absolute()))
# Do not create table for leaf modules
if len(design_hier) == 0 :
continue
keys = 'inst varchar(32), '
keys += 'mod varchar(32)'
if not db_utils.check_table(c, design_name) :
print("Newly create table: " + design_name)
db_utils.create_table(c, design_name, keys)
for sub_inst in design_hier.keys() :
sub_mod = design_hier[sub_inst]
# if given sub_inst does not exist in table
if not db_utils.check_db(c, design_name, "inst", sub_inst) :
keys = 'mod, inst'
values = '\'' + sub_mod + '\', '
values += '\'' + sub_inst + '\''
db_utils.register_db(c, design_name, keys, values)
# end database operation
db.commit()
db.close()
|
import boto3
import json
from datetime import datetime
dynamodb = boto3.client('dynamodb')
def get_counter():
response = dynamodb.get_item(
TableName = 'alert-log',
Key = {
'messageID': {
'S': 'counter'
}
}
)
return int(response['Item']['message']['S'])
def set_counter(counter):
response = dynamodb.put_item(
TableName = 'alert-log',
Item = {
'messageID': {
'S': 'counter'
},
'message': {
'S': counter
}
}
)
return response
def get_escalation_target():
response = dynamodb.get_item(
TableName = 'escalation_target',
Key = {
'responsibility': {
'S': datetime.now().strftime("%A")
}
}
)
escalationTarget = {
'name': response['Item']['escalationTarget']['S'],
'number': response['Item']['escalationNumber']['S'],
'team': response['Item']['escalationTeam']['S']
}
return escalationTarget
def save_incident(incident):
response = dynamodb.put_item(
TableName = 'alert-log',
Item = {
'messageID': {
'S': incident['id']
},
'message': {
'S': incident['message']
},
'priority': {
'S': incident['priority']
},
'currentStatus': {
'S': 'open'
},
'timestamp': {
'S': incident['timestamp']
},
'escalationTarget': {
'S': incident['escalationTarget']
}
}
)
return response
def contact_escalation_target(escalationTarget, incident):
lambda_client = boto3.client('lambda')
response = lambda_client.invoke(
FunctionName='arn:aws:lambda:eu-west-1:746022503515:function:Contact_Escalation_Target',
InvocationType='Event',
Payload=bytes(json.dumps({
'escalationTarget': escalationTarget,
'incident': incident
}), "utf-8")
)
return response
def lambda_handler(event, context):
sns_msg = json.loads(event['Records'][0]['Sns']['Message'])
print(sns_msg)
new_counter = str(get_counter() + 1)
escalationTarget = get_escalation_target()
incident = {
'id': new_counter,
'timestamp': str(datetime.now().timestamp()),
'priority': sns_msg['priority'],
'message': sns_msg['message'],
'escalationTarget': escalationTarget['name']
}
save_incident(incident)
set_counter(new_counter)
contact_escalation_target(escalationTarget, incident)
    print('[info] Escalation target ' + escalationTarget['name'] + ' with phone number: ' + escalationTarget['number'] + ' has been called with message: "' + incident['message'] + '"')
return { 'statusCode': 200 }
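
# Hedged note (not part of the original handler): lambda_handler expects the SNS
# record's Message field to be a JSON string containing at least these keys, e.g.
#   {"priority": "high", "message": "Service X is down"}
# Everything else (counter, timestamp, escalation target) is derived inside the handler.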
|
import xlsxwriter
import os
import sys
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import logging
import logging.handlers
from datetime import datetime
# logging
LOG_FILENAME = "scrap_merit_info_acpdc.out"
my_logger = logging.getLogger('ScrapMeritInfoLogger')
my_logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=10*1024*1024, backupCount=5)
my_logger.addHandler(handler)
my_logger.info('\n\n-----------------------------------------------------')
my_logger.info('Time of Execution: {}'.format(datetime.now()))
my_logger.info('-----------------------------------------------------\n\n')
URL = "http://acpdc.in/result/result_search.asp"
# FILENAMES = []
bad_chars = r'\xa0\t\n\\'
rgx = re.compile('[%s]' % bad_chars)
# scrape merit info from the URL for roll numbers in the range [l, h)
def main(l, h):
    out_file = None
    driver = None
    try:
FILENAME = "{}-{}.xlsx".format(l, h)
my_logger.info("open(out_file): {}".format(FILENAME))
out_file = xlsxwriter.Workbook(FILENAME)
sheet = out_file.add_worksheet()
row = 1
col = 0
sheet.write('A1', 'Merit No')
sheet.write('B1', 'Roll No')
sheet.write('C1', 'Name')
sheet.write('D1', 'Allotted Institute Name')
sheet.write('E1', 'Allotted Course Name')
sheet.write('F1', 'Allotted Category')
sheet.write('G1', 'Basic Category')
sheet.write('H1', 'Allotted Status')
driver = webdriver.Chrome()
driver.get(URL)
for gid in range(l, h):
my_logger.info("Getting merit info for gid: {}".format(gid))
elm = driver.find_element_by_name('inno')
elm.clear()
elm.send_keys(str(gid) + Keys.RETURN)
soup = BeautifulSoup(driver.page_source, 'html.parser')
tables = soup.find_all('table')
if not tables:
driver.back()
continue
rows = tables[0].find_all('tr')
data = [[rgx.sub('', td.text) for td in tr.findAll("td")] for tr in rows]
sheet.write(row, 0, data[1][1])
sheet.write(row, 1, data[1][3])
sheet.write(row, 2, data[2][1])
sheet.write(row, 3, data[3][1])
sheet.write(row, 4, data[4][1])
sheet.write(row, 5, data[5][1])
sheet.write(row, 6, data[5][3])
sheet.write(row, 7, data[6][1])
row += 1
driver.back()
sleep(0.05)
except KeyboardInterrupt:
sys.exit(0)
finally:
my_logger.info("------------------------------------------------------------\n")
if out_file:
out_file.close()
if driver:
driver.close()
if __name__ == '__main__':
for i in range(1000000, 1039490, 10000):
l = i
if (i == 1030000):
h = 1039491
else:
h = i + 10000
my_logger.info("-------------------------------\n")
my_logger.info("l = {}\nh = {}\n".format(l, h))
main(l, h)
|
from contextlib import contextmanager
import csv
import operator
from sqlalchemy import CHAR
from sqlalchemy import column
from sqlalchemy import exc
from sqlalchemy import exc as sa_exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import true
from sqlalchemy import type_coerce
from sqlalchemy import TypeDecorator
from sqlalchemy import util
from sqlalchemy import VARCHAR
from sqlalchemy.engine import default
from sqlalchemy.engine import result as _result
from sqlalchemy.engine import Row
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import expression
from sqlalchemy.sql.selectable import TextualSelect
from sqlalchemy.sql.sqltypes import NULLTYPE
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assertions
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
from sqlalchemy.testing import le_
from sqlalchemy.testing import ne_
from sqlalchemy.testing import not_in_
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.util import collections_abc
class ResultProxyTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
Table(
"addresses",
metadata,
Column(
"address_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("user_id", Integer, ForeignKey("users.user_id")),
Column("address", String(30)),
test_needs_acid=True,
)
Table(
"users2",
metadata,
Column("user_id", INT, primary_key=True),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
def test_row_iteration(self):
users = self.tables.users
users.insert().execute(
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
)
r = users.select().execute()
rows = []
for row in r:
rows.append(row)
eq_(len(rows), 3)
def test_row_next(self):
users = self.tables.users
users.insert().execute(
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
)
r = users.select().execute()
rows = []
while True:
row = next(r, "foo")
if row == "foo":
break
rows.append(row)
eq_(len(rows), 3)
@testing.requires.subqueries
def test_anonymous_rows(self):
users = self.tables.users
users.insert().execute(
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
)
sel = (
select([users.c.user_id])
.where(users.c.user_name == "jack")
.scalar_subquery()
)
for row in select([sel + 1, sel + 3], bind=users.bind).execute():
eq_(row["anon_1"], 8)
eq_(row["anon_2"], 10)
def test_row_comparison(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="jack")
rp = users.select().execute().first()
eq_(rp, rp)
is_(not (rp != rp), True)
equal = (7, "jack")
eq_(rp, equal)
eq_(equal, rp)
is_((not (rp != equal)), True)
is_(not (equal != equal), True)
def endless():
while True:
yield 1
ne_(rp, endless())
ne_(endless(), rp)
# test that everything compares the same
# as it would against a tuple
for compare in [False, 8, endless(), "xyz", (7, "jack")]:
for op in [
operator.eq,
operator.ne,
operator.gt,
operator.lt,
operator.ge,
operator.le,
]:
try:
control = op(equal, compare)
except TypeError:
# Py3K raises TypeError for some invalid comparisons
assert_raises(TypeError, op, rp, compare)
else:
eq_(control, op(rp, compare))
try:
control = op(compare, equal)
except TypeError:
# Py3K raises TypeError for some invalid comparisons
assert_raises(TypeError, op, compare, rp)
else:
eq_(control, op(compare, rp))
@testing.provide_metadata
def test_column_label_overlap_fallback(self):
content = Table("content", self.metadata, Column("type", String(30)))
bar = Table("bar", self.metadata, Column("content_type", String(30)))
self.metadata.create_all(testing.db)
testing.db.execute(content.insert().values(type="t1"))
row = testing.db.execute(content.select(use_labels=True)).first()
in_(content.c.type, row)
not_in_(bar.c.content_type, row)
row = testing.db.execute(
select([func.now().label("content_type")])
).first()
not_in_(content.c.type, row)
not_in_(bar.c.content_type, row)
def test_pickled_rows(self):
users = self.tables.users
users.insert().execute(
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
)
for pickle in False, True:
for use_labels in False, True:
result = (
users.select(use_labels=use_labels)
.order_by(users.c.user_id)
.execute()
.fetchall()
)
if pickle:
result = util.pickle.loads(util.pickle.dumps(result))
eq_(result, [(7, "jack"), (8, "ed"), (9, "fred")])
if use_labels:
eq_(result[0]["users_user_id"], 7)
eq_(
list(result[0].keys()),
["users_user_id", "users_user_name"],
)
else:
eq_(result[0]["user_id"], 7)
eq_(list(result[0].keys()), ["user_id", "user_name"])
eq_(result[0][0], 7)
assert_raises(
exc.NoSuchColumnError, lambda: result[0]["fake key"]
)
def test_column_error_printing(self):
result = testing.db.execute(select([1]))
row = result.first()
class unprintable(object):
def __str__(self):
raise ValueError("nope")
msg = r"Could not locate column in row for column '%s'"
for accessor, repl in [
("x", "x"),
(Column("q", Integer), "q"),
(Column("q", Integer) + 12, r"q \+ :q_1"),
(unprintable(), "unprintable element.*"),
]:
assert_raises_message(
exc.NoSuchColumnError, msg % repl, result._getter, accessor
)
is_(result._getter(accessor, False), None)
assert_raises_message(
exc.NoSuchColumnError, msg % repl, lambda: row[accessor]
)
def test_fetchmany(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="jack")
users.insert().execute(user_id=8, user_name="ed")
users.insert().execute(user_id=9, user_name="fred")
r = users.select().execute()
rows = []
for row in r.fetchmany(size=2):
rows.append(row)
eq_(len(rows), 2)
def test_column_slices(self):
users = self.tables.users
addresses = self.tables.addresses
users.insert().execute(user_id=1, user_name="john")
users.insert().execute(user_id=2, user_name="jack")
addresses.insert().execute(
address_id=1, user_id=2, address="foo@bar.com"
)
r = text("select * from addresses", bind=testing.db).execute().first()
eq_(r[0:1], (1,))
eq_(r[1:], (2, "foo@bar.com"))
eq_(r[:-1], (1, 2))
def test_column_accessor_basic_compiled(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
)
r = users.select(users.c.user_id == 2).execute().first()
eq_(r.user_id, 2)
eq_(r["user_id"], 2)
eq_(r[users.c.user_id], 2)
eq_(r.user_name, "jack")
eq_(r["user_name"], "jack")
eq_(r[users.c.user_name], "jack")
def test_column_accessor_basic_text(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
)
r = testing.db.execute(
text("select * from users where user_id=2")
).first()
eq_(r.user_id, 2)
eq_(r["user_id"], 2)
eq_(r.user_name, "jack")
eq_(r["user_name"], "jack")
def test_column_accessor_textual_select(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="jack"),
)
        # this will create column() objects inside
        # the select(); these need to match on name anyway
r = testing.db.execute(
select([column("user_id"), column("user_name")])
.select_from(table("users"))
.where(text("user_id=2"))
).first()
eq_(r.user_id, 2)
eq_(r["user_id"], 2)
eq_(r.user_name, "jack")
eq_(r["user_name"], "jack")
def test_column_accessor_dotted_union(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
# test a little sqlite < 3.10.0 weirdness - with the UNION,
# cols come back as "users.user_id" in cursor.description
r = testing.db.execute(
text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users"
)
).first()
eq_(r["user_id"], 1)
eq_(r["user_name"], "john")
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_sqlite_raw(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
r = (
text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users",
bind=testing.db,
)
.execution_options(sqlite_raw_colnames=True)
.execute()
.first()
)
if testing.against("sqlite < 3.10.0"):
not_in_("user_id", r)
not_in_("user_name", r)
eq_(r["users.user_id"], 1)
eq_(r["users.user_name"], "john")
eq_(list(r.keys()), ["users.user_id", "users.user_name"])
else:
not_in_("users.user_id", r)
not_in_("users.user_name", r)
eq_(r["user_id"], 1)
eq_(r["user_name"], "john")
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_sqlite_translated(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
r = (
text(
"select users.user_id, users.user_name "
"from users "
"UNION select users.user_id, "
"users.user_name from users",
bind=testing.db,
)
.execute()
.first()
)
eq_(r["user_id"], 1)
eq_(r["user_name"], "john")
if testing.against("sqlite < 3.10.0"):
eq_(r["users.user_id"], 1)
eq_(r["users.user_name"], "john")
else:
not_in_("users.user_id", r)
not_in_("users.user_name", r)
eq_(list(r.keys()), ["user_id", "user_name"])
def test_column_accessor_labels_w_dots(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
# test using literal tablename.colname
r = (
text(
'select users.user_id AS "users.user_id", '
'users.user_name AS "users.user_name" '
"from users",
bind=testing.db,
)
.execution_options(sqlite_raw_colnames=True)
.execute()
.first()
)
eq_(r["users.user_id"], 1)
eq_(r["users.user_name"], "john")
not_in_("user_name", r)
eq_(list(r.keys()), ["users.user_id", "users.user_name"])
def test_column_accessor_unary(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
# unary expressions
r = (
select([users.c.user_name.distinct()])
.order_by(users.c.user_name)
.execute()
.first()
)
eq_(r[users.c.user_name], "john")
eq_(r.user_name, "john")
def test_column_accessor_err(self):
r = testing.db.execute(select([1])).first()
assert_raises_message(
AttributeError,
"Could not locate column in row for column 'foo'",
getattr,
r,
"foo",
)
assert_raises_message(
KeyError,
"Could not locate column in row for column 'foo'",
lambda: r["foo"],
)
def test_graceful_fetch_on_non_rows(self):
"""test that calling fetchone() etc. on a result that doesn't
return rows fails gracefully.
"""
        # these proxies don't work when no cursor.description is present,
        # so they don't apply to this test at the moment.
# result.FullyBufferedResultProxy,
# result.BufferedRowResultProxy,
# result.BufferedColumnResultProxy
users = self.tables.users
conn = testing.db.connect()
for meth in [
lambda r: r.fetchone(),
lambda r: r.fetchall(),
lambda r: r.first(),
lambda r: r.scalar(),
lambda r: r.fetchmany(),
lambda r: r._getter("user"),
lambda r: r._has_key("user"),
]:
trans = conn.begin()
result = conn.execute(users.insert(), user_id=1)
assert_raises_message(
exc.ResourceClosedError,
"This result object does not return rows. "
"It has been closed automatically.",
meth,
result,
)
trans.rollback()
def test_fetchone_til_end(self):
result = testing.db.execute("select * from users")
eq_(result.fetchone(), None)
eq_(result.fetchone(), None)
eq_(result.fetchone(), None)
result.close()
assert_raises_message(
exc.ResourceClosedError,
"This result object is closed.",
result.fetchone,
)
def test_connectionless_autoclose_rows_exhausted(self):
users = self.tables.users
users.insert().execute(dict(user_id=1, user_name="john"))
result = testing.db.execute("select * from users")
connection = result.connection
assert not connection.closed
eq_(result.fetchone(), (1, "john"))
assert not connection.closed
eq_(result.fetchone(), None)
assert connection.closed
@testing.requires.returning
def test_connectionless_autoclose_crud_rows_exhausted(self):
users = self.tables.users
stmt = (
users.insert()
.values(user_id=1, user_name="john")
.returning(users.c.user_id)
)
result = testing.db.execute(stmt)
connection = result.connection
assert not connection.closed
eq_(result.fetchone(), (1,))
assert not connection.closed
eq_(result.fetchone(), None)
assert connection.closed
def test_connectionless_autoclose_no_rows(self):
result = testing.db.execute("select * from users")
connection = result.connection
assert not connection.closed
eq_(result.fetchone(), None)
assert connection.closed
@testing.requires.updateable_autoincrement_pks
def test_connectionless_autoclose_no_metadata(self):
result = testing.db.execute("update users set user_id=5")
connection = result.connection
assert connection.closed
assert_raises_message(
exc.ResourceClosedError,
"This result object does not return rows.",
result.fetchone,
)
def test_row_case_sensitive(self):
row = testing.db.execute(
select(
[
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
]
)
).first()
eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
not_in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
assert_raises(KeyError, lambda: row["Case_insensitive"])
assert_raises(KeyError, lambda: row["casesensitive"])
def test_row_case_sensitive_unoptimized(self):
ins_db = engines.testing_engine(options={"case_sensitive": True})
row = ins_db.execute(
select(
[
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
text("3 AS screw_up_the_cols"),
]
)
).first()
eq_(
list(row.keys()),
["case_insensitive", "CaseSensitive", "screw_up_the_cols"],
)
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
not_in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["screw_up_the_cols"], 3)
assert_raises(KeyError, lambda: row["Case_insensitive"])
assert_raises(KeyError, lambda: row["casesensitive"])
assert_raises(KeyError, lambda: row["screw_UP_the_cols"])
def test_row_case_insensitive(self):
ins_db = engines.testing_engine(options={"case_sensitive": False})
row = ins_db.execute(
select(
[
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
]
)
).first()
eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["Case_insensitive"], 1)
eq_(row["casesensitive"], 2)
def test_row_case_insensitive_unoptimized(self):
ins_db = engines.testing_engine(options={"case_sensitive": False})
row = ins_db.execute(
select(
[
literal_column("1").label("case_insensitive"),
literal_column("2").label("CaseSensitive"),
text("3 AS screw_up_the_cols"),
]
)
).first()
eq_(
list(row.keys()),
["case_insensitive", "CaseSensitive", "screw_up_the_cols"],
)
in_("case_insensitive", row._keymap)
in_("CaseSensitive", row._keymap)
in_("casesensitive", row._keymap)
eq_(row["case_insensitive"], 1)
eq_(row["CaseSensitive"], 2)
eq_(row["screw_up_the_cols"], 3)
eq_(row["Case_insensitive"], 1)
eq_(row["casesensitive"], 2)
eq_(row["screw_UP_the_cols"], 3)
def test_row_as_args(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="john")
r = users.select(users.c.user_id == 1).execute().first()
users.delete().execute()
users.insert().execute(r)
eq_(users.select().execute().fetchall(), [(1, "john")])
def test_result_as_args(self):
users = self.tables.users
users2 = self.tables.users2
users.insert().execute(
[
dict(user_id=1, user_name="john"),
dict(user_id=2, user_name="ed"),
]
)
r = users.select().execute()
users2.insert().execute(list(r))
eq_(
users2.select().order_by(users2.c.user_id).execute().fetchall(),
[(1, "john"), (2, "ed")],
)
users2.delete().execute()
r = users.select().execute()
users2.insert().execute(*list(r))
eq_(
users2.select().order_by(users2.c.user_id).execute().fetchall(),
[(1, "john"), (2, "ed")],
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column(self):
users = self.tables.users
addresses = self.tables.addresses
users.insert().execute(user_id=1, user_name="john")
result = users.outerjoin(addresses).select().execute()
r = result.first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r["user_id"],
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
result._getter,
"user_id",
)
# pure positional targeting; users.c.user_id
# and addresses.c.user_id are known!
# works as of 1.1 issue #3501
eq_(r[users.c.user_id], 1)
eq_(r[addresses.c.user_id], None)
# try to trick it - fake_table isn't in the result!
# we get the correct error
fake_table = Table("fake", MetaData(), Column("user_id", Integer))
assert_raises_message(
exc.InvalidRequestError,
"Could not locate column in row for column 'fake.user_id'",
lambda: r[fake_table.c.user_id],
)
r = util.pickle.loads(util.pickle.dumps(r))
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r["user_id"],
)
result = users.outerjoin(addresses).select().execute()
result = _result.BufferedColumnResultProxy(result.context)
r = result.first()
assert isinstance(r, _result.BufferedColumnRow)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: r["user_id"],
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_by_col(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="john")
ua = users.alias()
u2 = users.alias()
result = (
select([users.c.user_id, ua.c.user_id])
.select_from(users.join(ua, true()))
.execute()
)
row = result.first()
# as of 1.1 issue #3501, we use pure positional
# targeting for the column objects here
eq_(row[users.c.user_id], 1)
eq_(row[ua.c.user_id], 1)
# this now works as of 1.1 issue #3501;
# previously this was stuck on "ambiguous column name"
assert_raises_message(
exc.InvalidRequestError,
"Could not locate column in row",
lambda: row[u2.c.user_id],
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_case_sensitive(self):
eng = engines.testing_engine(options=dict(case_sensitive=False))
row = eng.execute(
select(
[
literal_column("1").label("SOMECOL"),
literal_column("1").label("SOMECOL"),
]
)
).first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row["somecol"],
)
@testing.requires.duplicate_names_in_cursor_description
def test_ambiguous_column_contains(self):
users = self.tables.users
addresses = self.tables.addresses
# ticket 2702. in 0.7 we'd get True, False.
# in 0.8, both columns are present so it's True;
# but when they're fetched you'll get the ambiguous error.
users.insert().execute(user_id=1, user_name="john")
result = (
select([users.c.user_id, addresses.c.user_id])
.select_from(users.outerjoin(addresses))
.execute()
)
row = result.first()
eq_(
set([users.c.user_id in row, addresses.c.user_id in row]),
set([True]),
)
def test_loose_matching_one(self):
users = self.tables.users
addresses = self.tables.addresses
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "john"})
conn.execute(
addresses.insert(),
{"address_id": 1, "user_id": 1, "address": "email"},
)
# use some column labels in the SELECT
result = conn.execute(
TextualSelect(
text(
"select users.user_name AS users_user_name, "
"users.user_id AS user_id, "
"addresses.address_id AS address_id "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id "
"WHERE users.user_id=1 "
),
[
users.c.user_id,
users.c.user_name,
addresses.c.address_id,
],
positional=False,
)
)
row = result.first()
eq_(row[users.c.user_id], 1)
eq_(row[users.c.user_name], "john")
def test_loose_matching_two(self):
users = self.tables.users
addresses = self.tables.addresses
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "john"})
conn.execute(
addresses.insert(),
{"address_id": 1, "user_id": 1, "address": "email"},
)
# use some column labels in the SELECT
result = conn.execute(
TextualSelect(
text(
"select users.user_name AS users_user_name, "
"users.user_id AS user_id, "
"addresses.user_id "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id "
"WHERE users.user_id=1 "
),
[users.c.user_id, users.c.user_name, addresses.c.user_id],
positional=False,
)
)
row = result.first()
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row[users.c.user_id],
)
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name",
lambda: row[addresses.c.user_id],
)
eq_(row[users.c.user_name], "john")
def test_ambiguous_column_by_col_plus_label(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="john")
result = select(
[
users.c.user_id,
type_coerce(users.c.user_id, Integer).label("foo"),
]
).execute()
row = result.first()
eq_(row[users.c.user_id], 1)
eq_(row[1], 1)
def test_fetch_partial_result_map(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="ed")
t = text("select * from users").columns(user_name=String())
eq_(testing.db.execute(t).fetchall(), [(7, "ed")])
def test_fetch_unordered_result_map(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="ed")
class Goofy1(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "a"
class Goofy2(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "b"
class Goofy3(TypeDecorator):
impl = String
def process_result_value(self, value, dialect):
return value + "c"
t = text(
"select user_name as a, user_name as b, "
"user_name as c from users"
).columns(a=Goofy1(), b=Goofy2(), c=Goofy3())
eq_(testing.db.execute(t).fetchall(), [("eda", "edb", "edc")])
@testing.requires.subqueries
def test_column_label_targeting(self):
users = self.tables.users
users.insert().execute(user_id=7, user_name="ed")
for s in (
users.select().alias("foo"),
users.select().alias(users.name),
):
row = s.select(use_labels=True).execute().first()
eq_(row[s.c.user_id], 7)
eq_(row[s.c.user_name], "ed")
def test_keys(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
result = users.select().execute()
eq_(result.keys(), ["user_id", "user_name"])
row = result.first()
eq_(row.keys(), ["user_id", "user_name"])
def test_keys_anon_labels(self):
"""test [ticket:3483]"""
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
result = testing.db.execute(
select(
[
users.c.user_id,
users.c.user_name.label(None),
func.count(literal_column("1")),
]
).group_by(users.c.user_id, users.c.user_name)
)
eq_(result.keys(), ["user_id", "user_name_1", "count_1"])
row = result.first()
eq_(row.keys(), ["user_id", "user_name_1", "count_1"])
def test_items(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
r = users.select().execute().first()
eq_(
[(x[0].lower(), x[1]) for x in list(r.items())],
[("user_id", 1), ("user_name", "foo")],
)
def test_len(self):
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
r = users.select().execute().first()
eq_(len(r), 2)
r = testing.db.execute("select user_name, user_id from users").first()
eq_(len(r), 2)
r = testing.db.execute("select user_name from users").first()
eq_(len(r), 1)
def test_sorting_in_python(self):
users = self.tables.users
users.insert().execute(
dict(user_id=1, user_name="foo"),
dict(user_id=2, user_name="bar"),
dict(user_id=3, user_name="def"),
)
rows = users.select().order_by(users.c.user_name).execute().fetchall()
eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")])
eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")])
def test_column_order_with_simple_query(self):
# should return values in column definition order
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
r = users.select(users.c.user_id == 1).execute().first()
eq_(r[0], 1)
eq_(r[1], "foo")
eq_([x.lower() for x in list(r.keys())], ["user_id", "user_name"])
eq_(list(r.values()), [1, "foo"])
def test_column_order_with_text_query(self):
# should return values in query order
users = self.tables.users
users.insert().execute(user_id=1, user_name="foo")
r = testing.db.execute("select user_name, user_id from users").first()
eq_(r[0], "foo")
eq_(r[1], 1)
eq_([x.lower() for x in list(r.keys())], ["user_name", "user_id"])
eq_(list(r.values()), ["foo", 1])
@testing.crashes("oracle", "FIXME: unknown, varify not fails_on()")
@testing.crashes("firebird", "An identifier must begin with a letter")
@testing.provide_metadata
def test_column_accessor_shadow(self):
shadowed = Table(
"test_shadowed",
self.metadata,
Column("shadow_id", INT, primary_key=True),
Column("shadow_name", VARCHAR(20)),
Column("parent", VARCHAR(20)),
Column("row", VARCHAR(40)),
Column("_parent", VARCHAR(20)),
Column("_row", VARCHAR(20)),
)
self.metadata.create_all()
shadowed.insert().execute(
shadow_id=1,
shadow_name="The Shadow",
parent="The Light",
row="Without light there is no shadow",
_parent="Hidden parent",
_row="Hidden row",
)
r = shadowed.select(shadowed.c.shadow_id == 1).execute().first()
eq_(r.shadow_id, 1)
eq_(r["shadow_id"], 1)
eq_(r[shadowed.c.shadow_id], 1)
eq_(r.shadow_name, "The Shadow")
eq_(r["shadow_name"], "The Shadow")
eq_(r[shadowed.c.shadow_name], "The Shadow")
eq_(r.parent, "The Light")
eq_(r["parent"], "The Light")
eq_(r[shadowed.c.parent], "The Light")
eq_(r.row, "Without light there is no shadow")
eq_(r["row"], "Without light there is no shadow")
eq_(r[shadowed.c.row], "Without light there is no shadow")
eq_(r["_parent"], "Hidden parent")
eq_(r["_row"], "Hidden row")
def test_nontuple_row(self):
"""ensure the C version of BaseRow handles
duck-type-dependent rows.
As of 1.4 they are converted internally to tuples in any case.
"""
class MyList(object):
def __init__(self, data):
self.internal_list = data
def __len__(self):
return len(self.internal_list)
def __getitem__(self, i):
return list.__getitem__(self.internal_list, i)
proxy = Row(
object(),
[None],
{"key": (0, None, "key"), 0: (0, None, "key")},
MyList(["value"]),
)
eq_(list(proxy), ["value"])
eq_(proxy[0], "value")
eq_(proxy["key"], "value")
@testing.provide_metadata
def test_no_rowcount_on_selects_inserts(self):
"""assert that rowcount is only called on deletes and updates.
        This is because cursor.rowcount can be expensive on some dialects
        such as Firebird; however, many dialects require that it be called
        before the cursor is closed.
"""
metadata = self.metadata
engine = engines.testing_engine()
t = Table("t1", metadata, Column("data", String(10)))
metadata.create_all(engine)
with patch.object(
engine.dialect.execution_ctx_cls, "rowcount"
) as mock_rowcount:
mock_rowcount.__get__ = Mock()
engine.execute(
t.insert(), {"data": "d1"}, {"data": "d2"}, {"data": "d3"}
)
eq_(len(mock_rowcount.__get__.mock_calls), 0)
eq_(
engine.execute(t.select()).fetchall(),
[("d1",), ("d2",), ("d3",)],
)
eq_(len(mock_rowcount.__get__.mock_calls), 0)
engine.execute(t.update(), {"data": "d4"})
eq_(len(mock_rowcount.__get__.mock_calls), 1)
engine.execute(t.delete())
eq_(len(mock_rowcount.__get__.mock_calls), 2)
def test_row_is_sequence(self):
row = Row(
object(), [None], {"key": (None, 0), 0: (None, 0)}, ["value"]
)
is_true(isinstance(row, collections_abc.Sequence))
def test_row_is_hashable(self):
row = Row(
object(),
[None, None, None],
{"key": (None, 0), 0: (None, 0)},
(1, "value", "foo"),
)
eq_(hash(row), hash((1, "value", "foo")))
@testing.provide_metadata
def test_row_getitem_indexes_compiled(self):
values = Table(
"rp",
self.metadata,
Column("key", String(10), primary_key=True),
Column("value", String(10)),
)
values.create()
testing.db.execute(values.insert(), dict(key="One", value="Uno"))
row = testing.db.execute(values.select()).first()
eq_(row["key"], "One")
eq_(row["value"], "Uno")
eq_(row[0], "One")
eq_(row[1], "Uno")
eq_(row[-2], "One")
eq_(row[-1], "Uno")
eq_(row[1:0:-1], ("Uno",))
@testing.only_on("sqlite")
def test_row_getitem_indexes_raw(self):
row = testing.db.execute("select 'One' as key, 'Uno' as value").first()
eq_(row["key"], "One")
eq_(row["value"], "Uno")
eq_(row[0], "One")
eq_(row[1], "Uno")
eq_(row[-2], "One")
eq_(row[-1], "Uno")
eq_(row[1:0:-1], ("Uno",))
@testing.requires.cextensions
def test_row_c_sequence_check(self):
metadata = MetaData()
metadata.bind = "sqlite://"
users = Table(
"users",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(40)),
)
users.create()
users.insert().execute(name="Test")
row = users.select().execute().fetchone()
s = util.StringIO()
writer = csv.writer(s)
# csv performs PySequenceCheck call
writer.writerow(row)
assert s.getvalue().strip() == "1,Test"
@testing.requires.selectone
def test_empty_accessors(self):
statements = [
(
"select 1",
[
lambda r: r.last_inserted_params(),
lambda r: r.last_updated_params(),
lambda r: r.prefetch_cols(),
lambda r: r.postfetch_cols(),
lambda r: r.inserted_primary_key,
],
"Statement is not a compiled expression construct.",
),
(
select([1]),
[
lambda r: r.last_inserted_params(),
lambda r: r.inserted_primary_key,
],
r"Statement is not an insert\(\) expression construct.",
),
(
select([1]),
[lambda r: r.last_updated_params()],
r"Statement is not an update\(\) expression construct.",
),
(
select([1]),
[lambda r: r.prefetch_cols(), lambda r: r.postfetch_cols()],
r"Statement is not an insert\(\) "
r"or update\(\) expression construct.",
),
]
for stmt, meths, msg in statements:
r = testing.db.execute(stmt)
try:
for meth in meths:
assert_raises_message(
sa_exc.InvalidRequestError, msg, meth, r
)
finally:
r.close()
class KeyTargetingTest(fixtures.TablesTest):
run_inserts = "once"
run_deletes = None
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"keyed1",
metadata,
Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q"),
)
Table("keyed2", metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
Table("keyed3", metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
Table("keyed4", metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
Table("content", metadata, Column("t", String(30), key="type"))
Table("bar", metadata, Column("ctype", String(30), key="content_type"))
if testing.requires.schemas.enabled:
Table(
"wschema",
metadata,
Column("a", CHAR(2), key="b"),
Column("c", CHAR(2), key="q"),
schema=testing.config.test_schema,
)
@classmethod
def insert_data(cls):
cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
cls.tables.content.insert().execute(type="t1")
if testing.requires.schemas.enabled:
cls.tables[
"%s.wschema" % testing.config.test_schema
].insert().execute(dict(b="a1", q="c1"))
@testing.requires.schemas
def test_keyed_accessor_wschema(self):
keyed1 = self.tables["%s.wschema" % testing.config.test_schema]
row = testing.db.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single(self):
keyed1 = self.tables.keyed1
row = testing.db.execute(keyed1.select()).first()
eq_(row.b, "a1")
eq_(row.q, "c1")
eq_(row.a, "a1")
eq_(row.c, "c1")
def test_keyed_accessor_single_labeled(self):
keyed1 = self.tables.keyed1
row = testing.db.execute(keyed1.select().apply_labels()).first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_c, "c1")
def _test_keyed_targeting_no_label_at_all(self, expression):
lt = literal_column("2")
stmt = select([literal_column("1"), expression, lt]).select_from(
self.tables.keyed1
)
row = testing.db.execute(stmt).first()
eq_(row[expression], "a1")
eq_(row[lt], 2)
        # PostgreSQL, for example, has the key as "?column?", which dupes
        # easily. We get around that because we know that "2" is unique
eq_(row["2"], 2)
def test_keyed_targeting_no_label_at_all_one(self):
class not_named_max(expression.ColumnElement):
name = "not_named_max"
@compiles(not_named_max)
def visit_max(element, compiler, **kw):
# explicit add
kw["add_to_result_map"](None, None, (element,), NULLTYPE)
return "max(a)"
# assert that there is no "AS max_" or any label of any kind.
eq_(str(select([not_named_max()])), "SELECT max(a)")
nnm = not_named_max()
self._test_keyed_targeting_no_label_at_all(nnm)
def test_keyed_targeting_no_label_at_all_two(self):
class not_named_max(expression.ColumnElement):
name = "not_named_max"
@compiles(not_named_max)
def visit_max(element, compiler, **kw):
# we don't add to keymap here; compiler should be doing it
return "max(a)"
# assert that there is no "AS max_" or any label of any kind.
eq_(str(select([not_named_max()])), "SELECT max(a)")
nnm = not_named_max()
self._test_keyed_targeting_no_label_at_all(nnm)
def test_keyed_targeting_no_label_at_all_text(self):
t1 = text("max(a)")
t2 = text("min(a)")
stmt = select([t1, t2]).select_from(self.tables.keyed1)
row = testing.db.execute(stmt).first()
eq_(row[t1], "a1")
eq_(row[t2], "a1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_conflict_2(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(
select([keyed1, keyed2]).select_from(keyed1.join(keyed2, true()))
).first()
# column access is unambiguous
eq_(row[self.tables.keyed2.c.b], "b2")
# row.a is ambiguous
assert_raises_message(
exc.InvalidRequestError, "Ambig", getattr, row, "a"
)
# for "b" we have kind of a choice. the name "b" is not ambiguous in
# cursor.description in this case. It is however ambiguous as far as
# the objects we have queried against, because keyed1.c.a has key="b"
# and keyed1.c.b is "b". historically this was allowed as
# non-ambiguous, however the column it targets changes based on
# whether or not the dupe is present so it's ambiguous
# eq_(row.b, "b2")
assert_raises_message(
exc.InvalidRequestError, "Ambig", getattr, row, "b"
)
# illustrate why row.b above is ambiguous, and not "b2"; because
# if we didn't have keyed2, now it matches row.a. a new column
# shouldn't be able to grab the value from a previous column.
row = testing.db.execute(select([keyed1])).first()
eq_(row.b, "a1")
def test_keyed_accessor_composite_conflict_2_fix_w_uselabels(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(
select([keyed1, keyed2])
.select_from(keyed1.join(keyed2, true()))
.apply_labels()
).first()
# column access is unambiguous
eq_(row[self.tables.keyed2.c.b], "b2")
eq_(row["keyed2_b"], "b2")
eq_(row["keyed1_a"], "a1")
def test_keyed_accessor_composite_names_precedent(self):
keyed1 = self.tables.keyed1
keyed4 = self.tables.keyed4
row = testing.db.execute(
select([keyed1, keyed4]).select_from(keyed1.join(keyed4, true()))
).first()
eq_(row.b, "b4")
eq_(row.q, "q4")
eq_(row.a, "a1")
eq_(row.c, "c1")
@testing.requires.duplicate_names_in_cursor_description
def test_keyed_accessor_composite_keys_precedent(self):
keyed1 = self.tables.keyed1
keyed3 = self.tables.keyed3
row = testing.db.execute(
select([keyed1, keyed3]).select_from(keyed1.join(keyed3, true()))
).first()
eq_(row.q, "c1")
        # prior to 1.4 #4887, this raised an "ambiguous column name 'a'"
        # message, because "b" is linked to "a", which is a dupe. But we know
        # where "b" is in the row by position.
eq_(row.b, "a1")
# "a" is of course ambiguous
assert_raises_message(
exc.InvalidRequestError,
"Ambiguous column name 'a'",
getattr,
row,
"a",
)
eq_(row.d, "d3")
def test_keyed_accessor_composite_labeled(self):
keyed1 = self.tables.keyed1
keyed2 = self.tables.keyed2
row = testing.db.execute(
select([keyed1, keyed2])
.select_from(keyed1.join(keyed2, true()))
.apply_labels()
).first()
eq_(row.keyed1_b, "a1")
eq_(row.keyed1_a, "a1")
eq_(row.keyed1_q, "c1")
eq_(row.keyed1_c, "c1")
eq_(row.keyed2_a, "a2")
eq_(row.keyed2_b, "b2")
assert_raises(KeyError, lambda: row["keyed2_c"])
assert_raises(KeyError, lambda: row["keyed2_q"])
def test_keyed_accessor_column_is_repeated_multiple_times(self):
# test new logic added as a result of the combination of #4892 and
# #4887. We allow duplicate columns, but we also have special logic
# to disambiguate for the same column repeated, and as #4887 adds
# stricter ambiguous result column logic, the compiler has to know to
# not add these dupe columns to the result map, else they register as
# ambiguous.
keyed2 = self.tables.keyed2
keyed3 = self.tables.keyed3
stmt = (
select(
[
keyed2.c.a,
keyed3.c.a,
keyed2.c.a,
keyed2.c.a,
keyed3.c.a,
keyed3.c.a,
keyed3.c.d,
keyed3.c.d,
]
)
.select_from(keyed2.join(keyed3, true()))
.apply_labels()
)
result = testing.db.execute(stmt)
is_false(result._metadata.matched_on_name)
# ensure the result map is the same number of cols so we can
# use positional targeting
eq_(
[rec[0] for rec in result.context.compiled._result_columns],
[
"keyed2_a",
"keyed3_a",
"keyed2_a__1",
"keyed2_a__1",
"keyed3_a__1",
"keyed3_a__1",
"keyed3_d",
"keyed3_d__1",
],
)
row = result.first()
# keyed access will ignore the dupe cols
eq_(row[keyed2.c.a], "a2")
eq_(row[keyed3.c.a], "a3")
eq_(result._getter(keyed3.c.a)(row), "a3")
eq_(row[keyed3.c.d], "d3")
# however we can get everything positionally
eq_(row, ("a2", "a3", "a2", "a2", "a3", "a3", "d3", "d3"))
eq_(row[0], "a2")
eq_(row[1], "a3")
eq_(row[2], "a2")
eq_(row[3], "a2")
eq_(row[4], "a3")
eq_(row[5], "a3")
eq_(row[6], "d3")
eq_(row[7], "d3")
def test_columnclause_schema_column_one(self):
# originally addressed by [ticket:2932], however liberalized
# Column-targeting rules are deprecated
a, b = sql.column("a"), sql.column("b")
stmt = select([a, b]).select_from(table("keyed2"))
row = testing.db.execute(stmt).first()
in_(a, row)
in_(b, row)
def test_columnclause_schema_column_two(self):
keyed2 = self.tables.keyed2
stmt = select([keyed2.c.a, keyed2.c.b])
row = testing.db.execute(stmt).first()
in_(keyed2.c.a, row)
in_(keyed2.c.b, row)
def test_columnclause_schema_column_three(self):
# this is also addressed by [ticket:2932]
stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
row = testing.db.execute(stmt).first()
in_(stmt.selected_columns.a, row)
in_(stmt.selected_columns.b, row)
def test_columnclause_schema_column_four(self):
# originally addressed by [ticket:2932], however liberalized
# Column-targeting rules are deprecated
a, b = sql.column("keyed2_a"), sql.column("keyed2_b")
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
a, b
)
row = testing.db.execute(stmt).first()
in_(a, row)
in_(b, row)
in_(stmt.selected_columns.keyed2_a, row)
in_(stmt.selected_columns.keyed2_b, row)
def test_columnclause_schema_column_five(self):
# this is also addressed by [ticket:2932]
stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
keyed2_a=CHAR, keyed2_b=CHAR
)
row = testing.db.execute(stmt).first()
in_(stmt.selected_columns.keyed2_a, row)
in_(stmt.selected_columns.keyed2_b, row)
class PositionalTextTest(fixtures.TablesTest):
run_inserts = "once"
run_deletes = None
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"text1",
metadata,
Column("a", CHAR(2)),
Column("b", CHAR(2)),
Column("c", CHAR(2)),
Column("d", CHAR(2)),
)
@classmethod
def insert_data(cls):
cls.tables.text1.insert().execute(
[dict(a="a1", b="b1", c="c1", d="d1")]
)
def test_via_column(self):
c1, c2, c3, c4 = column("q"), column("p"), column("r"), column("d")
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c2], "b1")
eq_(row[c4], "d1")
eq_(row[1], "b1")
eq_(row["b"], "b1")
eq_(row.keys(), ["a", "b", "c", "d"])
eq_(row["r"], "c1")
eq_(row["d"], "d1")
def test_fewer_cols_than_sql_positional(self):
c1, c2 = column("q"), column("p")
stmt = text("select a, b, c, d from text1").columns(c1, c2)
# no warning as this can be similar for non-positional
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row["c"], "c1")
def test_fewer_cols_than_sql_non_positional(self):
c1, c2 = column("a"), column("p")
stmt = text("select a, b, c, d from text1").columns(c2, c1, d=CHAR)
# no warning as this can be similar for non-positional
result = testing.db.execute(stmt)
row = result.first()
# c1 name matches, locates
eq_(row[c1], "a1")
eq_(row["c"], "c1")
# c2 name does not match, doesn't locate
assert_raises_message(
exc.NoSuchColumnError, "in row for column 'p'", lambda: row[c2]
)
def test_more_cols_than_sql(self):
c1, c2, c3, c4 = column("q"), column("p"), column("r"), column("d")
stmt = text("select a, b from text1").columns(c1, c2, c3, c4)
with assertions.expect_warnings(
r"Number of columns in textual SQL \(4\) is "
r"smaller than number of columns requested \(2\)"
):
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c2], "b1")
assert_raises_message(
exc.NoSuchColumnError, "in row for column 'r'", lambda: row[c3]
)
def test_dupe_col_obj(self):
c1, c2, c3 = column("q"), column("p"), column("r")
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c2)
assert_raises_message(
exc.InvalidRequestError,
"Duplicate column expression requested in "
"textual SQL: <.*.ColumnClause.*; p>",
testing.db.execute,
stmt,
)
def test_anon_aliased_unique(self):
text1 = self.tables.text1
c1 = text1.c.a.label(None)
c2 = text1.alias().c.c
c3 = text1.alias().c.b
c4 = text1.alias().c.d.label(None)
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
        # text1.c.b goes nowhere, because we hit key fallback
        # but text1.c.b doesn't derive from text1.c.c
assert_raises_message(
exc.NoSuchColumnError,
"Could not locate column in row for column 'text1.b'",
lambda: row[text1.c.b],
)
def test_anon_aliased_overlapping(self):
text1 = self.tables.text1
c1 = text1.c.a.label(None)
c2 = text1.alias().c.a
c3 = text1.alias().c.a.label(None)
c4 = text1.c.a.label(None)
stmt = text("select a, b, c, d from text1").columns(c1, c2, c3, c4)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
def test_anon_aliased_name_conflict(self):
text1 = self.tables.text1
c1 = text1.c.a.label("a")
c2 = text1.alias().c.a
c3 = text1.alias().c.a.label("a")
c4 = text1.c.a.label("a")
# all cols are named "a". if we are positional, we don't care.
# this is new logic in 1.1
stmt = text("select a, b as a, c as a, d as a from text1").columns(
c1, c2, c3, c4
)
result = testing.db.execute(stmt)
row = result.first()
eq_(row[c1], "a1")
eq_(row[c2], "b1")
eq_(row[c3], "c1")
eq_(row[c4], "d1")
        # fails, because we hit key fallback and find conflicts
        # in columns that are present
assert_raises_message(
exc.NoSuchColumnError,
"Could not locate column in row for column 'text1.a'",
lambda: row[text1.c.a],
)
class AlternateResultProxyTest(fixtures.TablesTest):
__requires__ = ("sqlite",)
@classmethod
def setup_bind(cls):
cls.engine = engine = engines.testing_engine("sqlite://")
return engine
@classmethod
def define_tables(cls, metadata):
Table(
"test",
metadata,
Column("x", Integer, primary_key=True),
Column("y", String(50)),
)
@classmethod
def insert_data(cls):
cls.engine.execute(
cls.tables.test.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(1, 12)],
)
@contextmanager
def _proxy_fixture(self, cls):
self.table = self.tables.test
class ExcCtx(default.DefaultExecutionContext):
def get_result_proxy(self):
return cls(self)
self.patcher = patch.object(
self.engine.dialect, "execution_ctx_cls", ExcCtx
)
with self.patcher:
yield
def _test_proxy(self, cls):
with self._proxy_fixture(cls):
rows = []
r = self.engine.execute(select([self.table]))
assert isinstance(r, cls)
for i in range(5):
rows.append(r.fetchone())
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
rows = r.fetchmany(3)
eq_(rows, [(i, "t_%d" % i) for i in range(6, 9)])
rows = r.fetchall()
eq_(rows, [(i, "t_%d" % i) for i in range(9, 12)])
r = self.engine.execute(select([self.table]))
rows = r.fetchmany(None)
eq_(rows[0], (1, "t_1"))
# number of rows here could be one, or the whole thing
assert len(rows) == 1 or len(rows) == 11
r = self.engine.execute(select([self.table]).limit(1))
r.fetchone()
eq_(r.fetchone(), None)
r = self.engine.execute(select([self.table]).limit(5))
rows = r.fetchmany(6)
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
# result keeps going just fine with blank results...
eq_(r.fetchmany(2), [])
eq_(r.fetchmany(2), [])
eq_(r.fetchall(), [])
eq_(r.fetchone(), None)
# until we close
r.close()
self._assert_result_closed(r)
r = self.engine.execute(select([self.table]).limit(5))
eq_(r.first(), (1, "t_1"))
self._assert_result_closed(r)
r = self.engine.execute(select([self.table]).limit(5))
eq_(r.scalar(), 1)
self._assert_result_closed(r)
def _assert_result_closed(self, r):
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchone
)
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchmany, 2
)
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchall
)
def test_basic_plain(self):
self._test_proxy(_result.ResultProxy)
def test_basic_buffered_row_result_proxy(self):
self._test_proxy(_result.BufferedRowResultProxy)
def test_basic_fully_buffered_result_proxy(self):
self._test_proxy(_result.FullyBufferedResultProxy)
def test_basic_buffered_column_result_proxy(self):
self._test_proxy(_result.BufferedColumnResultProxy)
def test_resultprocessor_plain(self):
self._test_result_processor(_result.ResultProxy, False)
def test_resultprocessor_plain_cached(self):
self._test_result_processor(_result.ResultProxy, True)
def test_resultprocessor_buffered_column(self):
self._test_result_processor(_result.BufferedColumnResultProxy, False)
def test_resultprocessor_buffered_column_cached(self):
self._test_result_processor(_result.BufferedColumnResultProxy, True)
def test_resultprocessor_buffered_row(self):
self._test_result_processor(_result.BufferedRowResultProxy, False)
def test_resultprocessor_buffered_row_cached(self):
self._test_result_processor(_result.BufferedRowResultProxy, True)
def test_resultprocessor_fully_buffered(self):
self._test_result_processor(_result.FullyBufferedResultProxy, False)
def test_resultprocessor_fully_buffered_cached(self):
self._test_result_processor(_result.FullyBufferedResultProxy, True)
def _test_result_processor(self, cls, use_cache):
class MyType(TypeDecorator):
impl = String()
def process_result_value(self, value, dialect):
return "HI " + value
with self._proxy_fixture(cls):
with self.engine.connect() as conn:
if use_cache:
cache = {}
conn = conn.execution_options(compiled_cache=cache)
stmt = select([literal("THERE", type_=MyType())])
for i in range(2):
r = conn.execute(stmt)
eq_(r.scalar(), "HI THERE")
@testing.fixture
def row_growth_fixture(self):
with self._proxy_fixture(_result.BufferedRowResultProxy):
with self.engine.connect() as conn:
conn.execute(
self.table.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(15, 3000)],
)
yield conn
@testing.combinations(
("no option", None, {0: 5, 1: 25, 9: 125, 135: 625, 274: 1000}),
("lt 1000", 27, {0: 5, 16: 27, 70: 27, 150: 27, 250: 27}),
(
"gt 1000",
1500,
{0: 5, 1: 25, 9: 125, 135: 625, 274: 1500, 1351: 1500},
),
(
"gt 1500",
2000,
{0: 5, 1: 25, 9: 125, 135: 625, 274: 2000, 1351: 2000},
),
id_="iaa",
argnames="max_row_buffer,checks",
)
def test_buffered_row_growth(
self, row_growth_fixture, max_row_buffer, checks
):
if max_row_buffer:
result = row_growth_fixture.execution_options(
max_row_buffer=max_row_buffer
).execute(self.table.select())
else:
result = row_growth_fixture.execute(self.table.select())
assertion = {}
max_size = max(checks.values())
for idx, row in enumerate(result, 0):
if idx in checks:
assertion[idx] = result._bufsize
le_(len(result._BufferedRowResultProxy__rowbuffer), max_size)
eq_(checks, assertion)
|
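# Compare BibleWorks (BW) and USFM book-code abbreviations, collect the codes
# that differ between the two schemes, and build a find pattern plus a
# replacement dictionary for converting BW codes to USFM.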
bw = ["GEN","EXO","LEV","NUM","DEU","JOS","JDG","RUT","1SA","2SA","1KI","2KI","1CH","2CH","EZR","NEH","EST","JOB","PSA","PRO","ECC","SOL","ISA","JER","LAM","EZE","DAN","HOS","JOE","AMO","OBA","JON","MIC","NAH","HAB","ZEP","HAG","ZEC","MAL","MAT","MAR","LUK","JOH","ACT","ROM","1CO","2CO","GAL","EPH","PHI","COL","1TH","2TH","1TI","2TI","TIT","PHM","HEB","JAM","1PE","2PE","1JO","2JO","3JO","JUD","REV","TOB","JDT","ESG","WIS","SIR","BAR","EPJ","PRA","SUS","BEL","1MA","2MA","3MA","4MA","1ES","4ES","PRM","PSX","ODE","PSS","LAO","SIP","JSA","JDA","DNG","TBS","SUT","DAT","BET",]
usfm = ["GEN","EXO","LEV","NUM","DEU","JOS","JDG","RUT","1SA","2SA","1KI","2KI","1CH","2CH","EZR","NEH","EST","JOB","PSA","PRO","ECC","SNG","ISA","JER","LAM","EZK","DAN","HOS","JOL","AMO","OBA","JON","MIC","NAM","HAB","ZEP","HAG","ZEC","MAL","MAT","MRK","LUK","JHN","ACT","ROM","1CO","2CO","GAL","EPH","PHP","COL","1TH","2TH","1TI","2TI","TIT","PHM","HEB","JAS","1PE","2PE","1JN","2JN","3JN","JUD","REV","TOB","JDT","ESG","WIS","SIR","BAR","LJE","S3Y","SUS","BEL","1MA","2MA","3MA","4MA","1ES","2ES","MAN","PS2","ODA","PSS","EZA","5EZ","6EZ","DAG","PS3","2BA","LBA","JUB","ENO","1MQ","2MQ","3MQ","REP","4BA","LAO",]
bw_not_in_usfm = []
usfm_not_in_bw = []
for i in bw:
if i not in usfm:
bw_not_in_usfm.append(i)
for i in usfm:
if i not in bw:
usfm_not_in_bw.append(i)
#print(bw_not_in_usfm)
#print(usfm_not_in_bw)
book_matches = zip(bw_not_in_usfm[:17], usfm_not_in_bw)
#print(list(book_matches))
book_string = ""
for bw_abbr, usfm_abbr in book_matches:
    book_string += "{}={}:".format(bw_abbr, usfm_abbr)
bw_joined = '|'.join(bw_not_in_usfm[:17])
print("Find: {}".format(bw_joined))
print("Dictionary:{}".format(book_string)) # Dictionary for regex dict lookup and replace
'''
1. Paste the dictionary at the bottom of the file containing the BW book abbreviations to be converted to USFM (remove the final colon from the string)
2. Run regular expression FIND for (?s)(bw_joined)(?=.*:\1=(\w+)\b)
N.B. insert the bw_joined output in place of bw_joined
3. Replace (in EditPadPro 8): \g<2>
4. Remove pasted dictionary from end of file.
'''
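# A minimal sketch (not part of the manual procedure above) showing how the
# same lookup-and-replace could be done directly in Python. It reuses the
# bw_not_in_usfm / usfm_not_in_bw lists computed earlier; the sample input in
# the final comment is hypothetical.
import re
book_map = dict(zip(bw_not_in_usfm[:17], usfm_not_in_bw))
def bw_to_usfm(text):
    # Replace each standalone BW abbreviation with its USFM equivalent.
    pattern = re.compile(r"\b(" + "|".join(re.escape(k) for k in book_map) + r")\b")
    return pattern.sub(lambda m: book_map[m.group(1)], text)
# e.g. bw_to_usfm("SOL 1:1; MAR 3:4") would return "SNG 1:1; MRK 3:4"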
|
#!/usr/bin/env python
############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at sales@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
import sys
import re
from PySide import QtCore, QtGui, QtHelp, QtNetwork, QtXml
from displaywidget import DisplayWidget
from displayshape import TitleShape, DisplayShape, PanelShape, ImageShape, DocumentShape
class Launcher(QtGui.QMainWindow):
def __init__(self, parent = None):
QtGui.QMainWindow.__init__(self, parent)
self.categories = {}
self.runningProcesses = {}
self.examples = {}
self.runningExamples = []
self.titleFont = QtGui.QFont(self.font())
self.titleFont.setWeight(QtGui.QFont.Bold)
self.fontRatio = 0.8
self.documentFont = self.font()
self.inFullScreenResize = False
self.currentCategory = "[starting]"
self.qtLogo = QtGui.QImage()
self.rbLogo = QtGui.QImage()
self.currentExample = ""
self.assistantProcess = QtCore.QProcess()
parentPageAction1 = QtGui.QAction(self.tr("Show Parent Page"), self)
parentPageAction2 = QtGui.QAction(self.tr("Show Parent Page"), self)
parentPageAction3 = QtGui.QAction(self.tr("Show Parent Page"), self)
parentPageAction1.setShortcut(QtGui.QKeySequence(self.tr("Escape")))
parentPageAction2.setShortcut(QtGui.QKeySequence(self.tr("Backspace")))
parentPageAction3.setShortcut(QtGui.QKeySequence(self.tr("Alt+Left")))
fullScreenAction = QtGui.QAction(self.tr("Toggle &Full Screen"), self)
fullScreenAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+F")))
exitAction = QtGui.QAction(self.tr("E&xit"), self)
exitAction.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+Q")))
self.connect(parentPageAction1, QtCore.SIGNAL("triggered()"), self, QtCore.SIGNAL("showPage()"))
self.connect(parentPageAction2, QtCore.SIGNAL("triggered()"), self, QtCore.SIGNAL("showPage()"))
self.connect(parentPageAction3, QtCore.SIGNAL("triggered()"), self, QtCore.SIGNAL("showPage()"))
self.connect(fullScreenAction, QtCore.SIGNAL("triggered()"), self.toggleFullScreen)
self.connect(exitAction, QtCore.SIGNAL("triggered()"), self.close)
self.display = DisplayWidget()
self.addAction(parentPageAction1)
self.addAction(parentPageAction2)
self.addAction(parentPageAction3)
self.addAction(fullScreenAction)
self.addAction(exitAction)
self.slideshowTimer = QtCore.QTimer(self)
self.slideshowTimer.setInterval(5000)
self.resizeTimer = QtCore.QTimer(self)
self.resizeTimer.setSingleShot(True)
self.connect(self.resizeTimer, QtCore.SIGNAL("timeout()"), self.redisplayWindow)
self.connect(self.display, QtCore.SIGNAL("actionRequested"),
self.executeAction)
self.connect(self.display, QtCore.SIGNAL("categoryRequested"),
self.showExamples)
self.connect(self.display, QtCore.SIGNAL("documentationRequested"),
self.showExampleDocumentation)
self.connect(self.display, QtCore.SIGNAL("exampleRequested"),
self.showExampleSummary)
self.connect(self.display, QtCore.SIGNAL("launchRequested"),
self.launchExample)
self.connect(self, QtCore.SIGNAL("showPage()"), self.showParentPage,
QtCore.Qt.QueuedConnection)
self.connect(self, QtCore.SIGNAL("windowResized()"), self.redisplayWindow,
QtCore.Qt.QueuedConnection)
self.setCentralWidget(self.display)
self.setMaximumSize(QtGui.QApplication.desktop().screenGeometry().size())
self.setWindowTitle(self.tr("PyQt Examples and Demos"))
self.setWindowIcon(QtGui.QIcon(QtGui.QPixmap(":/images/qt4-logo.png")))
def initHelpEngine(self):
self.helpRootUrl = QtCore.QString("qthelp://com.trolltech.qt.%d%d%d/qdoc/" % (QtCore.QT_VERSION >> 16, ((QtCore.QT_VERSION >> 8) & 0xff), (QtCore.QT_VERSION & 0xff)))
# Store help collection file in cache dir of assistant.
cacheDir = QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.DataLocation) + "/Trolltech/Assistant"
helpDataFile = "qtdemo_%s.qhc" % QtCore.QT_VERSION_STR
dir = QtCore.QDir()
if not dir.exists(cacheDir):
dir.mkpath(cacheDir)
# Create the help engine (and a new helpDataFile if it does not exist).
self.helpEngine = QtHelp.QHelpEngineCore(cacheDir + helpDataFile)
self.helpEngine.setupData()
qtDocRoot = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.DocumentationPath) + "/qch"
qtDocRoot = QtCore.QDir(qtDocRoot).absolutePath()
qchFiles = ["/qt.qch", "/designer.qch", "/linguist.qch"]
oldDir = self.helpEngine.customValue("docDir", QtCore.QVariant(QtCore.QString())).toString()
if oldDir != qtDocRoot:
for qchFile in qchFiles:
self.helpEngine.unregisterDocumentation(QtHelp.QHelpEngineCore.namespaceName(qtDocRoot + qchFile))
# If the data that the engine will work on is not yet registered, do it
# now.
for qchFile in qchFiles:
self.helpEngine.registerDocumentation(qtDocRoot + qchFile)
self.helpEngine.setCustomValue("docDir", QtCore.QVariant(qtDocRoot))
def setup(self):
self.initHelpEngine()
self.documentationDir = QtCore.QDir(
QtCore.QLibraryInfo.location(
QtCore.QLibraryInfo.DocumentationPath))
self.imagesDir = QtCore.QDir(self.documentationDir)
if self.documentationDir.cd("html") and self.documentationDir.cd("images"):
self.imagesDir.setPath(self.documentationDir.path())
self.documentationDir.cdUp()
else:
QtGui.QMessageBox.warning(self, self.tr("No Documentation Found"),
self.tr("I could not find the Qt documentation."))
self.maximumLabels = 0
self.demosDir = QtCore.QDir("./../../demos")
demoCategories = self.readInfo(":/demos.xml", self.demosDir)
self.examplesDir = QtCore.QDir("./../../")
exampleCategories = self.readInfo(":/examples.xml", self.examplesDir)
if demoCategories + exampleCategories <= 0:
QtGui.QMessageBox.warning(self, self.tr("No Examples or Demos found"),
self.tr("I could not find any PyQt examples or demos.\n"\
"Please ensure that PyQt is installed correctly."),
QtGui.QMessageBox.Cancel, QtGui.QMessageBox.NoButton)
return False
self.maximumLabels = max(demoCategories + exampleCategories, self.maximumLabels)
for category in self.categories:
self.maximumLabels = max(len(self.categories[category]['examples']) + 1, self.maximumLabels)
mainDescription = self.categories['[main]']['description']
if len(mainDescription) > 0:
mainDescription += self.tr("\n")
self.categories['[main]']['description'] = mainDescription + self.tr(
"<p>Press <b>Escape</b>, <b>Backspace</b>, or <b>%1</b> to "
"return to a previous menu.</p>"
"<p>Press <b>%2</b> to switch between normal and full screen "
"modes.</p>"
"<p>Use <b>%3</b> to exit the launcher.</p>") \
.arg(QtCore.QString(QtGui.QKeySequence(self.tr("Alt+Left")))) \
.arg(QtCore.QString(QtGui.QKeySequence(self.tr("Ctrl+F")))) \
.arg(QtCore.QString(QtGui.QKeySequence(self.tr("Ctrl+Q"))))
self.emit(QtCore.SIGNAL("showPage()"))
return True
def enableLaunching(self):
process = self.sender()
example = self.runningProcesses[process]
del self.runningProcesses[process]
process.deleteLater()
self.runningExamples.remove(example)
if example == self.currentExample:
for i in range(0, self.display.shapesCount()):
shape = self.display.shape(i)
if shape.metadata.get("launch", "") == example:
shape.metadata["fade"] = 15
self.display.enableUpdates()
self.slideshowTimer.start()
def executeAction(self, action):
if action == "parent":
self.showParentPage()
elif action == "exit":
if len(self.runningExamples) == 0:
self.connect(self.display, QtCore.SIGNAL("displayEmpty()"), self.close)
self.display.reset()
else:
self.close()
def launchExample(self, uniquename):
if uniquename in self.runningExamples:
return
process = QtCore.QProcess(self)
self.connect(process, QtCore.SIGNAL("finished(int)"), self.enableLaunching)
self.runningExamples.append(uniquename)
self.runningProcesses[process] = uniquename
if self.examples[uniquename]['changedirectory'] == 'true':
process.setWorkingDirectory(self.examples[uniquename]['absolute path'])
process.start(sys.executable, [self.examples[uniquename]['path']])
if process.state() == QtCore.QProcess.Starting:
self.slideshowTimer.stop()
def showCategories(self):
self.newPage()
self.fadeShapes()
self.currentCategory = ""
self.currentExample = ""
# Sort the category names excluding any "Qt" prefix.
def csort(c1, c2):
if c1.startsWith("Qt "):
c1 = c1[3:]
if c2.startsWith("Qt "):
c2 = c2[3:]
return cmp(c1, c2)
categories = [c for c in self.categories.keys() if c != "[main]"]
categories.sort(csort)
horizontalMargin = 0.025*self.width()
verticalMargin = 0.025*self.height()
title = TitleShape(self.tr("PyQt Examples and Demos"),
self.titleFont, QtGui.QPen(QtGui.QColor("#a6ce39")), QtCore.QPointF(),
QtCore.QSizeF(0.5*self.width(), 4*verticalMargin))
title.setPosition(QtCore.QPointF(self.width() / 2 - title.rect().width() / 2,
-title.rect().height()))
title.setTarget(QtCore.QPointF(title.position().x(), verticalMargin))
self.display.appendShape(title)
topMargin = 6*verticalMargin
bottomMargin = self.height() - 3.2*verticalMargin
space = bottomMargin - topMargin
step = min(title.rect().height() / self.fontRatio, space/self.maximumLabels )
textHeight = self.fontRatio * step
startPosition = QtCore.QPointF(0.0, topMargin)
maxSize = QtCore.QSizeF(10.8*horizontalMargin, textHeight)
maxWidth = 0.0
newShapes = []
for category in categories:
caption = TitleShape(category, self.font(), QtGui.QPen(), QtCore.QPointF(startPosition), QtCore.QSizeF(maxSize))
caption.setPosition(QtCore.QPointF(-caption.rect().width(),
caption.position().y()))
caption.setTarget(QtCore.QPointF(2*horizontalMargin, caption.position().y()))
newShapes.append(caption)
startPosition += QtCore.QPointF(0.0, step)
maxWidth = max(maxWidth, caption.rect().width() )
exitButton = TitleShape(self.tr("Exit"), self.font(), QtGui.QPen(QtCore.Qt.white),
QtCore.QPointF(startPosition), QtCore.QSizeF(maxSize))
exitButton.setTarget(QtCore.QPointF(2*horizontalMargin, exitButton.position().y()))
newShapes.append(exitButton)
startPosition = QtCore.QPointF(self.width(), topMargin )
extra = (step - textHeight)/4
backgroundPath = QtGui.QPainterPath()
backgroundPath.addRect(-2*extra, -extra, maxWidth + 4*extra, textHeight + 2*extra)
for category in categories:
background = PanelShape(backgroundPath,
QtGui.QBrush(self.categories[category]['color']), QtGui.QBrush(QtGui.QColor("#e0e0ff")),
QtGui.QPen(QtCore.Qt.NoPen), QtCore.QPointF(startPosition),
QtCore.QSizeF(maxWidth + 4*extra, textHeight + 2*extra))
background.metadata["category"] = category
background.setInteractive(True)
background.setTarget(QtCore.QPointF(2*horizontalMargin,
background.position().y()))
self.display.insertShape(0, background)
startPosition += QtCore.QPointF(0.0, step)
exitPath = QtGui.QPainterPath()
exitPath.moveTo(-2*extra, -extra)
exitPath.lineTo(-8*extra, textHeight/2)
exitPath.lineTo(-extra, textHeight + extra)
exitPath.lineTo(maxWidth + 2*extra, textHeight + extra)
exitPath.lineTo(maxWidth + 2*extra, -extra)
exitPath.closeSubpath()
exitBackground = PanelShape(exitPath,
QtGui.QBrush(QtGui.QColor("#a6ce39")), QtGui.QBrush(QtGui.QColor("#c7f745")),
QtGui.QPen(QtCore.Qt.NoPen), QtCore.QPointF(startPosition),
QtCore.QSizeF(maxWidth + 10*extra, textHeight + 2*extra))
exitBackground.metadata["action"] = "exit"
exitBackground.setInteractive(True)
exitBackground.setTarget(QtCore.QPointF(2*horizontalMargin,
exitBackground.position().y()))
self.display.insertShape(0, exitBackground)
for caption in newShapes:
position = caption.target()
size = caption.rect().size()
caption.setPosition(QtCore.QPointF(-maxWidth, position.y()))
self.display.appendShape(caption)
leftMargin = 3*horizontalMargin + maxWidth
rightMargin = self.width() - 3*horizontalMargin
description = DocumentShape(self.categories['[main]']['description'],
self.documentFont, QtCore.QPointF(leftMargin, topMargin),
QtCore.QSizeF(rightMargin - leftMargin, space))
description.metadata["fade"] = 10
self.display.appendShape(description)
imageHeight = title.rect().height() + verticalMargin
qtLength = min(imageHeight, title.rect().left()-3*horizontalMargin)
qtMaxSize = QtCore.QSizeF(qtLength, qtLength)
qtShape = ImageShape(self.qtLogo,
QtCore.QPointF(2*horizontalMargin-extra, -imageHeight), qtMaxSize, 0,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
qtShape.metadata["fade"] = 15
qtShape.setTarget(QtCore.QPointF(qtShape.rect().x(), verticalMargin))
self.display.insertShape(0, qtShape)
trolltechMaxSize = QtCore.QSizeF(
self.width()-3*horizontalMargin-title.rect().right(), imageHeight)
trolltechShape = ImageShape(self.rbLogo,
QtCore.QPointF(self.width()-2*horizontalMargin-trolltechMaxSize.width()+extra,
-imageHeight),
trolltechMaxSize, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
trolltechShape.metadata["fade"] = 15
trolltechShape.setTarget(QtCore.QPointF(trolltechShape.rect().x(),
verticalMargin))
self.display.insertShape(0, trolltechShape)
self.addVersionAndCopyright(QtCore.QRectF(2*horizontalMargin,
self.height() - verticalMargin - textHeight,
self.width() - 4*horizontalMargin, textHeight))
def showExampleDocumentation(self, uniqueName):
self.disconnect(self.display, QtCore.SIGNAL("displayEmpty()"), self.resizeWindow)
self.disconnect(self.display, QtCore.SIGNAL("displayEmpty()"), self.close)
self.currentExample = uniqueName
url = self.helpRootUrl + self.examples[uniqueName]["document path"]
# Start assistant if it isn't already running.
if self.assistantProcess.state() != QtCore.QProcess.Running:
app = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator()
if sys.platform == 'darwin':
app += "Assistant.app/Contents/MacOS/Assistant"
else:
app += "assistant"
self.assistantProcess.start(app, ["-enableRemoteControl"])
if not self.assistantProcess.waitForStarted():
QtGui.QMessageBox.critical(None, "Qt Demo", "Could not start Qt Assistant.")
return
# Send command through remote control even if the process was started
# to activate assistant and bring it to front.
stream = QtCore.QTextStream(self.assistantProcess)
stream << "SetSource " << url << '\n'
def showExamples(self, category):
self.newPage()
self.fadeShapes()
self.currentCategory = category
self.currentExample = ""
horizontalMargin = 0.025*self.width()
verticalMargin = 0.025*self.height()
title = self.addTitle(category, verticalMargin)
self.addTitleBackground(title)
topMargin = 6*verticalMargin
bottomMargin = self.height() - 3.2*verticalMargin
space = bottomMargin - topMargin
step = min(title.rect().height() / self.fontRatio, space/self.maximumLabels )
textHeight = self.fontRatio * step
startPosition = QtCore.QPointF(2*horizontalMargin, self.height()+topMargin)
finishPosition = QtCore.QPointF(2*horizontalMargin, topMargin)
maxSize = QtCore.QSizeF(32*horizontalMargin, textHeight)
maxWidth = 0.0
for example in self.categories[self.currentCategory]['examples']:
caption = TitleShape(example, self.font(), QtGui.QPen(), QtCore.QPointF(startPosition), QtCore.QSizeF(maxSize))
caption.setTarget(QtCore.QPointF(finishPosition))
self.display.appendShape(caption)
startPosition += QtCore.QPointF(0.0, step)
finishPosition += QtCore.QPointF(0.0, step)
maxWidth = max(maxWidth, caption.rect().width() )
menuButton = TitleShape(self.tr("Main Menu"), self.font(),
QtGui.QPen(QtCore.Qt.white),
QtCore.QPointF(startPosition),
QtCore.QSizeF(maxSize))
menuButton.setTarget(QtCore.QPointF(finishPosition))
self.display.appendShape(menuButton)
startPosition = QtCore.QPointF(self.width(), topMargin )
extra = (step - textHeight)/4
for example in self.categories[self.currentCategory]['examples']:
uniquename = self.currentCategory + "-" + example
path = QtGui.QPainterPath()
path.addRect(-2*extra, -extra, maxWidth + 4*extra, textHeight+2*extra)
background = PanelShape(path,
QtGui.QBrush(self.examples[uniquename]['color']),
QtGui.QBrush(QtGui.QColor("#e0e0ff")),
QtGui.QPen(QtCore.Qt.NoPen), QtCore.QPointF(startPosition),
QtCore.QSizeF(maxWidth + 4*extra, textHeight + 2*extra))
background.metadata["example"] = uniquename
background.setInteractive(True)
background.setTarget(QtCore.QPointF(2*horizontalMargin,
background.position().y()))
self.display.insertShape(0, background)
startPosition += QtCore.QPointF(0.0, step)
backPath = QtGui.QPainterPath()
backPath.moveTo(-2*extra, -extra)
backPath.lineTo(-8*extra, textHeight/2)
backPath.lineTo(-extra, textHeight + extra)
backPath.lineTo(maxWidth + 2*extra, textHeight + extra)
backPath.lineTo(maxWidth + 2*extra, -extra)
backPath.closeSubpath()
buttonBackground = PanelShape(backPath,
QtGui.QBrush(QtGui.QColor("#a6ce39")), QtGui.QBrush(QtGui.QColor("#c7f745")),
QtGui.QPen(QtCore.Qt.NoPen), QtCore.QPointF(startPosition),
QtCore.QSizeF(maxWidth + 10*extra, textHeight + 2*extra))
buttonBackground.metadata["action"] = "parent"
buttonBackground.setInteractive(True)
buttonBackground.setTarget(QtCore.QPointF(2*horizontalMargin,
buttonBackground.position().y()))
self.display.insertShape(0, buttonBackground)
leftMargin = 3*horizontalMargin + maxWidth
rightMargin = self.width() - 3*horizontalMargin
description = DocumentShape(self.categories[self.currentCategory]['description'],
self.documentFont, QtCore.QPointF(leftMargin, topMargin),
QtCore.QSizeF(rightMargin - leftMargin, space), 0)
description.metadata["fade"] = 10
self.display.appendShape(description)
self.addVersionAndCopyright(QtCore.QRectF(2*horizontalMargin,
self.height() - verticalMargin - textHeight,
self.width() - 4*horizontalMargin, textHeight))
def showExampleSummary(self, uniquename):
self.newPage()
self.fadeShapes()
self.currentExample = uniquename
horizontalMargin = 0.025*self.width()
verticalMargin = 0.025*self.height()
title = self.addTitle(self.examples[uniquename]['name'], verticalMargin)
titleBackground = self.addTitleBackground(title)
topMargin = 2*verticalMargin + titleBackground.rect().bottom()
bottomMargin = self.height() - 8*verticalMargin
space = bottomMargin - topMargin
step = min(title.rect().height() / self.fontRatio,
( bottomMargin + 4.8*verticalMargin - topMargin )/self.maximumLabels )
footerTextHeight = self.fontRatio * step
leftMargin = 3*horizontalMargin
rightMargin = self.width() - 3*horizontalMargin
if self.examples[self.currentExample].has_key('description'):
description = DocumentShape( self.examples[self.currentExample]['description'],
self.documentFont, QtCore.QPointF(leftMargin, topMargin),
QtCore.QSizeF(rightMargin-leftMargin, space), 0 )
description.metadata["fade"] = 10
description.setPosition(QtCore.QPointF(description.position().x(),
0.8*self.height()-description.rect().height()))
self.display.appendShape(description)
space = description.position().y() - topMargin - 2*verticalMargin
if self.examples[self.currentExample].has_key('image files'):
image = QtGui.QImage(self.examples[self.currentExample]['image files'][0])
imageMaxSize = QtCore.QSizeF(self.width() - 8*horizontalMargin, space)
self.currentFrame = ImageShape( image,
QtCore.QPointF(self.width()-imageMaxSize.width()/2, topMargin),
QtCore.QSizeF(imageMaxSize ))
self.currentFrame.metadata["fade"] = 15
self.currentFrame.setTarget(QtCore.QPointF(self.width()/2-imageMaxSize.width()/2,
topMargin))
self.display.appendShape(self.currentFrame)
if len(self.examples[self.currentExample]['image files']) > 1:
self.connect(self.slideshowTimer, QtCore.SIGNAL("timeout()"),
self.updateExampleSummary)
self.slideshowFrame = 0
self.slideshowTimer.start()
maxSize = QtCore.QSizeF(0.3*self.width(),2*verticalMargin)
leftMargin = 0.0
rightMargin = 0.0
backButton = TitleShape(self.currentCategory, self.font(),
QtGui.QPen(QtCore.Qt.white), QtCore.QPointF(0.1*self.width(), self.height()), QtCore.QSizeF(maxSize),
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
backButton.setTarget(QtCore.QPointF(backButton.position().x(),
self.height() - 5.2*verticalMargin))
self.display.appendShape(backButton)
maxWidth = backButton.rect().width()
textHeight = backButton.rect().height()
extra = (3*verticalMargin - textHeight)/4
path = QtGui.QPainterPath()
path.moveTo(-extra, -extra)
path.lineTo(-4*extra, textHeight/2)
path.lineTo(-extra, textHeight + extra)
path.lineTo(maxWidth + 2*extra, textHeight + extra)
path.lineTo(maxWidth + 2*extra, -extra)
path.closeSubpath()
buttonBackground = PanelShape(path,
QtGui.QBrush(QtGui.QColor("#a6ce39")), QtGui.QBrush(QtGui.QColor("#c7f745")), QtGui.QPen(QtCore.Qt.NoPen),
QtCore.QPointF(backButton.position()),
QtCore.QSizeF(maxWidth + 6*extra, textHeight + 2*extra))
buttonBackground.metadata["category"] = self.currentCategory
buttonBackground.setInteractive(True)
buttonBackground.setTarget(QtCore.QPointF(backButton.target()))
self.display.insertShape(0, buttonBackground)
leftMargin = buttonBackground.rect().right()
if self.examples[self.currentExample].has_key('absolute path'):
launchCaption = TitleShape(self.tr("Launch"),
self.font(), QtGui.QPen(QtCore.Qt.white), QtCore.QPointF(0.0, 0.0), QtCore.QSizeF(maxSize),
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
launchCaption.setPosition(QtCore.QPointF(
0.9*self.width() - launchCaption.rect().width(), self.height()))
launchCaption.setTarget(QtCore.QPointF(launchCaption.position().x(),
self.height() - 5.2*verticalMargin))
self.display.appendShape(launchCaption)
maxWidth = launchCaption.rect().width()
textHeight = launchCaption.rect().height()
extra = (3*verticalMargin - textHeight)/4
path = QtGui.QPainterPath()
path.addRect(-2*extra, -extra, maxWidth + 4*extra, textHeight + 2*extra)
backgroundColor = QtGui.QColor("#a63e39")
highlightedColor = QtGui.QColor("#f95e56")
background = PanelShape(path,
QtGui.QBrush(backgroundColor), QtGui.QBrush(highlightedColor), QtGui.QPen(QtCore.Qt.NoPen),
QtCore.QPointF(launchCaption.position()),
QtCore.QSizeF(maxWidth + 4*extra, textHeight + 2*extra))
background.metadata["fade minimum"] = 120
background.metadata["launch"] = self.currentExample
background.setInteractive(True)
background.setTarget(QtCore.QPointF(launchCaption.target()))
if self.currentExample in self.runningExamples:
background.metadata["highlight"] = True
background.metadata["highlight scale"] = 0.99
background.animate()
background.metadata["fade"] = -135
self.slideshowTimer.stop()
self.display.insertShape(0, background)
rightMargin = background.rect().left()
if self.examples[self.currentExample]['document path']:
documentCaption = TitleShape(self.tr("Show Documentation"),
self.font(), QtGui.QPen(QtCore.Qt.white), QtCore.QPointF(0.0, 0.0), QtCore.QSizeF(maxSize),
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
if rightMargin == 0.0:
documentCaption.setPosition(QtCore.QPointF(
0.9*self.width() - documentCaption.rect().width(), self.height()))
else:
documentCaption.setPosition(QtCore.QPointF(
leftMargin/2 + rightMargin/2 - documentCaption.rect().width()/2,
self.height()))
documentCaption.setTarget(QtCore.QPointF(documentCaption.position().x(),
self.height() - 5.2*verticalMargin))
self.display.appendShape(documentCaption)
maxWidth = documentCaption.rect().width()
textHeight = documentCaption.rect().height()
extra = (3*verticalMargin - textHeight)/4
path = QtGui.QPainterPath()
path.addRect(-2*extra, -extra, maxWidth + 4*extra, textHeight + 2*extra)
background = PanelShape(path,
QtGui.QBrush(QtGui.QColor("#9c9cff")), QtGui.QBrush(QtGui.QColor("#cfcfff")), QtGui.QPen(QtCore.Qt.NoPen),
QtCore.QPointF(documentCaption.position()),
QtCore.QSizeF(maxWidth + 4*extra, textHeight + 2*extra))
background.metadata["fade minimum"] = 120
background.metadata["documentation"] = self.currentExample
background.setInteractive(True)
background.setTarget(QtCore.QPointF(documentCaption.target()))
self.display.insertShape(0, background)
self.addVersionAndCopyright(QtCore.QRectF(2*horizontalMargin,
self.height() - verticalMargin - footerTextHeight,
self.width() - 4*horizontalMargin, footerTextHeight))
def showParentPage(self):
self.slideshowTimer.stop()
self.disconnect(self.slideshowTimer, QtCore.SIGNAL("timeout()"), self.updateExampleSummary)
if len(self.currentExample) > 0:
self.currentExample = ""
self.redisplayWindow()
elif len(self.currentCategory) > 0:
self.currentCategory = ""
self.redisplayWindow()
def updateExampleSummary(self):
if self.examples[self.currentExample].has_key('image files'):
self.currentFrame.metadata["fade"] = -15
self.currentFrame.setTarget(QtCore.QPointF((self.currentFrame.position() -
QtCore.QPointF(0.5*self.width(), 0))))
self.slideshowFrame = (self.slideshowFrame+1) % len(self.examples[self.currentExample]['image files'])
image = QtGui.QImage(self.examples[self.currentExample]['image files'][self.slideshowFrame])
imageSize = self.currentFrame.maxSize
imagePosition = QtCore.QPointF(self.width() - imageSize.width()/2,
self.currentFrame.position().y())
self.currentFrame = ImageShape(image, QtCore.QPointF(imagePosition), QtCore.QSizeF(imageSize))
self.currentFrame.metadata["fade"] = 15
self.currentFrame.setTarget(QtCore.QPointF(self.width()/2-imageSize.width()/2,
imagePosition.y()))
self.display.appendShape(self.currentFrame)
def closeEvent(self, event):
if len(self.runningExamples) > 0:
if QtGui.QMessageBox.warning(self, self.tr("Examples Running"),
self.tr("There are examples running. Do you really want to exit?"),
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No ) == QtGui.QMessageBox.No:
event.ignore()
return
for example in self.runningProcesses.keys():
example.terminate()
example.waitForFinished(1000)
self.runningProcesses.clear()
self.resizeTimer.stop()
self.slideshowTimer.stop()
def resizeEvent(self, event):
self.documentFont = QtGui.QFont(self.font())
self.documentFont.setPointSizeF(min(self.documentFont.pointSizeF()*self.width()/640.0,
self.documentFont.pointSizeF()*self.height()/480.0))
if self.inFullScreenResize:
self.emit(QtCore.SIGNAL("windowResized()"))
self.inFullScreenResize = False
elif self.currentCategory != "[starting]":
self.resizeTimer.start(500)
def toggleFullScreen(self):
if self.inFullScreenResize:
return
self.inFullScreenResize = True
self.connect(self.display, QtCore.SIGNAL("displayEmpty()"), self.resizeWindow, QtCore.Qt.QueuedConnection)
self.display.reset()
def redisplayWindow(self):
if len(self.currentExample) > 0:
self.showExampleSummary(self.currentExample)
elif len(self.currentCategory) > 0:
self.showExamples(self.currentCategory)
else:
self.showCategories()
def resizeWindow(self):
self.disconnect(self.display, QtCore.SIGNAL("displayEmpty()"), self.resizeWindow)
if self.isFullScreen():
self.showNormal()
else:
self.showFullScreen()
def addTitle(self, title, verticalMargin):
titlePosition = QtCore.QPointF(0.0, 2*verticalMargin)
newTitle = TitleShape(title, self.titleFont, QtGui.QPen(QtCore.Qt.white),
QtCore.QPointF(titlePosition), QtCore.QSizeF(0.5*self.width(), 2*verticalMargin ),
QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop )
newTitle.setPosition(QtCore.QPointF(-newTitle.rect().width(), titlePosition.y()))
newTitle.setTarget(QtCore.QPointF(0.25*self.width(), titlePosition.y()))
newTitle.metadata["fade"] = 15
self.display.appendShape(newTitle)
return newTitle
def addTitleBackground(self, titleShape):
backgroundPath = QtGui.QPainterPath()
backgroundPath.addRect(0, -titleShape.rect().height()*0.3, self.width(),
titleShape.rect().height()*1.6)
titleBackground = PanelShape(backgroundPath, QtGui.QBrush(QtGui.QColor("#a6ce39") ),
QtGui.QBrush(QtGui.QColor("#a6ce39") ), QtGui.QPen(QtCore.Qt.NoPen),
QtCore.QPointF( self.width(), titleShape.position().y() ),
QtCore.QSizeF(backgroundPath.boundingRect().size() ))
titleBackground.setTarget(QtCore.QPointF(0.0, titleShape.position().y()))
self.display.insertShape(0, titleBackground)
return titleBackground
def readExampleDescription(self, parentNode):
node = parentNode.firstChild()
description = ""
while not node.isNull():
if node.isText():
description += node.nodeValue()
elif node.hasChildNodes():
if node.nodeName() == "b":
beginTag = "<b>"
endTag = "</b>"
elif node.nodeName() == "a":
beginTag = "<font color=\"blue\">"
endTag = "</font>"
elif node.nodeName() == "i":
beginTag = "<i>"
endTag = "</i>"
elif node.nodeName() == "tt":
beginTag = "<tt>"
endTag = "</tt>"
description += beginTag + self.readExampleDescription(node) + endTag
node = node.nextSibling()
return description
def readInfo(self, resource, dir_):
categoriesFile = QtCore.QFile(resource)
document = QtXml.QDomDocument()
document.setContent(categoriesFile)
documentElement = document.documentElement()
categoryNodes = documentElement.elementsByTagName("category")
self.categories['[main]'] = {}
self.categories['[main]']['examples'] = []
self.categories['[main]']['color'] = QtGui.QColor("#f0f0f0")
self.readCategoryDescription(dir_, '[main]')
self.qtLogo.load(self.imagesDir.absoluteFilePath(":/images/qt4-logo.png"))
self.rbLogo.load(self.imagesDir.absoluteFilePath(":/images/rb-logo.png"))
for i in range(categoryNodes.length()):
elem = categoryNodes.item(i).toElement()
categoryName = QtCore.QString(elem.attribute("name"))
categoryDirName = QtCore.QString(elem.attribute("dirname"))
categoryDocName = QtCore.QString(elem.attribute("docname"))
categoryColor = QtGui.QColor(elem.attribute("color", "#f0f0f0"))
categoryDir = QtCore.QDir(dir_)
if categoryDir.cd(categoryDirName):
self.categories[categoryName] = {}
self.readCategoryDescription(categoryDir, categoryName)
self.categories[categoryName]['examples'] = []
exampleNodes = elem.elementsByTagName("example")
self.maximumLabels = max(self.maximumLabels, exampleNodes.length())
# Only add those examples we can find.
for j in range(exampleNodes.length()):
exampleDir = QtCore.QDir(categoryDir)
exampleNode = exampleNodes.item(j)
element = exampleNode.toElement()
exampleName = element.attribute("name")
exampleFileName = element.attribute("filename")
uniqueName = categoryName + "-" + exampleName
self.examples[uniqueName] = {}
if not categoryDocName.isEmpty():
docName = categoryDocName + "-" + exampleFileName + ".html"
else:
docName = categoryDirName + "-" + exampleFileName + ".html"
self.examples[uniqueName]['name'] = exampleName
self.examples[uniqueName]['document path'] = ""
self.findDescriptionAndImages(uniqueName, docName)
self.examples[uniqueName]['changedirectory'] = element.attribute("changedirectory", "true")
self.examples[uniqueName]['color'] = QtGui.QColor(element.attribute("color", "#f0f0f0"))
if element.attribute("executable", "true") != "true":
del self.examples[uniqueName]
continue
examplePath = None
if sys.platform == "win32":
examplePyName = exampleFileName + ".pyw"
else:
examplePyName = exampleFileName + ".py"
if exampleDir.exists(examplePyName):
examplePath = exampleDir.absoluteFilePath(examplePyName)
elif exampleDir.cd(exampleFileName):
if exampleDir.exists(examplePyName):
examplePath = exampleDir.absoluteFilePath(examplePyName)
if examplePath and not examplePath.isNull():
self.examples[uniqueName]['absolute path'] = exampleDir.absolutePath()
self.examples[uniqueName]['path'] = examplePath
self.categories[categoryName]['examples'].append(exampleName)
else:
del self.examples[uniqueName]
self.categories[categoryName]['color'] = categoryColor
return len(self.categories)
def addVersionAndCopyright(self, rect):
versionCaption = TitleShape(QtCore.QString("Qt %1").arg(QtCore.QT_VERSION_STR),
self.font(), QtGui.QPen(QtGui.QColor(0,0,0,0)),
QtCore.QPointF(rect.center().x(), rect.top()),
QtCore.QSizeF(0.5*rect.width(), rect.height()),
QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
versionCaption.metadata["fade"] = 15
self.display.appendShape(versionCaption)
copyrightCaption = TitleShape(QtCore.QString("Copyright \xa9 2005-2006 Trolltech AS"),
self.font(), QtGui.QPen(QtGui.QColor(0,0,0,0)),
QtCore.QPointF(rect.topLeft()),
QtCore.QSizeF(0.5*rect.width(), rect.height()),
QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
copyrightCaption.metadata["fade"] = 15
self.display.appendShape(copyrightCaption)
def fadeShapes(self):
for i in range(0, self.display.shapesCount()):
shape = self.display.shape(i)
shape.metadata["fade"] = -15
shape.metadata["fade minimum"] = 0
def findDescriptionAndImages(self, uniqueName, docName):
if self.documentationDir.exists(docName):
self.examples[uniqueName]['document path'] = docName
exampleDoc = QtXml.QDomDocument()
exampleFile = QtCore.QFile(self.documentationDir.absoluteFilePath(docName))
exampleDoc.setContent(exampleFile)
paragraphs = exampleDoc.elementsByTagName("p")
for p in range(paragraphs.length()):
descriptionNode = paragraphs.item(p)
description = self.readExampleDescription(descriptionNode)
if QtCore.QString(description).indexOf(QtCore.QRegExp(QtCore.QString("((The|This) )?(%1 )?.*(example|demo)").arg(self.examples[uniqueName]['name']), QtCore.Qt.CaseInsensitive)) != -1:
self.examples[uniqueName]['description'] = description
break
images = exampleDoc.elementsByTagName("img")
imageFiles = []
for i in range(images.length()):
imageElement = images.item(i).toElement()
source = QtCore.QString(imageElement.attribute("src"))
if "-logo" not in source:
imageFiles.append(self.documentationDir.absoluteFilePath(source))
if len(imageFiles) > 0:
self.examples[uniqueName]['image files'] = imageFiles
def newPage(self):
self.slideshowTimer.stop()
self.disconnect(self.slideshowTimer, QtCore.SIGNAL("timeout()"), self.updateExampleSummary)
self.disconnect(self.display, QtCore.SIGNAL("displayEmpty()"), self.resizeWindow)
def readCategoryDescription(self, categoryDir, categoryName):
## categoryDirName = categoryDir.absolutePath()
## if categoryDirName.find("examples") != -1:
## categoryDirName = re.sub(".*/examples(.*)", r"%s\1" % QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.ExamplesPath), categoryDirName)
## else:
## categoryDirName = re.sub(".*/demos(.*)", r"%s\1" % QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.DemosPath), categoryDirName)
## categoryDir = QtCore.QDir(categoryDirName)
if categoryDir.exists("README"):
file = QtCore.QFile(categoryDir.absoluteFilePath("README"))
if not file.open(QtCore.QFile.ReadOnly):
return
inputStream = QtCore.QTextStream(file)
paragraphs = []
currentPara = []
openQuote = True
while not inputStream.atEnd():
line = inputStream.readLine()
at = line.indexOf("\"", 0)
while at != -1:
if openQuote:
line.replace(at, 1, QtCore.QChar(QtCore.QChar.Punctuation_InitialQuote))
else:
line.replace(at, 1, QtCore.QChar(QtCore.QChar.Punctuation_FinalQuote))
openQuote = not openQuote
at = line.indexOf("\"", at)
if not line.trimmed().isEmpty():
currentPara.append(str(line.trimmed()))
elif len(currentPara) > 0:
paragraphs.append(" ".join(currentPara))
currentPara = []
else:
break
if len(currentPara) > 0:
paragraphs.append(" ".join(currentPara))
self.categories[categoryName]['description'] = "<p>"+"\n</p><p>".join(paragraphs)+"</p>"
def slotShowPage(self):
self.emit(QtCore.SIGNAL("showPage()"))
|
# coding: utf-8
# pip install beautifulsoup4 requests lxml
from bs4 import BeautifulSoup as bs
import re
import utils
base_url = 'https://www.ravpower.jp/product-category/battery'
def crawl(url:str=base_url):
'''
Fetch the list of products.
'''
data = utils.fetch(url)
data_list = [data, ]
soup = bs(data, 'lxml')
# Read each listing page in order
page_list = list(set([a['href'] for a in soup.find('ul', class_='page-numbers').find_all('a', class_='page-numbers')]))
for page in page_list:
d = utils.fetch(page)
if d:
data_list.append(d)
# item list
output = []
for d in data_list:
soup = bs(d, 'lxml')
for item in soup.find('ul', class_='products').find_all('li'):
if not item.find('div', class_='product_details'):
continue
o = { 'manufacture': 'RAVPower' }
o['name'] = item.find('h3').string.strip()
o['url'] = item.find('a', class_='product_item_link')['href']
o['image'] = item.find('img')['src']
o['detail'] = crawl_detail(item.a['href'])
# Infer the capacity (mAh) from the product name
m = re.search(r'([1-9][0-9]*00)mAh', o['name'])
if m:
o['capacity'] = int(m.group(1))
output.append(o)
return output
def crawl_detail(url):
'''
Fetch the product details.
'''
data = utils.fetch(url)
soup = bs(data, 'lxml')
details = {}
# Link to the Amazon product page
for a in soup.find('div', class_='woocommerce-product-details__short-description').find_all('a'):
if a['href'].startswith('https://www.amazon.co.jp/'):
details['amazon'] = a['href']
# Specifications
for p in soup.find_all('p') + soup.find_all('li'):
if (p.text.find(':') != -1 or p.text.find('：') != -1) and p.text.find('、') == -1 and p.text.find('。') == -1:
for s in p.text.splitlines():
# Split on a half-width ':' first; if that fails, try the full-width '：'
cols = s.split(':')
if len(cols) != 2:
cols = s.split('：')
# If that still fails, skip the line
if len(cols) != 2:
continue
# Also skip empty values
if len(cols[1].strip()) == 0:
continue
details[cols[0].strip()] = cols[1].strip()
return details
if __name__ == '__main__':
crawl(base_url)
|
#!/usr/bin/python3
import sys
import os
import signal
import argparse
import json
from datetime import datetime,timedelta
import subprocess
import random
import csv
#import awslib.patterns as aws
class GError(Exception):
'''Module exception'''
pass
KVP={ # key-value pair defaults/validators (overridden by command-line)
'settings': '~stdin',
}
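# Illustrative only (hypothetical keys, not used by this script): overrideKVP()
# below treats a companion '_<key>' entry either as a 1-tuple validator or as a
# (convert, min, max) triple that coerces the value and clamps it into range, e.g.:
#   'retries': 3, '_retries': (int, 0, 10),
#   'region': 'us-east-1', '_region': (lambda k, v: v if v.startswith('us-') else None,),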
def overrideKVP(overrides):
'''Override KVP dictionary defaults with command-line inputs'''
for k,v in overrides.items():
dfv,meta = KVP.get(k), KVP.get('_' + k)
if k.startswith('_') or dfv is None: raise GError('{} key unrecognized'.format(k))
if v in {'', '?'}:
if not meta or len(meta) < 3:
raise GError('specify value for {}; default is {}'.format(k, KVP[k]))
raise GError('specify value in [{},{}] range for {}; default is {}'.format(
meta[1], meta[2], k, KVP[k]))
if meta:
try:
if len(meta) == 1: # apply validator function to value
v = meta[0](k, v)
if v is None: raise GError('validation failed')
elif len(meta) >= 3: # convert string value to desired type with bounds check
try: ev = eval(v) if type(v) is str else v
except KeyboardInterrupt: raise
except: raise GError('invalid value')
v = meta[0](ev)
if v < meta[1]: v = meta[1]
elif v > meta[2]: v = meta[2]
except (ValueError, GError) as e:
raise GError('{} cannot be set ({})'.format(k, e))
KVP[k] = v
def terminate(sig, frame):
'''Raise a keyboard interrupt to terminate process'''
raise KeyboardInterrupt
def ex(err, code):
'''Exit process with specified return code'''
if err: sys.stderr.write(err)
sys.exit(code)
def getWriter(m, cols):
'''Return a CSV writer closure for cols output columns with interspersed metadata'''
section,flt,buf = '', str.maketrans('\n',' ','\r'), [
'#!begin gopher {} # at {}'.format(m, datetime.now().isoformat()),
'\t'.join(cols),
]
def csvWrite(s, row):
nonlocal m, cols, section, flt, buf
if row:
if s and s != section:
buf.append('\n#!section {}'.format(s))
section = s
buf.append('"{}"'.format('"\t"'.join([row.get(n,'').translate(flt).replace('"','""')
for n in cols])))
sys.stdout.write('{}\n'.format('\n'.join(buf)))
buf = []
elif not buf:
sys.stdout.write('\n#!end gopher {} # at {}\n'.format(m, datetime.now().isoformat()))
return csvWrite
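# Illustrative use of the closure above (hypothetical model/columns): the first
# data row flushes the "#!begin gopher" header and column line, later rows are
# emitted as tab-separated quoted fields, and a final (None, None) call writes
# the "#!end gopher" trailer.
#   write = getWriter('cdr.asp', ['id', 'loc'])
#   write('trunk-a', {'id': '42', 'loc': 'gw1'})
#   write(None, None)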
def gophCDRASP(model, settings, inputs, args):
'''Fetch Ribbon switch CDRs from Aspect'''
if not settings.get('BinDir'): raise GError('no bin directory for {}'.format(model))
pipe,s = getWriter(model, [
'id','loc','begin','dur','type','from','to','dip','try','eTG','IP','iTG',
'cust','rate',
]), ''
with subprocess.Popen([settings.get('BinDir').rstrip('/')+'/goph_cdrasp.sh'], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True) as p:
for l in p.stdout:
if l.startswith('STOP,'):
for col in csv.reader([l]): break
if len(col) <= 238: continue
try: begin = datetime.strptime(col[5]+col[6][:-2],'%m/%d/%Y%H:%M:%S').isoformat()+'Z'
except KeyboardInterrupt: raise
except: continue
cust = '/'.join(col[51].split(',')[64:66]).rstrip('/')
pipe(s, {'id': col[2], # accounting ID
'loc': col[1], # gateway name (maps to service location)
'begin': begin, # start date/time
'dur': col[13], # call service duration
'type': col[17], # service provider (CARRIER/SDENUM for inbound?)
'from': col[19], # calling number (not always full E.164)
'to': col[20], # called number (not always full E.164)
'dip': col[23], # called number before translation #1 (LRN data)
'try': col[29], # route attempt number
# gateway:trunk group (outbound carrier)
'eTG': col[30].partition(':')[2],
'IP': col[31], # egress local signaling IP addr (non-routable for inbound)
'iTG': col[33], # ingress trunk group name (inbound carrier)
# ingress/egress protocol data (cust acct/app SMM fields)
'cust': cust if cust else '/'.join(col[68].split(',')[64:66]).rstrip('/'),
# LCR route billing data (outbound carrier USD rate)
'rate': col[238].partition('6:')[2].partition(';')[0],
})
elif l.startswith('#!begin '):
s = l[:-1].partition(' ')[2].partition('~link')[0]
pipe(None, None)
def main():
'''Parse command line args and release the gopher'''
gophModels = { # gopher model map
'cdr.asp': [gophCDRASP, 'fetch Ribbon switch CDRs from Aspect'],
}
# define and parse command line parameters
parser = argparse.ArgumentParser(description='''This gopher agent fetches Cloud Monitor content for an Aspect model''')
parser.add_argument('model', choices=gophModels, metavar='model',
help='''Aspect model; {} are supported'''.format(', '.join(gophModels)))
parser.add_argument('-o','--opt', action='append', metavar='option', default=[],
help='''gopher option''')
parser.add_argument('-k','--key', action='append', metavar='kvp', default=[],
help='''key-value pair of the form <k>=<v> (key one of: {})'''.format(
', '.join(['{} [{}]'.format(k, KVP[k]) for k in KVP
if not k.startswith('_')])))
args = parser.parse_args()
try: # release the gopher!
signal.signal(signal.SIGTERM, terminate)
overrideKVP({k.partition('=')[0].strip():k.partition('=')[2].strip() for k in args.key})
settings,inputs = json.loads(sys.stdin.readline().strip()) if KVP['settings'] == '~stdin' else json.load(open(KVP['settings'], 'r')), []
for line in sys.stdin:
inputs.append(json.loads(line.strip()))
gophModels[args.model][0](args.model, settings, inputs, args)
# handle exceptions; broken pipe exit avoids console errors
except json.JSONDecodeError: ex('\n** invalid JSON input **\n\n', 1)
except FileNotFoundError: ex('\n** settings not found **\n\n', 1)
except BrokenPipeError: os._exit(0)
except KeyboardInterrupt: ex('\n** gopher interrupted **\n\n', 10)
except (AssertionError, RuntimeError, AttributeError, KeyError, TypeError, IndexError, IOError,
GError) as e: ex('\n** {} **\n\n'.format(e if e else 'unknown exception'), 10)
if __name__ == '__main__': main() # called as gopher
else: pass # loaded as module
|
/anaconda3/lib/python3.6/genericpath.py
|
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from etl.views import PersonViewSet
router = routers.DefaultRouter()
router.register(r'persons', PersonViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(router.urls)),
path('api', include('rest_framework.urls', namespace='rest_framework'))
]
|
# Configuration file where you can set the parameter default values and
# descriptions.
DEFAULT_COMP = 3
DEFAULT_MSC = 0
DEFAULT_MFC = 0
DEFAULT_MAXITER = 25
DEFAULT_FMETA = None
DEFAULT_COND = None
DESC_INIT = ("The number of initialization vectors. Larger values will "
"give more accurate factorization but will be more "
"computationally expensive [minimum 1]")
DESC_ITERATIONSALS = ("Max number of Alternating Least Square (ALS)"
" optimization iterations (suggested to be below 100;"
" beware of overfitting) [minimum 1]")
DESC_ITERATIONSRTPM = ("Max number of Robust Tensor Power Method (RTPM)"
" optimization iterations (suggested to be below 100;"
" beware of overfitting) [minimum 1]")
DESC_COMP = ("The underlying low-rank structure (suggested: 2 < rank < 10)"
" [minimum 2]")
DESC_MSC = "Minimum sum cutoff of sample across all features"
DESC_MFC = "Minimum sum cutoff of features across all samples"
DESC_OUT = "Location of output files."
DESC_FMETA = "Feature metadata file in QIIME2 formatting."
DESC_BIN = "Input table in biom format."
DESC_SMETA = "Sample metadata file in QIIME2 formatting."
DESC_SUBJ = ("Metadata column containing subject IDs to"
" use for pairing samples. WARNING: if"
" replicates exist for an individual ID at"
" either state_1 to state_N, that subject will"
" be mean grouped by default.")
DESC_COND = ("Metadata column containing state (e.g., Time, BodySite)"
" across which samples are paired."
" At least one is required but up to four are allowed"
" by other state inputs.")
QORD = ("A trajectory is an ordination that can be visualized "
"over time or another context.")
QDIST = ("A sample-sample distance matrix generated from "
"the Euclidean distance of the subject-state "
"ordinations and itself.")
QLOAD = ("Compositional biplot of subjects as points and features as arrows,"
" where the variation between subject groupings is explained by the"
" log-ratio between opposing arrows. "
"WARNING: The % variance explained is only spread over n_components "
"and can be inflated.")
QSOAD = ("Compositional biplot of states as points and features as arrows,"
" where the variation between subject groupings is explained by the"
" log-ratio between opposing arrows. "
"WARNING: The % variance explained is only spread over n_components "
"and can be inflated.")
|
#!/usr/bin/env python
from server.brain import JenniferBrain
from ioclients.terminal import JenniferTerminalClient
from ioclients.terminal_with_sound import JenniferTerminalWithSoundClient
brain = JenniferBrain(allow_network_plugins=True)
client = JenniferTerminalWithSoundClient(brain)
client.run()
"""
$: dollar
$ -$ --$ A$ C$ HK$ M$ NZ$ S$ U.S.$ US$
'': closing quotation mark
' ''
(: opening parenthesis
( [ {
): closing parenthesis
) ] }
,: comma
,
--: dash
--
.: sentence terminator
. ! ?
:: colon or ellipsis
: ; ...
CC: conjunction, coordinating
& 'n and both but either et for less minus neither nor or plus so
therefore times v. versus vs. whether yet
CD: numeral, cardinal
mid-1890 nine-thirty forty-two one-tenth ten million 0.5 one forty-
seven 1987 twenty '79 zero two 78-degrees eighty-four IX '60s .025
fifteen 271,124 dozen quintillion DM2,000 ...
DT: determiner
all an another any both del each either every half la many much nary
neither no some such that the them these this those
EX: existential there
there
FW: foreign word
gemeinschaft hund ich jeux habeas Haementeria Herr K'ang-si vous
lutihaw alai je jour objets salutaris fille quibusdam pas trop Monte
terram fiche oui corporis ...
IN: preposition or conjunction, subordinating
astride among uppon whether out inside pro despite on by throughout
below within for towards near behind atop around if like until below
next into if beside ...
JJ: adjective or numeral, ordinal
third ill-mannered pre-war regrettable oiled calamitous first separable
ectoplasmic battery-powered participatory fourth still-to-be-named
multilingual multi-disciplinary ...
JJR: adjective, comparative
bleaker braver breezier briefer brighter brisker broader bumper busier
calmer cheaper choosier cleaner clearer closer colder commoner costlier
cozier creamier crunchier cuter ...
JJS: adjective, superlative
calmest cheapest choicest classiest cleanest clearest closest commonest
corniest costliest crassest creepiest crudest cutest darkest deadliest
dearest deepest densest dinkiest ...
LS: list item marker
A A. B B. C C. D E F First G H I J K One SP-44001 SP-44002 SP-44005
SP-44007 Second Third Three Two * a b c d first five four one six three
two
MD: modal auxiliary
can cannot could couldn't dare may might must need ought shall should
shouldn't will would
NN: noun, common, singular or mass
common-carrier cabbage knuckle-duster Casino afghan shed thermostat
investment slide humour falloff slick wind hyena override subhumanity
machinist ...
NNP: noun, proper, singular
Motown Venneboerger Czestochwa Ranzer Conchita Trumplane Christos
Oceanside Escobar Kreisler Sawyer Cougar Yvette Ervin ODI Darryl CTCA
Shannon A.K.C. Meltex Liverpool ...
NNPS: noun, proper, plural
Americans Americas Amharas Amityvilles Amusements Anarcho-Syndicalists
Andalusians Andes Andruses Angels Animals Anthony Antilles Antiques
Apache Apaches Apocrypha ...
NNS: noun, common, plural
undergraduates scotches bric-a-brac products bodyguards facets coasts
divestitures storehouses designs clubs fragrances averages
subjectivists apprehensions muses factory-jobs ...
PDT: pre-determiner
all both half many quite such sure this
POS: genitive marker
' 's
PRP: pronoun, personal
hers herself him himself hisself it itself me myself one oneself ours
ourselves ownself self she thee theirs them themselves they thou thy us
PRP$: pronoun, possessive
her his mine my our ours their thy your
RB: adverb
occasionally unabatingly maddeningly adventurously professedly
stirringly prominently technologically magisterially predominately
swiftly fiscally pitilessly ...
RBR: adverb, comparative
further gloomier grander graver greater grimmer harder harsher
healthier heavier higher however larger later leaner lengthier less-
perfectly lesser lonelier longer louder lower more ...
RBS: adverb, superlative
best biggest bluntest earliest farthest first furthest hardest
heartiest highest largest least less most nearest second tightest worst
RP: particle
aboard about across along apart around aside at away back before behind
by crop down ever fast for forth from go high i.e. in into just later
low more off on open out over per pie raising start teeth that through
under unto up up-pp upon whole with you
SYM: symbol
% & ' '' ''. ) ). * + ,. < = > @ A[fj] U.S U.S.S.R * ** ***
TO: "to" as preposition or infinitive marker
to
UH: interjection
Goodbye Goody Gosh Wow Jeepers Jee-sus Hubba Hey Kee-reist Oops amen
huh howdy uh dammit whammo shucks heck anyways whodunnit honey golly
man baby diddle hush sonuvabitch ...
VB: verb, base form
ask assemble assess assign assume atone attention avoid bake balkanize
bank begin behold believe bend benefit bevel beware bless boil bomb
boost brace break bring broil brush build ...
VBD: verb, past tense
dipped pleaded swiped regummed soaked tidied convened halted registered
cushioned exacted snubbed strode aimed adopted belied figgered
speculated wore appreciated contemplated ...
VBG: verb, present participle or gerund
telegraphing stirring focusing angering judging stalling lactating
hankerin' alleging veering capping approaching traveling besieging
encrypting interrupting erasing wincing ...
VBN: verb, past participle
multihulled dilapidated aerosolized chaired languished panelized used
experimented flourished imitated reunifed factored condensed sheared
unsettled primed dubbed desired ...
VBP: verb, present tense, not 3rd person singular
predominate wrap resort sue twist spill cure lengthen brush terminate
appear tend stray glisten obtain comprise detest tease attract
emphasize mold postpone sever return wag ...
VBZ: verb, present tense, 3rd person singular
bases reconstructs marks mixes displeases seals carps weaves snatches
slumps stretches authorizes smolders pictures emerges stockpiles
seduces fizzes uses bolsters slaps speaks pleads ...
WDT: WH-determiner
that what whatever which whichever
WP: WH-pronoun
that what whatever whatsoever which who whom whosoever
WP$: WH-pronoun, possessive
whose
WRB: Wh-adverb
how however whence whenever where whereby whereever wherein whereof why
``: opening quotation mark
"""
|
import functools
from sqlagg.columns import *
from sqlagg.base import AliasColumn
from sqlagg.filters import *
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader, DataTablesColumnGroup, DTSortType
from corehq.apps.reports.graph_models import MultiBarChart, LineChart, Axis
from corehq.apps.reports.sqlreport import DatabaseColumn, SummingSqlTabularReport, AggregateColumn, calculate_total_row
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.standard.maps import GenericMapReport
from corehq.apps.reports.util import format_datatables_data
from corehq.apps.userreports.sql import get_table_name
from corehq.const import USER_MONTH_FORMAT
from corehq.util.dates import iso_string_to_date
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_date
from util import get_unique_combinations, capitalize_fn
from datetime import timedelta
class StaticColumn(AliasColumn):
column_key = None
def __init__(self, key, value):
super(StaticColumn, self).__init__(key)
self.value = value
def get_value(self, row):
return self.value
class GSIDSQLReport(SummingSqlTabularReport, CustomProjectReport, DatespanMixin):
fields = ['custom.apps.gsid.reports.TestField',
'corehq.apps.reports.filters.dates.DatespanFilter',
'custom.apps.gsid.reports.AsyncClinicField',
'custom.apps.gsid.reports.AggregateAtField']
exportable = True
emailable = True
default_aggregation = "clinic"
def __init__(self, request, base_context=None, domain=None, **kwargs):
self.is_map = kwargs.pop('map', False)
super(GSIDSQLReport, self).__init__(request, base_context=base_context, domain=domain, **kwargs)
@property
def table_name(self):
return get_table_name(self.domain, 'patient_summary')
@property
def daterange_display(self):
format = "%d %b %Y"
st = self.datespan.startdate.strftime(format)
en = self.datespan.enddate.strftime(format)
return "%s to %s" % (st, en)
@property
def report_subtitles(self):
if self.needs_filters:
return []
subtitles = ["Date range: %s" % self.daterange_display]
if self.selected_fixture():
tag, id = self.selected_fixture()
location = FixtureDataItem.get(id).fields_without_attributes['%s_name' % tag]
subtitles.append('Location: %s' % location)
if self.disease:
location = FixtureDataItem.get(self.disease[1]).fields_without_attributes['disease_name']
subtitles.append('Disease: %s' % location)
if self.test_version:
test_version = FixtureDataItem.get(self.test_version[1]).fields_without_attributes['visible_test_name']
subtitles.append('Test Version: %s' % test_version)
return subtitles
@property
@memoized
def diseases(self):
disease_fixtures = FixtureDataItem.by_data_type(
self.domain,
FixtureDataType.by_domain_tag(self.domain, "diseases").one()
)
return {
"ids": [d.fields_without_attributes["disease_id"] for d in disease_fixtures],
"names": [d.fields_without_attributes["disease_name"] for d in disease_fixtures]
}
@property
def test_types(self):
test_fixtures = FixtureDataItem.by_data_type(
self.domain,
FixtureDataType.by_domain_tag(self.domain, "test").one()
)
return [t.fields_without_attributes["test_name"] for t in test_fixtures]
@property
def filter_values(self):
ret = dict(
domain=self.domain,
startdate=self.datespan.startdate_param,
enddate=self.datespan.enddate_param,
male="male",
female="female",
positive="POSITIVE"
)
DISEASES = self.diseases["ids"]
TESTS = self.test_types
ret.update(zip(DISEASES, DISEASES))
ret.update(zip(TESTS, TESTS))
return ret
@property
def filters(self):
return [EQ("domain", "domain"), BETWEEN("date", "startdate", "enddate")] + self.disease_filters
@property
def disease(self):
disease = self.request.GET.get('test_type_disease', '')
return disease.split(':') if disease else None
@property
def test_version(self):
test = self.request.GET.get('test_type_test', '')
return test.split(':') if test else None
@property
def disease_filters(self):
disease = self.disease
test = self.test_version
filters = []
if test:
filters.append(EQ("test_version", test[0]))
elif disease:
filters.append(EQ("disease_name", disease[0]))
return filters
@property
@memoized
def gps_key(self):
gps_key = "gps"
agg_at = self.request.GET.get('aggregate_at', None)
if agg_at and not agg_at == "clinic":
gps_key = "gps_" + agg_at
return gps_key
@property
def group_by(self):
return self.place_types
@property
def keys(self):
combos = get_unique_combinations(self.domain, place_types=self.place_types, place=self.selected_fixture())
for c in combos:
yield [c[pt] for pt in self.place_types]
def selected_fixture(self):
fixture = self.request.GET.get('fixture_id', "")
return fixture.split(':') if fixture else None
@property
@memoized
def place_types(self):
opts = ['country', 'province', 'district', 'clinic']
agg_at = self.request.GET.get('aggregate_at', None)
agg_at = agg_at if agg_at and opts.index(agg_at) <= opts.index(self.default_aggregation) else self.default_aggregation
place = self.selected_fixture()
agg_at = place[0] if place and opts.index(agg_at) < opts.index(place[0]) else agg_at
return opts[:opts.index(agg_at) + 1]
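# Illustrative reading of the clamping above (hypothetical request values): with
# default_aggregation == 'clinic', aggregate_at='district' and no finer fixture
# selected, this returns ['country', 'province', 'district'].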
@property
def common_columns(self):
columns = []
for place in self.place_types:
columns.append(DatabaseColumn(place.capitalize(), SimpleColumn(place), format_fn=capitalize_fn))
return columns
class GSIDSQLPatientReport(GSIDSQLReport):
name = "Patient Summary Report"
slug = "patient_summary_sql"
section_name = "patient summary"
age_range_map = {'male': [None, None], 'female': [None, None], 'total': [None, None]}
def age_fn(self, key, min, max):
age_range = self.age_range_map[key]
if min is not None and (age_range[0] is None or min < age_range[0]):
self.age_range_map[key][0] = min
if max is not None and (age_range[1] is None or max > age_range[1]):
self.age_range_map[key][1] = max
return self.format_age_range(min, max)
def format_age_range(self, min, max):
return str(min if min is not None else "-") + " - " + str(max if max is not None else "-")
def percent_agg_fn(self, x, t):
return dict(sort_key=x or 0, html="%(x)s (%(p)s%%)" % \
{
"x": x or 0,
"p": (100 * int(x or 0) / (t or 1))
})
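# Illustrative values: under this codebase's Python 2 integer division,
# percent_agg_fn(3, 12) returns {'sort_key': 3, 'html': '3 (25%)'}.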
@property
def columns(self):
sum_fn = lambda x, y: int(x or 0) + int(y or 0)
total_percent_agg_fn = lambda f_pos, m_pos, f_tot, m_tot: dict(sort_key=sum_fn(f_pos, m_pos), html="%(x)s (%(p)s%%)" % \
{
"x": sum_fn(f_pos, m_pos),
"p": (100 * sum_fn(f_pos, m_pos) / (sum_fn(m_tot, f_tot) or 1))
})
patient_number_group = DataTablesColumnGroup("Tests")
positive_group = DataTablesColumnGroup("Positive Tests")
age_range_group = DataTablesColumnGroup("Age Range")
male_filter = EQ("gender", "male")
female_filter = EQ("gender", "female")
columns = self.common_columns + [
DatabaseColumn(
"Number of Males ",
CountColumn('doc_id', alias="male-total", filters=self.filters + [male_filter]),
header_group=patient_number_group
),
DatabaseColumn(
"Number of Females ",
CountColumn('doc_id', alias="female-total", filters=self.filters + [female_filter]),
header_group=patient_number_group
),
AggregateColumn(
"Total", sum_fn,
[AliasColumn("male-total"), AliasColumn("female-total")],
header_group=patient_number_group
),
AggregateColumn(
"Male +ve Percent", self.percent_agg_fn,
[
CountColumn(
'doc_id',
alias="male-positive",
filters=self.filters + [AND([male_filter, EQ("diagnosis", "positive")])]
),
AliasColumn("male-total")
],
header_group=positive_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"Female +ve Percent", self.percent_agg_fn,
[
CountColumn('doc_id',
alias="female-positive",
filters=self.filters + [AND([female_filter, EQ("diagnosis", "positive")])]
),
AliasColumn("female-total")
],
header_group=positive_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"Total +ve Percent", total_percent_agg_fn,
[
AliasColumn("female-positive"),
AliasColumn("male-positive"),
AliasColumn("female-total"), AliasColumn("male-total")
],
header_group=positive_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"Male age range", functools.partial(self.age_fn, 'male'),
[
MinColumn("age", alias="male-min", filters=self.filters + [male_filter]),
MaxColumn("age", alias="male-max", filters=self.filters + [male_filter])
],
header_group=age_range_group
),
AggregateColumn(
"Female age range", functools.partial(self.age_fn, 'female'),
[
MinColumn("age", alias="female-min", filters=self.filters + [female_filter]),
MaxColumn("age", alias="female-max", filters=self.filters + [female_filter])
],
header_group=age_range_group
),
AggregateColumn(
"All age range", functools.partial(self.age_fn, 'total'),
[
MinColumn("age", alias="age-min", filters=self.filters + [OR([female_filter, male_filter])]),
MaxColumn("age", alias="age-max", filters=self.filters + [OR([female_filter, male_filter])])
],
header_group=age_range_group
),
]
if self.is_map:
columns.append(DatabaseColumn("gps", MaxColumn(self.gps_key), format_fn=lambda x: x))
disease = FixtureDataItem.get(self.disease[1]).fields_without_attributes['disease_name'] if self.disease else 'All diseases'
columns.append(DatabaseColumn('disease', StaticColumn('disease', disease)))
return columns
@property
def rows(self):
rows = super(GSIDSQLPatientReport, self).rows
self.total_row[0] = 'Total'
# total age ranges
col_start = -5 if self.is_map else -3
self.total_row[col_start] = self.format_age_range(self.age_range_map['male'][0], self.age_range_map['male'][1])
self.total_row[col_start+1] = self.format_age_range(self.age_range_map['female'][0], self.age_range_map['female'][1])
self.total_row[col_start+2] = self.format_age_range(self.age_range_map['total'][0], self.age_range_map['total'][1])
# formatted percent totals
pos_col_start = -8 if self.is_map else -6
tot_col_start = -11 if self.is_map else -9
m_tot = self.total_row[tot_col_start]
f_tot = self.total_row[tot_col_start+1]
tot = self.total_row[tot_col_start+2]
m_pos = self.total_row[pos_col_start]
f_pos = self.total_row[pos_col_start+1]
tot_pos = self.total_row[pos_col_start+2]
self.total_row[pos_col_start] = self.percent_agg_fn(m_pos, m_tot)
self.total_row[pos_col_start+1] = self.percent_agg_fn(f_pos, f_tot)
self.total_row[pos_col_start+2] = self.percent_agg_fn(tot_pos, tot)
return rows
@property
def charts(self):
rows = self.rows
loc_axis = Axis(label="Location")
tests_axis = Axis(label="Number of Tests", format=",.1d")
chart = MultiBarChart("Number of Tests Per Location", loc_axis, tests_axis)
chart.stacked = True
chart.tooltipFormat = " in "
chart.add_dataset(
"Male Tests",
[{'x':row[-10], 'y':row[-9]['html'] if row[-9] != "--" else 0} for row in rows],
color="#0006CE"
)
chart.add_dataset(
"Female Tests",
[{'x':row[-10], 'y':row[-8]['html'] if row[-8] != "--" else 0} for row in rows],
color="#70D7FF"
)
return [chart]
class GSIDSQLByDayReport(GSIDSQLReport):
name = "Day Summary Report"
slug = "day_summary_sql"
section_name = "day summary"
@property
def group_by(self):
return super(GSIDSQLByDayReport, self).group_by + ["date", "disease_name"]
@property
def columns(self):
return self.common_columns + \
[
DatabaseColumn("Count", CountColumn("doc_id", alias="day_count")),
DatabaseColumn("disease", SimpleColumn("disease_name", alias="disease_name"))
]
def daterange(self, start_date, end_date):
for n in range(int((end_date - start_date).days) + 1):
yield json_format_date(start_date + timedelta(n))
@property
def headers(self):
startdate = self.datespan.startdate
enddate = self.datespan.enddate
column_headers = []
group_by = self.group_by[:-2]
for place in group_by:
column_headers.append(DataTablesColumn(place.capitalize()))
column_headers.append(DataTablesColumn("Disease"))
prev_month = startdate.month
month_columns = [startdate.strftime(USER_MONTH_FORMAT)]
for n, day in enumerate(self.daterange(startdate, enddate)):
day_obj = iso_string_to_date(day)
month = day_obj.month
day_column = DataTablesColumn("Day%(n)s (%(day)s)" % {'n':n+1, 'day': day})
if month == prev_month:
month_columns.append(day_column)
else:
month_group = DataTablesColumnGroup(*month_columns)
column_headers.append(month_group)
month_columns = [day_obj.strftime(USER_MONTH_FORMAT), day_column]
prev_month = month
month_group = DataTablesColumnGroup(*month_columns)
column_headers.append(month_group)
return DataTablesHeader(*column_headers)
@property
def rows(self):
startdate = self.datespan.startdate
enddate = self.datespan.enddate
old_data = self.data
rows = []
for loc_key in self.keys:
selected_disease = self.request.GET.get('test_type_disease', '')
selected_disease = selected_disease.split(':') if selected_disease else None
diseases = [selected_disease[0]] if selected_disease else self.diseases["ids"]
for disease in diseases:
row = [capitalize_fn(x) for x in loc_key]
disease_names = self.diseases["names"]
index = self.diseases['ids'].index(disease)
row.append(disease_names[index])
for n, day in enumerate(self.daterange(startdate, enddate)):
temp_key = [loc for loc in loc_key]
temp_key.append(iso_string_to_date(day))
temp_key.append(disease)
keymap = old_data.get(tuple(temp_key), None)
day_count = (keymap["day_count"] if keymap else None)
row.append(format_datatables_data(day_count or self.no_value, day_count or 0))
rows.append(row)
self.total_row = calculate_total_row(rows)
self.total_row[0] = 'Total'
return rows
@property
def charts(self):
rows = self.rows
date_index = len(self.place_types)
startdate = self.datespan.startdate
enddate = self.datespan.enddate
date_axis = Axis(label="Date", dateFormat="%b %d")
tests_axis = Axis(label="Number of Tests")
chart = LineChart("Number of Tests Per Day", date_axis, tests_axis)
for row in rows:
data_points = []
for n, day in enumerate(self.daterange(startdate, enddate)):
x = day
y = 0 if row[date_index + n + 1] == "--" else row[date_index + n + 1]
data_points.append({'x': x, 'y': y['sort_key']})
chart.add_dataset(row[date_index-1] + "(" + row[date_index] + ")", data_points)
return [chart]
class GSIDSQLTestLotsReport(GSIDSQLReport):
name = "Test Lots Report"
slug = "test_lots_sql"
section_name = "test lots"
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return user and user.is_previewer()
@property
def group_by(self):
return super(GSIDSQLTestLotsReport, self).group_by + ["test_version", "lot_number"]
@property
def columns(self):
return self.common_columns + [
DatabaseColumn("Test", CountColumn('doc_id', alias="lot_count"))
]
@property
def test_lots_map(self):
old_data = self.data
lots_map = dict()
for key in old_data.keys():
if lots_map.get(key[-2], None):
lots_map[key[-2]].append(key[-1])
else:
lots_map[key[-2]] = [key[-1]]
return lots_map
@property
def selected_tests(self):
disease = self.request.GET.get('test_type_disease', '')
test = self.request.GET.get('test_type_test', '')
disease = disease.split(':') if disease else None
test = test.split(':') if test else None
if test:
return [test[0]]
elif disease:
test_fixtures = FixtureDataItem.by_field_value(
self.domain,
FixtureDataType.by_domain_tag(self.domain, "test").one(),
"disease_id",
disease[0]
)
return [t.fields_without_attributes["test_name"] for t in test_fixtures]
else:
return self.test_types
@property
def rows(self):
test_lots_map = self.test_lots_map
selected_tests = self.selected_tests
old_data = self.data
rows = []
for loc_key in self.keys:
row = [capitalize_fn(loc) for loc in loc_key]
for test in selected_tests:
test_lots = test_lots_map.get(test, None)
if not test_lots:
row.append(format_datatables_data(self.no_value, 0))
continue
total_test_count = 0
for lot_number in test_lots:
temp_key = [loc for loc in loc_key] + [test, lot_number]
data_map = old_data.get(tuple(temp_key), None)
lot_count = data_map["lot_count"] if data_map else None
row.append(format_datatables_data(lot_count or self.no_value, lot_count or 0))
total_test_count += data_map["lot_count"] if data_map else 0
row.append(format_datatables_data(total_test_count or self.no_value, total_test_count or 0))
rows.append(row)
self.total_row = calculate_total_row(rows)
self.total_row[0] = 'Total'
return rows
@property
def headers(self):
column_headers = [DataTablesColumn(loc.capitalize()) for loc in self.group_by[:-2]]
test_lots_map = self.test_lots_map
for test in self.selected_tests:
lots_headers = [test]
lots = test_lots_map.get(test, None)
if not lots:
lots_headers.append(DataTablesColumn("NO-LOTS"))
column_headers.append(DataTablesColumnGroup(*lots_headers))
continue
for lot in lots:
lots_headers.append(DataTablesColumn(str(lot)))
lots_headers.append(DataTablesColumn("TOTAL"))
column_headers.append(DataTablesColumnGroup(*lots_headers))
return DataTablesHeader(*column_headers)
class GSIDSQLByAgeReport(GSIDSQLReport):
name = "Age Summary Report"
slug = "age_summary_sql"
section_name = "age summary"
@property
def filter_values(self):
age_filters = dict(
zero=0,
ten=10,
ten_plus=11,
twenty=20,
twenty_plus=21,
fifty=50
)
default_filter_values = super(GSIDSQLByAgeReport, self).filter_values
default_filter_values.update(age_filters)
return default_filter_values
def percent_fn(self, x, y):
return dict(
sort_key=x or 0,
html="%(x)s (%(p)s%%)" % {"x": int(x or 0), "p": 100*(x or 0) / (y or 1)})
@property
def columns(self):
female_range_group = DataTablesColumnGroup("Female Positive Tests (% positive)")
male_range_group = DataTablesColumnGroup("Male Positive Tests (% positive)")
def age_range_filter(gender, age_from, age_to):
return [AND([EQ("gender", gender), EQ("diagnosis", "positive"), BETWEEN("age", age_from, age_to)])]
def generate_columns(gender):
age_range_group = male_range_group if gender == "male" else female_range_group
return [
AggregateColumn(
"0-10", self.percent_fn,
[
CountColumn(
'doc_id',
alias="zero_ten_" + gender,
filters=self.filters + age_range_filter(gender, "zero", "ten")
),
AliasColumn(gender + "_total")
],
header_group=age_range_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"10-20", self.percent_fn,
[
CountColumn(
'doc_id',
alias="ten_twenty_" + gender,
filters=self.filters + age_range_filter(gender, "ten_plus", "twenty")
),
AliasColumn(gender + "_total")
],
header_group=age_range_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"20-50", self.percent_fn,
[
CountColumn(
'doc_id',
alias="twenty_fifty_" + gender,
filters= self.filters + age_range_filter(gender, "twenty_plus", "fifty")
),
AliasColumn(gender + "_total")
],
header_group=age_range_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"50+", self.percent_fn,
[
CountColumn(
'doc_id',
alias="fifty_" + gender,
filters=self.filters + [AND([EQ("gender", gender), EQ("diagnosis", "positive"), GT("age", "fifty")])]),
AliasColumn(gender + "_total")
],
header_group=age_range_group, sort_type=DTSortType.NUMERIC
),
AggregateColumn(
"Total", self.percent_fn,
[
CountColumn(
'doc_id',
alias="positive_total_" + gender,
filters=self.filters + [AND([EQ("gender", gender), EQ("diagnosis", "positive")])]),
CountColumn(
'doc_id',
alias=gender + "_total",
filters=self.filters + [EQ("gender", gender)]),
],
header_group=age_range_group, sort_type=DTSortType.NUMERIC
),
]
totals_group = DataTablesColumnGroup("Total tests")
sum_fn = lambda x, y: int(x or 0) + int(y or 0)
return self.common_columns + [
DatabaseColumn(
"Males ",
AliasColumn("male_total"),
header_group=totals_group
),
DatabaseColumn(
"Females ",
AliasColumn("female_total"),
header_group=totals_group
),
AggregateColumn(
"Total", sum_fn,
[AliasColumn("male_total"), AliasColumn("female_total")],
header_group=totals_group
),
] + generate_columns("male") + generate_columns("female")
@property
def rows(self):
rows = super(GSIDSQLByAgeReport, self).rows
self.total_row[0] = 'Total'
# custom total row formatting
tot_col_start = -13
m_tot = self.total_row[tot_col_start]
f_tot = self.total_row[tot_col_start+1]
m_pos_start = -10
self.total_row[m_pos_start] = self.percent_fn(self.total_row[m_pos_start], m_tot)
self.total_row[m_pos_start+1] = self.percent_fn(self.total_row[m_pos_start+1], m_tot)
self.total_row[m_pos_start+2] = self.percent_fn(self.total_row[m_pos_start+2], m_tot)
self.total_row[m_pos_start+3] = self.percent_fn(self.total_row[m_pos_start+3], m_tot)
self.total_row[m_pos_start+4] = self.percent_fn(self.total_row[m_pos_start+4], m_tot)
f_pos_start = -5
self.total_row[f_pos_start] = self.percent_fn(self.total_row[f_pos_start], f_tot)
self.total_row[f_pos_start+1] = self.percent_fn(self.total_row[f_pos_start+1], f_tot)
self.total_row[f_pos_start+2] = self.percent_fn(self.total_row[f_pos_start+2], f_tot)
self.total_row[f_pos_start+3] = self.percent_fn(self.total_row[f_pos_start+3], f_tot)
self.total_row[f_pos_start+4] = self.percent_fn(self.total_row[f_pos_start+4], f_tot)
return rows
class PatientMapReport(GenericMapReport, CustomProjectReport):
name = "Patient Summary (Map)"
slug = "patient_summary_map"
fields = ['custom.apps.gsid.reports.TestField',
'corehq.apps.reports.filters.dates.DatespanFilter',
'custom.apps.gsid.reports.AsyncClinicField',
'custom.apps.gsid.reports.AggregateAtField']
data_source = {
'adapter': 'legacyreport',
'geo_column': 'gps',
'report': 'custom.apps.gsid.reports.sql_reports.GSIDSQLPatientReport',
'report_params': {'map': True}
}
@property
def display_config(self):
return {
'column_titles': {
'Positive Tests::Female +ve Percent': 'Positive tests: Female',
'Positive Tests::Male +ve Percent': 'Positive tests: Male',
'Positive Tests::Total +ve Percent': 'Positive tests: Total',
'Tests::Number of Females ': 'Total tests: Female',
'Tests::Number of Males ': 'Total tests: Male',
'Tests::Total': 'Total tests',
'Age Range::All age range': 'Age range: All',
'Age Range::Female age range': 'Age range: Female',
'Age Range::Male age range': 'Age range: Male',
'disease': 'Disease',
},
'detail_columns': self.place_types + [
'disease',
'__space__',
'Positive Tests::Female +ve Percent',
'Positive Tests::Male +ve Percent',
'Positive Tests::Total +ve Percent',
'Tests::Number of Females ',
'Tests::Number of Males ',
'Tests::Total',
],
'table_columns': self.place_types + [
'Tests::Number of Females ',
'Tests::Number of Males ',
'Tests::Total',
'Positive Tests::Female +ve Percent',
'Positive Tests::Male +ve Percent',
'Positive Tests::Total +ve Percent',
'Age Range::Female age range',
'Age Range::Male age range',
'Age Range::All age range',
],
'detail_template': """<div class="default-popup">
<table>
<% _.each(info, function(field) { %>
<tr class="data data-<%= field.slug %>">
<% if (field.slug === '__space__') { %>
<td> </td><td> </td>
<% } else { %>
<td><%= field.label %></td>
<td class="detail_data">
<%= field.value %>
</td>
<% } %>
</tr>
<% }); %>
</table>
</div>"""
}
@property
def agg_level(self):
agg_at = self.request.GET.get('aggregate_at', None)
return agg_at if agg_at else 'clinic'
@property
def place_types(self):
opts = ['country', 'province', 'district', 'clinic']
agg_at = self.agg_level
return [o.title() for o in opts[:opts.index(agg_at) + 1]]
|
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.decorators.cache import cache_page
from contactmps import views
from django.views.decorators.csrf import csrf_exempt
# Used to cache expensive API calls, since it's fine to show same results for
# a few minutes. This cache is reset on each deployment. Corresponding caching
# headers are sent to the client, too.
API_CACHE_SECS = 5 * 60
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^campaign/(?P<campaign_slug>[a-z0-9-]+)/embedded-preview/$', views.embedded_preview,
name='embedded-preview'),
url(r'^campaign/(?P<campaign_slug>[a-z0-9-]+)/$', cache_page(API_CACHE_SECS)(views.campaign),
name='campaign'),
url(r'^campaign/(?P<campaign_slug>[a-z0-9-]+)/email/$', cache_page(API_CACHE_SECS)(views.campaign_email), name='campaign-emails'),
url(r'^email/$', csrf_exempt(views.email), name='email'),
url(r'^email/(?P<secure_id>[a-z0-9-]+)/$', cache_page(API_CACHE_SECS)(views.email_detail),
name='email-detail'),
# API
url(r'^api/v1/email/$', csrf_exempt(views.api_email), name='api-email'),
url(r'^api/v1/email/(?P<secure_id>[a-z0-9-]+)/qa/$', csrf_exempt(views.api_qa), name='api-qa'),
#url(r'^api/v1/campaign/(?P<campaign_slug>[a-z0-9-]+)/email/$', cache_page(API_CACHE_SECS)(views.api_campaign_email), name='api-campaign-emails'),
# UTM - This rather strict regex is part of ensuring we don't let people just
# inject what they like into a response we give. Think before changing.
url(r'^campaign/[a-z0-9-]+/(?P<utm_medium>[a-z]{2})/$', views.add_utm,
name='campaign-add-utm'),
url(r'^email/[a-z0-9-]+/(?P<utm_medium>[a-z]{2})/$', views.add_utm, name='email-add-utm'),
url(r'^admin/', include(admin.site.urls)),
url(r'^robots.txt$', views.robots),
]
|
import os
import pandas as pd
import csv
import numpy as np
from sklearn.metrics import accuracy_score
stage_csv = os.path.join("dataset_sleep", "0519", "mi_stage.csv")
predict_stage_csv = os.path.join("dataset_sleep", "0519", "predict.csv")
vitial_sig = pd.read_csv(stage_csv)
predict_vitial_sig = pd.read_csv(predict_stage_csv)
vitial_sig_time = vitial_sig["datetime"].values
vitial_sig_stage = vitial_sig["sleep"].values
predict_vitial_sig_time = predict_vitial_sig["datetime"].values
predict_vitial_sig_predict = predict_vitial_sig["sleep"].values+2
if len(predict_vitial_sig_time) == len(vitial_sig_time):
for i in range(len(predict_vitial_sig_time)):
if predict_vitial_sig_time[i] != vitial_sig_time[i]:
print(predict_vitial_sig_time[i])
print(vitial_sig_time[i])
print("時間有錯")
break
else:
print("時間長度有錯")
print("Accuracy_score: ", accuracy_score(vitial_sig_stage, predict_vitial_sig_predict))
|
from django.contrib import admin
from .models import File
admin.site.register(File)
|
import matplotlib.image as im
import numpy as np
import cv2
import glob
import pickle
def getCalibrationMatrix (imgs_path,nx,ny):
    #Create lists to hold the corner points for images and real coordinates
objpoints = []
imgpoints = []
#Construct the real life 3D points meshgrid for x,y and z = 0 for all points (assuming flat surface)
objp = np.zeros(((nx*ny),3),np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)
#Obtain points for each image
for path in imgs_path:
img = im.imread(path)
#Detect corners
res,corners = cv2.findChessboardCorners(img,(nx,ny))
if res : #Corners found
objpoints.append(objp)
imgpoints.append(corners)
#Extract the size of the last image
size = (img.shape[1],img.shape[0])
#Compute calibration Matrix
_,cmt,dist,_,_ = cv2.calibrateCamera(objpoints,imgpoints,size,None,None)
return (cmt,dist)
####################################################
### Get Camera Calibration Matrix (ONLY ONCE)#######
####################################################
#grid size
nx = 9
ny = 6
path = glob.glob('camera_cal/calibration*.JPG')
cmt,dist = getCalibrationMatrix(path,nx,ny)
# Save the camera calibration result for later use
dist_pickle = {}
dist_pickle["cmt"] = cmt
dist_pickle["dist"] = dist
dist_pickle["dx"] = nx
dist_pickle["dy"] = ny
pickle.dump( dist_pickle, open( "camera_cal/calib_mtx.p", "wb" ) )
#Print that the program is done
print("Camera Matrix has been saved successfully !")
|
from .rna_em import EMTrainer
__all__ = [
'EMTrainer',
]
|
import pylast
import time
from platypush.message.response import Response
from .. import Plugin
class LastfmPlugin(Plugin):
def __init__(self, api_key, api_secret, username, password):
self.api_key = api_key
self.api_secret = api_secret
self.username = username
self.password = password
self.lastfm = pylast.LastFMNetwork(
api_key = self.api_key,
api_secret = self.api_secret,
username = self.username,
password_hash = pylast.md5(self.password))
def scrobble(self, artist, title, album=None, **kwargs):
self.lastfm.scrobble(
artist = artist,
title = title,
album = album,
timestamp = int(time.time()),
)
return Response()
def update_now_playing(self, artist, title, album=None, **kwargs):
self.lastfm.update_now_playing(
artist = artist,
title = title,
album = album,
)
return Response()
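# Hedged usage sketch (not part of the original plugin); the credentials are placeholders and
# would normally come from the platypush configuration file:
#
#   plugin = LastfmPlugin(api_key='KEY', api_secret='SECRET',
#                         username='user', password='secret')
#   plugin.update_now_playing('Some Artist', 'Some Title')
#   plugin.scrobble('Some Artist', 'Some Title')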
# vim:sw=4:ts=4:et:
|
import random
from .competition_errors import FakeError
class FakeDebugger(object):
def __init__(self, proxy, mult=0.1):
self.proxy = proxy
if isinstance(mult, basestring):
mult = float(mult)
self.mult = mult
def get_times(self):
if self.mult > 1.0:
return int(self.mult)
if self.mult < 0.0:
return 0
r = random.random()
if r < self.mult:
return 1
return 0
def process_flow_mod(self, dpid, flow_mod, _):
t = self.get_times()
for _ in xrange(t):
self.proxy.log_event(self, FakeError(dpid, flow_mod))
def process_flow_removed(self, dpid, flow_rem):
pass
|
#!/usr/bin/env python3
#
# Copyright (c) 2022 Samuel J. McKelvie
#
# MIT License - See LICENSE file accompanying this package.
#
"""Tools to assist with installation/provisioning of the dev environment"""
from .os_packages import (PackageList, create_os_group, get_dpkg_arch, get_os_package_version,
install_apt_sources_list_if_missing,
install_gpg_keyring_if_missing, install_os_packages,
invalidate_os_package_list, os_group_add_user,
os_package_is_installed, uninstall_os_packages,
update_and_install_os_packages,
update_and_upgrade_os_packages,
update_apt_sources_list, update_gpg_keyring,
update_os_package_list, upgrade_os_packages)
from .poetry import (get_poetry_prog, get_poetry_version, install_poetry,
poetry_is_installed)
from .pulumi import (default_pulumi_dir, get_pulumi_prog,
get_pulumi_version, get_pulumi_latest_version,
get_pulumi_username, install_pulumi,
pulumi_is_installed)
from .util import (CalledProcessErrorWithStderrMessage, check_version_ge,
chown_root, command_exists,
download_url_file, file_contents, files_are_identical,
find_command_in_path, get_all_os_groups,
get_current_architecture, get_current_os_user,
get_file_hash_hex, get_gid_of_group, get_linux_distro_name,
get_os_groups_of_current_process, get_os_groups_of_user,
get_tmp_dir, os_group_exists,
os_group_includes_current_process, os_group_includes_user,
run_once, running_as_root, searchpath_append,
searchpath_contains_dir, searchpath_force_append,
searchpath_join, searchpath_normalize,
searchpath_parts_append, searchpath_parts_contains_dir,
searchpath_parts_force_append, searchpath_parts_prepend,
searchpath_parts_prepend_if_missing,
searchpath_parts_remove_dir, searchpath_prepend,
searchpath_prepend_if_missing, searchpath_remove_dir,
searchpath_split, should_run_with_group, sudo_call,
sudo_check_call, sudo_check_output,
sudo_check_output_stderr_exception, sudo_Popen, unix_mv)
|
import sys
import json
import boto3
sys.path.insert(0, "../src")
import bioims
trainClient = bioims.client('train')
embeddingName='bbbc021'
filterBucket=''
filterKey=''
executeProcessPlate='true'
useSpot='false'
r = trainClient.train(embeddingName, filterBucket, filterKey, executeProcessPlate, useSpot)
print(r)
|
"""
coding:utf-8
file: log.py
@author: virgil
@contact: XXXX@163.com
@time: 2020-01-03 20:59
@desc:
"""
import logging
import os
from Commons.Path import log_path
class Log:
@classmethod
def logs(cls, log_name=None, file_name=None):
if log_name is None:
log_name = 'virgil'
if file_name is None:
file_name = 'ui_test.log'
format_str = '%(asctime)s - [%(filename)s-->line:%(lineno)d] - %(name)s:%(levelname)s: %(message)s'
log_format = logging.Formatter(format_str)
log = logging.getLogger(log_name)
log.setLevel('DEBUG')
output = logging.StreamHandler()
output.setLevel('INFO')
output.setFormatter(log_format)
log.addHandler(output)
output_file = logging.FileHandler(os.path.join(log_path, file_name), 'w', encoding='utf8')
output_file.setLevel('INFO')
output_file.setFormatter(log_format)
log.addHandler(output_file)
return log
log = Log.logs()
if __name__ == '__main__':
    log.info('after the New Year')
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class CouponsConfig(AppConfig):
name = 'apps.coupons'
verbose_name = _('Coupon')
verbose_name_plural = _('Coupons')
|
import os
from typing import Optional
from fastapi import APIRouter, HTTPException, Body
from fastapi.responses import HTMLResponse
from wechatpy import parse_message, create_reply
from wechatpy.utils import check_signature
from wechatpy.crypto import WeChatCrypto
from wechatpy.exceptions import (
InvalidSignatureException,
InvalidAppIdException,
)
from utils.fetch_vmess import fetch_info
router = APIRouter()
TOKEN = os.environ.get('WECHAT_TOKEN', '')
AES_KEY = os.environ.get('WECHAT_AESKEY', '')
APP_ID = os.environ.get('WECHAT_APPID', '')
@router.get('/wechat')
async def wechat(
signature: str,
timestamp: str,
nonce: str,
echostr: str
):
try:
check_signature(TOKEN, signature, timestamp, nonce)
except InvalidSignatureException:
raise HTTPException(status_code=403)
return HTMLResponse(content=echostr)
@router.post('/wechat')
async def wechat_post(
msg_signature: Optional[str] = None,
timestamp: Optional[str] = None,
nonce: Optional[str] = None,
encrypt_type: Optional[str] = 'raw',
body_msg: str = Body(..., media_type='application/html')
):
if encrypt_type == 'raw':
# plaintext mode
msg = parse_message(body_msg)
if msg.type != 'text':
            reply = create_reply('This message type is not supported yet _(:зゝ∠)_', msg)
else:
reply = await fetch_info(msg.content)
reply = create_reply(reply, msg)
return HTMLResponse(reply.render())
else:
# encryption mode
crypto = WeChatCrypto(TOKEN, AES_KEY, APP_ID)
try:
body_msg = crypto.decrypt_message(
body_msg,
msg_signature,
timestamp,
nonce
)
except (InvalidSignatureException, InvalidAppIdException):
raise HTTPException(status_code=403)
else:
msg = parse_message(body_msg)
if msg.type != 'text':
                reply = create_reply('This message type is not supported yet _(:зゝ∠)_', msg)
else:
reply = await fetch_info(msg.content)
reply = create_reply(reply, msg)
return HTMLResponse(crypto.encrypt_message(reply.render(), nonce, timestamp))
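# Hedged usage sketch (not part of the original module): this router would typically be mounted
# on a FastAPI application, roughly as follows.
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router)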
|
# V0
# IDEA : DFS
class Solution:
def findSecondMinimumValue(self, root):
result=[]
self.dfs(root, result)
result_=sorted(set(result))
return result_[1] if len(result_) > 1 else -1
def dfs(self,root, result):
if not root:
return
self.dfs(root.left, result)
result.append(root.val)
self.dfs(root.right, result)
# V0'
class Solution:
def findSecondMinimumValue(self, root: TreeNode) -> int:
values = []
def getValues(root):
if not root:
return
values.append(root.val)
if root.left:
getValues(root.left)
if root.right:
getValues(root.right)
getValues(root)
values = sorted(set(values))
if len(values) > 1:
return values[1]
return -1
# V1
# https://leetcode.com/problems/second-minimum-node-in-a-binary-tree/discuss/455080/Python3-simple-solution-using-set()-function
# IDEA : DFS
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def findSecondMinimumValue(self, root: TreeNode) -> int:
values = []
def getValues(root):
if not root:
return
values.append(root.val)
if root.left:
getValues(root.left)
if root.right:
getValues(root.right)
getValues(root)
values = sorted(set(values))
if len(values) > 1:
return values[1]
return -1
### Test case : dev
#s=Solution()
#s.findSecondMinimumValue([2,2,5,None,None,5,7])
# V1'
# http://bookshadow.com/weblog/2017/09/03/leetcode-second-minimum-node-in-a-binary-tree/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findSecondMinimumValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.ans = 0x80000000
minVal = root.val
def traverse(root):
if not root: return
if self.ans > root.val > minVal:
self.ans = root.val
traverse(root.left)
traverse(root.right)
traverse(root)
return self.ans if self.ans != 0x80000000 else -1
# V1''
# https://leetcode.com/problems/second-minimum-node-in-a-binary-tree/solution/
# IDEA : BRUTE FORCE
class Solution(object):
def findSecondMinimumValue(self, root):
def dfs(node):
if node:
uniques.add(node.val)
dfs(node.left)
dfs(node.right)
uniques = set()
dfs(root)
min1, ans = root.val, float('inf')
for v in uniques:
if min1 < v < ans:
ans = v
return ans if ans < float('inf') else -1
# V1'''
# IDEA : AD-HOC
# https://leetcode.com/problems/second-minimum-node-in-a-binary-tree/solution/
def findSecondMinimumValue(self, root):
self.ans = float('inf')
min1 = root.val
def dfs(node):
if node:
if min1 < node.val < self.ans:
self.ans = node.val
elif node.val == min1:
dfs(node.left)
dfs(node.right)
dfs(root)
return self.ans if self.ans < float('inf') else -1
# V1''''
# https://www.jianshu.com/p/5b1de2697e1b
import sys
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def findSecondMinimumValue(self, root):
self.min = sys.maxsize
self.second_min = sys.maxsize
def traverse(node):
if node:
if node.val < self.min:
self.second_min = self.min
self.min = node.val
elif node.val < self.second_min and node.val != self.min:
self.second_min = node.val
traverse(node.left)
traverse(node.right)
traverse(root)
if self.second_min != sys.maxsize:
return self.second_min
return -1
# root = TreeNode(2)
# root.left = TreeNode(2)
# root.right = TreeNode(5)
# root.right.left = TreeNode(5)
# root.right.right = TreeNode(7)
# assert Solution().findSecondMinimumValue(root) == 5
# V2
# Time: O(n)
# Space: O(h)
import heapq
class Solution(object):
def findSecondMinimumValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def findSecondMinimumValueHelper(root, max_heap, lookup):
if not root:
return
if root.val not in lookup:
heapq.heappush(max_heap, -root.val)
lookup.add(root.val)
if len(max_heap) > 2:
lookup.remove(-heapq.heappop(max_heap))
findSecondMinimumValueHelper(root.left, max_heap, lookup)
findSecondMinimumValueHelper(root.right, max_heap, lookup)
max_heap, lookup = [], set()
findSecondMinimumValueHelper(root, max_heap, lookup)
if len(max_heap) < 2:
return -1
return -max_heap[0]
|
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from src.visualization_description.descriptive_tool import DescribeData
from src.models.data_modules import CheXpertDataModule
fig_path_report = '../Thesis-report/00_figures/cheXpert/'
save_figs = False
disease = 'Cardiomegaly'
def get_descriptive_cheXpert_data(data_set):
# Loading CheXpert
dm = CheXpertDataModule(**{
"target_disease": "Cardiomegaly",
'multi_label': False,
"uncertainty_approach": "U-Zeros",
'tiny_sample_data': False,
'extended_image_augmentation':False})
if data_set == "train":
meta_dat = dm.train_data.dataset_df.assign(
y = dm.train_data.y.squeeze()
)
if data_set == "val":
meta_dat = dm.val_data.dataset_df.assign(
y = dm.val_data.y.squeeze()
)
if data_set == "test":
meta_dat = dm.test_data.dataset_df.assign(
y = dm.test_data.y.squeeze()
)
if data_set == "all":
# Uncertainty approach
if dm.uncertainty_approach == 'U-Ones':
target_map = {
np.nan: 0, # unmentioned
0.0: 0, # negative
-1.0: 1, # uncertain
1.0: 1 # positive
}
elif dm.uncertainty_approach == 'U-Zeros':
target_map = {
np.nan: 0, # unmentioned
0.0: 0, # negative
-1.0: 0, # uncertain
1.0: 1 # positive
}
meta_dat = dm.dataset_df.assign(
y = lambda x: x[dm.target_disease].map(target_map)
)
# Adding Race from processed demo data
processed_demo = pd.read_csv("data/CheXpert/processed/cheXpert_processed_demo_data.csv")
df = (meta_dat
.join(processed_demo.set_index("patient_id"), how = "left", on = "patient_id")
.dropna(axis = 0, subset=processed_demo.columns)
.rename(mapper = str.lower, axis = 'columns')
)
return df
def get_figures_and_tables(
df, a_name, disease = 'Cardiomegaly', fig_path_report = None, save_figs = False, orientation = 'h'):
if fig_path_report is None:
fig_path_report = '../Thesis-report/00_figures/cheXpert/'
desc = DescribeData(a_name = a_name,
y_name = "y",
id_name = 'patient_id',
data = df,
data_name=f'CheXpert, target: {disease}',
**{"decimal":4})
desc.descriptive_table_to_tex(target_tex_name=f'Has {disease}')
desc.plot_positive_rate(title = f'Percentage with {disease}', orientation=orientation)
if save_figs:
plt.savefig(fig_path_report+f"posperc_{a_name}_{data_set}.pdf", bbox_inches='tight')
desc.plot_n_target_across_sens_var(
orientation=orientation,
return_ax=False,
**{"class_1_label":disease, "class_0_label": f"No {disease}"})
if save_figs:
plt.savefig(fig_path_report+f"N_{a_name}_{data_set}.pdf", bbox_inches='tight')
#%%
if __name__ == "__main__":
# Choose dataset: test, train or val
data_set = "all"
df = get_descriptive_cheXpert_data(data_set)
# Get figures and tables for main report
get_figures_and_tables(df, a_name = 'sex', orientation = 'v', save_figs = save_figs)
get_figures_and_tables(df, a_name = 'race', save_figs = save_figs)
get_figures_and_tables(df, a_name = 'race_sex', save_figs = save_figs)
#%% Get prevalences in test/val/training
# Train
tbl_combined = []
for data_set in ['train', 'test']:
df = get_descriptive_cheXpert_data(data_set = data_set)
for attr in ['sex', 'race', 'race_sex']:
desc = DescribeData(a_name = attr,
y_name = "y",
id_name = 'patient_id',
data = df,
data_name=f'CheXpert, target: {disease}',
**{"decimal":4})
tbl = desc.descriptive_table.assign(
dataset = data_set,
sens_grp = attr)
tbl_combined.append(tbl)
tbl_combined = pd.concat(tbl_combined)
#%%
tex_pos_rate = (lambda x :
[f"{x.N_positive[i]} ({x.positive_frac[i]*100}%)" for i in range(x.shape[0])]
)
tex_conf_int = (lambda x :
[f"[{x.conf_lwr[i]*100}%, {x.conf_upr[i]*100}%]" for i in range(x.shape[0])]
)
col_order = [
'sens_grp', 'a', 'dataset',
'N', 'N_positive', 'positive_frac','conf_lwr', 'conf_upr']
tmp = (tbl_combined[col_order]
.reset_index(drop = True)
.round({'positive_frac': 4, 'conf_lwr': 4, 'conf_upr': 4})
.assign(
N_positive = tex_pos_rate,
CI = tex_conf_int)
.drop(columns = ["conf_lwr", "conf_upr", "positive_frac"])
.sort_values(['sens_grp', 'a', 'dataset'])
.rename(columns = {
"a": 'Group',
"N_positive": 'Has Cardiomegaly',
'sens_grp': 'Sensitive Attribute',
'dataset': 'Split'}))
tmp.to_csv('references/split_distribution_for_tablesgenerator.csv')
# %%
|
from typing import Optional
from pydantic import BaseModel
class ItemBase(BaseModel):
title: str
description: Optional[str] = None
class ItemCreate(ItemBase):
pass
class Item(ItemBase):
id: int
customer_id: int
class Config:
orm_mode = True
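# Hedged sketch (not part of the original schemas): with orm_mode enabled, Item can be built from
# any attribute-bearing object (e.g. a SQLAlchemy row) via from_orm; the SimpleNamespace stand-in
# below is only for illustration.
if __name__ == "__main__":
    from types import SimpleNamespace
    row = SimpleNamespace(id=1, customer_id=7, title="Notebook", description=None)
    print(Item.from_orm(row))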
|
"""
This file is part of the rgf_grape python package.
Copyright (C) 2017-2018 S. Boutin
For details of the rgf_grape algorithm and applications see:
S. Boutin, J. Camirand Lemyre, and I. Garate, Majorana bound state engineering
via efficient real-space parameter optimization, ArXiv 1804.03170 (2018).
"""
import numpy as np
from numpy.linalg import inv
from rgf_grape.optimization.grape_solver import Grape_optimizer
import rgf_grape.ldos_utilities as ldos_utilities
###############################################################################
# Reflection matrix determinant
###############################################################################
def getDetr(rMat, sol):
if rMat.size > 0:
detr_cpl = np.linalg.det(rMat)
detr = np.linalg.det(rMat.real)
if np.absolute(detr_cpl.imag) > 1e-1:
print('Possible error : detr = ', detr_cpl)
print('rMat =\n', rMat)
else:
print('Error : reflection matrix of size zero.')
detr = np.nan
return detr
def cf_detr(args):
sol = args['sol']
side = args.get('side', 'L')
if side == 'LR':
detrL, tmp = _cf_detr(sol, 'L')
detrR, tmp = _cf_detr(sol, 'R')
return detrL, detrR
return _cf_detr(sol, side, args['E0'])
def _cf_detr(sol, side, E0=0):
rMat = sol.rMatrix_majorana(side, E0)
detr = getDetr(rMat, sol)
return detr.real, detr
def grad_detr(args):
E = args['E0']
sol = args['sol']
controls = args['controls']
side = args.get('side', 'L')
if side == 'LR':
gL, dL = _grad_detr(E, sol, controls, 'L')
gR, dR = _grad_detr(E, sol, controls, 'R')
return gL+gR, dL+dR
return _grad_detr(E, sol, controls, side)
def _grad_detr(E, sol, controls, side):
grapeOpt = Grape_optimizer(sol, controls, E)
if side == 'L':
endIndex = 0
else: # R
endIndex = sol.N+1
rMat = sol.rMatrix_majorana(side, E)
detr = getDetr(rMat, sol).real
rMatInv = inv(rMat)
grad = np.zeros((len(controls), sol.N))
for j in range(1, sol.N+1):
D_gnn_kj = grapeOpt.D_gnn_ret(endIndex, j)
for k, hk in enumerate(controls):
derivative = sol._rMat_majorana(side, D_gnn_kj[k], derivative=True)
grad[k, j-1] = np.einsum('ij,ji', rMatInv, derivative).real
grad *= detr
return grad.ravel(), detr
###############################################################################
# Functions for heuristic gap gradient (based on zero-energy LDOS)
###############################################################################
def heuristic_gap(args):
side = args['side']
nN = args['nN']
sitesToAvg = args['sitesToAvg_{}'.format(side)]
h_gap, avg, avg_n, norm = _heuristic_gap(
args['sol'], args['E0'], sitesToAvg, side, nN)
avgF = avg_n/(avg*norm)
return h_gap, avgF
def _heuristic_gap(sol, E, sitesToAvg, side, nN):
avg = 0.
avg_n = 0.
for s in sitesToAvg:
data_ldos = ldos_utilities.ldos(sol.calculate_gnn_ret(E, s), BdeG=True)
avg += data_ldos
avg_n += s*data_ldos
norm = len(sitesToAvg)
if side == 'L':
h_gap = ((avg_n/avg - (sitesToAvg[0]+nN))/norm)**(-1.)
else:
h_gap = ((sitesToAvg[0] - nN - (avg_n/avg))/norm)**(-1.)
return h_gap, avg, avg_n, norm
def performance_index(args):
side = args.get('side', 'LR')
if side == 'LR':
args['side'] = 'L'
phiL, detrL = performance_index(args)
args['side'] = 'R'
phiR, detrR = performance_index(args)
phi = phiL+phiR
detr = [detrL, detrR]
args['side'] = 'LR'
else:
detr, tmp = cf_detr(args)
args['results']['detr_{}'.format(side)] = detr
h, x = heuristic_gap(args)
args['results']['hgap_{}'.format(side)] = h
        # When det(r) is numerically at its extremes (below -0.999, or within a hard-coded 1e-3
        # tolerance of +1), the performance index reduces to -/+ h_gap; otherwise use h_gap*det(r).
        # The tolerance could be exposed as an external parameter.
if detr < -0.999:
phi = -1.*h
elif detr > (1-1e-3):
phi = h
else:
phi = h*detr
return phi, detr
def _rgf_grad(args, hgap_gradient_function):
side = args.get('side', 'LR')
if side == 'LR':
args['side'] = 'L'
grad_L, phiL = _rgf_grad(args, hgap_gradient_function)
args['side'] = 'R'
grad_R, phiR = _rgf_grad(args, hgap_gradient_function)
grad = grad_L + grad_R
phi = phiL+phiR
args['side'] = 'LR'
else:
g_det, detr = grad_detr(args)
args['results']['detr_{}'.format(side)] = detr
grad_h, h = hgap_gradient_function(args)
args['results']['hgap_{}'.format(side)] = h
        # Product rule for phi = h_gap * det(r): when |det(r)| is within 1e-3 of 1, only the
        # heuristic-gap gradient contributes (signed by det(r)); otherwise combine both terms.
        if np.absolute(np.absolute(detr)-1.) < 1e-3:
            grad = grad_h * np.sign(detr)
            phi = h * np.sign(detr)
        else:
            grad = grad_h * detr + g_det * h
            phi = h * detr
return grad, phi
def rgf_grad_naive(args, naive=False):
return _rgf_grad(args, lambda x: heuristic_gap_gradient(x, naive=True))
def rgf_grad_v1(args, naive=False):
return _rgf_grad(args, heuristic_gap_gradient)
###############################################################################
# Recursive gradient implementation
###############################################################################
def rgf_grape_grad(args):
return _rgf_grad(args, heuristic_gap_gradient_rec)
def heuristic_gap_gradient_rec(args):
E = args['E0']
sol = args['sol']
side = args['side']
sitesToAvg = args['sitesToAvg_{}'.format(side)]
controls = args['controls']
nN = args['nN']
return _heuristic_gap_grad_rec(sol, E, sitesToAvg, controls, side, nN)
def _heuristic_gap_grad_rec(sol, E, sitesToAvg, controls, side, nN):
grapeOpt = Grape_optimizer(sol, controls, E)
h_gap, avg, avg_n, norm = _heuristic_gap(sol, E, sitesToAvg, side, nN)
cf = 0.5*h_gap*h_gap/(np.pi*norm*avg)
if side == 'R':
cf *= -1.
full_list = np.array(
[(s - avg_n/avg)*cf for s in range(sol.N+1)], dtype=np.double)
coeff = np.zeros(sol.N+2, dtype=np.double)
coeff[sitesToAvg] = full_list[sitesToAvg]
grad = np.zeros((len(controls), sol.N))
gnn_ret = grapeOpt.gnnRet
gnnL = grapeOpt.gnnL
gnnR = grapeOpt.gnnR
# Sn_0 sum
for s in sitesToAvg:
mat = (gnn_ret[s]@gnn_ret[s])
for k, hk in enumerate(controls):
grad[k, s-1] += coeff[s]*np.trace(mat@hk).imag
# Sn_R
mat = np.zeros((sol.M, sol.M), dtype=np.complex128)
for s in range(min(sitesToAvg[0],sitesToAvg[-1])+1, sol.N+1):
mat += coeff[s-1]*(gnn_ret[s-1]@gnn_ret[s-1])
mat = grapeOpt.gnnR_u[s]@mat@grapeOpt.ud_gnnR[s]
for k, hk in enumerate(controls):
grad[k, s-1] += np.trace(mat@hk).imag
# Sn_L
mat = np.zeros((sol.M, sol.M), dtype=np.complex128)
for s in range(1, max(sitesToAvg[0],sitesToAvg[-1]))[::-1]:
mat += coeff[s+1]*(gnn_ret[s+1]@gnn_ret[s+1])
mat = grapeOpt.gnnL_ud[s]@mat@grapeOpt.u_gnnL[s]
for k, hk in enumerate(controls):
grad[k, s-1] += np.trace(mat@hk).imag
return grad.ravel(), h_gap
###############################################################################
# Exact gap performance index (using diagonalization)
###############################################################################
def exact_phi_OBC(args):
detr, tmp = cf_detr(args)
gap = args['wire'].calculate_OBC_gap()
args['results']['detr_L'] = detr
args['results']['hgap_L'] = gap
if detr < -0.999:
detr = -1.
return 1+(1+gap)*detr, detr
def exact_phi_PBC(args):
detr, tmp = cf_detr(args)
gap = args['wire'].calculate_PBC_gap()
args['results']['detr_L'] = detr
args['results']['hgap_L'] = gap
if detr < -0.999:
detr =-1.
return 1+(1+gap)*detr, detr
###############################################################################
# Heuristic performance index: first implementation corresponding to
# rgf_grape_v1 in Fig. S1 of the SM of the manuscript.
# This implementation is less efficient than the recursive approach above.
###############################################################################
from rgf_grape.optimization import _functionsDefinitions as _fctDef
def indicesForLeftSum(j, sitesList):
if sitesList[-1] <= j:
return 0, 0
end = sitesList[-1]+1 # excluded i.e. start:end is the list we want
if sitesList[0] > j:
start = sitesList[0]
else:
start = j+1
return start, end
def indicesForRightSum(j, sitesList):
if sitesList[0] >= j:
return 0, 0
end = sitesList[0]-1 # excluded i.e. start:end:-1 is the list we want
if sitesList[-1] < j:
start = sitesList[-1]
else:
start = j-1
return start, end
def generateMatricesForLeftSum(j, start, end, grapeOpt, coeffs):
if end <= start:
return np.zeros(grapeOpt.controls.shape[0])
return _fctDef.generateMatricesForLeftSum(
j, grapeOpt.sol.M, start, end,
grapeOpt.gnnRet, grapeOpt.u_gnnL,
grapeOpt.gnnL_ud, coeffs, grapeOpt.controls)
def generateMatricesForRightSum(j, sr, er, grapeOpt, coeffs):
if sr <= er:
return np.zeros(grapeOpt.controls.shape[0])
return _fctDef.generateMatricesForRightSum(
j, grapeOpt.sol.M, sr, er, grapeOpt.gnnRet,
grapeOpt.ud_gnnR, grapeOpt.gnnR_u, coeffs, grapeOpt.controls)
def heuristic_gap_gradient(args, naive=False):
E = args['E0']
sol = args['sol']
side = args.get('side', 'LR')
controls = args['controls']
nN = args['nN']
if side == 'LR':
gL, hL = _heuristic_gap_grad(
sol, E, args['sitesToAvg_L'], controls, 'L', naive, nN)
gR, hR = _heuristic_gap_grad(
sol, E, args['sitesToAvg_R'], controls, 'R', naive, nN)
return gL+gR, hL+hR
sitesToAvg = args['sitesToAvg_{}'.format(side)]
return _heuristic_gap_grad(sol, E, sitesToAvg, controls, side, naive, nN)
def _heuristic_gap_grad(sol, E, sitesToAvg, controls, side, naive, nN):
grapeOpt = Grape_optimizer(sol, controls, E)
h_gap, avg, avg_n, norm = _heuristic_gap(sol, E, sitesToAvg, side, nN)
    # Two factors of -1 cancel here, one from the derivative and one from the LDOS
cf = 0.5*h_gap*h_gap/(np.pi*norm*avg)
if side == 'R':
cf *= -1.
siteListIncr = sitesToAvg[::-1]
else:
siteListIncr = sitesToAvg
coeff = np.array(
[(s - avg_n/avg)*cf for s in range(sol.N+1)], dtype=np.double)
grad = np.zeros((len(controls), sol.N))
if naive:
print('Naive heuristic gap gradient calculation (VERY SLOW)')
coeff = coeff[sitesToAvg]
for j in range(1, sol.N+1):
res = np.zeros((len(controls)), dtype=np.double)
for s, c in zip(sitesToAvg, coeff):
res += c*np.imag(np.einsum('ijj',grapeOpt.D_gnn_ret(s, j)))
grad[:, j-1] = res
else:
gnn_ret = grapeOpt.gnnRet
if side == 'L':
endIndex = 0
else:
endIndex = sol.N+1
for j in range(1, sol.N+1):
# j<n
sl, el = indicesForLeftSum(j, siteListIncr)
resL = generateMatricesForLeftSum(j, sl, el, grapeOpt, coeff)
# j>n
sr, er = indicesForRightSum(j, siteListIncr)
resR = generateMatricesForRightSum(j, sr, er, grapeOpt, coeff)
D_gnn_kj = grapeOpt.D_gnn_ret(endIndex, j)
for k, hk in enumerate(controls):
t2 = 0
if siteListIncr[0] <= j <= siteListIncr[-1]:
t2 = coeff[j]*np.einsum(
'ij,jk,ki', gnn_ret[j], hk, gnn_ret[j]).imag
grad[k, j-1] = (t2 + resL[k] + resR[k]).real
return grad.ravel(), h_gap
|
# import modules
from pathlib import Path
# read the todo file location from a local config file named "PATH"
with open("PATH") as path_file:
    path = path_file.readline().strip()
fileOpen = Path(path)
# mainloop
while True:
# options of what to do
choices = "1 to add item, 2 to view list, 3 to finish a todo, 4 to delete a todo, 5 to exit"
print(choices)
todo = input("Enter choice (1-5): ")
# option 1 -> add item
if todo == "1":
file = open(fileOpen, "a+")
text = input("Enter item: ")
file.write("\n - [ ] " + text)
file.close()
# option 2 -> read list
elif todo == "2":
file = open(fileOpen, "r+")
list = file.readlines()
file.close()
print(list)
# option 3 -> finish a todo
elif todo == "3":
with open(fileOpen, 'r') as file:
data = file.readlines()
print(data)
todoNum = input("Which todo do you want to finish (i.e. 1, 2 etc.): ")
todoNum = int(todoNum)
data[todoNum] = data[todoNum].replace("- [ ]", "- [x]")
file = open(fileOpen, 'w+')
file.writelines(data)
file.close()
# option 4 -> delete todo
elif todo == "4":
with open(fileOpen, 'r') as file:
data = file.readlines()
print(data)
todoNum = input("Which todo do you want to finish (i.e. 1, 2 etc.): ")
todoNum = int(todoNum)
data[todoNum] = ""
file = open(fileOpen, 'w+')
file.writelines(data)
file.close()
# option 5 -> exit program
elif todo == "5":
print("Bye!")
break
|
#!/usr/bin/env python3
import sys
import overrides
from utils import memoize
@memoize
def get():
timers_val = overrides.get('CHPL_TIMERS', 'generic')
return timers_val
def _main():
timers_val = get()
sys.stdout.write("{0}\n".format(timers_val))
if __name__ == '__main__':
_main()
|
# "Properties" area -> "Modifiers" tab
import os, re, sys, bpy, time, bmesh, mathutils, math
from . import common
# Add an entry to menus etc.
def menu_func(self, context):
ob = context.active_object
if ob:
if ob.type == 'MESH':
me = ob.data
if len(ob.modifiers):
self.layout.operator('object.forced_modifier_apply', icon_value=common.preview_collections['main']['KISS'].icon_id)
class forced_modifier_apply(bpy.types.Operator):
bl_idname = 'object.forced_modifier_apply'
bl_label = "モディファイア強制適用"
bl_description = "シェイプキーのあるメッシュのモディファイアでも強制的に適用します"
bl_options = {'REGISTER', 'UNDO'}
is_applies = bpy.props.BoolVectorProperty(name="適用するモディファイア", size=32, options={'SKIP_SAVE'})
@classmethod
def poll(cls, context):
ob = context.active_object
return len(ob.modifiers)
def invoke(self, context, event):
ob = context.active_object
if len(ob.modifiers) == 0:
return {'CANCELLED'}
return context.window_manager.invoke_props_dialog(self)
def draw(self, context):
prefs = common.preferences()
self.layout.prop(prefs, 'custom_normal_blend', icon='SNAP_NORMAL', slider=True)
self.layout.label("適用するモディファイア")
ob = context.active_object
for index, mod in enumerate(ob.modifiers):
icon = 'MOD_%s' % mod.type
try:
self.layout.prop(self, 'is_applies', text=mod.name, index=index, icon=icon)
except:
self.layout.prop(self, 'is_applies', text=mod.name, index=index, icon='MODIFIER')
if mod.show_viewport:
self.is_applies[index] = True
def execute(self, context):
custom_normal_blend = common.preferences().custom_normal_blend
bpy.ops.object.mode_set(mode='OBJECT')
ob = context.active_object
me = ob.data
is_shaped = bool(me.shape_keys)
pre_selected_objects = context.selected_objects[:]
pre_mode = ob.mode
if is_shaped:
pre_relative_keys = [s.relative_key.name for s in me.shape_keys.key_blocks]
pre_active_shape_key_index = ob.active_shape_key_index
shape_names = [s.name for s in me.shape_keys.key_blocks]
shape_deforms = []
for shape in me.shape_keys.key_blocks:
shape_deforms.append([shape.data[v.index].co.copy() for v in me.vertices])
ob.active_shape_key_index = len(me.shape_keys.key_blocks) - 1
for i in me.shape_keys.key_blocks[:]:
ob.shape_key_remove(ob.active_shape_key)
new_shape_deforms = []
for shape_index, deforms in enumerate(shape_deforms):
temp_ob = ob.copy()
temp_me = me.copy()
temp_ob.data = temp_me
context.scene.objects.link(temp_ob)
try:
for vert in temp_me.vertices:
vert.co = deforms[vert.index].copy()
override = context.copy()
override['object'] = temp_ob
for index, mod in enumerate(temp_ob.modifiers):
if self.is_applies[index]:
try:
bpy.ops.object.modifier_apply(override, modifier=mod.name)
except:
ob.modifiers.remove(mod)
new_shape_deforms.append([v.co.copy() for v in temp_me.vertices])
finally:
common.remove_data(temp_ob)
common.remove_data(temp_me)
if ob.active_shape_key_index != 0:
ob.active_shape_key_index = 0
me.update()
copy_modifiers = ob.modifiers[:]
is_selected_armature = False
for index, mod in enumerate(copy_modifiers):
if self.is_applies[index]:
if mod.type == 'ARMATURE':
is_selected_armature = True
else:
if mod.type == 'MIRROR':
for vg in ob.vertex_groups[:]:
replace_list = ((r'\.L$', ".R"), (r'\.R$', ".L"), (r'\.l$', ".r"), (r'\.r$', ".l"), (r'_L$', "_R"), (r'_R$', "_L"), (r'_l$', "_r"), (r'_r$', "_l"))
for before, after in replace_list:
mirrored_name = re.sub(before, after, vg.name)
if mirrored_name not in ob.vertex_groups:
ob.vertex_groups.new(mirrored_name)
try:
bpy.ops.object.modifier_apply(modifier=mod.name)
except:
ob.modifiers.remove(mod)
arm_ob = None
if is_selected_armature:
for mod in ob.modifiers:
if mod.type == "ARMATURE":
arm_ob = mod.object
if arm_ob:
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.object.mode_set(mode='OBJECT')
arm = arm_ob.data
arm_pose = arm_ob.pose
pose_quats = {}
for bone in arm.bones:
pose_bone = arm_pose.bones[bone.name]
bone_quat = bone.matrix_local.to_quaternion()
pose_quat = pose_bone.matrix.to_quaternion()
result_quat = pose_quat * bone_quat.inverted()
pose_quats[bone.name] = result_quat.copy()
custom_normals = []
for loop in me.loops:
vert = me.vertices[loop.vertex_index]
no = vert.normal.copy()
total_weight = 0.0
for vge in vert.groups:
vg = ob.vertex_groups[vge.group]
try:
pose_quats[vg.name]
except KeyError:
continue
total_weight += vge.weight
total_quat = mathutils.Quaternion()
for vge in vert.groups:
vg = ob.vertex_groups[vge.group]
try:
total_quat = total_quat.slerp(pose_quats[vg.name], vge.weight / total_weight)
except KeyError:
pass
no.rotate(total_quat)
custom_normals.append(no)
for index, mod in enumerate(copy_modifiers):
if self.is_applies[index] and mod.type == 'ARMATURE':
try:
bpy.ops.object.modifier_apply(modifier=mod.name)
except:
ob.modifiers.remove(mod)
context.scene.objects.active = ob
if is_shaped:
for deforms in new_shape_deforms:
if len(me.vertices) != len(deforms):
self.report(type={'ERROR'}, message="ミラー等が原因で頂点数が変わっているためシェイプキーを格納できません、中止するのでCtrl+Z等で元に戻し修正してください。")
return {'CANCELLED'}
for shape_index, deforms in enumerate(new_shape_deforms):
bpy.ops.object.shape_key_add(from_mix=False)
shape = ob.active_shape_key
shape.name = shape_names[shape_index]
for vert in me.vertices:
shape.data[vert.index].co = deforms[vert.index].copy()
for shape_index, shape in enumerate(me.shape_keys.key_blocks):
shape.relative_key = me.shape_keys.key_blocks[pre_relative_keys[shape_index]]
ob.active_shape_key_index = pre_active_shape_key_index
for temp_ob in pre_selected_objects:
temp_ob.select = True
bpy.ops.object.mode_set(mode=pre_mode)
if arm_ob:
for i, loop in enumerate(me.loops):
vert = me.vertices[loop.vertex_index]
no = vert.normal.copy()
try:
custom_rot = mathutils.Vector((0.0, 0.0, 1.0)).rotation_difference(custom_normals[i])
except:
continue
original_rot = mathutils.Vector((0.0, 0.0, 1.0)).rotation_difference(no)
output_rot = original_rot.slerp(custom_rot, custom_normal_blend)
output_no = mathutils.Vector((0.0, 0.0, 1.0))
output_no.rotate(output_rot)
custom_normals[i] = output_no
me.use_auto_smooth = True
me.normals_split_custom_set(custom_normals)
return {'FINISHED'}
|
"""Test gen_iset."""
import joblib
from tinybee.gen_iset import gen_iset
def test_gen_iset():
"""Test gen_iset."""
cmat = joblib.load("data/cmat.lzma")
res = gen_iset(cmat, verbose=False)
# logger.debug("res: %s, %s", res, res[68])
# logger.info("res: %s, %s", res, res[68])
# logger.debug("res[68]: %s", res[68])
# (68, 68)
assert res[68] == (68, 68)
|
from django.conf.urls import patterns, url
from proyectos import views
urlpatterns = patterns('',
# /proyectos/
url(r'^$', views.index, name='index'),
# /proyectos/5/
url(r'^(?P<id_proyecto>\d+)/$', views.proyecto, name='proyecto'),
# /proyectos/nuevo_proyecto
url(r'^nuevo_proyecto/$', views.nuevo_proyecto, name='nuevo_proyecto'),
# /proyectos/terminados
url(r'^terminados/$', views.terminados, name='terminados'),
# /proyectos/5/2/
url(r'^(?P<id_proyecto>\d+)/(?P<id_tarea>\d+)/$', views.tarea, name='tarea'),
# /proyectos/tareas_terminadas
url(r'^tareas_terminadas/$', views.tareas_terminadas, name='tareas_terminadas'),
# /proyectos/login/
url(r'^login/$', views.web_login, name='login'),
# /proyectos/logout/
url(r'^logout/$', views.web_logout, name='logout'),
# /proyectos/usuarios/
url(r'^usuarios/$', views.usuarios, name='usuarios'),
# /proyectos/usuarios/20
url(r'^usuarios/(?P<id_usuario>\d+)/$', views.usuario, name='usuario'),
# /proyectos/usuarios/20/baja
url(r'^usuarios/(?P<id_usuario>\d+)/baja/$', views.baja, name='baja'),
)
|
'''
Purpose:
# Run sumo for parameter input values to determine sensitivity of parameter to desired outputs
# Create dictionary of lists for input and output values
# Save plots of input Parameter changes and output parameter results
Author: Matthew James Carroll
'''
import xml.etree.ElementTree as ET
from SumoConnect import *
from PlotIt import *
import numpy as np
# Sensitivity Analysis class to run SUMO, retrieve output data, and plot data
##########################################################################################
class SensAnalys():
def __init__(self, paramLimits, defParams = None, \
outParamList = ["density", "sampledSeconds", "waitingTime", "occupancy", "timeLoss", "speed", "entered", "flow", "collisions"], \
outFolderName="output", \
simLoc='C:/Users/Matt/SUMO/Gville Test1', \
cfgFileName="osm.sumocfg", \
rouFileName="routes.rou.xml", \
outFileName="main.output.xml", \
addFileName="additional.xml", \
collFileName=None, \
plot = False, \
scale = "1", \
dataPoints = 10,
ignoreZeros = False,
idmRatio = 1):
print("Running Sensitivity Analysis...")
# Set default values for IDM taken from SUMO documentation
###############################################################################
if defParams == None:
self.defParams = {}
# Minimum gap between following and lead car (meters)
self.defParams["minGap"] = 2.5
# Maximum acceleration of car (m/s^2)
self.defParams["accel"] = 2.9
# Maximum deceleration of car (m/s^2)
self.defParams["decel"] = 7.5
# Emergency deceleration of car (m/s^2)
self.defParams["emergencyDecel"] = 9.0
# The driver's desired (minimum) time headway
self.defParams["tau"] = 1.0
# acceleration exponent
self.defParams["delta"] = 4.0
else:
self.defParams = defParams
# Redefine input variables as part of class
###############################################################################
self.ignoreZeros = ignoreZeros
self.idmRatio = idmRatio
self.paramLimits = paramLimits
self.outParamList = outParamList
self.outFolderName = "SensitivityAnalysis/" + outFolderName
self.simLoc = simLoc
self.cfgFileName = cfgFileName
self.rouFileName = rouFileName
self.outFileName = outFileName
self.addFileName = addFileName
self.collFileName = collFileName
self.plot = plot
self.scale = scale
self.dataPoints = dataPoints
# Run sumo and store output data of default params
self.defParams = self.runDefParams()
# Change min and max param data to evenly distributed list of data points
###################################################################################
self.params = {}
self.params = self.createDataPoints()
# Create dictionary for output
###################################################################################
self.outputDict = {}
# Call function to create output dictionary of input and output data points
###################################################################################
self.outputDict = self.runSensAnalys()
print(self.outputDict)
# Call myPlot class to save plots in output folder
###################################################################################
self.plots = createPlots(self.outputDict, self.outFolderName, self.defParams, self.idmRatio, self.plot)
del(self.plots)
# Run SUMO with default parameters
################################################################################################
def runDefParams(self):
X = self.setParam()
outputData = self.runIt(X)
for outputParam in self.outParamList:
self.defParams[outputParam] = outputData.mean[outputParam]
return self.defParams
    # Function to create data points evenly distributed between the min and max values for a parameter, using the desired number of data points
################################################################################################
def createDataPoints(self):
for param in self.paramLimits:
increment = (self.paramLimits[param][1] - self.paramLimits[param][0]) / (self.dataPoints - 1)
number = self.paramLimits[param][0]
self.params[param] = []
while number <= self.paramLimits[param][1]:
self.params[param].append(float("{:.5f}".format(number)))
number = number + increment
return self.params
    # Function to run SUMO for each parameter change, then put input and output data in a dictionary
########################################################################################
def runSensAnalys(self):
# Create dictionary for output data for each input param type
for paramType in self.params:
self.outputDict[paramType] = {}
self.outputDict[paramType]["input"] = self.params[paramType]
for outputParam in self.outParamList:
self.outputDict[paramType][outputParam] = {}
# Run SUMO for each parameter type and each parameter value in list
for paramType in self.params:
for param in self.params[paramType]:
# Check what param type is being altered then call function to set parameters in "rou" file
if paramType == 'minGap':
X = self.setParam(minGap=param)
print(paramType, param)
elif paramType == 'accel':
X = self.setParam(accel=param)
print(paramType, param)
elif paramType == 'decel':
X = self.setParam(decel=param)
print(paramType, param)
elif paramType == 'emergencyDecel':
X = self.setParam(emergencyDecel=param)
print(paramType, param)
elif paramType == 'tau':
X = self.setParam(tau=param)
print(paramType, param)
elif paramType == 'delta':
X = self.setParam(delta=param)
print(paramType, param)
elif paramType == 'stepping':
X = self.setParam(stepping=param)
print(paramType, param)
outputData = self.runIt(X)
print(outputData.mean)
# Store mean of each output parameter
for outputParam in self.outParamList:
if outputParam == "collisions":
try:
self.outputDict[paramType][outputParam]["mean"].append(outputData.mean[outputParam])
except:
self.outputDict[paramType][outputParam]["mean"] = []
self.outputDict[paramType][outputParam]["mean"].append(outputData.mean[outputParam])
else:
try:
self.outputDict[paramType][outputParam]["mean"].append(outputData.mean[outputParam])
self.outputDict[paramType][outputParam]["median"].append(outputData.median[outputParam])
self.outputDict[paramType][outputParam]["Q1"].append(outputData.Q1[outputParam])
self.outputDict[paramType][outputParam]["Q3"].append(outputData.Q3[outputParam])
except:
self.outputDict[paramType][outputParam]["mean"] = []
self.outputDict[paramType][outputParam]["median"] = []
self.outputDict[paramType][outputParam]["Q1"] = []
self.outputDict[paramType][outputParam]["Q3"] = []
self.outputDict[paramType][outputParam]["mean"].append(outputData.mean[outputParam])
self.outputDict[paramType][outputParam]["median"].append(outputData.median[outputParam])
self.outputDict[paramType][outputParam]["Q1"].append(outputData.Q1[outputParam])
self.outputDict[paramType][outputParam]["Q3"].append(outputData.Q3[outputParam])
return self.outputDict
# Function to change the input parameters into a list
#################################################################################################
def setParam(self, \
minGap=None, \
accel=None, \
decel=None, \
emergencyDecel=None, \
tau=None, \
delta=None, \
stepping=None):
print("in setParam")
if minGap == None:
minGap = self.defParams["minGap"]
if accel == None:
accel = self.defParams["accel"]
if decel == None:
decel = self.defParams["decel"]
if emergencyDecel == None:
emergencyDecel = self.defParams["emergencyDecel"]
if tau == None:
tau = self.defParams["tau"]
if delta == None:
delta = self.defParams["delta"]
X = np.array([minGap, accel, decel, emergencyDecel, tau, delta, stepping])
return X
# Function to Set vtype parameters, run SUMO, then retrieve output data
###################################################################################################
def runIt(self, X):
setVtype(self.simLoc, self.rouFileName, X, self.idmRatio)
runSUMO(self.simLoc, self.cfgFileName, self.collFileName, self.addFileName, scale = self.scale)
data = outputData(self.simLoc, self.outFileName, self.ignoreZeros, paramList=self.outParamList, collFileName=self.collFileName)
return data
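# Hedged usage sketch (not part of the original module): paramLimits maps each IDM parameter to a
# [min, max] pair that is expanded into `dataPoints` evenly spaced values. The simulation location
# below is a placeholder and running this requires a working SUMO scenario.
#
#   limits = {"tau": [0.5, 2.0], "accel": [1.0, 4.0]}
#   SensAnalys(limits, simLoc="path/to/sumo/scenario", dataPoints=5, plot=True)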
|
# encoding: utf-8
from ctypes import *
class PyDystopia(object):
"""Tokyo Dystopia Python Interface"""
def __init__(self, dbname='khufu'):
self.dbname = dbname
try:
self.lib=CDLL('libtokyodystopia.so')
except:
self.lib=CDLL('libtokyodystopia.dylib')
self.idb=self.lib.tcidbnew()
#IDBOWRITER | IDBOCREAT
ecode=self.lib.tcidbopen(self.idb,self.dbname,6)
#ecode=self.lib.tcidbopen(self.idb,self.dbname)
def search(self,text,returntext=False):
"""search text"""
lib = self.lib
idb = self.idb
rnum=c_int()
lib.tcidbsearch2.restype=POINTER(c_uint64)
result=lib.tcidbsearch2(idb,text,byref(rnum))
for i in range(rnum.value):
if returntext:
lib.tcidbget.restype = c_char_p
stext = lib.tcidbget(idb,c_int64(result[i]))
yield result[i],stext
            else:
                yield result[i]
def put(self,kid,text):
return self.lib.tcidbput(self.idb,c_int64(kid),text)
def commit(self):
return self.lib.tcidbclose(self.idb)
def get(self,kid):
self.lib.tcidbget.restype = c_char_p
return self.lib.tcidbget(self.idb,c_int64(kid))
if __name__ == '__main__':
pd = PyDystopia()
for kid in pd.search('你好'):
print "kid",kid
print pd.put(5,'用Python插入的')
print pd.put(6,'zhmocean脱耦和求偶的心理学差异...风马牛不相及的事情却有着千丝万缕的联系...')
print pd.commit()
print pd.get(5)
|
# Generated by Django 3.2.13 on 2022-05-25 07:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('probes', '0012_auto_20220329_0643'),
]
operations = [
migrations.RemoveField(
model_name='feed',
name='url',
),
]
|
"""Complete a campaign in Gophish and/or output a Gophish campaign summary.
Usage:
gophish-complete [--campaign=NAME] [--summary-only] [--log-level=LEVEL] SERVER API_KEY
gophish-complete (-h | --help)
gophish-complete --version
Options:
API_KEY Gophish API key.
SERVER Full URL to Gophish server.
-c --campaign=NAME Gophish campaign name.
-s --summary-only Output a summary of a Gophish campaign.
-h --help Show this screen.
--version Show version.
-l --log-level=LEVEL If specified, then the log level will be set to
the specified value. Valid values are "debug", "info",
"warning", "error", and "critical". [default: info]
NOTE:
* If a campaign name is not provided, all assessment campaigns will be listed to select from.
"""
# import IPython; IPython.embed() #<<< BREAKPOINT >>>
# sys.exit(0)
# Standard Python Libraries
import logging
import sys
from typing import Dict
# Third-Party Libraries
from docopt import docopt
import requests
# cisagov Libraries
from tools.connect import connect_api
from util.input import get_input, get_number
from ._version import __version__
# Disable "Insecure Request" warning: Gophish uses a self-signed certificate
# as default for https connections, which can not be verified by a third
# party; thus, an SSL insecure request warning is produced.
requests.packages.urllib3.disable_warnings()
def get_campaign_id(campaign_name, campaigns):
"""Get campaign id from campaign name.
Args:
campaign_name (string): Full campaign name.
campaigns (dict): Campaign id as key, campaign name as value.
Raises:
LookupError: Campaign name is not found in campaigns dictionary.
Returns:
Campaign id corresponding to the campaign name provided.
"""
for campaign_id, name_value in campaigns.items():
if name_value == campaign_name:
return campaign_id
raise LookupError(f'Campaign name "{campaign_name}" not found.')
def get_campaigns(api, assessment_id=""):
"""Return a dictionary containing all campaigns.
When called with a blank string for the assessment_id, the default value,
all campaigns in all assessments will be returned. If an assessment_id is
provided, then only the campaigns for that assessment will be returned.
Args:
api (Gophish API): Connection to Gophish server via the API.
assessment_id (string): Assessment identifier to get campaigns from.
Raises:
LookupError: No campaigns found for the provided assessment id.
Returns:
dict: Campaign id as key, campaign name as value.
"""
allCampaigns = api.campaigns.get()
assessmentCampaigns = dict()
for campaign in allCampaigns:
if campaign.name.startswith(assessment_id):
assessmentCampaigns[campaign.id] = campaign.name
if len(assessmentCampaigns) == 0:
raise LookupError(f"No campaigns found for assessment {assessment_id}")
return assessmentCampaigns
def select_campaign(campaigns):
"""Return the ID of a selected campaign."""
print("Please select a Campaign ID:")
print("\tID: Name")
for id, name in campaigns.items():
print(f"\t {id}: {name}")
print("")
while True:
inputId = get_number("ID: ")
if inputId in campaigns:
break
else:
logging.warning("Bad Campaign ID")
print("Try again...")
return inputId
def complete_campaign(api_key, server, campaign_id):
"""Complete a campaign in Gophish.
Args:
api_key (string): Gophish API key.
server (string): Full URL to Gophish server.
campaign_id (int): Gophish campaign id.
Raises:
UserWarning: Gophish is unsuccessful in completing the campaign.
"""
url = f"{server}/api/campaigns/{campaign_id}/complete?api_key={api_key}"
# Bandit complains about disabling the SSL certificate check, but we have
# no choice here since we are using a self-signed certificate.
response = requests.get(url=url, verify=False) # nosec
if not response.json()["success"]:
raise UserWarning(response.json()["message"])
else:
print(f'\n{response.json()["message"]}')
def print_summary(api, campaign_id):
"""Print a campaign summary."""
summary = api.campaigns.summary(campaign_id=campaign_id)
print("Campaign Summary:")
print(f"\tName: {summary.name}")
print(f"\tStatus: {summary.status}")
print(f"\tLaunch Date: {summary.launch_date}")
print(f"\tCompleted Date: {summary.completed_date}")
print(f"\tTotal Users: {summary.stats.total}")
print(f"\tTotal Sent: {summary.stats.sent}")
print(f"\tTotal Clicks: {summary.stats.clicked}")
return True
def main() -> None:
"""Set up logging, connect to API, call requested function(s)."""
args: Dict[str, str] = docopt(__doc__, version=__version__)
# Set up logging
log_level = args["--log-level"]
try:
logging.basicConfig(
format="\n%(levelname)s: %(message)s", level=log_level.upper()
)
except ValueError:
logging.critical(
'"%s" is not a valid logging level. Possible values are debug, info, warning, and error.',
log_level,
)
sys.exit(1)
# Connect to API
try:
api = connect_api(args["API_KEY"], args["SERVER"])
logging.debug('Connected to: "%s"', args["SERVER"])
except Exception as e:
logging.critical(e.args[0])
sys.exit(1)
try:
if args["--campaign"]:
# Use campaign name to find campaign id.
campaigns = get_campaigns(api)
campaign_id = get_campaign_id(args["--campaign"], campaigns)
else:
# User inputs assessment id and selects campaign from lists.
assessment_id = get_input("Enter the Assessment ID:")
campaigns = get_campaigns(api, assessment_id)
campaign_id = select_campaign(campaigns)
except LookupError as err:
logging.error(err)
sys.exit(1)
if args["--summary-only"]:
# Output summary only.
print_summary(api, campaign_id)
else:
# Complete and output summary.
try:
complete_campaign(args["API_KEY"], args["SERVER"], campaign_id)
except UserWarning as err:
logging.warning(err)
sys.exit(1)
print_summary(api, campaign_id)
|
from unicorn.x86_const import *
hooks = None
hooks = set(vars().keys())
def GetWindowsDirectoryA(ut):
emu = ut.emu
retaddr = ut.popstack()
lpBuffer = ut.popstack()
uSize = ut.popstack()
windir = "C:\\Windows"
print 'GetWindowsDirectoryA = "{0}"'.format(windir)
emu.mem_write(lpBuffer, windir)
emu.reg_write(UC_X86_REG_EAX, len(windir))
ut.pushstack(retaddr)
def lstrcat(ut):
emu = ut.emu
retaddr = ut.popstack()
lpString1 = ut.popstack()
lpString2 = ut.popstack()
lpString1_s = ut.getstr(lpString1)
lpString2_s = ut.getstr(lpString2)
print 'lstrcat ("{0}", "{1}")'.format(lpString1_s, lpString2_s)
emu.mem_write(lpString1+len(lpString1_s), str(lpString2_s))
ut.pushstack(retaddr)
def ExitProcess(ut):
retaddr = ut.popstack()
uExitCode = ut.popstack()
print 'ExitProcess ({0})'.format(uExitCode)
ut.pushstack(retaddr)
def IsDebuggerPresent(ut):
retaddr = ut.popstack()
res = 0
print 'IsDebuggerPresent = {0}'.format(res)
ut.emu.reg_write(UC_X86_REG_EAX, res)
ut.pushstack(retaddr)
def GetProcAddress(ut):
retaddr = ut.popstack()
hModule = ut.popstack()
lpProcName = ut.popstack()
lpProcName_s = str(ut.getstr(lpProcName))
res = None
if lpProcName_s in ut.dll_funcs.keys():
res = ut.dll_funcs[lpProcName_s]
else:
res = 0x0
print 'GetProcAddress (hModule=0x{0:x}, lpProcName="{1}") = 0x{2:08x}'.format(hModule, lpProcName_s, res)
ut.emu.reg_write(UC_X86_REG_EAX, res)
ut.pushstack(retaddr)
def LoadLibraryA(ut):
retaddr = ut.popstack()
lpFileName = ut.popstack()
lpFileName_s = str(ut.getstr(lpFileName))
res = None
if lpFileName_s in map(lambda x:x[0], ut.dlls):
res = filter(lambda x:x[0]==lpFileName_s, ut.dlls)[0][1]
else:
res = ut.load_dll(lpFileName_s)
    print 'LoadLibraryA (lpFileName="{0}")'.format(lpFileName_s)
    # Report the module handle back in EAX; the original code computed `res`
    # but never exposed it to the emulated program.
    ut.emu.reg_write(UC_X86_REG_EAX, res)
    ut.pushstack(retaddr)
def WinExec(ut):
retaddr = ut.popstack()
lpCmdLine = ut.popstack()
lpCmdLine_s = ut.getstr(lpCmdLine)
uCmdShow = ut.popstack()
print 'WinExec (lpCmdLine="{0}", uCmdShow=0x{1:x})'.format(lpCmdLine_s, uCmdShow)
ut.emu.reg_write(UC_X86_REG_EAX, 0x20)
ut.pushstack(retaddr)
hooks = set(vars().keys()).difference(hooks)
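# Added note: `hooks` now holds just the names of the hook functions defined
# above (the difference between the module namespace after and before the
# definitions), presumably so a harness can look up a handler by the name of
# the emulated Windows API call.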
|
'''
A simple tutorial program to FM demodulate an APRS IQ.wav file
NOTE: All documentation is at directdemod.readthedocs.io
A simple FM demodulator would be a good start for us.
Record a sample IQ.wav file from your RTLSDR or use the one provided in the samples folder.
'''
# First we import the libraries we need
import os, sys
nb_dir = os.path.split(os.getcwd())[0]
if nb_dir not in sys.path:
sys.path.append(nb_dir)
from directdemod import source, comm, demod_fm
import matplotlib.pyplot as plt
## First the source of data
fileName = "../samples/SDRSharp_20170830_073907Z_145825000Hz_IQ_autogain.wav"
sigsrc = source.IQwav(fileName)
## Next create a signal object, reading data from the source
# Read all values from the source into an array
sigArray = sigsrc.read(0, sigsrc.length)
# a commSignal object stores the signal array and its sampling rate:
# the array is available as sig.signal and the sampling rate as sig.sampRate
sig = comm.commSignal(sigsrc.sampFreq, sigArray)
## Limit bandwidth, say 30000
sig.bwLim(30000)
## FM demodulate
fmDemodulator = demod_fm.demod_fm()
sig.funcApply(fmDemodulator.demod)
## plot the signal
plt.plot(sig.signal)
plt.show()
# Hmm... we don't see a clear signal yet, probably because no filtering has been applied, so next we apply some filters to get a better result (a rough low-pass sketch follows below).
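## A rough illustrative sketch (not part of the original tutorial): smooth the
## demodulated signal with an ordinary low-pass filter before plotting again.
## scipy is assumed to be available, and the cutoff below is only a guess.
from scipy.signal import butter, filtfilt
b, a = butter(4, 0.1)  # 4th-order low-pass with a normalised cutoff of 0.1
plt.plot(filtfilt(b, a, sig.signal))
plt.show()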
|
#
# Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
ga = sys.argv[1]
i = 1
with open('goodgraph.out', 'w') as output:
    p = './louvain_to_gephi/' + ga
    while os.path.exists(p + '/community_itr_' + str(i) + '.nodes'):
        comm_hash = {}
        with open(p + '/community_itr_' + str(i) + '.nodes', 'r') as comms:
            for line in comms:
                node, comm = line.strip().split('\t')
                comm_hash[node] = comm
        with open(p + '/graph_itr_' + str(i - 1) + '.edges', 'r') as edges:
            for line in edges:
                source, target, weight = line.strip().split('\t')
                output.write('\t'.join((source, comm_hash[source], target, comm_hash[target], weight, str(i), '\n')))
        i = i + 1
|
import os
def list_to_scalar(input_list):
"""
    Helper function that turns a list into a scalar if its length is 1.
"""
if len(input_list) == 1:
return input_list[0]
else:
return input_list
class Folder:
def __init__(self, path):
self.abs_path = os.path.abspath(path)
self.basename = os.path.basename(self.abs_path)
def file_names(self, *args):
"""
        Pass the indices of the file names you want through *args; with no indices, all names are returned.
"""
excluded_file_names = ['.DS_Store', '.gitignore']
current_file_names_snapshot_base = os.listdir(self.abs_path)
current_file_names_snapshot = [f for f in current_file_names_snapshot_base if f not in excluded_file_names]
if args:
result_list = []
for file_index in args:
result_list.append(
current_file_names_snapshot[file_index]
)
            # if the indices select exactly one element, return a scalar
return list_to_scalar(result_list)
else:
            # if no index is given, return the full list
return current_file_names_snapshot
def file_paths(self, *args):
"""
        Pass the indices of the file names you want through *args; with no indices, all paths are returned.
"""
current_file_names_snapshot = self.file_names()
if args:
result_list = []
for file_index in args:
result_list.append(
os.path.join(self.abs_path, current_file_names_snapshot[file_index])
)
            # if the indices select exactly one element, return a scalar
return list_to_scalar(result_list)
else:
            # if no index is given, return the full list
return [os.path.join(self.abs_path, x) for x in current_file_names_snapshot]
def files_count(self):
return len(self.file_paths())
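# A brief usage sketch (not part of the original module):
if __name__ == '__main__':
    folder = Folder('.')
    print(folder.files_count())  # number of entries, ignoring .DS_Store and .gitignore
    print(folder.file_names())   # no indices -> the full list of names
    # passing a single index, e.g. folder.file_names(0), returns a scalar name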
|
import struct
class Slot:
def __init__(self):
self.num = 0
self.ref = None
class LocalVars(list):
def SetInt(self, index:int, val: int):
self[index].num = val
def GetInt(self, index:int) -> int:
return self[index].num
def SetFloat(self, index:int, val:float):
bits = struct.pack('>f',val)
self[index].num = struct.unpack('>i',bits)[0]
def GetFloat(self, index:int) -> float:
bits = struct.pack('>i',self[index].num)
return struct.unpack('>f',bits)[0]
def SetLong(self, index:int, val:int):
self[index].num = val & 4294967295
self[index+1].num = val >> 32
def GetLong(self, index:int) -> int:
low = self[index].num
high = self[index+1].num
return high << 32 | low
def SetDouble(self, index:int, val:float):
bits = struct.pack('>d',val)
self.SetLong(index, struct.unpack('>q',bits)[0])
def GetDouble(self, index:int) -> float:
bits = struct.pack('>q',self.GetLong(index))
return struct.unpack('>d',bits)[0]
def SetRef(self, index:int, ref):
self[index].ref = ref
def GetRef(self, index:int):
return self[index].ref
def SetSlot(self, index:int, slot:Slot):
self[index] = slot
def newLocalVars(maxLocals:int):
if maxLocals > 0:
lv = LocalVars()
for i in range(maxLocals):
lv.append(Slot())
return lv
return None
class OperandStack:
def __init__(self, slots):
self.size = 0
self.slots = slots
def PushInt(self, val:int):
self.slots[self.size].num = val
self.size += 1
def PopInt(self) -> int:
self.size -= 1
return self.slots[self.size].num
def PushFloat(self, val:float):
bits = struct.pack('>f', val)
self.slots[self.size].num = struct.unpack('>i', bits)[0]
self.size += 1
def PopFloat(self) -> float:
self.size -= 1
bits = struct.pack('>i', self.slots[self.size].num)
return struct.unpack('>f', bits)[0]
def PushLong(self, val:int):
self.slots[self.size].num = val & 4294967295
self.slots[self.size+1].num = val >> 32
self.size += 2
def PopLong(self) -> int:
self.size -= 2
low = self.slots[self.size].num
high = self.slots[self.size+1].num
return high << 32 | low
def PushDouble(self, val:float):
bits = struct.pack('>d', val)
self.PushLong(struct.unpack('>q',bits)[0])
def PopDouble(self) -> float:
bits = struct.pack('>q', self.PopLong())
return struct.unpack('>d', bits)[0]
def PushRef(self, ref):
self.slots[self.size].ref = ref
self.size += 1
def PopRef(self):
self.size -= 1
ref = self.slots[self.size].ref
self.slots[self.size].ref = None
return ref
def PushSlot(self, slot:Slot):
self.slots[self.size] = slot
self.size += 1
def PopSlot(self):
self.size -= 1
return self.slots[self.size]
def GetRefFromTop(self, n:int):
return self.slots[self.size-1-n].ref
def newOperandStack(maxStack:int) -> OperandStack:
if maxStack > 0:
slots = []
for i in range(maxStack):
slots.append(Slot())
return OperandStack(slots)
return None
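# A minimal self-check sketch (not part of the original module): longs and
# doubles occupy two slots and round-trip through the pack/unpack logic above.
if __name__ == '__main__':
    lv = newLocalVars(4)
    lv.SetLong(0, (1 << 40) + 5)
    assert lv.GetLong(0) == (1 << 40) + 5
    stack = newOperandStack(4)
    stack.PushDouble(3.14159)
    assert abs(stack.PopDouble() - 3.14159) < 1e-9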
|
"""print('-=-'*10)
res = False
p = int(input('Informe o 1° termo da PA: '))
r = int(input('Razão da PA: '))
decimo = p + (10-1) * r
per = 'N'
mais = 0
cont = 0
while p != (decimo+r):
print(p, end=' -> ')
p += r
cont += +1
while not res:
per = str(input('Quer continuar [S/N]? ')).upper().strip()
if per == 'S':
mais = int(input('Quantos termos a mais desejar calcular? '))
print('-=-'*10)
print('Os novos termos são:')
decimo = p + ((mais +1)-1)*r
while p <= (decimo-1):
print(p, end=' -> ')
p += r
cont += +1
elif per == 'N':
res = True
print('-=-'*10)
print('Sua progressaõ foi finalizada com {} termos.'.format(cont))
print('Finalizando')
print('\nSaindo do programa')"""
# TEACHER'S CODE (instructor's solution)
"""n = int(input('Informe o 1° TERMO: '))
r = int(input('Informe a RAZÃO: '))
termo = n
cont = 1
total = 0
mais = 10
while mais != 0:
total = total + mais
while cont <= total:
print('{}'.format(termo), end=' - > ')
cont += 1
termo += r
print('PAUSA')
mais = int(input('Quantos termos você quer mostrar a mais? '))
print('Sua progressão foi finalizada mostrando {} termos.'.format(total))
print('\nfim')"""
|
"""Filename globbing utility with optional case sensitive override."""
try: # Python 2
from glob import _unicode
except ImportError:  # Python 3
_unicode=str
unicode=str
from fnmatch import translate
import os,posixpath,ntpath
import sys
import re
__all__ = ["glob", "iglob"]
def path_split(p):
""" Well... it turns out that os.path.split is WRONG for both posix and nt.
Parameters
----------
p : str
The Path
Returns
-------
array_like
The split path
When you execute os.path.split('.') You SHOULD get ('.','') BUT... due
to the simplicity of os.path.split, it only looks for /, and it fails to make
an exception for this, which breaks the recursive logic of glob. The ORIGINAL
glob does not FIX this, it just doesn't care since it does a lexists on what
should have been isdir on a directory. Since I can't
use lexists here for case insensitive reasons, the logic needed to be
more lock tight... This (hopefully simple and complete?) solution will
look for result where dirname is empty and basename is made up of all
periods, and if that happens, switch it
"""
s = os.path.split(p)
if not s[0] and s[1] == '.'*len(s[1]):
return (s[1], s[0])
return s
def path_join(a, *p):
    ''' Join counterpart to path_split; undoes the all-periods basename swap.
Parameters
----------
a : str
*p :
Returns
-------
array_like
'''
if p==('',) and a == '.'*len(a):
return a
return os.path.join(a, *p)
def glob(pathname, case=None):
"""Return a list of paths matching a pathname pattern.
Parameters
----------
pathname : str
The Path Name
Returns
-------
list
A list of paths matching a pathname pattern
The pattern may contain simple shell-style wildcards a la fnmatch. However,
unlike fnmatch, filenames starting with a dot are special cases that are not
matched by '*' and '?' patterns.
    Set case to True to force case sensitive matching, False to force case
    insensitive matching, or None (default) to use glob's native behaviour.
"""
return list(iglob(pathname, case))
def checkcase(case=None):
""" Determing which case mode to use
Returns
-------
bool
        If case is None (default), use whichever mode is native to the OS.
        If True, force case sensitive mode.
        If False, force case insensitive mode.
"""
if case is None:
return os.path is posixpath
elif case:
return True
return False
def iglob(pathname, case=None):
"""Return an iterator which yields the paths matching a pathname pattern.
Parameters
----------
pathname : str
The Path Name
The pattern may contain simple shell-style wildcards a la fnmatch. However,
unlike fnmatch, filenames starting with a dot are special cases that are not
matched by '*' and '?' patterns.
    Set case to True to force case sensitive matching, False to force case
    insensitive matching, or None (default) to use glob's native behaviour.
"""
dirname, basename = path_split(pathname)
case = checkcase(case)
    # The only real use of these lines is to make sure iglob('') yields nothing.
    # Other than that, this repeats the logic below.
if not dirname:
        for name in glob1(os.curdir, basename, case):
yield name
return
# `os.path.split()` returns the argument itself as a dirname if it is a
# drive or UNC path. Prevent an infinite recursion if a drive or UNC path
# contains magic characters (i.e. r'\\?\C:').
if dirname != pathname:
dirs = iglob(dirname, case)
else:
dirs = [dirname]
if basename=='':
glob_in_dir = glob0
else:
glob_in_dir = glob1
for dirname in dirs:
for name in glob_in_dir(dirname, basename, case):
yield path_join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob0(dirname, basename, case=None):
"""
Parameters
----------
dirname : str
The Directory Name
basename : str
The Base Name
Returns
-------
list
A list of basenames
"""
if dirname == '':
dirname = os.curdir
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
return []
def glob1(dirname, pattern, case=None):
"""
Parameters
----------
dirname : str
The Directory Name
pattern : str
A Pattern
Returns
-------
list
A list of basenames
"""
if not dirname:
dirname = os.curdir
if isinstance(pattern, _unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or
sys.getdefaultencoding())
try:
names = os.listdir(dirname)
except os.error:
return []
if pattern and pattern[0] != '.':
names = filter(lambda x: x[0] != '.', names)
return fnmatch_filter(names, pattern, case)
_casecache = {}
_nocasecache = {}
def fnmatch_filter(names, pat, casesensitive):
# Return the subset of the list NAMES that match PAT
"""
Parameters
----------
names : :obj:`list` or :obj:`tuple`
A list of names
pat : str
A pattern
casesensitive : bool
True if case sensitive, False if not.
Returns
-------
list
The subset of the list NAMES that match PAT
"""
result=[]
if casesensitive:
if not pat in _casecache:
res = translate(pat)
if len(_casecache) >= 100:
_casecache.clear()
_casecache[pat] = re.compile(res)
match=_casecache[pat].match
for name in names:
if match(name):
result.append(name)
else:
if not pat in _nocasecache:
res = translate(pat)
if len(_nocasecache) >= 100:
_nocasecache.clear()
_nocasecache[pat] = re.compile(res, re.IGNORECASE)
match=_nocasecache[pat].match
pat=ntpath.normcase(pat)
for name in names:
if match(ntpath.normcase(name)):
result.append(name)
return result
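# A small self-test sketch (not in the original module): force case-insensitive
# matching regardless of the native behaviour of the current OS.
if __name__ == '__main__':
    print(glob('*.PY', case=False))  # should also list lower-case .py files on POSIX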
|
""" This is the start routine for Apache """
from .run_flask import run, log
if __name__ == '__main__':
log.debug("Starting Flask Service from Apache")
run()
|
import time
from multiprocessing import Value
from chillow.service.ai.artificial_intelligence import ArtificialIntelligence
from chillow.model.action import Action
from chillow.model.game import Game
class RandomAI(ArtificialIntelligence):
"""AI that randomly chooses an action ignoring the state of the game.
Attributes:
player: The player associated with this AI.
"""
def create_next_action(self, game: Game, return_value: Value):
"""See base class."""
self._turn_ctr += 1
action = Action.get_random_action()
return_value.value = action.get_index()
def get_information(self) -> str:
"""See base class."""
return ""
class RandomWaitingAI(RandomAI):
"""AI that randomly chooses an action ignoring the state of the game and waits five seconds.
Attributes:
player: The player associated with this AI.
"""
def create_next_action(self, game: Game, return_value: Value):
time.sleep(5)
super().create_next_action(game, return_value)
|
import unittest
import comandante as cli
import comandante.errors as error
from comandante.inner.test import suppress_output, capture_output
class SubCommand(cli.Handler):
@cli.command()
def command(self, **specified_options):
return specified_options
class App(cli.Handler):
def __init__(self):
super(App, self).__init__()
self.declare_option('global', 'g', bool, False)
self.declare_command('subcommand', SubCommand())
@cli.option('local', 'l', int, 0)
@cli.command()
def command(self, **specified_options):
return specified_options
class BasicTests(unittest.TestCase):
"""Integration tests for basic Handler and Command properties."""
def test_print_help_by_default(self):
app = App()
with capture_output() as (out, err):
app.invoke([])
full_doc = app.full_doc().rstrip()
invoke_output = out.getvalue().rstrip()
self.assertEqual(invoke_output, full_doc)
def test_unknown_command(self):
with suppress_output():
self.assertRaises(error.UnknownCommand, App().invoke, ['unknown'])
def test_help_subcommand(self):
app = App()
with capture_output() as (out, err):
app.invoke('help subcommand command'.split())
output = out.getvalue().rstrip()
full_name = 'app subcommand command'.split()
full_doc = app.subcommand.command.full_doc(full_name).rstrip()
self.assertEqual(output, full_doc)
def test_help_on_unknown_subcommand(self):
app = App()
try:
with capture_output() as (out, err):
app.invoke('subcommand unknown'.split())
except error.UnknownCommand as e:
subcommand_doc = app.subcommand.full_doc(full_name='app subcommand'.split())
self.assertIn(str(e), out.getvalue())
self.assertIn(subcommand_doc, out.getvalue())
def test_help_of_command_subcommand(self):
app = App()
with capture_output() as (out, err):
app.invoke('help subcommand command unknown'.split())
full_name = 'app subcommand command'.split()
command_doc = app.subcommand.command.full_doc(full_name)
self.assertIn(command_doc, out.getvalue())
def test_forward_global_options(self):
app = App()
self.assertEqual(set(app.declared_options.keys()), {'global'})
self.assertEqual(set(app.command.declared_options.keys()), {'global', 'local'})
self.assertEqual(set(app.subcommand.declared_options.keys()), {'global'})
self.assertEqual(set(app.subcommand.command.declared_options.keys()), {'global'})
def test_subcommand_use_global_option(self):
result = App().invoke('subcommand command -g'.split())
self.assertEqual(result, {'global': True})
def test_command_use_global_option(self):
result = App().invoke('command -g -l 42'.split())
self.assertEqual(result, {'global': True, 'local': 42})
def test_handler_has_empty_doc(self):
handler = cli.Handler()
self.assertEqual(handler.brief, '')
self.assertEqual(handler.descr, '')
def test_duplicate_command(self):
self.assertRaises(RuntimeError, App().declare_command, 'subcommand', SubCommand())
def test_duplicate_global_option_long_name(self):
self.assertRaises(RuntimeError, App().declare_option, 'global', 'unique', int, 0)
def test_duplicate_global_option_short_name(self):
self.assertRaises(RuntimeError, App().declare_option, 'unique', 'g', int, 0)
def test_duplicate_local_option_long_name(self):
self.assertRaises(RuntimeError, App().command.declare_option, 'local', 'unique', int, 0)
def test_duplicate_local_option_short_name(self):
self.assertRaises(RuntimeError, App().command.declare_option, 'unique', 'l', int, 0)
|
import json
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, cast
import yaml
from marshmallow import Schema
from lisa import schema
from lisa.util import LisaException, constants
from lisa.util.logger import get_logger
from lisa.util.module import import_module
from lisa.variable import (
VariableEntry,
load_from_env,
load_from_pairs,
load_from_runbook,
replace_variables,
)
_schema: Optional[Schema] = None
_get_init_logger = partial(get_logger, "init", "runbook")
def _load_extend_paths(current_path: Path, data: Any) -> List[str]:
result: List[str] = []
if constants.EXTENSION in data:
raw_extension = data[constants.EXTENSION]
if isinstance(raw_extension, Dict):
# for compatibility, convert extension to list of strings
raw_extension = schema.Extension.schema().load( # type:ignore
data[constants.EXTENSION]
)
raw_extension = raw_extension.paths
result = [
str(current_path.joinpath(path).absolute().resolve())
for path in raw_extension
]
return result
def _merge_variables(
merged_path: Path, merged_data: Dict[str, Any], existing_data: Dict[str, Any]
) -> List[Any]:
variables: List[schema.Variable] = []
if constants.VARIABLE in merged_data and merged_data[constants.VARIABLE]:
variables = [
schema.Variable.schema().load(variable) # type: ignore
for variable in merged_data[constants.VARIABLE]
]
# resolve to absolute path
for variable in variables:
if variable.file:
variable.file = str((merged_path / variable.file).resolve())
if constants.VARIABLE in existing_data and existing_data[constants.VARIABLE]:
existing_variables: List[schema.Variable] = [
schema.Variable.schema().load(variable) # type: ignore
for variable in existing_data[constants.VARIABLE]
]
# remove duplicate items
for existing_variable in existing_variables:
for variable in variables:
if (variable.name and variable.name == existing_variable.name) or (
variable.file and variable.file == existing_variable.file
):
variables.remove(variable)
break
variables.extend(existing_variables)
# serialize back for loading together
return [variable.to_dict() for variable in variables] # type: ignore
def _merge_extensions(
merged_path: Path, merged_data: Dict[str, Any], existing_data: Dict[str, Any]
) -> List[Any]:
old_extensions = _load_extend_paths(merged_path, merged_data)
extensions = _load_extend_paths(constants.RUNBOOK_PATH, existing_data)
# remove duplicate paths
for old_extension in old_extensions:
for extension in extensions:
if extension == old_extension:
extensions.remove(extension)
break
if extensions or old_extensions:
# don't change the order, old ones should be imported earlier.
old_extensions.extend(extensions)
extensions = old_extensions
return extensions
def _merge_data(
merged_path: Path, merged_data: Dict[str, Any], existing_data: Dict[str, Any]
) -> Dict[str, Any]:
"""
    Merge parent data into existing data. The existing data has higher priority.
"""
result = merged_data.copy()
# merge others
result.update(existing_data)
    # merge variables; the most recent definitions are placed last so they take effect
variables = _merge_variables(merged_path, merged_data, existing_data)
if variables:
result[constants.VARIABLE] = variables
# merge extensions
extensions = _merge_extensions(merged_path, merged_data, existing_data)
if extensions:
result[constants.EXTENSION] = extensions
return result
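# Illustrative note (not from the original source): with
#   merged_data   = {"name": "parent"}   (data inherited from a parent runbook)
#   existing_data = {"name": "child"}    (data from the current runbook)
# the merged result keeps "name": "child", because existing data wins, while
# variables and extensions from both runbooks are combined rather than replaced.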
def _load_data(path: Path, used_path: Set[str]) -> Any:
"""
    Load the runbook, but do not validate it. Validation happens after extensions are imported.
To support partial runbooks, it loads recursively.
"""
with open(path, "r") as file:
data = yaml.safe_load(file)
if constants.PARENT in data and data[constants.PARENT]:
parents_config = data[constants.PARENT]
log = _get_init_logger()
indent = len(used_path) * 4 * " "
# log.debug(f"{indent}found {len(parents_config)} parent runbooks")
merged_data: Dict[str, Any] = {}
for parent_config in parents_config:
parent: schema.Parent = schema.Parent.schema().load( # type: ignore
parent_config
)
if parent.strategy:
raise NotImplementedError("Parent doesn't implement Strategy")
raw_path = parent.path
if raw_path in used_path:
raise LisaException(
f"cycle reference parent runbook detected: {raw_path}"
)
# use relative path to parent runbook
parent_path = (path.parent / raw_path).resolve().absolute()
log.debug(f"{indent}loading parent: {raw_path}")
            # clone the set so that the same path can be reused in a different branch of the tree.
new_used_path = used_path.copy()
new_used_path.add(raw_path)
parent_data = _load_data(parent_path, used_path=new_used_path)
merged_data = _merge_data(parent_path.parent, parent_data, merged_data)
data = _merge_data(path.parent, merged_data, data)
return data
def _import_extends(extends_runbook: List[str]) -> None:
for index, path in enumerate(extends_runbook):
import_module(Path(path), index=index)
def validate_data(data: Any) -> schema.Runbook:
global _schema
if not _schema:
_schema = schema.Runbook.schema() # type: ignore
assert _schema
runbook = cast(schema.Runbook, _schema.load(data))
log = _get_init_logger()
log.debug(f"merged runbook: {runbook.to_dict()}") # type: ignore
return runbook
def load_runbook(path: Path, user_variables: Optional[List[str]]) -> schema.Runbook:
"""
Loads a runbook given a user-supplied path and set of variables.
"""
constants.RUNBOOK_PATH = path.parent
constants.RUNBOOK_FILE = path
    # load lisa's own modules
base_module_path = Path(__file__).parent.parent
import_module(base_module_path, logDetails=False)
# merge all parameters
log = _get_init_logger()
log.info(f"loading runbook: {path}")
data = _load_data(path.absolute(), set())
# load extended modules
if constants.EXTENSION in data:
_import_extends(_load_extend_paths(constants.RUNBOOK_PATH, data))
# load arg variables
variables: Dict[str, VariableEntry] = dict()
variables.update(load_from_runbook(data))
variables.update(load_from_env())
variables.update(load_from_pairs(user_variables))
# replace variables:
try:
data = replace_variables(data, variables)
constants.RUNBOOK = json.dumps(data, indent=2)
except Exception as identifier:
# log current runbook for troubleshooting.
log.info(f"current runbook: {data}")
raise identifier
    # log unused variables; it's helpful to see which variables were not used.
log = _get_init_logger()
unused_keys = [key for key, value in variables.items() if not value.is_used]
if unused_keys:
log.debug(f"variables {unused_keys} are not used.")
# validate runbook, after extensions loaded
runbook = validate_data(data)
log = _get_init_logger()
constants.RUN_NAME = f"lisa_{runbook.name}_{constants.RUN_ID}"
log.info(f"run name is '{constants.RUN_NAME}'")
return runbook
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Histogram visualization
"""
import warnings
from qiskit.visualization.counts_visualization import plot_histogram
def iplot_histogram(data, figsize=None, number_to_keep=None,
sort='asc', legend=None):
""" Create a histogram representation.
Graphical representation of the input array using a vertical bars
style graph.
Args:
data (list or dict): This is either a list of dicts or a single
dict containing the values to represent (ex. {'001' : 130})
figsize (tuple): Figure size in pixels.
        number_to_keep (int): The number of terms to plot; the rest are
            grouped into a single bar called 'other values'.
sort (string): Could be 'asc' or 'desc'
legend (list): A list of strings to use for labels of the data.
The number of entries must match the length of data.
Raises:
VisualizationError: When legend is provided and the length doesn't
match the input data.
Returns:
Figure: A matplotlib figure for the visualization
Example:
.. code-block::
from qiskit import QuantumCircuit, BasicAer, execute
from qiskit.visualization import iplot_histogram
%matplotlib inline
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
backend = BasicAer.get_backend('qasm_simulator')
job = execute(qc, backend)
iplot_histogram(job.result().get_counts())
"""
warnings.warn(
"The iplot_histogram function is deprecated and will be "
"removed in a future release. The hosted code this depended on no "
"longer exists so this is falling back to use the matplotlib "
"visualizations. qiskit.visualization.plot_histogram should be "
"used directly moving forward.", DeprecationWarning, stacklevel=2)
fig = plot_histogram(data, figsize=figsize, number_to_keep=number_to_keep,
sort=sort, legend=legend)
return fig
|
from flask import Blueprint, make_response, render_template, request, session, abort, send_from_directory
from jinja2.exceptions import TemplateNotFound
from .service_page import render_template_wo_statistics
# Initialize the Game module
game_bp = Blueprint('Game', __name__, template_folder='../games', static_folder='../games', url_prefix='/games')
@game_bp.route('/<game>/<path:path>')
def static_route(game, path):
"""
    Sends static files from the appropriate directory.
    Input:
        game - uuid - game identifier
        path - str - path to the file. It may be nested, e.g. "assets/script.js"
"""
response = make_response(game_bp.send_static_file(f'{game}/{path}'))
if path.split('.')[-1] == 'gz':
response.headers['Content-Encoding'] = 'gzip'
return response
@game_bp.route('/<game>/')
def game_route(game):
"""
    The main method for fetching a game.
    Input:
        game - uuid - game identifier
    args:
        token - str - user token
        *use_statistics - str - flag for using the statistics-collection class
        * - optional
"""
if game.count('.'):
return send_from_directory('./', game)
if not request.args.get('token'):
abort(401)
session['user'] = request.args.get('token')
template = None
    # If the game does not use the statistics-collection class, first render an empty template with a script for a clean
    # exit from the game. Inside that template the game is launched in an iframe, this time without the use_statistics flag
use_statistics = request.args.get('use_statistics')
if str(use_statistics).lower() == 'false':
url = f'/games/{game}/?token={request.args["token"]}'
resp = make_response(render_template_wo_statistics(url))
# resp.set_cookie('EndGame', '', expires=0, samesite=None, secure=True)
resp.headers.add('Set-Cookie', 'EndGame=false; SameSite=None; Secure=true; Path=/')
return resp
try:
template = render_template(f'{game}/index.html')
except TemplateNotFound:
abort(404)
session['current_game'] = game
resp = make_response(template)
# resp.set_cookie('EndGame', '', expires=0, samesite=None, secure=True)
resp.headers.add('Set-Cookie', 'EndGame=false; SameSite=None; Secure=true; Path=/')
return resp
|
# -*- coding: utf-8 -*-
"""Test the module can be imported."""
#import unittest
from runOTcli import main
from opentargets import OpenTargetsClient
from click.testing import CliRunner
def test_doMainCheckT():
"""Test that 'runOTcli' can be executed & passed."""
runner = CliRunner()
result = runner.invoke(main, ['-t', 'ENSG00000197386', '-d', ''])
assert result.exit_code == 0
assert 'Ave: 0.22770631865626045' in result.output
def test_doMainCheckD():
"""Test that 'runOTcli' can be executed & passed."""
runner = CliRunner()
result = runner.invoke(main, ['-t', '', '-d', 'Orphanet_399'])
assert result.exit_code == 0
assert 'Ave: 0.08807390987498148' in result.output
def test_doMainCheckN():
"""Test that 'runOTcli' can be executed & failed."""
runner = CliRunner()
result = runner.invoke(main, ['-t', '', '-d', ''])
assert result.exit_code == 1
|
import faulthandler
import logging
import os
import pathlib
import sys
import determined as det
from determined import _generic, horovod, layers, load
from determined.common import experimental
from determined.common.api import certs
def config_logging(debug: bool) -> None:
log_level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(
level=log_level, format="%(asctime)s:%(levelname)s [%(process)s]: %(message)s"
)
logging.getLogger().setLevel(log_level)
logging.debug("Starting harness.")
def main(
env: det.EnvContext, rendezvous_info: det.RendezvousInfo, hvd_config: horovod.HorovodContext
) -> int:
config_logging(env.debug)
if env.experiment_config.debug_enabled():
faulthandler.dump_traceback_later(30, repeat=True)
with det._catch_sys_exit():
try:
controller = load.prepare_controller(
env,
rendezvous_info,
hvd_config,
)
except det.InvalidHP:
# build a Training API object just to call report_early_exit().
session = experimental.Session(None, None, None, certs.cli_cert)
training = _generic.Training(
session,
int(env.det_trial_id),
env.trial_run_id,
int(env.det_experiment_id),
None,
None,
)
training.report_early_exit(_generic.EarlyExitReason.INVALID_HP)
logging.info("InvalidHP detected during Trial init, worker is exiting")
return 0
try:
controller.run()
finally:
# TODO: Refactor load_trial so that it takes a generic context as input.
# That way we can just be inside a context manager and we don't have to keep track of
# errors so closely.
controller.context.distributed.close()
return 0
if __name__ == "__main__":
if len(sys.argv) != 2:
print("worker_process_env_path must be provided as a commandline argument", file=sys.stderr)
sys.exit(1)
# Load the worker process env.
wpc = layers.WorkerProcessContext.from_file(pathlib.Path(sys.argv[1]))
# API code expects credential to be available as an environment variable
os.environ["DET_ALLOCATION_SESSION_TOKEN"] = wpc.env.det_allocation_token
    # TODO: refactor websocket, data_layer, and profiling to not use the cli_cert.
master_url = (
f"http{'s' if wpc.env.use_tls else ''}://" f"{wpc.env.master_addr}:{wpc.env.master_port}"
)
certs.cli_cert = certs.default_load(master_url=master_url)
sys.exit(main(wpc.env, wpc.rendezvous_info, wpc.hvd_config))
|
import unittest
from smac.optimizer.random_configuration_chooser import ChooserNoCoolDown, \
ChooserLinearCoolDown
class TestRandomConfigurationChooser(unittest.TestCase):
def test_no_cool_down(self):
c = ChooserNoCoolDown(modulus=3.0)
self.assertFalse(c.check(1))
self.assertFalse(c.check(2))
self.assertTrue(c.check(3))
self.assertFalse(c.check(4))
self.assertFalse(c.check(5))
self.assertTrue(c.check(6))
self.assertTrue(c.check(30))
c.next_smbo_iteration()
self.assertFalse(c.check(1))
self.assertFalse(c.check(2))
self.assertTrue(c.check(3))
self.assertFalse(c.check(4))
self.assertFalse(c.check(5))
self.assertTrue(c.check(6))
self.assertTrue(c.check(30))
c = ChooserNoCoolDown(modulus=1.0)
self.assertTrue(c.check(1))
self.assertTrue(c.check(2))
c.next_smbo_iteration()
self.assertTrue(c.check(1))
self.assertTrue(c.check(2))
def test_linear_cool_down(self):
c = ChooserLinearCoolDown(2.0, 1.0, 4.0)
self.assertFalse(c.check(1))
self.assertTrue(c.check(2))
self.assertFalse(c.check(3))
self.assertTrue(c.check(4))
self.assertFalse(c.check(5))
self.assertTrue(c.check(6))
self.assertFalse(c.check(7))
self.assertTrue(c.check(8))
c.next_smbo_iteration()
self.assertFalse(c.check(1))
self.assertFalse(c.check(2))
self.assertTrue(c.check(3))
self.assertFalse(c.check(4))
self.assertFalse(c.check(5))
self.assertTrue(c.check(6))
self.assertFalse(c.check(7))
self.assertFalse(c.check(8))
c.next_smbo_iteration()
self.assertFalse(c.check(1))
self.assertFalse(c.check(2))
self.assertFalse(c.check(3))
self.assertTrue(c.check(4))
self.assertFalse(c.check(5))
self.assertFalse(c.check(6))
self.assertFalse(c.check(7))
self.assertTrue(c.check(8))
c.next_smbo_iteration()
self.assertFalse(c.check(1))
self.assertFalse(c.check(2))
self.assertFalse(c.check(3))
self.assertTrue(c.check(4))
self.assertFalse(c.check(5))
self.assertFalse(c.check(6))
self.assertFalse(c.check(7))
self.assertTrue(c.check(8))
if __name__ == "__main__":
unittest.main()
|
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def index():
"""
    Get the request headers.
Keys: {
Host:
Connection:
Cache-Control:
Requests:
User-Agent:
Accept:
Cookie:
}
:return:
"""
user_agent = request.headers.get("User-Agent")
return '<h1>Your agent is <p>{}</p></h1>'.format(user_agent)
@app.route('/header')
def header():
user_agent = request.headers
return '<h1>Your agent is <p>{}</p></h1>'.format(user_agent)
@app.route('/get')
def get():
"""
    Passing a value via GET:
    e.g. visiting
    http://127.0.0.1:5000/get?arg=1234
    returns the value of arg
:return:
"""
value = request.args.get('arg')
return '<h1>Your get param is <p>{}</p></h1>'.format(value)
if __name__ == '__main__':
app.run()
|
import json
from django.contrib import messages
from django.contrib.admin import site
from django.contrib.auth import logout
from django.http import JsonResponse
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.views import View
from server.announcement.interface import Announcement
from server.challenge.interface import Challenge
from server.submission.interface import Submission
from server.terms.interface import Terms, TermsRequired
from server.trigger.interface import Trigger
from server.user.interface import User, LoginRequired, ProfileRequired
from server.context import Context
from server.exceptions import Error, NotFound, WrongFormat
from frontend.models import Account, Qa, UstcEligible
# noinspection PyMethodMayBeStatic
class HubView(View):
def get(self, request):
if request.user.is_authenticated:
if Account.objects.filter(provider='ustc', user=request.user).exists():
try:
request.user.ustceligible
except UstcEligible.DoesNotExist:
return redirect('ustcprofile')
context = Context.from_request(request)
try:
challenges = Challenge.get_enabled(context)
challenges = {'value': [obj.json for obj in challenges]}
except ProfileRequired as e:
messages.info(request, e.message)
return redirect('profile')
except TermsRequired as e:
messages.info(request, e.message)
return redirect('terms')
except Error as e:
challenges = {'error': e.json}
try:
announcement = Announcement.get_latest(context).json
except NotFound:
announcement = None
if request.user.is_authenticated:
user = User.get(context, request.user.pk)
if user.group == 'other':
ranking = Submission.get_user_ranking(context, request.user.pk)
else:
ranking = Submission.get_user_ranking(context, request.user.pk,
group=user.group)
else:
ranking = {}
return TemplateResponse(request, 'hub.html', {
'announcement': announcement,
'challenges': challenges,
'progress': Submission.get_user_progress(context, request.user.pk),
'ranking': ranking,
'clear_count': Submission.get_clear_count(context),
})
def post(self, request):
try:
matches = Submission.submit(Context.from_request(request),
request.user.pk,
request.POST['challenge'],
request.POST['flag'].strip())
if matches:
                messages.success(request, 'Correct answer')
            else:
                messages.error(request, 'Wrong answer')
except Error as e:
messages.info(request, e.message)
return redirect('hub')
# noinspection PyMethodMayBeStatic
class AnnouncementsView(View):
def get(self, request):
context = Context.from_request(request)
return TemplateResponse(request, 'announcements.html', {
'announcements': [i.json for i in Announcement.get_all(context)],
})
# noinspection PyMethodMayBeStatic
class BoardView(View):
def get(self, request):
context = Context.from_request(request)
try:
return TemplateResponse(request, 'board.html', {
'filters': {
'category': request.GET.get('category', None),
'group': request.GET.get('group', None),
},
'users': {u.pk: u.json for u in User.get_all(context)},
'challenges': [c.json for c in Challenge.get_enabled(context)],
})
except Error as e:
messages.error(request, e.message)
return redirect('hub')
# noinspection PyMethodMayBeStatic
class FirstView(View):
def get(self, request):
context = Context.from_request(request)
try:
return TemplateResponse(request, 'first.html', {
'filters': {
'group': request.GET.get('group', None),
},
'users': {u.pk: u.json for u in User.get_all(context)},
'challenges': [c.json for c in Challenge.get_enabled(context)],
})
except Error as e:
messages.error(request, e.message)
return redirect('hub')
# noinspection PyMethodMayBeStatic
class LogoutView(View):
def post(self, request):
logout(request)
return redirect('hub')
# noinspection PyMethodMayBeStatic
class ProfileView(View):
def get(self, request):
try:
User.test_authenticated(Context.from_request(request))
except LoginRequired:
return redirect('hub')
return TemplateResponse(request, 'profile.html', {
'profile_required': User.profile_required,
})
def post(self, request):
try:
kwargs = json.loads(request.body)
kwargs = {k: kwargs[k] for k in kwargs if k in User.update_fields}
user = User.get(Context.from_request(request), request.user.pk)
user.update(**kwargs)
return JsonResponse({})
except WrongFormat as e:
return JsonResponse({'error': e.json}, status=400)
# noinspection PyMethodMayBeStatic
class TermsView(View):
def get(self, request):
terms = Terms.get_enabled(Context.from_request(request))
return TemplateResponse(request, 'terms.html', {'terms': terms})
def post(self, request):
context = Context.from_request(request)
try:
User.test_authenticated(context)
except LoginRequired:
return redirect('hub')
for pk in request.POST.getlist('terms'):
Terms.get(context, pk=pk).agree(request.user.pk)
return redirect('hub')
# noinspection PyMethodMayBeStatic
class UserView(View):
def get(self, request):
return TemplateResponse(request, 'user.html')
class ErrorView(View):
def get(self, request):
if request.user.is_superuser:
raise ValueError('ErrorView')
return redirect('hub')
class UstcProfileView(View):
def check(self):
request = self.request
if request.user.is_authenticated:
if Account.objects.filter(provider='ustc', user=request.user).exists():
try:
request.user.ustceligible
return False
except UstcEligible.DoesNotExist:
return True
return False
def get(self, request):
if not self.check():
return redirect('hub')
return TemplateResponse(request, 'ustcprofile.html')
def post(self, request):
if not self.check():
return redirect('hub')
eligible = request.POST['eligible']
if eligible == 'yes':
UstcEligible.objects.create(user=request.user, eligible=True)
user = User.get(Context.from_request(request).copy(elevated=True), request.user.pk)
user.update(group='ustc')
elif eligible == 'no':
UstcEligible.objects.create(user=request.user, eligible=False)
return redirect('hub')
class QaView(View):
def get(self, request):
return TemplateResponse(request, 'qa.html', {'qa': Qa.get()})
# noinspection PyMethodMayBeStatic
class BaseAdminView(View):
title = None
template = None
def get_extra_context(self, user):
return {}
def get(self, request):
try:
return TemplateResponse(request, self.template, {
**site.each_context(request),
**self.get_extra_context(Context.from_request(request)),
'title': self.title,
})
except Error as e:
messages.error(request, e.message)
return redirect('hub')
def post(self, request):
body = json.loads(request.body)
method = body['method']
args = body.get('args', {})
try:
method = getattr(self, f'do_{method}')
value = method(Context.from_request(request), **args)
return JsonResponse({'value': value})
except Error as e:
return JsonResponse({'error': e.json}, status=400)
# noinspection PyMethodMayBeStatic
class AnnouncementAdminView(BaseAdminView):
title = 'Announcement'
template = 'admin_announcement.html'
def do_get_all(self, context):
return [obj.json for obj in Announcement.get_all(context)]
def do_save(self, context, pk, **kwargs):
kwargs = {k: kwargs[k] for k in kwargs
if k in Announcement.update_fields}
if pk is None:
return Announcement.create(context, **kwargs).json
else:
return Announcement.get(context, pk).update(**kwargs)
# noinspection PyUnusedLocal
def do_delete(self, context, pk, **kwargs):
return Announcement.get(context, pk).delete()
# noinspection PyMethodMayBeStatic
class ChallengeAdminView(BaseAdminView):
title = 'Challenge'
template = 'admin_challenge.html'
def do_get_all(self, context):
return [obj.json for obj in Challenge.get_all(context)]
def do_save(self, context, pk, **kwargs):
kwargs = {k: kwargs[k] for k in kwargs if k in Challenge.update_fields}
if pk is None:
return Challenge.create(context, **kwargs).json
else:
return Challenge.get(context, pk).update(**kwargs)
# noinspection PyUnusedLocal
def do_delete(self, context, pk, **kwargs):
return Challenge.get(context, pk).delete()
# noinspection PyMethodMayBeStatic
class SubmissionAdminView(BaseAdminView):
title = 'Submission'
template = 'admin_submission.html'
def get_extra_context(self, context):
return {
'users': {u.pk: u.display_name for u in User.get_all(context)},
'challenges': [c.json for c in Challenge.get_all(context)],
}
def do_get_log(self, context, **kwargs):
return Submission.get_log(context, **kwargs)
def do_get_violations(self, context, **kwargs):
return Submission.get_violations(context, **kwargs)
def do_get_user_progress(self, context, **kwargs):
return Submission.get_user_progress(context, **kwargs)
def do_get_user_history(self, context, **kwargs):
return Submission.get_user_history(context, **kwargs)
def do_get_clear_count(self, context, **kwargs):
return Submission.get_clear_count(context, **kwargs)
def do_get_first(self, context, **kwargs):
return Submission.get_first(context, **kwargs)
def do_get_board(self, context, **kwargs):
return Submission.get_board(context, **kwargs)
# noinspection PyMethodMayBeStatic
class TermsAdminView(BaseAdminView):
title = 'Terms'
template = 'admin_terms.html'
def do_get_all(self, context):
return [obj.json for obj in Terms.get_all(context)]
def do_save(self, context, pk, **kwargs):
kwargs = {k: kwargs[k] for k in kwargs if k in Terms.update_fields}
if pk is None:
return Terms.create(context, **kwargs).json
else:
return Terms.get(context, pk).update(**kwargs)
# noinspection PyUnusedLocal
def do_delete(self, context, pk, **kwargs):
return Terms.get(context, pk).delete()
# noinspection PyMethodMayBeStatic
class TriggerAdminView(BaseAdminView):
title = 'Trigger'
template = 'admin_trigger.html'
def do_get_all(self, context):
return [obj.json for obj in Trigger.get_all(context)]
def do_save(self, context, pk, **kwargs):
kwargs = {k: kwargs[k] for k in kwargs if k in Trigger.update_fields}
if pk is None:
return Trigger.create(context, **kwargs).json
else:
return Trigger.get(context, pk).update(**kwargs)
# noinspection PyUnusedLocal
def do_delete(self, context, pk, **kwargs):
return Trigger.get(context, pk).delete()
# noinspection PyMethodMayBeStatic
class UserAdminView(BaseAdminView):
title = 'User'
template = 'admin_user.html'
def do_get(self, context, pk):
return User.get(context, pk).json
def do_get_all(self, context):
return [obj.json for obj in User.get_all(context)]
def do_save(self, context, pk, **kwargs):
kwargs = {k: kwargs[k] for k in kwargs if k in User.update_fields}
return User.get(context, pk).update(**kwargs)
|
import json
import os.path
import fnmatch
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Update file storage with new jsons"
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def add_arguments(self, parser):
parser.add_argument("repo_dir", default=settings.NACP_DECLARATIONS_PATH)
parser.add_argument("input_dir")
def replace_file(self, existing_json, updated_json):
with open(updated_json, "r") as in_file:
with open(existing_json, "w") as out_file:
json.dump(
json.load(in_file),
out_file,
indent=4,
sort_keys=True,
ensure_ascii=False,
)
def get_submission_date(self, existing_json):
with open(existing_json, "r") as in_file:
try:
doc = json.load(in_file)
return doc.get("created_date")
except json.decoder.JSONDecodeError as e:
self.stderr.write("API brainfart in file {}".format(existing_json))
return None
def handle(self, *args, **options):
self.stdout.write(
"Gathering JSON documents from {}".format(options["repo_dir"])
)
existing_jsons = {}
for root, _, filenames in os.walk(options["repo_dir"]):
for filename in fnmatch.filter(filenames, "*.json"):
base_fname = os.path.basename(filename)
guid = base_fname[-36:]
existing_jsons[guid] = os.path.join(root, filename)
updated_jsons = {}
for root, _, filenames in os.walk(options["input_dir"]):
for filename in fnmatch.filter(filenames, "*.json"):
base_fname = os.path.basename(filename)
guid = base_fname[-36:]
updated_jsons[guid] = os.path.join(root, filename)
self.stdout.write(
"Gathered {} JSON documents in repo, {} JSON documents in incoming dir".format(
len(existing_jsons), len(updated_jsons)
)
)
for x in updated_jsons:
if x in existing_jsons:
self.stdout.write(
"Replacing {} with {}".format(existing_jsons[x], updated_jsons[x])
)
self.replace_file(existing_jsons[x], updated_jsons[x])
else:
self.stderr.write(
"Cannot find {} file in repository".format(updated_jsons[x])
)
for x in existing_jsons:
if x not in updated_jsons:
self.stderr.write(
"Cannot find {} file, submitted on {} in updated jsons".format(
existing_jsons[x], self.get_submission_date(existing_jsons[x])
)
)
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n , m ) :
return ( m * n * ( n + 1 ) * ( m + 1 ) ) // 4
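# Illustrative note (added, not part of the original benchmark file): f_gold(n, m)
# appears to be the closed form C(n+1, 2) * C(m+1, 2), e.g. the number of
# axis-aligned rectangles in an n x m grid; f_gold(3, 5) == 6 * 15 == 90.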
#TOFILL
if __name__ == '__main__':
param = [
(86,70,),
(33,65,),
(3,5,),
(91,12,),
(33,27,),
(13,75,),
(75,36,),
(58,64,),
(50,51,),
(4,44,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
"""Fuzzy Wuzzy Token Sort Similarity Measure"""
from __future__ import division
from py_stringmatching import utils
from py_stringmatching.similarity_measure.sequence_similarity_measure import \
SequenceSimilarityMeasure
from py_stringmatching.similarity_measure.partial_ratio import PartialRatio
class PartialTokenSort(SequenceSimilarityMeasure):
"""Computes Fuzzy Wuzzy partial token sort similarity measure.
    Fuzzy Wuzzy partial token sort ratio raw score is a measure of the strings' similarity as an
    int in the range [0, 100]. For two strings X and Y, the score is obtained by
    splitting the two strings into tokens and then sorting the tokens. The score is
    then the fuzzy wuzzy partial ratio raw score of the transformed strings. The Fuzzy Wuzzy
    partial token sort sim score is a float in the range [0, 1] and is obtained by dividing
    the raw score by 100.
Note:
        In the case where either of the strings X or Y is empty, we define the
        Fuzzy Wuzzy partial token sort similarity score to be 0.
"""
def __init__(self):
pass
def _process_string_and_sort(self, s, force_ascii, full_process=True):
"""Returns a string with tokens sorted. Processes the string if
full_process flag is enabled. If force_ascii flag is enabled then
processing removes non ascii characters from the string."""
# pull tokens
ts = utils.process_string(s, force_ascii=force_ascii) if full_process else s
tokens = ts.split()
# sort tokens and join
sorted_string = u" ".join(sorted(tokens))
return sorted_string.strip()
def get_raw_score(self, string1, string2, force_ascii=True, full_process=True):
"""
Computes the Fuzzy Wuzzy partial token sort measure raw score between two strings.
This score is in the range [0,100].
Args:
string1,string2 (str), : Input strings
force_ascii (boolean) : Flag to remove non-ascii characters or not
full_process (boolean) : Flag to process the string or not. Processing includes
removing non alphanumeric characters, converting string to lower case and
removing leading and trailing whitespaces.
Returns:
Partial Token Sort measure raw score (int) is returned
Raises:
TypeError: If the inputs are not strings
Examples:
>>> s = PartialTokenSort()
>>> s.get_raw_score('great is scala', 'java is great')
81
>>> s.get_raw_score('Sue', 'sue')
100
>>> s.get_raw_score('C++ and Java', 'Java and Python')
64
References:
* https://pypi.python.org/pypi/fuzzywuzzy
"""
# input validations
utils.sim_check_for_none(string1, string2)
utils.sim_check_for_string_inputs(string1, string2)
# if one of the strings is empty return 0
if utils.sim_check_for_empty(string1, string2):
return 0
sorted1 = self._process_string_and_sort(string1, force_ascii, full_process=full_process)
sorted2 = self._process_string_and_sort(string2, force_ascii, full_process=full_process)
partialRatio = PartialRatio()
return partialRatio.get_raw_score(sorted1, sorted2)
def get_sim_score(self, string1, string2, force_ascii=True, full_process=True):
"""
Computes the Fuzzy Wuzzy partial token sort similarity score between two strings.
This score is in the range [0,1].
Args:
string1,string2 (str), : Input strings
force_ascii (boolean) : Flag to remove non-ascii characters or not
full_process (boolean) : Flag to process the string or not. Processing includes
removing non alphanumeric characters, converting string to lower case and
removing leading and trailing whitespaces.
Returns:
Partial Token Sort measure similarity score (float) is returned
Raises:
TypeError: If the inputs are not strings
Examples:
>>> s = PartialTokenSort()
>>> s.get_sim_score('great is scala', 'java is great')
0.81
>>> s.get_sim_score('Sue', 'sue')
1.0
>>> s.get_sim_score('C++ and Java', 'Java and Python')
0.64
References:
* https://pypi.python.org/pypi/fuzzywuzzy
"""
raw_score = 1.0 * self.get_raw_score(string1, string2, force_ascii, full_process)
sim_score = raw_score / 100
return sim_score
|
# Question model
class Question(object):
# Empty List to hold the questions asked
questions_list = []
# constructor
def __init__(self, title, description, owner):
self.title = title
self.description = description
self.owner = owner
    # instance method to save a question
    def save_question(self, question):
        Question.questions_list.append(question)
        return dict(
            title=question.title,
            description=question.description,
            owner=question.owner
        )
    # instance method to update question details
def update_question(self, question):
# get the question data from database
# make the necessary edits
# save the data back to db
pass
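# A minimal usage sketch (not part of the original model):
if __name__ == '__main__':
    question = Question('What is Flask?', 'A Python micro web framework.', 'asha')
    print(question.save_question(question))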
|
"""This module extends the C++ 2003 iterator header with common extensions."""
import fbuild.config.cxx as cxx
import fbuild.config.cxx.cxx03 as cxx03
# ------------------------------------------------------------------------------
class iterator(cxx03.iterator):
bidirectional_iterator = cxx.template_test(test_types=['int', 'int'])
forward_iterator = cxx.template_test(test_types=['int', 'int'])
|
"""Copyright (c) 2021 Alistair Child
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import re
class Sample:
# Properties to be selected from file.
def __init__(self, energy, intensity):
self.energy = energy
self.intensity = intensity
def parse_data_file(data):
"Parse EXAFS Samples from file. If the data file/column format changes accomodate that in here."
pattern_seen = False
# Sample record lines look like:
#   1086.33    280
# The data lines begin after the header line that starts with the acquisition
# time stamp, so matching that marker is adequate:
DATA_RE = "#Acquired="
DATA_CRE = re.compile(DATA_RE)  # Pre-compile the pattern for greater speed
# List of column indices corresponding to required properties:
# (energy, intensity)
SELECT_COLS = (0, 1)
samples = []
data_file = data.split("\n")
# Only process valid looking data lines
for line in data_file:
# if the data-start marker has already been seen
if pattern_seen:
# Split values on tab separation
vals = line.split("\t")
# Select values from the columns that we want
# and convert from text to float at same time.
props = [float(vals[col]) for col in SELECT_COLS]
# Construct XMCD Sample object from selected values.
# This assumes COLS are listed in order required by
# the Sample constructor.
samples.append(Sample(*props))
# evaluate after since data starts after pattern
if DATA_CRE.match(line):
pattern_seen = True
return samples
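# Illustrative usage sketch (not part of the original module): feeding
# parse_data_file a minimal synthetic string in the assumed format, i.e. a
# "#Acquired=" header line followed by tab-separated energy/intensity pairs.
def _demo_parse_data_file():
    data = "\n".join([
        "#Acquired=2021-01-01 12:00:00",   # hypothetical header line
        "1086.33\t280",
        "1086.83\t295",
    ])
    samples = parse_data_file(data)
    for s in samples:
        print(s.energy, s.intensity)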
|
from __future__ import absolute_import
import contextlib
import errno
import io
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import os
import posixpath
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from collections import deque
from pip._vendor import pkg_resources
# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is
# why we ignore the type on this import.
from pip._vendor.retrying import retry # type: ignore
from pip._vendor.six import PY2
from pip._vendor.six.moves import input
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote
from pip._internal.exceptions import CommandError, InstallationError
from pip._internal.locations import (
running_under_virtualenv, site_packages, user_site, virtualenv_no_global,
write_delete_marker_file,
)
from pip._internal.utils.compat import (
WINDOWS, console_to_str, expanduser, stdlib_pkgs,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
if MYPY_CHECK_RUNNING:
from typing import (
Optional, Tuple, Iterable, List, Match, Union, Any, Mapping, Text,
AnyStr, Container
)
from pip._vendor.pkg_resources import Distribution
from pip._internal.models.link import Link
from pip._internal.utils.ui import SpinnerInterface
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path',
'renames', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION',
'get_installed_version', 'remove_auth_from_url']
logger = std_logging.getLogger(__name__)
WHEEL_EXTENSION = '.whl'
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION)
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def ensure_dir(path):
# type: (AnyStr) -> None
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
# type: () -> str
try:
prog = os.path.basename(sys.argv[0])
if prog in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
else:
return prog
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
# type: (str, bool) -> None
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
# type: (Union[str, Text]) -> str
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
# type: (str, str) -> str
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
# type: (str, Iterable[str]) -> str
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
# type: (str, Iterable[str]) -> str
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
def format_size(bytes):
# type: (float) -> str
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
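# Illustrative sketch (not part of pip): the size thresholds above in action.
def _demo_format_size():
    for n in (512, 2048, 20000, 3 * 1000 * 1000):
        print(n, '->', format_size(n))
    # 512 -> 512bytes, 2048 -> 2.0kB, 20000 -> 20kB, 3000000 -> 3.0MB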
def is_installable_dir(path):
# type: (str) -> bool
"""Is path is a directory containing setup.py or pyproject.toml?
"""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
pyproject_toml = os.path.join(path, 'pyproject.toml')
if os.path.isfile(pyproject_toml):
return True
return False
def is_svn_page(html):
# type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
# type: (str) -> Text
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def split_leading_dir(path):
# type: (Union[str, Text]) -> List[Union[str, Text]]
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return [path, '']
def has_leading_dir(paths):
# type: (Iterable[Union[str, Text]]) -> bool
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
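# Illustrative sketch (not part of pip): how split_leading_dir/has_leading_dir
# behave on typical archive member names. The paths below are hypothetical.
def _demo_leading_dir():
    print(split_leading_dir('pkg-1.0/setup.py'))                 # ['pkg-1.0', 'setup.py']
    print(has_leading_dir(['pkg-1.0/setup.py', 'pkg-1.0/README']))  # True: one top-level dir
    print(has_leading_dir(['setup.py', 'pkg-1.0/README']))          # False: mixed top level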
def normalize_path(path, resolve_symlinks=True):
# type: (str, bool) -> str
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
# type: (str) -> Tuple[str, str]
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
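# Illustrative sketch (not part of pip): unlike os.path.splitext, the ".tar"
# part stays attached to the extension.
def _demo_splitext():
    print(splitext('dist/pkg-1.0.tar.gz'))   # ('dist/pkg-1.0', '.tar.gz')
    print(splitext('pkg-1.0.zip'))            # ('pkg-1.0', '.zip')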
def renames(old, new):
# type: (str, str) -> None
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
# type: (str) -> bool
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is installed in
sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is an editable install.
"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
# type: (bool, Container[str], bool, bool, bool) -> List[Distribution]
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
If ``include_editables`` is False, don't report editables.
If ``editables_only`` is True , only report editables.
If ``user_only`` is True , only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
# because of pkg_resources vendoring, mypy cannot find stub in typeshed
return [d for d in pkg_resources.working_set # type: ignore
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
def egg_link_path(dist):
# type: (Distribution) -> Optional[str]
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
return None
def dist_location(dist):
# type: (Distribution) -> str
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def unzip_file(filename, location, flatten=True):
# type: (str, str, bool) -> None
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
# Don't use read() to avoid allocating an arbitrarily large
# chunk of memory for the file's content
fp = zip.open(name)
try:
with open(fn, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
# type: (str, str) -> None
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
leading = has_leading_dir([
member.name for member in tar.getmembers()
])
for member in tar.getmembers():
fn = member.name
if leading:
# https://github.com/python/mypy/issues/1174
fn = split_leading_dir(fn)[1] # type: ignore
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
# https://github.com/python/typeshed/issues/2673
tar._extract_member(member, path) # type: ignore
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
# https://github.com/python/typeshed/issues/2673
tar.utime(member, path) # type: ignore
# does the member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(
filename, # type: str
location, # type: str
content_type, # type: Optional[str]
link # type: Optional[Link]
):
# type: (...) -> None
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(
TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip._internal.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
def call_subprocess(
cmd, # type: List[str]
show_stdout=False, # type: bool
cwd=None, # type: Optional[str]
on_returncode='raise', # type: str
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
command_desc=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
unset_environ=None, # type: Optional[Iterable[str]]
spinner=None # type: Optional[SpinnerInterface]
):
# type: (...) -> Optional[Text]
"""
Args:
extra_ok_returncodes: an iterable of integer return codes that are
acceptable, in addition to 0. Defaults to None, which means [].
unset_environ: an iterable of environment variable names to unset
prior to calling subprocess.Popen().
"""
if extra_ok_returncodes is None:
extra_ok_returncodes = []
if unset_environ is None:
unset_environ = []
# This function's handling of subprocess output is confusing and I
# previously broke it terribly, so as penance I will write a long comment
# explaining things.
#
# The obvious thing that affects output is the show_stdout=
# kwarg. show_stdout=True means, let the subprocess write directly to our
# stdout. It is almost never used
# inside pip (and should not be used in new code without a very good
# reason); as of 2016-02-22 it is only used in a few places inside the VCS
# wrapper code. Ideally we should get rid of it entirely, because it
# creates a lot of complexity here for a rarely used feature.
#
# Most places in pip use show_stdout=False. What this means is:
# - We connect the child stdout to a pipe, which we read.
# - By default, we hide the output but show a spinner -- unless the
# subprocess exits with an error, in which case we show the output.
# - If the --verbose option was passed (= loglevel is DEBUG), then we show
# the output unconditionally. (But in this case we don't want to show
# the output a second time if it turns out that there was an error.)
#
# stderr is always merged with stdout (even if show_stdout=True).
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
logger.debug("Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
for name in unset_environ:
env.pop(name, None)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
stdout=stdout, cwd=cwd, env=env,
)
proc.stdin.close()
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
if stdout is not None:
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if logger.getEffectiveLevel() <= std_logging.DEBUG:
# Show the line immediately
logger.debug(line)
else:
# Update the spinner
if spinner is not None:
spinner.spin()
try:
proc.wait()
finally:
if proc.stdout:
proc.stdout.close()
if spinner is not None:
if proc.returncode:
spinner.finish("error")
else:
spinner.finish("done")
if proc.returncode and proc.returncode not in extra_ok_returncodes:
if on_returncode == 'raise':
if (logger.getEffectiveLevel() > std_logging.DEBUG and
not show_stdout):
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
elif on_returncode == 'warn':
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
elif on_returncode == 'ignore':
pass
else:
raise ValueError('Invalid value: on_returncode=%s' %
repr(on_returncode))
if not show_stdout:
return ''.join(all_output)
return None
def read_text_file(filename):
# type: (str) -> str
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
# https://github.com/python/mypy/issues/1174
data = data.decode(enc) # type: ignore
except UnicodeDecodeError:
continue
break
assert not isinstance(data, bytes) # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
def captured_stderr():
"""
See captured_stdout().
"""
return captured_output('stderr')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
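# Illustrative sketch (not part of pip): the decorated value is computed once,
# stored on the instance, and later accesses bypass the descriptor entirely.
class _DemoCircle(object):
    def __init__(self, radius):
        self.radius = radius
    @cached_property
    def area(self):
        print('computing area')
        return 3.14159 * self.radius ** 2
# c = _DemoCircle(2)
# c.area        # prints 'computing area', returns 12.56636
# c.area        # cached: no recomputation
# del c.area    # deleting the attribute resets the property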
def get_installed_version(dist_name, working_set=None):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
if working_set is None:
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not; if we did,
# we want to return its version.
return dist.version if dist else None
def consume(iterator):
"""Consume an iterable at C speed."""
deque(iterator, maxlen=0)
# Simulates an enum
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = {value: key for key, value in enums.items()}
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
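# Illustrative sketch (not part of pip): enum() builds a class with named
# integer attributes plus a reverse mapping from value back to name.
def _demo_enum():
    Color = enum('RED', 'GREEN', BLUE=10)
    print(Color.RED, Color.GREEN, Color.BLUE)   # 0 1 10
    print(Color.reverse_mapping[10])            # 'BLUE'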
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):
"""
Return the URL for a VCS requirement.
Args:
repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
project_name: the (unescaped) project name.
"""
egg_project_name = pkg_resources.to_filename(project_name)
req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name)
if subdir:
req += '&subdirectory={}'.format(subdir)
return req
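# Illustrative sketch (not part of pip): assembling a VCS requirement URL from
# its parts. The repository URL and revision below are only examples.
def _demo_make_vcs_requirement_url():
    url = make_vcs_requirement_url(
        'git+https://github.com/pypa/pip.git', 'master', 'pip', subdir='src')
    print(url)  # git+https://github.com/pypa/pip.git@master#egg=pip&subdirectory=src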
def split_auth_from_netloc(netloc):
"""
Parse out and remove the auth information from a netloc.
Returns: (netloc, (username, password)).
"""
if '@' not in netloc:
return netloc, (None, None)
# Split from the right because that's how urllib.parse.urlsplit()
# behaves if more than one @ is present (which can be checked using
# the password attribute of urlsplit()'s return value).
auth, netloc = netloc.rsplit('@', 1)
if ':' in auth:
# Split from the left because that's how urllib.parse.urlsplit()
# behaves if more than one : is present (which again can be checked
# using the password attribute of the return value)
user_pass = auth.split(':', 1)
else:
user_pass = auth, None
user_pass = tuple(
None if x is None else urllib_unquote(x) for x in user_pass
)
return netloc, user_pass
def redact_netloc(netloc):
# type: (str) -> str
"""
Replace the password in a netloc with "****", if it exists.
For example, "user:pass@example.com" returns "user:****@example.com".
"""
netloc, (user, password) = split_auth_from_netloc(netloc)
if user is None:
return netloc
password = '' if password is None else ':****'
return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user),
password=password,
netloc=netloc)
def _transform_url(url, transform_netloc):
purl = urllib_parse.urlsplit(url)
netloc = transform_netloc(purl.netloc)
# stripped url
url_pieces = (
purl.scheme, netloc, purl.path, purl.query, purl.fragment
)
surl = urllib_parse.urlunsplit(url_pieces)
return surl
def _get_netloc(netloc):
return split_auth_from_netloc(netloc)[0]
def remove_auth_from_url(url):
# type: (str) -> str
# Return a copy of url with 'username:password@' removed.
# username/pass params are passed to subversion through flags
# and are not recognized in the url.
return _transform_url(url, _get_netloc)
def redact_password_from_url(url):
# type: (str) -> str
"""Replace the password in a given url with ****."""
return _transform_url(url, redact_netloc)
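# Illustrative sketch (not part of pip): stripping versus redacting credentials
# embedded in a URL's netloc. The URL below is hypothetical.
def _demo_auth_handling():
    url = 'https://user:secret@example.com/simple/'
    print(remove_auth_from_url(url))       # https://example.com/simple/
    print(redact_password_from_url(url))   # https://user:****@example.com/simple/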
def protect_pip_from_modification_on_windows(modifying_pip):
"""Protection of pip.exe from modification on Windows
On Windows, any operation modifying pip should be run as:
python -m pip ...
"""
pip_names = [
"pip.exe",
"pip{}.exe".format(sys.version_info[0]),
"pip{}.{}.exe".format(*sys.version_info[:2])
]
# See https://github.com/pypa/pip/issues/1299 for more discussion
should_show_use_python_msg = (
modifying_pip and
WINDOWS and
os.path.basename(sys.argv[0]) in pip_names
)
if should_show_use_python_msg:
new_command = [
sys.executable, "-m", "pip"
] + sys.argv[1:]
raise CommandError(
'To modify pip, please run the following command:\n{}'
.format(" ".join(new_command))
)
|
from argparse import Action, ArgumentError, ArgumentTypeError
from importlib import import_module
import os
import re
import shlex
import sys
from snakeoil.cli import arghparse, tool
from . import const
from .alias import Aliases
from .base import get_service_cls
from .config import Config
from .exceptions import BiteError
from .objects import TimeInterval, IntRange
from .utils import block_edit, confirm
class ArgType(object):
def __init__(self, service):
self.service = service
def __call__(self, data, stdin=False):
if stdin:
return self.parse_stdin(data)
elif not sys.stdin.isatty() and data == '-':
return data
return self.parse(data)
@staticmethod
def parse(s):
"""Parse string value into expected argument type."""
return s
def parse_stdin(self, data):
"""Parse standard input into expected argument type."""
return data
class StringList(ArgType):
@staticmethod
def parse(s):
return [item for item in s.split(',') if item != ""]
class IDs(ArgType):
@staticmethod
def parse(s):
try:
i = int(s)
# negative IDs are invalid
if i < 0:
raise ValueError
except ValueError:
raise ArgumentTypeError(f'invalid ID value: {s!r}')
return i
def parse_stdin(self, data):
return [self.parse(x) for x in data]
class IntList(ArgType):
@staticmethod
def parse(s):
l = []
for item in s.split(','):
try:
l.append(int(item))
except ValueError:
raise ArgumentTypeError(f'invalid integer value: {item!r}')
return l
class IDList(ArgType):
@staticmethod
def parse(s):
l = []
for item in s.split(','):
l.append(IDs.parse(item))
return l
class ID_Maps(ArgType):
@staticmethod
def parse(s):
id_str, _sep, map_str = s.partition(':')
id = IDs.parse(id_str)
mapped_ids = [IDs.parse(x) for x in map_str.split(',')]
return id, tuple(mapped_ids)
def parse_stdin(self, data):
return [self.parse(x) for x in data]
class ID_Str_Maps(ArgType):
@staticmethod
def parse(s):
id_str, _sep, map_str = s.partition(':')
id = IDs.parse(id_str)
mapped_ids = map_str.split(',') if map_str else []
return id, tuple(mapped_ids)
def parse_stdin(self, data):
return [self.parse(x) for x in data]
class Comment(ArgType):
@staticmethod
def parse(s):
data = ''
while True:
if s == '__BITE_EDITOR__':
data = block_edit('Enter a comment').strip()
elif os.path.exists(s):
if confirm(prompt=f'Use file for comment: {s!r}?', default=True):
try:
with open(s) as f:
data = f.read().strip()
if confirm(prompt='Edit comment?'):
data = block_edit('Edit comment', comment_from=data).strip()
except IOError as e:
raise BiteError(f'unable to read file: {s!r}: {e}')
else:
data = s
if data or confirm('Empty comment, submit anyway?'):
break
return data
def parse_stdin(self, data):
if not data:
raise ArgumentTypeError('no comment data provided on stdin')
return '\n'.join(data)
class TimeIntervalArg(ArgType):
@staticmethod
def parse(s):
try:
return TimeInterval(s)
except ValueError as e:
raise ArgumentTypeError(e)
class IntRangeArg(ArgType):
@staticmethod
def parse(s):
try:
return IntRange(s)
except ValueError as e:
raise ArgumentTypeError(e)
class parse_file(Action):
def __call__(self, parser, namespace, values, option_string=None):
lines = (shlex.split(line.strip()) for line in values)
setattr(namespace, self.dest, lines)
class ParseStdin(Action):
def __init__(self, type_func=None, append=True, *args, **kwargs):
self.type_func = type_func if type_func is not None else lambda x, stdin: x
self.append = append
super().__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
stdin_opt = (
isinstance(values, (str, list, tuple)) and
len(values) == 1 and
values[0] == '-'
)
if stdin_opt and not sys.stdin.isatty():
if option_string is None:
option = (self.dest, self.dest)
else:
option = (self.dest, option_string)
try:
stdin = getattr(namespace, 'stdin')
parser.error(f'argument {option[1]}: data from standard input '
f'already being used for argument {stdin[1]}')
except AttributeError:
# store option for stdin check above
setattr(namespace, 'stdin', option)
# read args from standard input for specified option
values = [s for s in (x.strip() for x in sys.stdin.readlines()) if s]
# get type conversion func
if not callable(self.type_func):
try:
self.type_func = parser._registries['type'][self.type_func]
except KeyError:
raise ArgumentTypeError(f'unknown type: {self.type_func!r}')
# convert values to expected types
try:
values = self.type_func(values, stdin=True)
except ArgumentTypeError as e:
raise ArgumentError(self, e)
# make sure values were piped via stdin for required args
if not values and self.required:
raise ArgumentError(self, 'missing required values piped via stdin')
# append multiple args by default for array-based options
previous = getattr(namespace, self.dest, None)
if self.append and isinstance(previous, list):
values = previous + values
setattr(namespace, self.dest, values)
class override_attr(Action):
"""Override or set the value of a module's attribute."""
def __init__(self, target, attr, *args, **kwargs):
self.target = target
self.attr = attr
super().__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(self.target, str):
try:
target = import_module(self.target)
except ImportError:
raise ArgumentTypeError(f"couldn't import module: {self.target!r}")
else:
target = self.target
setattr(target, self.attr, values)
class parse_append(Action):
def __call__(self, parser, namespace, values, option_string=None):
if not isinstance(values, list):
values = [values]
current = getattr(namespace, self.dest)
if current is None:
setattr(namespace, self.dest, values)
else:
current.extend(values)
class ArgumentParser(arghparse.ArgumentParser):
@staticmethod
def _substitute_args(args, initial_args):
for input_list in initial_args.input:
line = []
try:
for s in args:
if re.match(r'^@[0-9]+$', s):
line.append(input_list[int(s[1:])])
elif re.match(r'^@@$', s):
line.extend(input_list)
else:
line.append(s)
yield line
except IndexError:
raise RuntimeError(f'nonexistent replacement {s!r}, only {len(input_list)} values exist')
def parse_args(self, args=None, namespace=None):
# pull config and service settings from args if they exist
initial_args, unparsed_args = self.parse_known_optionals(args, namespace)
config_file = initial_args.pop('config_file')
# load alias files
aliases = Aliases()
# check if unparsed args match any global aliases
if unparsed_args:
alias_unparsed_args = aliases.substitute(unparsed_args)
# re-parse optionals to catch any added by aliases
if unparsed_args != alias_unparsed_args:
initial_args, unparsed_args = self.parse_known_optionals(alias_unparsed_args, initial_args)
# load config files
config = Config(
path=config_file, connection=initial_args.connection,
base=initial_args.base, service=initial_args.service)
initial_args.connection = config.connection
# pop base and service settings from the config and add them to parsed args
# if not already specified on the command line
for attr in ('base', 'service'):
if getattr(initial_args, attr, None) is None:
value = config.get(config.connection, attr, fallback=None)
setattr(initial_args, attr, value)
config.remove_option(config.connection, attr)
if initial_args.base is None or initial_args.service is None:
self.error('both arguments -b/--base and -s/--service are required '
'or must be specified in the config file for a connection')
elif not re.match(r'^http(s)?://.+', initial_args.base):
self.error(f'invalid base: {initial_args.base!r}')
service_name = initial_args.pop('service')
if service_name not in const.SERVICES:
self.error(f"invalid service: {service_name!r} "
f"(available services: {', '.join(const.SERVICES)}")
service_opts = get_service_cls(
service_name, const.SERVICE_OPTS, fallbacks=(True,))(
parser=self, service_name=service_name)
# add service config options to args namespace
service_opts.add_config_opts(args=initial_args, config_opts=config.opts)
# initialize requested service
service = get_service_cls(service_name, const.SERVICES)(**vars(initial_args))
try:
# add service specific main opts to the argparser
service_opts.add_main_opts(service=service)
# re-parse for any top level service-specific options that were added
initial_args, unparsed_args = self.parse_known_optionals(unparsed_args, initial_args)
except ArgumentError as e:
# skip multiple main_opts() run issues during doc generation
if 'conflicting option string' not in str(e):
raise
except NotImplementedError:
# no main opts to add
pass
# check if unparsed args match any aliases
if unparsed_args:
alias_unparsed_args = aliases.substitute(
unparsed_args, config=config, config_opts=config.opts,
connection=initial_args.connection, service_name=service_name,
debug=initial_args.debug)
# re-parse optionals to catch any added by aliases
if unparsed_args != alias_unparsed_args:
initial_args, unparsed_args = self.parse_known_optionals(
alias_unparsed_args, initial_args)
# add selected subcommand options
try:
subcmd = unparsed_args.pop(0)
subcmd = service_opts.add_subcmd_opts(service=service, subcmd=subcmd)
except IndexError:
subcmd = None
# no more args exist or help requested, run main parser to show related output
if subcmd is None:
return super().parse_args()
self.set_defaults(connection=initial_args.connection)
if initial_args.input is not None:
fcn_args = self._substitute_args(unparsed_args, initial_args)
else:
fcn_args = subcmd.parser.parse_args(unparsed_args)
# if an arg was piped in, remove stdin attr from fcn args and reopen stdin
stdin = fcn_args.pop('stdin', None)
if stdin is not None:
sys.stdin = open('/dev/tty')
fcn_args = subcmd.finalize_args(vars(fcn_args))
# fix called function name for nested subcommands
if 'prog' in fcn_args:
fcn_args['fcn'] = fcn_args['prog'].split(' ', 1)[1].replace(' ', '_')
# client settings that override unset service level args
for attr in ('verbosity', 'debug'):
if not getattr(service, attr):
setattr(service, attr, fcn_args.get(attr))
# set args namespace items for the client
initial_args.service = service
initial_args.fcn_args = fcn_args
return initial_args
class Tool(tool.Tool):
"""Handle bite-specific commandline utility functionality."""
def handle_exec_exception(self, e):
"""Handle bite-specific errors."""
if isinstance(e, BiteError):
if self.parser.debug:
raise e
elif self.parser.verbosity >= 0:
msg = e.message if self.parser.verbosity else str(e)
self.parser.error(msg)
return 1
else:
# exception is unhandled here, fallback to generic handling
super(Tool, self).handle_exec_exception(e)
|
n=int(input('enter value of n:'))
for i in range(n):
print(((n-i-1)*" " + "* "*(i+1)))
|
class HashTable(object):
"""implements a simple HastTable"""
def __init__(self, size=1024):
self._list_size = size
self._bucket_list = [[] for _ in range(size)]
def hash(self, key):
if not isinstance(key, str):
raise TypeError("key must be a string")
sum_ord = 0
for letter in key:
sum_ord += ord(letter)
return sum_ord % self._list_size
def set(self, key, value):
hash = self.hash(key)
for item in self._bucket_list[hash]:
if item[0] == key:
item[1] = value
return
self._bucket_list[hash].append([key, value])
def get(self, key):
hash = self.hash(key)
for item in self._bucket_list[hash]:
if item[0] == key:
return item[1]
raise KeyError("key not found in hash table")
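# Illustrative usage sketch for the hash table above.
if __name__ == "__main__":
    table = HashTable(size=16)
    table.set("apple", 1)
    table.set("banana", 2)
    table.set("apple", 3)          # overwrites the existing value for the key
    print(table.get("apple"))      # 3
    print(table.get("banana"))     # 2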
|
# Copyright (C) 2015-2017 XLAB, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import environ as env
import novaclient.client as nvclient
from keystoneauth1 import session
from keystoneauth1.identity import v2
IMAGE = "openfoam.cases"
FLAVOR = "of.small"
# Authenticate using ENV variables
auth = v2.Password(
auth_url=env['OS_AUTH_URL'],
username=env['OS_USERNAME'],
password=env['OS_PASSWORD'],
tenant_id=env['OS_TENANT_ID'])
# Open auth session
sess = session.Session(auth=auth)
nova = nvclient.Client("2", session=sess)
for nic in nova.networks.list():
print(nic.id + " " + nic.label)
|
#compdef imageMounter.py
local arguments
arguments=(
'--version[show program version number and exit]'
'(- * :)'{-h,--help}'[show this help message and exit]'
{-s,--single}'[single partition in image]'
{-i,--info}'[just display the information]'
{-e,--e01}'[use ewfmount to mount E01 Evidence Files]'
{-b,--bitlocker}'[mount Bitlocker Drive with Recovery Key]'
{-k,--key}'[Bitlocker Recovery Key]'
'*:filename:_files'
)
_arguments -s $arguments
|
import sys
from cx_Freeze import setup, Executable
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup(
name="XcalxS",
version="0.1.0",
description="RPN Calculator",
options={
"build_exe": {
"include_msvcr": False,
"optimize": 2,
"packages": [],
"includes": ['atexit', 'solver_tab', 'solver_lex'],
"excludes": ["email", 'PySide.QtNetwork'],
"bin_path_includes": [],
"bin_path_excludes": [],
"bin_includes": [],
"bin_excludes": ['imageformats/*.dll'],
"include_files": [],
},
},
executables=[
Executable("xcalcs.py", base=base),
Executable("console.py", base=base)
],
)
|
import requests
import time
from hexbytes import HexBytes
from typing import Any, Dict, NamedTuple, Optional
def trace_transaction(tx_hash: str, disable_memory=True,
disable_storage=True, disable_stack=True):
return {"id": 1,
"method": "debug_traceTransaction",
"params": [tx_hash,
{"disableMemory": disable_memory,
"disableStorage": disable_storage,
"disableStack": disable_stack}
]
}
def trace_block_by_number(block_number: int, disable_memory=True,
disable_storage=True, disable_stack=True):
return {"id": 1,
"method": "debug_traceBlockByNumber",
"params": ['0x%x' % block_number,
{"disableMemory": disable_memory,
"disableStorage": disable_storage,
"disableStack": disable_stack}
]
}
class DecodedCallTrace(NamedTuple):
op: str
gas: int
address: str
value: int
args_offset: int
args_length: int
ret_offset: int
ret_length: int
def decode_call_trace(trace: Dict[str, Any], next_trace: Dict[str, Any]) -> DecodedCallTrace:
"""
Takes a trace and decodes it. It needs the next trace for the return value.
Takes a trace and decodes it. It needs next trace for return value
Structure for CALL and CALLCODE:
gas | addr | value | argsOffset | argsLength | retOffset | retLength
"""
# TODO Check stack is present
gas, address, value, args_offset, args_length, ret_offset, ret_length = reversed(trace['stack'][-7:])
gas = int(gas, 16)
# TODO checksum encoded address
address = HexBytes(address.lstrip('0')).hex()
value = int(value, 16)
args_offset = int(args_offset, 16)
args_length = int(args_length, 16)
ret_offset = int(ret_offset, 16)
ret_length = int(ret_length, 16)
return DecodedCallTrace(trace['op'], gas, address, value, args_offset, args_length,
ret_offset, ret_length)
def decode_delegate_call_trace(trace: Dict[str, Any], next_trace: Dict[str, Any]) -> DecodedCallTrace:
"""
Takes a trace and decodes it. It needs the next trace for the return value.
Structure for DELEGATECALL and STATICCALL:
gas | addr | argsOffset | argsLength | retOffset | retLength
"""
# TODO Check stack is present
gas, address, args_offset, args_length, ret_offset, ret_length = reversed(trace['stack'][-6:])
gas = int(gas, 16)
# TODO checksum encoded address
address = HexBytes(address.lstrip('0')).hex()
value = 0 # No value in DELEGATECALL
args_offset = int(args_offset, 16)
args_length = int(args_length, 16)
ret_offset = int(ret_offset, 16)
ret_length = int(ret_length, 16)
return DecodedCallTrace(trace['op'], gas, address, value, args_offset, args_length,
ret_offset, ret_length)
def decode_trace(trace, next_trace) -> Optional[DecodedCallTrace]:
if trace['op'] in ('CALL', 'CALLCODE'):
return decode_call_trace(trace, next_trace)
elif trace['op'] in ('DELEGATECALL', 'STATICCALL'):
return decode_delegate_call_trace(trace, next_trace)
return None
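# Illustrative sketch (synthetic data, not a real geth structLog): decoding a
# fabricated CALL trace entry to show the stack layout documented above. All
# stack values below are made up.
def _demo_decode_trace():
    fake_call = {
        'op': 'CALL',
        'stack': [
            '40',                                     # retLength
            '20',                                     # retOffset
            '44',                                     # argsLength
            '0',                                      # argsOffset
            'de0b6b3a7640000',                        # value (1 ether in wei)
            '0' * 24 + 'deadbeef' + 'cafebabe' * 4,   # 32-byte word holding the address
            '5208',                                   # gas (top of stack)
        ],
    }
    print(decode_trace(fake_call, next_trace=None))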
def decode_tx():
node_url = 'http://localhost:8545'
tx_hash = '0x0142c3f42220d839af4f1dbb7b9ab9482669ab8714c785fdd418d954077f9816'
post = trace_transaction(tx_hash, disable_stack=False)
response = requests.post(node_url, json=post)
gas = response.json()['result']['gas']
failed = response.json()['result']['failed']
return_value = response.json()['result']['returnValue']
struct_logs = response.json()['result']['structLogs']
for i in range(len(struct_logs) - 1):
decoded_call_trace = decode_trace(struct_logs[i], struct_logs[i + 1])
if decoded_call_trace:
print(decoded_call_trace)
def decode_block(block):
for tx in block:
struct_logs = tx['result']['structLogs']
for i in range(len(struct_logs) - 1):
decoded_call_trace = decode_trace(struct_logs[i], struct_logs[i + 1])
if decoded_call_trace:
print(decoded_call_trace)
def decode_blocks():
node_url = 'http://localhost:8545'
first_block = 7235792
for block_number in range(first_block, 7241099):
print('Block\t%d' % block_number)
post = trace_block_by_number(block_number, disable_stack=False)
response = requests.post(node_url, json=post)
# Decode json
start = time.time()
print('Start', start)
response_json = response.json()
end = time.time()
print('Elapsed', end - start)
decode_block(response_json['result'])
# decode_tx()
decode_blocks()
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
k=4 #number of clusters
n_samples=1500
random_state=17
X,y=make_blobs(n_samples=n_samples,centers=k,n_features=2,random_state=random_state)
color='black'
size=1
plt.figure()
plt.scatter(X[:,0],X[:,1],c=color,s=size)
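# Sketch continuing the script above: actually fit KMeans with the chosen k and
# colour the points by their assigned cluster (assumes the variables defined above).
kmeans = KMeans(n_clusters=k, random_state=random_state)
labels = kmeans.fit_predict(X)
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=labels, s=size)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            c='red', marker='x')
plt.show()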
|
# Original Code
# https://github.com/huggingface/transformers/blob/master/src/transformers/configuration_utils.py
# See https://github.com/graykode/matorage/blob/0.1.0/NOTICE
# modified by TaeHwan Jung(@graykode)
# Copyright 2020-present Tae Hwan Jung
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
class Serialize(object):
def __str__(self):
return self.__repr__()
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.to_json_string())
def to_dict(self):
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
return copy.deepcopy(self.__dict__)
def to_json_file(self, json_file_path):
"""
Save this instance to a json file.
Args:
json_file_path (:obj:`string`):
Path to the JSON file in which this configuration instance's parameters will be saved.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
def to_json_string(self):
"""
Serializes this instance to a JSON string.
Returns:
:obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.
"""
return json.dumps(self.to_dict(), indent=4, sort_keys=True) + "\n"
@classmethod
def from_json_file(cls, json_file):
"""
Constructs a `Config` from the path to a json file of parameters.
Args:
json_file (:obj:`string`):
Path to the JSON file containing the parameters.
Returns:
:class:`DataConfig ModelConfig`: An instance of a configuration object
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
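# Illustrative sketch (not part of the original module): a minimal config class
# using Serialize to round-trip its attributes through JSON. The attribute names
# are hypothetical.
class _DemoConfig(Serialize):
    def __init__(self, batch_size=32, shuffle=True):
        self.batch_size = batch_size
        self.shuffle = shuffle
# cfg = _DemoConfig(batch_size=64)
# cfg.to_json_file('demo_config.json')
# restored = _DemoConfig.from_json_file('demo_config.json')
# assert restored.to_dict() == cfg.to_dict()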
|
import shutil
from pathlib import Path
from metaerg.data_model import Genome, SeqRecord, SeqFeature, FeatureType
from metaerg import context
from metaerg import bioparsers
def _run_programs(genome:Genome, result_files):
fasta_file = context.spawn_file('masked', genome.id)
bioparsers.write_genome_to_fasta_files(genome, fasta_file, mask=True)
lmer_table_file = context.spawn_file('lmer-table', genome.id)
repeatscout_file_raw = context.spawn_file('repeatscout-raw', genome.id)
repeatscout_file_filtered = context.spawn_file('repeatscout-filtered', genome.id)
context.run_external(f'build_lmer_table -sequence {fasta_file} -freq {lmer_table_file}')
context.run_external(f'RepeatScout -sequence {fasta_file} -output {repeatscout_file_raw} -freq {lmer_table_file}')
with open(repeatscout_file_filtered, 'w') as output, open(repeatscout_file_raw) as input:
context.run_external('filter-stage-1.prl', stdin=input, stdout=output)
context.run_external(f'RepeatMasker -pa {context.CPUS_PER_GENOME} -lib {repeatscout_file_filtered} -dir . '
f'{fasta_file}')
repeatmasker_output_file = Path(f'{fasta_file.name}.out') # nothing we can do about that
shutil.move(repeatmasker_output_file, result_files[0])
for file in Path.cwd().glob(f'{fasta_file.name}.*'):
if file.is_dir():
shutil.rmtree(file)
else:
file.unlink()
def words2feature(words: list[str], contig: SeqRecord) -> SeqFeature:
start = int(words[5]) - 1
end = int(words[6])
strand = -1 if 'C' == words[8] else 1
seq = contig.seq[start:end]
if strand < 0:
seq = bioparsers.reverse_complement(seq)
return SeqFeature(start, end, strand, FeatureType.repeat, seq=seq, inference='repeatmasker')
def _read_results(genome:Genome, result_files) -> int:
"""(1) simple repeats, these are consecutive
(2) unspecified repeats, these occur scattered and are identified by an id in words[9]. We only
add those when they occur 10 or more times."""
repeat_count = 0
repeat_hash = dict()
with open(result_files[0]) as repeatmasker_handle:
for line in repeatmasker_handle:
words = line.split()
if len(words) < 11 or words[0] in ('SW', 'score'):
continue
try:
contig: SeqRecord = genome.contigs[words[4]]
except KeyError:
context.log(f'({genome.id}) Warning: Unknown contig id "{words[4]}"')
continue
if 'Simple_repeat' == words[10]:
repeat_count += 1
feature = words2feature(words, contig)
contig.features.append(feature)
feature.notes.add(f'repeat {words[9]}')
else:
repeat_list = repeat_hash.setdefault(words[9], list())
repeat_list.append((contig, words2feature(words, contig)))
for repeat_list in repeat_hash.values():
if len(repeat_list) >= 10:
for c, f in repeat_list:
repeat_count += 1
c.features.append(f)
f.notes.add(f' (occurs {len(repeat_list)}x)')
return repeat_count
@context.register_annotator
def run_and_read_repeatmasker():
return ({'pipeline_position': 51,
'purpose': 'repeat prediction with repeatmasker',
'programs': ('build_lmer_table', 'RepeatScout', 'filter-stage-1.prl', 'RepeatMasker'),
'result_files': ('repeatmasker',),
'run': _run_programs,
'read': _read_results})
|
from aiogram.dispatcher.filters.state import StatesGroup, State
class TodoStates(StatesGroup):
"""
For todo_handl
"""
todo = State()
reception_todo = State()
class PasswordStates(StatesGroup):
"""
For storing_passwords_handl
"""
check_personal_code = State()
successful_auth_for_pass = State()
set_name_pass = State()
class UserSettingStates(StatesGroup):
"""
For:
user_settings
day_todo_notification
weather_status_handl
changing_stikerpack_handl
start_handl
"""
settings = State()
time_todo = State()
weather_on = State()
class RecipeStates(StatesGroup):
"""
For recipes_handl.
"""
recipe_manipulation = State()
get_the_recipe = State()
recipe_ingredients = State()
and_now_the_recipe = State()
recipe_photo_reception = State()
|
from agu_api import AguApi
import json
# Export path
# out_path = 'database/'
out_path = '../../app/private/'
# Export only if flag is true
exp_abstracts = True
exp_programs = True
exp_sessions = True
years = [2017] # [2014, 2015, 2016]
# these programs do not contain abstracts
# exclude_program_ids = [92, 100, 113] # for 2016
exclude_program_ids = [307, 308, 315, 316, 335, 336] # for 2017
# in 2017 data the list of authors is inverted
# set this variable to -1 if you want to reverse the list
# to +1 if you want to keep the order
authors_order = -1
# get an instance of the API
api = AguApi()
# Export abstracts
if exp_abstracts:
data = []
for year in years:
authors = api.authors(year)
# build authors dictionary
authors_dict = {}
for author in authors:
authors_dict[author['personId']] = author
abstracts = api.abstracts(year)
export_keys = {'abstractId', 'text', 'title', 'sessionId', 'roomId'}
for abstract in abstracts:
abstract_doc = {k:v for k,v in abstract.items() if k in export_keys}
# Add authors
people = []
for person in abstract['abstractRoles'][::authors_order]:
p_id = person['authorId']
person_data = {}
person_data['firstName'] = authors_dict[p_id]['firstName']
person_data['middleName'] = authors_dict[p_id]['middleName']
person_data['lastName'] = authors_dict[p_id]['lastName']
person_data['presenter'] = person['presenter']
people.append(person_data)
abstract_doc['authors'] = people
data.append(abstract_doc)
out_file = out_path + 'abstracts-DB.json'
print("EXPORT {} ABSTRACTS TO {}".format(len(data), out_file))
with open(out_file, 'w') as f:
json.dump(data, f)
# Export programs
if exp_programs:
data = []
for year in years:
programs = api.programs(year)
export_keys = {'programId', 'title'}
for program in programs:
if program['programId'] in exclude_program_ids:
continue
program_doc = {k:v for k,v in program.items() if k in export_keys}
program_doc['year'] = year
data.append(program_doc)
out_file = out_path + 'programs-DB.json'
print("EXPORT {} PROGRAMS TO {}".format(len(data), out_file))
with open(out_file, 'w') as f:
json.dump(data, f)
# Export sessions
if exp_sessions:
data = []
for year in years:
sessions = api.sessions(year)
export_keys = {'sessionId', 'title', 'finalSessionNumber', 'programId', 'sessionRoles', 'roomId'}
for session in sessions:
data.append({k:v for k,v in session.items() if k in export_keys})
out_file = out_path + 'sessions-DB.json'
print("EXPORT {} SESSIONS TO {}".format(len(data), out_file))
with open(out_file, 'w') as f:
json.dump(data, f)
|
import pytest
from gtt import db
from gtt.models import Technique
from gtt.fixtures import database
@pytest.mark.usefixtures("database")
def test_create_read():
"""Create a technique in the database and read it back
"""
write_technique = Technique()
write_technique.name = "Hammer On"
write_technique.short_description = "Rapidly pull on and off the string"
write_technique.save()
read_technique = Technique.find(write_technique.id)
assert read_technique.id == write_technique.id
assert read_technique.name == write_technique.name
assert read_technique.short_description == write_technique.short_description
@pytest.mark.usefixtures("database")
def test_create_update_read():
"""Create and update a technique in the database and read it back
"""
write_technique = Technique()
write_technique.name = "Hammer On"
write_technique.short_description = "Rapidly pull on and off the string"
write_technique.save()
write_technique.name = "Pull off"
write_technique.save()
read_technique = Technique.find(write_technique.id)
assert read_technique.id == write_technique.id
assert read_technique.name == write_technique.name
assert read_technique.short_description == write_technique.short_description
@pytest.mark.usefixtures("database")
def test_find_all():
"""Write 3 techniques to the database, then test that they are returned
with find_all()"""
for i in range(3):
technique = Technique()
technique.name = str(i)
technique.save()
techniques = Technique.find_all()
for i in range(3):
assert techniques[i].name == str(i)
assert len(techniques) == 3
|
#
# PySNMP MIB module Chromatis-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Chromatis-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:34:31 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, Counter64, ObjectIdentity, Counter32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Unsigned32, ModuleIdentity, MibIdentifier, Integer32, Gauge32, Bits, NotificationType, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Counter64", "ObjectIdentity", "Counter32", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Unsigned32", "ModuleIdentity", "MibIdentifier", "Integer32", "Gauge32", "Bits", "NotificationType", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
chromatis = ModuleIdentity((1, 3, 6, 1, 4, 1, 3695))
chromatis.setRevisions(('1999-05-17 18:30',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: chromatis.setRevisionsDescriptions(('Compiled for the first time by Zvika',))
if mibBuilder.loadTexts: chromatis.setLastUpdated('9905170000Z')
if mibBuilder.loadTexts: chromatis.setOrganization('Chromatis Networks Inc.')
if mibBuilder.loadTexts: chromatis.setContactInfo('Chromatis Networks 21 c Yagea Kapaim , Kiryat Arye, Petach Tikva, Israel Phone: 972-3-9231030 Fax: 972-3-9231050 emil@chromatis.com')
if mibBuilder.loadTexts: chromatis.setDescription("This MIB module is the SNMP version of Chromatis Networks' Metrpolis")
chrCommon = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1))
chrProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2))
chrComHW = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 1))
chrComIf = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2))
chrComConfigVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 3))
chrComSwVersion = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 4))
chrComAccess = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 5))
chrComTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 6))
chrComActions = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 7))
chrComCompressData = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 8))
chrComAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9))
chrComPM = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10))
chrComFM = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 11))
chrComProtection = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12))
chrComNetwork = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 13))
chrComHwNe = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 1, 1))
chrComIfSonet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 1))
chrComIfAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 2))
chrComIfOptics = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 3))
chrComIfDS3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 4))
chrComIfEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 2, 5))
chrComAtmVpl = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1))
chrComAtmVcl = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 9, 2))
chrComPmOptics = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 1))
chrComPmSonet = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 2))
chrComPmDs3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 3))
chrComPmAtm = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 4))
chrComPmEth = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 10, 5))
chrComProtectionGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 1))
chrComProtectionVp = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 2))
chrComProtectionVc = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 3))
chrComProtectSinglePath = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 4))
chrComProtectEquip = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 12, 5))
chrComNetClockSync = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 1, 13, 1))
chrProductsMetropolis2000 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 1))
chrProductsMetropolis2500 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 2))
chrProductsMetropolis4000 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 3))
chrProductsMetropolis4500 = MibIdentifier((1, 3, 6, 1, 4, 1, 3695, 2, 4))
mibBuilder.exportSymbols("Chromatis-MIB", chrComFM=chrComFM, chrProductsMetropolis4500=chrProductsMetropolis4500, chrComActions=chrComActions, chrComIfSonet=chrComIfSonet, chrComHW=chrComHW, chrComPmEth=chrComPmEth, chrComIf=chrComIf, PYSNMP_MODULE_ID=chromatis, chrComProtectionGroup=chrComProtectionGroup, chrComTrap=chrComTrap, chrComCompressData=chrComCompressData, chrComNetwork=chrComNetwork, chrCommon=chrCommon, chrComConfigVersion=chrComConfigVersion, chrComNetClockSync=chrComNetClockSync, chrComProtectSinglePath=chrComProtectSinglePath, chrComIfAtm=chrComIfAtm, chrComPmOptics=chrComPmOptics, chrComProtectionVc=chrComProtectionVc, chrComAtmVpl=chrComAtmVpl, chrComPM=chrComPM, chrComAtmVcl=chrComAtmVcl, chrComIfOptics=chrComIfOptics, chrComProtectionVp=chrComProtectionVp, chrProductsMetropolis2000=chrProductsMetropolis2000, chromatis=chromatis, chrComPmSonet=chrComPmSonet, chrComSwVersion=chrComSwVersion, chrComProtectEquip=chrComProtectEquip, chrComHwNe=chrComHwNe, chrComIfEthernet=chrComIfEthernet, chrComAccess=chrComAccess, chrProductsMetropolis2500=chrProductsMetropolis2500, chrComProtection=chrComProtection, chrProducts=chrProducts, chrComIfDS3=chrComIfDS3, chrComPmAtm=chrComPmAtm, chrProductsMetropolis4000=chrProductsMetropolis4000, chrComPmDs3=chrComPmDs3, chrComAtm=chrComAtm)
|
from rest_framework.generics import ListAPIView, RetrieveAPIView, RetrieveUpdateDestroyAPIView, ListCreateAPIView
from rest_framework.filters import SearchFilter, OrderingFilter
from book.api.pagination import BookPagination
from book.api.serializers import BookSerializer, BookCreateSerializer, BookUpdateDeleteSerializer
from book.api.models import Book
from library.permissions import IsAdmin
class BookListApiView(ListAPIView):
serializer_class = BookSerializer
filter_backends = [SearchFilter, OrderingFilter]
    search_fields = ['title', 'content']
pagination_class = BookPagination
def get_queryset(self):
queryset = Book.objects.all()
return queryset
class BookDetailApiView(RetrieveAPIView):
queryset = Book.objects.all()
serializer_class = BookSerializer
lookup_field = 'slug'
class BookCreateApiView(ListCreateAPIView):
permission_classes = [IsAdmin]
queryset = Book.objects.all()
serializer_class = BookCreateSerializer
def perform_create(self, serializer):
        serializer.save(user=self.request.user)
class BookUpdateDeleteApiView(RetrieveUpdateDestroyAPIView):
permission_classes = [IsAdmin]
queryset = Book.objects.all()
serializer_class = BookUpdateDeleteSerializer
lookup_field = 'pk'
    def perform_update(self, serializer):
        serializer.save(user=self.request.user)
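# A possible URL wiring for the views above (a sketch only; the module path
# and route names are assumptions, not part of the original project):
#
# from django.urls import path
# from book.api.views import (BookListApiView, BookDetailApiView,
#                             BookCreateApiView, BookUpdateDeleteApiView)
#
# urlpatterns = [
#     path('books/', BookListApiView.as_view(), name='book-list'),
#     path('books/create/', BookCreateApiView.as_view(), name='book-create'),
#     path('books/<slug:slug>/', BookDetailApiView.as_view(), name='book-detail'),
#     path('books/<int:pk>/edit/', BookUpdateDeleteApiView.as_view(), name='book-edit'),
# ]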
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 15:36:45 2019
@author: Guilherme
"""
import numpy as np
def fact(n):
    x = 1
    i = 2
    while i <= n:
        x = x * i
        i = i + 1
    return x
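# A quick sanity check (a hypothetical demo, assuming the module is run
# directly): the loop above should agree with math.factorial for small inputs.
if __name__ == "__main__":
    import math
    for n in (0, 1, 5, 10):
        assert fact(n) == math.factorial(n)
    print(fact(10))  # 3628800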
|
# Add reference to Part_03 (assuming the code is executed from Part_04 folder)
import sys
sys.path.insert(1, '../Part_03/')
from inference import Inference as model
from image_helper import ImageHelper as imgHelper
from camera import Camera as camera
if __name__ == "__main__":
# Load and prepare model
model_file_path = '../Models/01_model.tflite'
labels_file_path = '../Models/02_labels.txt'
# Initialize model
ai_model = model(model_file_path, labels_file_path)
# Initialize camera
camera_capture = camera()
# Capture frame and perform inference
camera_frame = camera_capture.capture_frame(False)
score_threshold = 0.5
results = ai_model.detect_objects(camera_frame, score_threshold)
# Display results
imgHelper.display_image_with_detected_objects(camera_frame, results)
|
import serial
import plotly.plotly as py
import plotly.graph_objs as go
port = serial.Serial('/dev/cu.usbserial-DN04AX6V') # open the serial port (MAC/Linux)
chars = []
vals = []
try:
    while 1:
        c = port.read()
        if c == b'\n':
            val = ''.join(chars).strip()
            chars = []
            vals.append(float(val))
            print(val)
        else:
            chars.append(c.decode('utf-8'))
        if len(vals) > 100:
            trace0 = go.Scatter(x=list(range(len(vals))), y=vals)
            data = [trace0]
            py.plot(data, filename='range-data', auto_open=False)
            vals = []
finally:
    port.close()  # shut the port down even if the loop is interrupted
|
"""
Defines arguments manipulation utilities, like checking if an argument is iterable, flattening a nested arguments list, etc.
These utility functions can be used by other util modules and are imported in util's main namespace for use by other pymel modules
"""
import sys
import operator
import itertools
from collections import deque as _deque
from pymel.util.utilitytypes import ProxyUnicode
class ChangedKey(object):
def __eq__(self, other):
pass
def __init__(self, oldVal, newVal):
pass
def __ne__(self, other):
pass
def __repr__(self):
pass
__dict__ = None
__weakref__ = None
class AddedKey(object):
def __eq__(self, other):
pass
def __init__(self, newVal):
pass
def __ne__(self, other):
pass
def __repr__(self):
pass
__dict__ = None
__weakref__ = None
class RemovedKey(object):
def __eq__(self, other):
pass
def __init__(self, oldVal):
pass
def __ne__(self, other):
pass
def __repr__(self):
pass
__dict__ = None
__weakref__ = None
def expandArgs(*args, **kwargs):
"""
'Flattens' the arguments list: recursively replaces any iterable argument in *args by a tuple of its
elements that will be inserted at its place in the returned arguments.
By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.
:Keywords:
depth : int
will specify the nested depth limit after which iterables are returned as they are
type
for type='list' will only expand lists, by default type='all' expands any iterable sequence
postorder : bool
will return elements depth first, from leaves to roots
breadth : bool
will return elements breadth first, roots, then first depth level, etc.
    For a nested list representing the trees::
a____b____c
| |____d
e____f
|____g
preorder(default) :
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 )
('a', 'b', ['c', 'd'], 'e', 'f', 'g')
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] )
('a', 'b', 'c', 'd', 'e', 'f', 'g')
postorder :
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1)
('b', ['c', 'd'], 'a', 'f', 'g', 'e')
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True)
('c', 'd', 'b', 'a', 'f', 'g', 'e')
breadth :
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True)
('a', 'e', 'b', ['c', 'd'], 'f', 'g')
>>> expandArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True)
('a', 'e', 'b', 'f', 'g', 'c', 'd')
Note that with default depth (unlimited) and order (preorder), if passed a pymel Tree
result will be the equivalent of doing a preorder traversal : [k for k in iter(theTree)]
"""
pass
def clsname(x):
pass
def convertListArgs(args):
pass
def reorder(x, indexList=[], indexDict={}):
"""
Reorder a list based upon a list of positional indices and/or a dictionary of fromIndex:toIndex.
>>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']
>>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4
['one', 'four', 'zero', 'two', 'three', 'five', 'six']
>>> reorder( l, [1, None, 4] ) # None can be used as a place-holder
['one', 'zero', 'four', 'two', 'three', 'five', 'six']
>>> reorder( l, [1, 4], {5:6} ) # remapping via dictionary: move the value at index 5 to index 6
['one', 'four', 'zero', 'two', 'three', 'six', 'five']
"""
pass
def isMapping(obj):
"""
Returns True if an object is a mapping (dictionary) type, otherwise returns False.
same as `operator.isMappingType`
:rtype: bool
"""
pass
def pairIter(sequence):
"""
Returns an iterator over every 2 items of sequence.
ie, [x for x in pairIter([1,2,3,4])] == [(1,2), (3,4)]
If sequence has an odd number of items, the last item will not be returned in a pair.
"""
pass
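# A minimal sketch of the behaviour described above (a hypothetical helper,
# not the actual pymel implementation): zipping an iterator with itself
# yields non-overlapping pairs and silently drops a trailing odd item.
def _pairIterSketch(sequence):
    it = iter(sequence)
    return zip(it, it)
# list(_pairIterSketch([1, 2, 3, 4])) == [(1, 2), (3, 4)]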
def isSequence(obj):
"""
same as `operator.isSequenceType`
:rtype: bool
"""
pass
def postorderIterArgs(limit=1000, testFn='<function isIterable>', *args):
"""
iterator doing a postorder expansion of args
"""
pass
def listForNone(res):
"""
returns an empty list when the result is None
"""
pass
def breadth(iterable, testFn='<function isIterable>', limit=1000):
"""
iterator doing a breadth first expansion of args
"""
pass
def mergeCascadingDicts(from_dict, to_dict, allowDictToListMerging=False, allowNewListMembers=False):
"""
recursively update to_dict with values from from_dict.
if any entries in 'from_dict' are instances of the class RemovedKey,
then the key containing that value will be removed from to_dict
if allowDictToListMerging is True, then if to_dict contains a list,
from_dict can contain a dictionary with int keys which can be used to
sparsely update the list.
if allowNewListMembers is True, and allowDictToListMerging is also True,
then if merging an index into a list that currently isn't long enough to
contain that index, then the list will be extended to be long enough (with
None inserted in any intermediate indices)
Note: if using RemovedKey objects and allowDictToList merging, then only
indices greater than all of any indices updated / added should be removed,
because the order in which items are updated / removed is indeterminate.
"""
pass
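# A simplified sketch of the merge described above (a hypothetical helper, not
# the actual pymel implementation): values from from_dict overwrite to_dict
# recursively, and RemovedKey markers delete the corresponding key; the
# dict-to-list merging options of the real function are ignored here.
def _mergeCascadingDictsSketch(from_dict, to_dict):
    for key, value in from_dict.items():
        if isinstance(value, RemovedKey):
            to_dict.pop(key, None)
        elif isinstance(value, dict) and isinstance(to_dict.get(key), dict):
            _mergeCascadingDictsSketch(value, to_dict[key])
        else:
            to_dict[key] = value
    return to_dict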
def postorder(iterable, testFn='<function isIterable>', limit=1000):
"""
iterator doing a postorder expansion of args
"""
pass
def isScalar(obj):
"""
Returns True if an object is a number or complex type, otherwise returns False.
:rtype: bool
"""
pass
def isIterable(obj):
"""
Returns True if an object is iterable and not a string or ProxyUnicode type, otherwise returns False.
:rtype: bool
"""
pass
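# A minimal sketch of the check described above (a hypothetical helper, not
# the actual pymel implementation): strings and ProxyUnicode instances are
# excluded even though they are technically iterable.
def _isIterableSketch(obj):
    if isinstance(obj, (str, ProxyUnicode)):
        return False
    try:
        iter(obj)
    except TypeError:
        return False
    return True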
def breadthIterArgs(limit=1000, testFn='<function isIterable>', *args):
"""
iterator doing a breadth first expansion of args
"""
pass
def setCascadingDictItem(dict, keys, value):
pass
def preorder(iterable, testFn='<function isIterable>', limit=1000):
"""
iterator doing a preorder expansion of args
"""
pass
def postorderArgs(limit=1000, testFn='<function isIterable>', *args):
"""
returns a list of a postorder expansion of args
"""
pass
def preorderIterArgs(limit=1000, testFn='<function isIterable>', *args):
"""
iterator doing a preorder expansion of args
"""
pass
def getCascadingDictItem(dict, keys, default={}):
pass
def iterateArgs(*args, **kwargs):
"""
Iterates through all arguments list: recursively replaces any iterable argument in *args by a tuple of its
elements that will be inserted at its place in the returned arguments.
By default will return elements depth first, from root to leaves. Set postorder or breadth to control order.
:Keywords:
depth : int
will specify the nested depth limit after which iterables are returned as they are
type
for type='list' will only expand lists, by default type='all' expands any iterable sequence
postorder : bool
will return elements depth first, from leaves to roots
breadth : bool
will return elements breadth first, roots, then first depth level, etc.
    For a nested list representing the trees::
a____b____c
| |____d
e____f
|____g
preorder(default) :
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1 ))
('a', 'b', ['c', 'd'], 'e', 'f', 'g')
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'] ))
('a', 'b', 'c', 'd', 'e', 'f', 'g')
postorder :
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True, limit=1 ))
('b', ['c', 'd'], 'a', 'f', 'g', 'e')
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], postorder=True))
('c', 'd', 'b', 'a', 'f', 'g', 'e')
breadth :
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], limit=1, breadth=True))
('a', 'e', 'b', ['c', 'd'], 'f', 'g')
>>> tuple(k for k in iterateArgs( 'a', ['b', ['c', 'd']], 'e', ['f', 'g'], breadth=True))
('a', 'e', 'b', 'f', 'g', 'c', 'd')
Note that with default depth (-1 for unlimited) and order (preorder), if passed a pymel Tree
result will be the equivalent of using a preorder iterator : iter(theTree)
"""
pass
def compareCascadingDicts(dict1, dict2, encoding=None, useAddedKeys=False, useChangedKeys=False):
"""
compares two cascading dicts
Parameters
----------
dict1 : dict, list, or tuple
the first object to compare
dict2 : dict, list, or tuple
the second object to compare
encoding : `str` or None or False
controls how comparisons are made when one value is a str, and one is a
unicode; if None, then comparisons are simply made with == (so ascii
characters will compare equally); if the value False, then unicode and
str are ALWAYS considered different - ie, u'foo' and 'foo' would not be
considered equal; otherwise, it should be the name of a unicode
encoding, which will be applied to the unicode string before comparing
useAddedKeys : bool
if True, then similarly to how 'RemovedKey' objects are used in the
        returned differences object (see the Returns section), 'AddedKey' objects
are used for keys which exist in dict2 but not in dict1; this allows
a user to distinguish, purely by inspecting the differences dict, which
keys are brand new, versus merely changed; mergeCascadingDicts will
treat AddedKey objects exactly the same as though they were their
contents - ie, useAddedKeys should make no difference to the behavior
of mergeCascadingDicts
useChangedKeys : bool
if True, then similarly to how 'RemovedKey' objects are used in the
        returned differences object (see the Returns section), 'ChangedKey'
objects are used for keys which exist in both dict1 and dict2, but with
different values
Returns
-------
both : `set`
keys that were present in both (non-recursively)
(both, only1, and only2 should be discrete partitions of all the keys
present in both dict1 and dict2)
only1 : `set`
keys that were present in only1 (non-recursively)
only2 : `set`
keys that were present in only2 (non-recursively)
differences : `dict`
recursive sparse dict containing information that was 'different' in
dict2 - either not present in dict1, or having a different value in
dict2, or removed in dict2 (in which case an instance of 'RemovedKey'
will be set as the value in differences)
Values that are different, and both dictionaries, will themselves have
sparse entries, showing only what is different
        The return value should be such that if you merge the
        differences with dict1, you will get dict2.
"""
pass
def breadthArgs(limit=1000, testFn='<function isIterable>', *args):
"""
returns a list of a breadth first expansion of args
"""
pass
def isNumeric(obj):
"""
Returns True if an object is a number type, otherwise returns False.
:rtype: bool
"""
pass
def sequenceToSlices(intList, sort=True):
"""
convert a sequence of integers into a tuple of slice objects
"""
pass
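# A rough sketch of the idea described above (a hypothetical helper, not the
# actual pymel implementation): consecutive runs of integers are collapsed
# into slice objects, e.g. [1, 2, 3, 6, 7] -> (slice(1, 4), slice(6, 8)).
def _sequenceToSlicesSketch(intList, sort=True):
    if sort:
        intList = sorted(intList)
    slices = []
    start = prev = None
    for i in intList:
        if start is None:
            start = prev = i
        elif i == prev + 1:
            prev = i
        else:
            slices.append(slice(start, prev + 1))
            start = prev = i
    if start is not None:
        slices.append(slice(start, prev + 1))
    return tuple(slices)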
def preorderArgs(limit=1000, testFn='<function isIterable>', *args):
"""
returns a list of a preorder expansion of args
"""
pass
def izip_longest(*args, **kwds):
pass
|
import datetime
import os
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from utils.combine import merge_by_subject
from visualize.all_tasks import save_plot
import numpy as np
def add_fps_subject_level(data_subject, data_trial):
grouped = data_trial \
.groupby(['run_id'], as_index=False) \
.agg(fps=('fps', 'mean'))
data_subject = merge_by_subject(data_subject, grouped, 'fps')
plt.hist(data_subject['fps'], bins=20)
plt.rc('font', size=10)
save_plot(file_name='fps_participants.png',
path=os.path.join('results', 'plots', 'fps'))
plt.close()
return data_subject
def add_max_trials(data_subject, data_trial):
grouped = data_trial \
.groupby(['run_id'], as_index=False) \
.agg(max_trial=('trial_index', 'max'))
data_subject = merge_by_subject(data_subject, grouped, 'max_trial')
example = data_subject[['run_id', 'prolificID', 'max_trial']].head(5)
print(f"""data_subject: Added max_trial: \n """
f"""{example} \n""")
return data_subject
def add_window(data_subject, data_trial):
grouped = data_trial \
.groupby(["run_id"], as_index=False) \
.agg(window_x=('window_width', 'max'),
window_y=('window_height', 'max'))
grouped['window'] = np.sqrt(
grouped['window_x'] ** 2 + grouped['window_y'] ** 2)
data_subject = merge_by_subject(data_subject, grouped,
'window', 'window_x', 'window_y')
return data_subject
def add_glasses_binary(data_subject):
data_subject['glasses_binary'] = data_subject['sight'] \
.replace({'contactLenses': 0,
'glasses': 1,
'notCorrected': 0,
'perfectSight': 0})
n_missing = len(data_subject.loc[
pd.isna(data_subject['glasses_binary']), :])
example = pd.crosstab(
index=data_subject['glasses_binary'],
columns="count")
print(
f"""\n"""
f"""data_subject: Added glasses_binary: \n"""
f"""N Missing values: {n_missing} \n \n"""
f"""{example} \n""")
return data_subject
def add_recorded_date(data_subject, data_trial):
output = []
for subject in tqdm(
data_trial['run_id'].unique(),
desc='Add recorded date for each participant: '):
this_subject = data_trial.loc[data_trial['run_id'] == subject] \
.reset_index(drop=True)
date_time_obj = datetime.datetime.strptime(
this_subject.loc[0, 'recorded_at'], '%Y-%m-%d %H:%M:%S')
output.append([this_subject.loc[0, 'run_id'], date_time_obj.date()])
output = pd.DataFrame(output, columns=['run_id', 'recorded_date'])
if 'recorded_date' in data_subject.columns:
data_subject = data_subject.drop(columns=['recorded_date'])
data_subject = data_subject.merge(output, on='run_id', how='left')
example = data_subject.loc[
:, ['run_id', 'prolificID', 'recorded_date']
].head(5)
print(
f"""data_subject: Added recorded_date: \n"""
f"""{example} \n""")
return data_subject
def add_employment_status(data_subject):
data_subject['employment_status'] = data_subject['Employment Status'] \
.replace({
"""Not in paid work (e.g. homemaker', 'retired or disabled)""":
'not_in_paid_work',
'DATA EXPIRED': 'Other',
'Unemployed (and job seeking)': 'not_in_paid_work',
'Due to start a new job within the next month': 'Other'})
example = pd.crosstab(
index=data_subject['employment_status'],
columns="count")
print(
f"""data_subject: Added employment_status: \n"""
f"""{example} \n""")
return data_subject
def add_full_time_binary(data_subject):
data_subject['fullTime_binary'] = data_subject['Employment Status'] \
.replace([
'Other', 'Part-Time',
"Not in paid work (e.g. homemaker', 'retired or disabled)",
'Unemployed (and job seeking)', 'DATA EXPIRED',
'Due to start a new job within the next month'], 0) \
.replace(['Full-Time'], 1)
example = pd.crosstab(
index=data_subject['fullTime_binary'],
columns="count")
print(
f"""data_subject: Added full_time_binary: \n"""
f"""{example} \n""")
return data_subject
|
#!/usr/bin/env python
# Author: Dogacan S. Ozturk
# Import default Python libraries.
import os
import sys
from glob import glob
import tables
import numpy as np
import datetime as dt
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
myFmt = mdates.DateFormatter('%H:%M')
# Import custom Python libraries.
sys.path.insert(0, '../Code/')
from spacepy.pybats import gitm
import hime_helper_functions
from downsample_data import downsample_pfisr_data
from merge_potentials import merge_pfisr_with_gitm_potentials
# Enter filename for the PFISR 2D VEF estimates.
filename = '../Examples/Files/PFISR_Data/20191026.002_lp_1min-fitcal_2dVEF_001001-geo600km.h5'
# Enter desired grid resolution.
gridRes = 0.75
# Downsample the grid and calculate the potential differences.
PhiX, PhiY, Ex_downsampled, Ey_downsampled, Ex_calculated, Ey_calculated, XnewGrids, YnewGrids, experimentTimes = downsample_pfisr_data(filename, gridRes)
# Define the path to global potential values.
weimerSimulationList = glob('../Examples/Files/Simulations/3D*.bin')
# Define the merge parameter.
mergeParameter = 0.6
# Set plot potentials to True for saving plots.
plotPotentials = True
# Set save potentials to True for saving output ASCII files.
savePotentials = True
# Merge the local and global potentials together.
phiXhime, phiYhime, himeEx, himeEy, xHimeMesh, yHimeMesh, himeTimes = merge_pfisr_with_gitm_potentials(PhiX, PhiY, XnewGrids, YnewGrids, experimentTimes, weimerSimulationList, gridRes, mergeParameter, plotPotentials, savePotentials)
|
from __future__ import division
from vectors import vec3, cross
from math import cos, sin, pi
from itertools import chain
def memodict(f):
""" Memoization decorator for a function taking a single argument """
class memodict(dict):
def __missing__(self, key):
ret = self[key] = f(key)
return ret
return memodict().__getitem__
def memoize(f):
""" Memoization decorator for a function taking one or more arguments. """
class memodict(dict):
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
ret = self[key] = f(*key)
return ret
return memodict().__getitem__
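# A minimal usage sketch for the decorators above (the fib helper below is
# hypothetical and not part of the original module): memodict caches calls to
# a single-argument function, so each distinct n is computed only once.
@memodict
def _fib_example(n):
    return n if n < 2 else _fib_example(n - 1) + _fib_example(n - 2)
# _fib_example(30) -> 832040, with every intermediate result cached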
def flip(triangles):
#flip the vertex order of a list of triangles
return map(lambda x : x[::-1], triangles)
def circle(r, n, theta = 0):
#return a list of n points describing a circle of radius r
return tuple(vec3(r*cos(2*pi*i/n + theta), r*sin(2*pi*i/n + theta), 0) for i in range(n))
def naive_triangulisation(shape):
return [[shape[0], shape[n+1], shape[n]] for n in range(1, len(shape) - 1)]
def polytotri(shape):
z = shape[0].z
from p2t import CDT, Point, Triangle
polyline = [Point(p.x, p.y) for p in shape]
cdt = CDT(polyline)
triangles = cdt.triangulate()
points = [[t.a, t.b, t.c] for t in triangles]
return [[vec3(p.x, p.y, z) for p in t] for t in points]
def lerp(a,b,l):
return a*(1-l) + b*l
def lerp_shapes(a,b,l):
return [i*(1-l) + j*l for i,j in zip(a,b)]
#@memoize
def kochify(shape, iterations, max_iterations = None):
#applys one iteration of the koch snowflake to an arbitray pointlist.
    if max_iterations is None: max_iterations = iterations
if max_iterations <= 0: return tuple(shape)
if 1 <= iterations: l = 1
if 0 < iterations < 1: l = iterations
if iterations <= 0: l = 0
newshape = []
for i in range(len(shape)):
a = shape[i]
b = shape[(i+1)%len(shape)]
v = b - a
newshape.append(a)
newshape.append(lerp(a,b,1/3))
newshape.append(lerp(a,b,1/2) + l * 1/3 * vec3(v.y, -v.x, 0))
newshape.append(lerp(a,b,2/3))
return tuple(kochify(newshape, iterations - 1, max_iterations - 1))
def translate(shape, vector):
return tuple(point + vector for point in shape)
def join(a, b):
#assert(len(a) == len(b))
def half_triangles(a,b,shift): return [[ b[i], a[i], b[(i+shift) % len(a)] ] for i in range(len(a))]
#return zip(b, a, chain(b[1:], [b[0],])) + zip(a, b, chain([a[-1],],a[:-1]))
return half_triangles(a,b,+1) + half_triangles(b,a,-1)
def normal(triangle):
a,b,c = triangle
return cross(c-a, b-a)
def extrude(shape_func, samples = 50):
shape_func = memodict(shape_func)
triangles = []
shapes = [shape_func(i/samples) for i in range(samples+1)]
for i in range(samples):
triangles += join(shapes[i], shapes[i+1])
triangles += flip(polytotri(shapes[0]))# + polytotri( shapes[-1])
return triangles
def triangles_to_stl(triangles):
s = """
solid Model
{}
endsolid Model
"""
vertex = \
"""
facet normal {n[0]:.5f} {n[1]:.5f} {n[2]:.5f}
outer loop
vertex {t[0][0]:.5f} {t[0][1]:.5f} {t[0][2]:.5f}
vertex {t[1][0]:.5f} {t[1][1]:.5f} {t[1][2]:.5f}
vertex {t[2][0]:.5f} {t[2][1]:.5f} {t[2][2]:.5f}
endloop
endfacet
"""
vertices = "".join(vertex.format(n = normal(t), t = t) for t in triangles)
return s.format(vertices)
def triangles_to_binary_stl(triangles):
from struct import Struct, pack
header = b"\x00" * 80 + pack("<I", len(triangles))
out = header
for t in triangles:
#n = normal(t)
n = vec3(0,0,0)
data = list(n) + list(chain(*t)) + list([0,])
s = Struct("<" + "f" * 12 + "H")
out += s.pack(*data)
return out
def rotating_koch(i):
triangle = circle(sin(2*pi*i) + 1, 3, i*pi)
koch = kochify(triangle, 3)
return translate(koch, vec3(0,0,i*10))
def koch_to_circle(i):
samples = 3
iterations = 2
height = 3
radius = sin(0.8*pi*i)**2 + 0.2
lerp_function = i**2
spin = i* pi/6
c = circle(radius, samples * 4**iterations, spin)
koch = kochify(circle(radius, samples, spin), iterations)
l = lerp_shapes(c, koch, lerp_function)
return translate(l, [0,0,i*height])
def koch_circle_oscillations(i):
samples = 3
iterations = 2
height = 5
radius = (cos(2*pi*i)**2 + 1) / 2 if i < 0.5 else cos(2*pi*i)
lerp_function = sin(2*pi*i)**2
spin = i* pi/2
c = circle(radius, samples * 4**iterations, spin)
koch = kochify(circle(radius, samples, spin), iterations)
l = lerp_shapes(c, koch, lerp_function)
return translate(l, [0,0,i*height])
def koch_growth(i):
samples = 3
iterations = 4
height = 3
radius = sin(pi*i)**2 + 0.4*2*(0.5-i)**2 if i < 0.5 else sin(pi*i) + 0.3*2*(0.5-i)**2
lerp_function = i
spin = i* pi/3
c = circle(radius, samples * 4**iterations, spin)
koch = kochify(circle(radius, samples, spin), sin(pi*i)*iterations, iterations)
l = lerp_shapes(c, koch, lerp_function)
return translate(l, [0,0,i*height])
def koch_plant_pot(i):
samples = 8
iterations = 2
height = 100.0
g=0.575; b=0.6; c=0.46; d=0.41
radius = 75.0 * (g*sin(b*pi*i + c)**2 + d) / 2 / (g*sin(b*pi*1.0 + c)**2 + d)
n = 4.0
lerp_function = 0.6 * i**n * (1 - i)**n / 0.5**(2*n)
spin = i* pi/2
c = circle(radius, samples * 4**iterations, spin)
koch = kochify(circle(radius, samples, spin), iterations)
l = lerp_shapes(c, koch, lerp_function)
return translate(l, [0,0,i*height])
def nice_swirl(i):
#this one is really nice, it has small ribs going up the side
samples = 50
iterations = 2
height = 100.0
g=0.575; b=0.6; c=0.46; d=0.41
radius = 74.0 * (g*sin(b*pi*i + c)**2 + d) / 2 / (g*sin(b*pi*1.0 + c)**2 + d)
spin = i* pi/2
n = 4.0
lerp_function = 0.6 * i**n * (1 - i)**n / 0.5**(2*n)
c = circle(radius, samples * 4**iterations, spin)
koch = kochify(circle(radius, samples, spin), iterations*lerp_function, max_iterations = iterations)
return translate(koch, [0,0,i*height])
def smooth_bulb(i):
from math import sqrt
samples = 100
def top_radius(i):
b=0.56; d=0.71
c = pi/2.0*(1-b)
return 0.5 * (sin(b*pi*i + c)**2 + d) / (sin(b*pi*0.5 + c)**2 + d)
def bottom_radius(i):
j = 0.36
return sqrt(0.5**2 - (i - 0.5)**2/(1+j))
radius = 74.0 / 2.0 * (top_radius(i) if i > 0.5 else bottom_radius(i)) / top_radius(1.0)
spin = i* pi/2
height = 74.0 / 2.0 * 1.0 / top_radius(1.0)
c = circle(radius, samples, spin)
return translate(c, [0,0,i*height])
def make_stl():
    # Write the extruded smooth_bulb surface as a binary STL,
    # so the output file must be opened in binary mode.
    with open("smooth_bulb.stl", 'wb') as stl:
        surface = extrude(smooth_bulb, samples=50)
        stl.write(triangles_to_binary_stl(surface))
if __name__ == '__main__':
#import cProfile
#cProfile.run("make_stl()")
make_stl()
|
# Generated by Django 2.1.15 on 2021-01-10 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='student',
name='student_class',
field=models.CharField(max_length=30),
),
]
|
import pandas as pd
def getHouseTransactions():
'''
Grabs a list of stock transactions made by various members of the
United States House of Representatives.
    Data has been crowd-sourced from the House Stock Watcher website.
Link: https://housestockwatcher.com/
'''
URL = 'https://house-stock-watcher-data.s3-us-west-2.amazonaws.com/data/all_transactions.json'
houseDF = pd.read_json(URL)
houseDF.to_csv('Data/Congress/houseTransactions.csv' )
print(houseDF)
def getSenateTransactions():
'''
Grabs a list of stock transactions made by various members of the
United States Senate.
    Data has been crowd-sourced from the Senate Stock Watcher website.
    Link: https://senatestockwatcher.com/
'''
URL = 'https://senate-stock-watcher-data.s3-us-west-2.amazonaws.com/aggregate/all_transactions.json'
senateDF = pd.read_json(URL)
senateDF.to_csv('Data/Congress/senateTransactions.csv' )
print(senateDF)
def congress():
getHouseTransactions()
getSenateTransactions()
def main():
print('')
getHouseTransactions()
getSenateTransactions()
if __name__ == "__main__":
main()
|
from six.moves import urllib
from mlflow.store.file_store import FileStore
from mlflow.store.local_artifact_repo import LocalArtifactRepository
class PluginFileStore(FileStore):
"""FileStore provided through entrypoints system"""
def __init__(self, store_uri=None, artifact_uri=None):
path = urllib.parse.urlparse(store_uri).path if store_uri else None
self.is_plugin = True
super(PluginFileStore, self).__init__(path, artifact_uri)
class PluginLocalArtifactRepository(LocalArtifactRepository):
"""LocalArtifactRepository provided through plugin system"""
is_plugin = True
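# For reference, plugin stores like these are usually exposed to MLflow
# through setuptools entry points. The package/module name below is an
# assumption for illustration; only the class names come from this file:
#
# setup(
#     name="my_mlflow_plugin",
#     entry_points={
#         "mlflow.tracking_store": "file-plugin=my_mlflow_plugin:PluginFileStore",
#         "mlflow.artifact_repository": "file-plugin=my_mlflow_plugin:PluginLocalArtifactRepository",
#     },
# )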
|
import logging
import os
from pathlib import Path
from typing import List, Dict
import yaml
from piper.config.pipe import Pipe
PIPE_FILE = "pipe.yml"
logger = logging.getLogger(__name__)
def _read_pipe(location: str) -> Pipe:
# Read the pipe file
path = os.path.join(location, PIPE_FILE)
with open(path) as f:
yml = yaml.load(f, Loader=yaml.SafeLoader) or {}
return Pipe(location=location, yml=yml)
def _has_pipe(location: str) -> bool:
return os.path.isfile(os.path.join(location, PIPE_FILE))
def get_pipe_name(location: str) -> str:
return os.path.basename(os.path.normpath(location))
def read_all_pipes(location: str) -> Dict[str, Pipe]:
# Find the uppermost (parent) pipe file
uppermost = location
while _has_pipe(location):
uppermost = location
location = str(Path(location).parent)
# Read main pipe file and nested pipe files
pipes = {pipe.name: pipe for pipe in _read_pipes_from(uppermost)}
# Set the root context
for p in pipes.values():
p.root_context = uppermost
# Build dependencies
for name in pipes:
for pipe in pipes.values():
if name in pipe.dependencies:
pipe.fill_dependency_with_pipe(pipes[name])
return pipes
def _read_pipes_from(location: str) -> List[Pipe]:
# Read the local pipe
group = []
pipe = _read_pipe(location)
# Read all pipes recursively
pipes = [pipe]
for directory in os.listdir(location):
current = os.path.join(location, directory)
if _has_pipe(current):
children = _read_pipes_from(current)
group = group + children
pipes = pipes + children
# Set the nested group
pipe.group = group
return pipes
|
# -*- coding: utf-8 -*-
from sceptre.helpers import get_external_stack_name
class TestHelpers(object):
def test_get_external_stack_name(self):
result = get_external_stack_name("prj", "dev/ew1/jump-host")
assert result == "prj-dev-ew1-jump-host"
|
from django.contrib import admin
from planificacionfamiliarapp.models import PacienteInscripcion, PacienteSubSecuentePF
# Admin site filters
class PlanificacionFamiliarAdmin(admin.ModelAdmin):
    list_display = ('paciente', 'fechaIngreso')
class PacienteSubSecuentePFAdmin(admin.ModelAdmin):
    list_display = ('pacienteInscrito', 'fechaIngreso', 'tipoConsulta')
admin.site.register(PacienteInscripcion, PlanificacionFamiliarAdmin)
admin.site.register(PacienteSubSecuentePF, PacienteSubSecuentePFAdmin)
|
from django.db import models
from server.constants import CampusType, TransferType, UserType, ApplicationsStatus, ThesisLocaleType
class ActiveUserProfile(models.Model):  # model used for filling the database with initial values
"""
Profile model for each User in the app.
Keep fields nullable to create a corresponding
UserProfile model isntance automatically once a User
model instance is created.
"""
username = models.CharField(max_length=13)
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
user_choices = [
(UserType.STUDENT.value, 'Student'),
(UserType.SUPERVISOR.value, 'Supervisor'),
(UserType.HOD.value, 'Head of Department'), # all HODs are supervisors
(UserType.AD.value, 'Associate Dean'),
(UserType.PSD.value, 'PS-Division'),
]
campus_choices = [
(CampusType.GOA.value, 'Goa'),
(CampusType.HYD.value, 'Hyderabad'),
(CampusType.PILANI.value, 'Pilani'),
]
email = models.EmailField()
campus = models.IntegerField(choices=campus_choices, blank=True,
null=True)
contact = models.CharField(
blank=True, null=True, max_length=20,
help_text="Enter 10 digit contact number"
)
user_type = models.IntegerField(choices=user_choices, blank=True,
null=True)
cgpa = models.CharField(max_length=6, default="NA")
is_active_tms = models.BooleanField(default=False)
def __str__(self):
return str(self.email+str(self.user_type))
class PS2TSTransfer(models.Model):
"""
Model to store the information for
PS --> TS transfer related queries
"""
sub_type_choices = [
(TransferType.PS2TS.value, 'PS to TS (Single Degree)'),
(TransferType.PSPS2PSTS.value, 'PS-PS to PS-TS (Dual Degree)'),
(TransferType.PSPS2TSPS.value, 'PS-PS to TS-PS (Dual Degree)'),
(TransferType.TSPS2TSTS.value, 'TS-PS to TS-TS (Dual Degree)'),
]
thesis_locale_choices = [
(ThesisLocaleType.ON_CAMPUS.value, 'On Campus'),
(ThesisLocaleType.OFF_CAMPUS_INDIA.value, 'Off Campus (India)'),
(ThesisLocaleType.OFF_CAMPUS_ABROAD.value, 'Off Campus (Abroad)'),
(ThesisLocaleType.OFF_CAMPUS_INDUSTRY.value, 'Off Campus (Industry)'),
]
status_choices = [
(ApplicationsStatus.PENDING.value, 'Pending'),
(ApplicationsStatus.APPROVED.value, 'Approved'),
(ApplicationsStatus.REJECTED.value, 'Rejected'),
]
# linking the application with its applicant
applicant = models.OneToOneField(ActiveUserProfile, primary_key=True,
on_delete=models.CASCADE)
# corresponding on-campus supervisor
supervisor_email = models.EmailField()
# corresponding hod
hod_email = models.EmailField()
# sub-type for application; see choices above
sub_type = models.IntegerField(choices=sub_type_choices)
# other details required for the form
cgpa = models.DecimalField(max_digits=6, decimal_places=2)
thesis_locale = models.IntegerField(choices=thesis_locale_choices)
thesis_subject = models.CharField(max_length=150,
help_text='Broad area/Title of Thesis')
name_of_org = models.CharField(max_length=100,
help_text='Name of BITS Campus or Organization where thesis will be carried')
expected_deliverables = models.TextField(help_text='Expected outcome of thesis')
# fields to note the status of the application
is_supervisor_approved = models.IntegerField(
default=ApplicationsStatus.PENDING.value,
choices=status_choices
)
is_hod_approved = models.IntegerField(
default=ApplicationsStatus.PENDING.value,
choices=status_choices
)
is_ad_approved = models.IntegerField(
default=ApplicationsStatus.PENDING.value,
choices=status_choices
)
# comments from authorities
comments_from_supervisor = models.TextField(null=True, blank=True)
comments_from_hod = models.TextField(null=True, blank=True)
comments_from_ad = models.TextField(null=True, blank=True)
# date-time-stamp
application_date = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = 'PS to TS Application'
verbose_name_plural = 'PS to TS Applications'
class TS2PSTransfer(models.Model):
"""
Model to store the information for
TS --> PS transfer related queries
"""
# TS2PS = 0
# PSTS2PSPS = 1
# TSTS2TSPS = 2
sub_type_choices = [
(TransferType.TS2PS.value, 'TS to PS (Single Degree)'),
(TransferType.PSTS2PSPS.value, 'PS-TS to PS-PS (Dual Degree)'),
(TransferType.TSTS2TSPS.value, 'TS-TS to TS-PS (Dual Degree)'),
]
status_choices = [
(ApplicationsStatus.PENDING.value, 'Pending'),
(ApplicationsStatus.APPROVED.value, 'Approved'),
(ApplicationsStatus.REJECTED.value, 'Rejected'),
]
# linking application with its applicant
applicant = models.OneToOneField(ActiveUserProfile, primary_key=True,
on_delete=models.CASCADE)
# corresponding hod
hod_email = models.EmailField()
# sub-type for application; see choices above
sub_type = models.IntegerField(choices=sub_type_choices)
# other details required for the form
cgpa = models.DecimalField(max_digits=6, decimal_places=2)
reason_for_transfer = models.TextField()
name_of_org = models.CharField(max_length=100,
help_text='Name of BITS Campus or Organization where thesis was being carried')
# field to note the status of the application
is_hod_approved = models.IntegerField(
default=ApplicationsStatus.PENDING.value,
choices=status_choices
)
is_ad_approved = models.IntegerField(
default=ApplicationsStatus.PENDING.value,
choices=status_choices
)
# comments from authorities
comments_from_hod = models.TextField(null=True, blank=True)
comments_from_ad = models.TextField(null=True, blank=True)
# date-time-stamp
application_date = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = 'TS to PS Application'
verbose_name_plural = 'TS to PS Applications'
class DeadlineModel(models.Model):
    deadline_PS2TS = models.DateTimeField(null=True, blank=True)
    deadline_TS2PS = models.DateTimeField(null=True, blank=True)
    is_active_PS2TS = models.BooleanField(default=False)
    is_active_TS2PS = models.BooleanField(default=False)
    message = models.TextField(null=True, blank=True)
# Create your models here.
|