| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/python3
# Copyright (c) 2015 Davide Gessa
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from libcontractvm import Wallet, WalletExplorer, ConsensusManager
from forum import ForumManager
import sys
import time
consMan = ConsensusManager.ConsensusManager ()
consMan.bootstrap ("http://127.0.0.1:8181")
wallet = WalletExplorer.WalletExplorer (wallet_file='test.wallet')
srMan = ForumManager.ForumManager (consMan, wallet=wallet)
while True:
pollid = input ('Insert the id of the poll: ')
choice = input ('Insert the choice for the poll: ')
try:
print ('Broadcasted:', srMan.vote (pollid, choice))
except Exception as e:
print ('Error:', e)
|
andreasscalas/dappforum
|
samples/vote.py
|
Python
|
mit
| 734
|
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use Mailgun to send emails
- Use Redis for cache
- Use Sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .base import * # noqa
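# `env` below is assumed to come from django-environ and to be created in
# base.py, roughly like this (a sketch, not part of this file):
#
#   import environ
#   env = environ.Env()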
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com', ])
# END SITE CONFIGURATION
ALLOWED_HOSTS = ['*']  # NOTE: overrides the env-based list above and disables host validation
INSTALLED_APPS += ['gunicorn', ]
INSTALLED_APPS += [
# 'django.contrib.admin',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
# 'django.contrib.sites',
'rest_framework',
# allauth
# 'allauth',
# 'allauth.account',
# 'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.twitter',
# 'djcelery',
'celery',
# # local apps
'mainapp',
'action',
]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='fbstats <noreply@example.com>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[fbstats]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.MailgunBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
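# e.g. REDIS_URL='redis://127.0.0.1:6379' becomes 'redis://127.0.0.1:6379/0'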
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry', ],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console', ],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry', ],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
ACCOUNT_EMAIL_VERIFICATION = 'optional'
|
bhanduroshan/fbstats-docker
|
config/settings/production.py
|
Python
|
mit
| 8,125
|
from path import Path
import zipfile
import urllib2
Path('tmp').mkdir_p()
for model_name in ('seq2seq','seq2tree'):
for data_name in ('jobqueries','geoqueries','atis'):
fn = '%s_%s.zip' % (model_name, data_name)
link = 'http://dong.li/lang2logic/' + fn
with open('tmp/' + fn, 'wb') as f_out:
f_out.write(urllib2.urlopen(link).read())
with zipfile.ZipFile('tmp/' + fn) as zf:
zf.extractall('./%s/%s/data/' % (model_name, data_name))
Path('tmp').rmtree()
|
donglixp/lang2logic
|
pull_data.py
|
Python
|
mit
| 486
|
# encoding: utf-8
# pylint: disable=too-few-public-methods,invalid-name,bad-continuation
"""
RESTful API Auth resources
--------------------------
"""
import logging
from flask_login import current_user
from flask_restplus_patched import Resource
from flask_restplus._http import HTTPStatus
from werkzeug import security
from app.extensions.api import Namespace
from . import schemas, parameters
from .models import db, OAuth2Client
log = logging.getLogger(__name__)
api = Namespace('auth', description="Authentication")
@api.route('/oauth2_clients/')
@api.login_required(oauth_scopes=['auth:read'])
class OAuth2Clients(Resource):
"""
Manipulations with OAuth2 clients.
"""
@api.parameters(parameters.ListOAuth2ClientsParameters())
@api.response(schemas.BaseOAuth2ClientSchema(many=True))
def get(self, args):
"""
List of OAuth2 Clients.
Returns a list of OAuth2 Clients starting from ``offset`` and limited by
the ``limit`` parameter.
"""
oauth2_clients = OAuth2Client.query
if 'user_id' in args:
oauth2_clients = oauth2_clients.filter(
OAuth2Client.user_id == args['user_id']
)
return oauth2_clients.offset(args['offset']).limit(args['limit'])
@api.login_required(oauth_scopes=['auth:write'])
@api.parameters(parameters.CreateOAuth2ClientParameters())
@api.response(schemas.DetailedOAuth2ClientSchema())
@api.response(code=HTTPStatus.FORBIDDEN)
@api.response(code=HTTPStatus.CONFLICT)
@api.doc(id='create_oauth_client')
def post(self, args):
"""
Create a new OAuth2 Client.
Essentially, an OAuth2 Client is a ``client_id`` and ``client_secret``
pair associated with a user.
"""
with api.commit_or_abort(
db.session,
default_error_message="Failed to create a new OAuth2 client."
):
# TODO: reconsider using gen_salt
new_oauth2_client = OAuth2Client(
user_id=current_user.id,
client_id=security.gen_salt(40),
client_secret=security.gen_salt(50),
**args
)
db.session.add(new_oauth2_client)
return new_oauth2_client
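# Usage sketch (hypothetical client, not part of this module): listing clients
# with an OAuth2 bearer token carrying the 'auth:read' scope, assuming the
# namespace is mounted at http://localhost:5000/auth/.
#
#   import requests
#   resp = requests.get(
#       'http://localhost:5000/auth/oauth2_clients/',
#       params={'offset': 0, 'limit': 10},
#       headers={'Authorization': 'Bearer <token>'},
#   )
#   print(resp.json())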
|
frol/flask-restplus-server-example
|
app/modules/auth/resources.py
|
Python
|
mit
| 2,283
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "InBrowserEditor.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
PaulFlorea/InBrowserPreviewer
|
manage.py
|
Python
|
mit
| 258
|
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import inspect
import numpy as np
from basic import PopulationDistribution
from pyec.config import Config
from pyec.history import LocalBestHistory
from pyec.space import Euclidean
class PSOHistory(LocalBestHistory):
"""A :class:`History` for Particle Swarm Optimization.
Remembers the local best and the velocities.
"""
def __init__(self, config):
super(PSOHistory, self).__init__(config)
self._positions = None
self._velocities = None
self.lowerv = None
self.upperv = None
self.attrs |= set(["_velocities", "_positions", "upperv", "lowerv"])
def velocities(self):
return self._velocities
def positions(self):
return self._positions
def updateVelocity(self):
popSize = self.config.populationSize
if self._velocities is None:
if self.config.initial is None:
self._velocities = np.array([self.config.space.random()
for i in xrange(popSize)])
elif (inspect.isclass(self.config.initial) and
isinstance(self.config.initial, PopulationDistribution)):
self._velocities = np.array([self.config.initial.batch(popSize)])
else:
self._velocities = np.array([self.config.initial()
for i in xrange(popSize)])
return
rp = np.outer(np.random.random_sample(popSize),
np.ones(self.config.dim))
rg = np.outer(np.random.random_sample(popSize),
np.ones(self.config.dim))
#print shape(rp), shape(self.bestLocal), shape(self.bestGlobal), shape(self.positions), shape(self.velocities)
bestLocal = np.array([x for x,s in self.localBestPop])
bestGlobal = self.best()[0]
velocities = (self.config.omega * self._velocities
+ self.config.phip * rp * (bestLocal - self._positions)
+ self.config.phig * rg * (bestGlobal - self._positions))
del self._velocities
self._velocities = np.maximum(self.lowerv,
np.minimum(self.upperv, velocities))
del rp
del rg
def internalUpdate(self, population):
super(PSOHistory, self).internalUpdate(population)
initialize = True
if self._positions is not None:
del self._positions
initialize = False
self._positions = np.array([x for x,s in population])
if hasattr(self.config.space, 'extent'):
lower, upper = self.config.space.extent()
self._positions = np.maximum(self._positions, lower)
self._positions = np.minimum(self._positions, upper)
if initialize:
self.upperv = self._positions.max(axis=0)
self.lowerv = self._positions.min(axis=0)
self.updateVelocity()
class ParticleSwarmOptimization(PopulationDistribution):
"""Particle Swarm Optimization.
Config parameters
* omega -- The decay factor for velocities
* phig -- The global best component in velocity update
* phip -- The local best component in velocity update
"""
config = Config(history=PSOHistory,
omega=-.5,
phig=2.0,
phip=2.0)
def __init__(self, **kwargs):
super(ParticleSwarmOptimization, self).__init__(**kwargs)
if self.config.space.type != np.ndarray:
raise ValueError("Space must have type numpy.ndarray")
def compatible(self, history):
return isinstance(history, PSOHistory)
def batch(self, popSize):
positions = self.history.positions() + self.history.velocities()
return positions
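# The velocity update in PSOHistory.updateVelocity above is the standard PSO
# rule, sketched here for a single particle (illustration only; names mirror
# the config parameters):
#
#   v_new = omega * v + phip * rp * (best_local - x) + phig * rg * (best_global - x)
#   x_new = x + v_new
#
# where rp and rg are uniform random samples in [0, 1).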
|
hypernicon/pyec
|
pyec/distribution/pso.py
|
Python
|
mit
| 4,812
|
__all__ = ['chatcommand', 'execute_chat_command', 'save_matchsettings', '_register_chat_command']
import functools
import inspect
from .events import eventhandler, send_event
from .log import logger
from .asyncio_loop import loop
_registered_chat_commands = {} # dict of all registered chat commands
async def execute_chat_command(server, player, cmd):
#if not player.is_admin():
#r = check_rights(player)
args = cmd.split(' ')
if args[len(args) - 1] == '':
del args[len(args) - 1]
if args[0] in _registered_chat_commands:
try:
if len(args) == 1:
server.run_task(_registered_chat_commands[args[0]](server, player))
else:
server.run_task(_registered_chat_commands[args[0]](server, player, *args[1:]))
except Exception as exp:
server.chat_send_error('faulty use of chat command: ' + args[0], player)
server.chat_send_error(str(exp), player)
server.chat_send('use /help to see available chat commands', player)
raise
else:
server.chat_send_error('unknown chat command: ' + args[0], player)
server.chat_send('use /help to see available chat commands', player)
def _register_chat_command(chat_command, function):
if chat_command not in _registered_chat_commands:
_registered_chat_commands[chat_command] = function
else:
logger.error('chatcommand ' + "'" + chat_command + "'" + ' already registered to ' + str(function))
return False
def _unregister_chat_command(chat_command):
if chat_command not in _registered_chat_commands:
raise KeyError('chat command not registered')
else:
del _registered_chat_commands[chat_command]
# @chatcommand decorator
def chatcommand(cmd):
def chatcommand_decorator(func):
if _register_chat_command(cmd, func) is False:
return
module = inspect.getmodule(func)
logger.debug('chatcommand ' + "'" + cmd + "' connected to " + str(func) + ' in module ' + str(module))
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return chatcommand_decorator
@eventhandler('ManiaPlanet.PlayerChat')
async def _on_player_chat(server, callback):
p = server.player_from_login(callback.login)
# ignore normal chat
if not callback.isCommand:
if p is not None:
send_event(server, 'pie.PlayerChat', p)
return
server.run_task(execute_chat_command(server, p, callback.text))
@chatcommand('/help')
async def cmd_help(server, player):
"""list all chat commands"""
server.chat_send('help:', player)
for cmd in _registered_chat_commands:
if _registered_chat_commands[cmd].__doc__ is None:
docstr = 'no description set'
else:
docstr = _registered_chat_commands[cmd].__doc__
server.chat_send(cmd + ' - ' + docstr, player)
async def save_matchsettings(server, filename = None):
await server.rpc.SaveMatchSettings('MatchSettings\\' + server.config.matchsettings)
@chatcommand('/savematchsettings')
async def cmd_savematchsettings(server, player):
await save_matchsettings(server)
server.chat_send('matchsettings saved: ' + server.config.matchsettings)
@chatcommand('/shutdown')
async def cmd_shutdown(server, player):
await server.chat_send_wait('pie shutdown')
loop.stop()
@chatcommand('/players')
async def cmd_players(server, player):
for player in server.players:
server.chat_send(server.players[player].nickname)
|
juergenz/pie
|
src/pie/chat_commands.py
|
Python
|
mit
| 3,649
|
#! /usr/bin/python3
"""Callback a callable asset."""
import struct
import decimal
D = decimal.Decimal
from . import (util, config, exceptions, litecoin)
from . import order
FORMAT = '>dQ'
LENGTH = 8 + 8
ID = 21
def validate (db, source, fraction, asset, block_time, block_index, parse):
cursor = db.cursor()
problems = []
# TODO
if not config.TESTNET:
problems.append('callbacks are currently disabled on mainnet')
return None, None, None, problems
# TODO
if fraction > 1:
problems.append('fraction greater than one')
elif fraction <= 0:
problems.append('non‐positive fraction')
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?)''', ('valid', asset)))
if not issuances:
problems.append('no such asset, {}.'.format(asset))
return None, None, None, problems
else:
last_issuance = issuances[-1]
if last_issuance['issuer'] != source:
problems.append('not asset owner')
return None, None, None, problems
if not last_issuance['callable']:
problems.append('uncallable asset')
return None, None, None, problems
elif last_issuance['call_date'] > block_time: problems.append('before call date')
call_price = round(last_issuance['call_price'], 6) # TODO: arbitrary
divisible = last_issuance['divisible']
if not divisible: # Pay per output unit.
call_price *= config.UNIT
# If parsing, unescrow all funds of asset. (Order of operations is
# important here.)
if parse:
# Cancel pending order matches involving asset.
cursor.execute('''SELECT * from order_matches \
WHERE status = ? AND (forward_asset = ? OR backward_asset = ?)''', ('pending', asset, asset))
for order_match in list(cursor):
order.cancel_order_match(db, order_match, 'cancelled', block_index)
# Cancel open orders involving asset.
cursor.execute('''SELECT * from orders \
WHERE status = ? AND (give_asset = ? OR get_asset = ?)''', ('open', asset, asset))
for order_element in list(cursor):
order.cancel_order(db, order_element, 'cancelled', block_index)
# Calculate callback quantities.
holders = util.holders(db, asset)
outputs = []
for holder in holders:
# If composing (and not parsing), predict funds to be returned from
# escrow (instead of cancelling open offers, etc.), by *not* skipping
# listing escrowed funds here.
if parse and holder['escrow']:
continue
address = holder['address']
address_quantity = holder['address_quantity']
if address == source or address_quantity == 0: continue
callback_quantity = int(address_quantity * fraction) # Round down.
fraction_actual = callback_quantity / address_quantity
outputs.append({'address': address, 'address_quantity': address_quantity, 'callback_quantity': callback_quantity, 'fraction_actual': fraction_actual})
callback_total = sum([output['callback_quantity'] for output in outputs])
if not callback_total: problems.append('nothing called back')
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, config.XPT)))
if not balances or balances[0]['quantity'] < (call_price * callback_total):
problems.append('insufficient funds')
cursor.close()
return call_price, callback_total, outputs, problems
def compose (db, source, fraction, asset):
call_price, callback_total, outputs, problems = validate(db, source, fraction, asset, util.last_block(db)['block_time'], util.last_block(db)['block_index'], parse=False)
if problems: raise exceptions.CallbackError(problems)
print('Total quantity to be called back:', util.devise(db, callback_total, asset, 'output'), asset)
asset_id = util.asset_id(asset)
data = struct.pack(config.TXTYPE_FORMAT, ID)
data += struct.pack(FORMAT, fraction, asset_id)
return (source, [], data)
def parse (db, tx, message):
callback_parse_cursor = db.cursor()
# Unpack message.
try:
if len(message) != LENGTH:
raise exceptions.UnpackError
fraction, asset_id = struct.unpack(FORMAT, message)
asset = util.asset_name(asset_id)
status = 'valid'
except (exceptions.UnpackError, exceptions.AssetNameError, struct.error) as e:
fraction, asset = None, None
status = 'invalid: could not unpack'
if status == 'valid':
call_price, callback_total, outputs, problems = validate(db, tx['source'], fraction, asset, tx['block_time'], tx['block_index'], parse=True)
if problems: status = 'invalid: ' + '; '.join(problems)
if status == 'valid':
# Issuer.
assert call_price * callback_total == int(call_price * callback_total)
util.debit(db, tx['block_index'], tx['source'], config.XPT, int(call_price * callback_total), action='callback', event=tx['tx_hash'])
util.credit(db, tx['block_index'], tx['source'], asset, callback_total, action='callback', event=tx['tx_hash'])
# Holders.
for output in outputs:
assert call_price * output['callback_quantity'] == int(call_price * output['callback_quantity'])
util.debit(db, tx['block_index'], output['address'], asset, output['callback_quantity'], action='callback', event=tx['tx_hash'])
util.credit(db, tx['block_index'], output['address'], config.XPT, int(call_price * output['callback_quantity']), action='callback', event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'fraction': fraction,
'asset': asset,
'status': status,
}
sql='insert into callbacks values(:tx_index, :tx_hash, :block_index, :source, :fraction, :asset, :status)'
callback_parse_cursor.execute(sql, bindings)
callback_parse_cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Paytokens/paytokensd
|
lib/callback.py
|
Python
|
mit
| 6,281
|
from __future__ import unicode_literals
import logging
# Logging configuration
log = logging.getLogger(__name__) # noqa
log.addHandler(logging.NullHandler()) # noqa
from netmiko.ssh_dispatcher import ConnectHandler
from netmiko.ssh_dispatcher import ssh_dispatcher
from netmiko.ssh_dispatcher import redispatch
from netmiko.ssh_dispatcher import platforms
from netmiko.ssh_dispatcher import FileTransfer
from netmiko.scp_handler import SCPConn
from netmiko.cisco.cisco_ios import InLineTransfer
from netmiko.ssh_exception import NetMikoTimeoutException
from netmiko.ssh_exception import NetMikoAuthenticationException
from netmiko.ssh_autodetect import SSHDetect
from netmiko.base_connection import BaseConnection
# Alternate naming
NetmikoTimeoutError = NetMikoTimeoutException
NetmikoAuthError = NetMikoAuthenticationException
__version__ = '2.0.1'
__all__ = ('ConnectHandler', 'ssh_dispatcher', 'platforms', 'SCPConn', 'FileTransfer',
'NetMikoTimeoutException', 'NetMikoAuthenticationException',
'NetmikoTimeoutError', 'NetmikoAuthError', 'InLineTransfer', 'redispatch',
'SSHDetect', 'BaseConnection')
# Cisco cntl-shift-six sequence
CNTL_SHIFT_6 = chr(30)
|
fooelisa/netmiko
|
netmiko/__init__.py
|
Python
|
mit
| 1,198
|
from django.conf.urls import patterns, url
from .views import FeedList, ImportView, AddView
from .ajax import mark_as_read
urlpatterns = patterns(
'',
url(r'^$', FeedList.as_view(), name="feedme-feed-list"),
url(r'^by_category/(?P<category>[-\w]+)/$', FeedList.as_view(),
name='feedme-feed-list-by-category'),
url(r'^by_feed/(?P<feed_id>[-\w]+)/$', FeedList.as_view(),
name='feedme-feed-list-by-feed'),
url(r'^import/$', ImportView.as_view(),
name='feedme-import-google-takeout'),
url(r'^ajax/mark_as_read/$', mark_as_read,
name='feedme-mark-as-read-ajax'),
url(r'^ajax/add/$', AddView.as_view(), name='feedme-add-ajax'),
)
|
gotlium/django-feedme
|
feedme/urls.py
|
Python
|
mit
| 686
|
from modules.chart_module import ChartModule
import tornado.escape
import tornado.web
import logging
class LineChartModule(ChartModule):
def render(self, raw_data, keys, chart_id="linechart"):
self.chart_id = chart_id
self.chart_data = self.overtime_linechart_data(raw_data, keys)
return self.render_string('modules/linechart.html', chart_id=self.chart_id)
def overtime_linechart_data(self, raw_data, keys,
yearterms_key='fcqs_yearterms',
overtime_key='fcqs_overtime'):
def _overtime_builder(overtime_data, key):
def _transform_overtime_data(yearterm):
value = overtime_data[str(yearterm)][key]
roundto = {
'percent_a': 3,
'percent_b': 3,
'percent_c': 3,
'percent_d': 3,
'percent_f': 3,
'percent_incomplete': 3,
'average_grade': 3
}.get(key, 1)
if value is not None:
return round(value, roundto)
else:
return None
return _transform_overtime_data
def _overtime_dataset_builder(key):
color = {
'course_howmuchlearned_average': (247, 92, 3),
'course_challenge_average': (217, 3, 104),
'courseoverall_average': (130, 2, 99),
'course_priorinterest_average': (4, 167, 119),
'instructor_effectiveness_average': (247, 92, 3),
'instructor_respect_average': (217, 3, 104),
'instructoroverall_average': (130, 2, 99),
'instructor_availability_average': (4, 167, 119),
'TTT_instructoroverall_average': (197, 27, 125),
'OTH_instructoroverall_average': (233, 163, 201),
'TA_instructoroverall_average': (253, 224, 239),
'GR_courseoverall_average': (77, 146, 33),
'UD_courseoverall_average': (161, 215, 106),
'LD_courseoverall_average': (230, 245, 106),
'percent_a': (44, 123, 182),
'percent_b': (171, 217, 233),
'percent_c': (255, 255, 191),
'percent_d': (253, 174, 97),
'percent_f': (215, 25, 28),
'percent_incomplete': (48, 48, 48),
'average_grade': (48, 48, 48),
}.get(key, (48, 48, 48))
yaxis_id = {
'percent_a': 'y-axis-3',
'percent_b': 'y-axis-3',
'percent_c': 'y-axis-3',
'percent_d': 'y-axis-3',
'percent_f': 'y-axis-3',
'percent_incomplete': 'y-axis-3',
'average_grade': 'y-axis-2',
}.get(key, 'y-axis-1')
fill = {
'percent_a': True,
'percent_b': True,
'percent_c': True,
'percent_d': True,
'percent_f': True,
'percent_incomplete': True,
}.get(key, False)
label = {
'course_howmuchlearned_average': 'Amount Learned',
'course_challenge_average': 'Challenge',
'courseoverall_average': 'Course Overall',
'course_priorinterest_average': 'Prior Interest',
'instructor_effectiveness_average': 'Effectiveness',
'instructor_respect_average': 'Respect',
'instructoroverall_average': 'Instructor Overall',
'instructor_availability_average': 'Availability',
'TTT_instructoroverall_average': 'TTT instructors',
'OTH_instructoroverall_average': 'OTH instructors',
'TA_instructoroverall_average': 'TA instructors',
'GR_courseoverall_average': 'GR Course Overall',
'UD_courseoverall_average': 'UD Course Overall',
'LD_courseoverall_average': 'LD Course Overall',
'percent_a': 'A Grade',
'percent_b': 'B Grade',
'percent_c': 'C Grade',
'percent_d': 'D Grade',
'percent_f': 'F Grade',
'percent_incomplete': 'Incomplete',
'average_grade': 'Average GPA'
}.get(key, '???')
background_alpha = 1.0 if fill else 0.2
return {
'label': label,
'fill': fill,
'yAxisID': yaxis_id,
'backgroundColor': "rgba({0},{1},{2},{background_alpha})".format(*color, background_alpha=background_alpha),
'borderColor': "rgba({0},{1},{2},1)".format(*color),
'pointBackgroundColor': "rgba({0},{1},{2},1)".format(*color),
'pointHoverBackgroundColor': "rgba({0},{1},{2},1)".format(*color),
'pointHoverBorderColor': "#fff",
'pointHoverBorderWidth': 2,
'pointHoverRadius': 5,
'data': list(map(_overtime_builder(overtime_data, key), yearterms))
}
yearterms = raw_data[yearterms_key]
overtime_data = raw_data[overtime_key]
labels = list(map(self.convert_date, yearterms))
datasets = list(map(_overtime_dataset_builder, keys))
return tornado.escape.json_encode({
'labels': labels,
'datasets': datasets,
})
def embedded_javascript(self):
options = tornado.escape.json_encode(self.chart_options())
script = '''
new Chart(document.getElementById("{2}").getContext("2d"),{{
type:'line',
data:{1},
options:{0}
}});
'''.format(options, self.chart_data, self.chart_id)
return script
|
SFII/cufcq-new
|
modules/linechart_module.py
|
Python
|
mit
| 5,898
|
# Copyright 2017 Thomas Sterrenburg
#
# Licensed under the MIT License (the License); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at https://opensource.org/licenses/MIT#
import glob
import re
import sys
import time
from src.io.storage import get_request_items, store_fingerprint, get_number_of_malformed_requests
from static import variables
from static.arguments import parse_arguments
from static.blacklist import Blacklist
from static.logger import setup_logger, LOGNAME_START
from src.exchange.http import Request, UrlInfo, submit_string
from src.static.constants import NO_RESPONSE_CODE, DATA_NONE, LEXICAL, SEMANTIC, SYNTACTIC, DATA_LIST, KNOWN, \
SERVER_NAMES
logger = setup_logger()
global host_total
def add_characteristic(category, name, value, fingerprint, data_type=DATA_NONE):
if not fingerprint[category].has_key(name):
# TODO maybe remove data type
if data_type == 'list':
value = [value]
fingerprint[category][name] = value
return
if fingerprint[category][name] == value:
return
def get_characteristics(test_name, response, fingerprint, host, host_index, NO_RESPONSE=None):
# logger.debug("applying %s", test_name, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
response_code, response_text = response.return_code()
server_name_claimed = response.server_name()
if response_code not in [NO_RESPONSE, NO_RESPONSE_CODE]:
add_characteristic(LEXICAL, response_code, response_text, fingerprint)
add_characteristic(LEXICAL, 'SERVER_NAME_CLAIMED', server_name_claimed, fingerprint)
# nginx 404 test
# if response_code == '404':
# server_name_404 = get_server_name_404(response)
# if len(server_name_404) > 0:
# add_characteristic(LEXICAL, 'SERVER_NAME_404', server_name_404, fingerprint)
if test_name.startswith('malformed_'):
add_characteristic(SEMANTIC, test_name, response_code, fingerprint)
if response.has_header('Allow'):
data = response.header_data('Allow')
add_characteristic(SYNTACTIC, 'ALLOW_ORDER', data, fingerprint)
if response.has_header('Public'):
data = response.header_data('Public')
add_characteristic(SYNTACTIC, 'PUBLIC_ORDER', data, fingerprint)
if response.has_header('Vary'):
data = response.header_data('Vary')
add_characteristic(SYNTACTIC, 'VARY_ORDER', data, fingerprint)
if response_code not in [NO_RESPONSE_CODE, NO_RESPONSE]:
header_names = response.header_names()
add_characteristic(SYNTACTIC, 'HEADER_ORDER', header_names, fingerprint, data_type=DATA_LIST)
if response.has_header('ETag'):
data = response.header_data('ETag')
add_characteristic(SYNTACTIC, 'ETag', data, fingerprint)
elif response.has_header('Etag'):
data = response.header_data('Etag')
add_characteristic(SYNTACTIC, 'ETag', data, fingerprint)
def default_get(host, host_index, fingerprint):
request = Request(host, host_index, logger)
response = request.submit
if response.response_code == NO_RESPONSE_CODE:
raise ValueError('default_get failed')
else:
get_characteristics('default_get', response, fingerprint, host, host_index)
def default_options(host, host_index, fingerprint):
request = Request(host, host_index, logger, method='OPTIONS')
response = request.submit
get_characteristics('default_options', response, fingerprint, host, host_index)
def unknown_method(host, host_index, fingerprint):
request = Request(host, host_index, logger, method='ABCDEFG')
response = request.submit
get_characteristics('unknown_method', response, fingerprint, host, host_index)
def unauthorized_activity(host, host_index, fingerprint):
activities = ('OPTIONS', 'TRACE', 'GET', 'HEAD', 'DELETE',
'PUT', 'POST', 'COPY', 'MOVE', 'MKCOL',
'PROPFIND', 'PROPPATCH', 'LOCK', 'UNLOCK',
'SEARCH')
for activity in activities:
request = Request(host, host_index, logger, method=activity)
response = request.submit
get_characteristics('unauthorized_activity_' + activity, response, fingerprint, host, host_index)
def empty_uri(host, host_index, fingerprint):
request = Request(host, host_index, logger, local_uri='/ABCDEFG')
response = request.submit
get_characteristics('empty_uri', response, fingerprint, host, host_index)
def malformed_method(host, host_index, fingerprint):
malformed_methods = get_malformed_methods()
for index, method in enumerate(malformed_methods):
request = Request(host, host_index, logger)
request.method_line = method
response = request.submit
get_characteristics('MALFORMED_' + ('000' + str(index))[-3:], response, fingerprint, host, host_index)
def get_malformed_methods():
activities = 'GET', 'HEAD', 'POST', 'PUT'
malformed_methods_list = []
for activity in activities:
malformed_methods = (
activity,
activity + '/',
activity + '/1.0',
activity + ' / HTTP/123.45',
activity + ' / HTTP/999.99',
activity + ' / HTP/1.0',
activity + ' / HTT/1.0',
activity + ' / HTTP/7.Q',
activity + ' / HTTP/1.0X',
activity + ' /abcdefghijklmnopqrstuvwxyz/.. HTTP/1.0',
activity + ' /./././././././././././././././ HTTP/1.0',
activity + ' /.. HTTP/1.0',
activity + '\t/\tHTTP/1.0',
activity + '\t/\tHTTP/1.0',
activity + ' / H',
activity + ' / ' + 'HTTP/' + '1' * 1000 + '.0',
activity + ' FTP://abcdefghi HTTP/1.0',
activity + ' C:\ HTTP/1.0',
' ' * 1000 + activity + ' / HTTP/1.0',
'\n' + activity + ' / HTTP/1.0',
)
malformed_methods_list += malformed_methods
malformed_activity_independent = (
'GET GET GET',
'HELLO',
'%47%45%54 / HTTP/1.0',
'GEX\bT / HTTP/1.0'
)
malformed_methods_list += malformed_activity_independent
return malformed_methods_list
def unavailable_accept(host, host_index, fingerprint):
request = Request(host, host_index, logger)
request.add_header('Accept', 'abcd/efgh')
response = request.submit
get_characteristics('unavailable_accept', response, fingerprint, host, host_index)
def long_content_length(host, host_index, fingerprint):
request = Request(host, host_index, logger)
request.add_header('Content-Length', str(sys.maxint))
request.body = 'abcdefgh'
response = request.submit
get_characteristics('long_content_length', response, fingerprint, host, host_index)
def get_fingerprint(host, host_index, blacklist):
fingerprint = {
LEXICAL: {},
SYNTACTIC: {},
SEMANTIC: {}
}
url_info = UrlInfo(host)
request_items = get_request_items()
for name, request_string in request_items.iteritems():
try:
response = submit_string(request_string, name, url_info, host_index, logger)
get_characteristics(name, response, fingerprint, host, host_index)
except ValueError as e:
logger.warning("%s", e,
extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
return fingerprint
# TODO deprecate
fingerprint_methods = [default_get, default_options, unknown_method, unauthorized_activity, empty_uri,
malformed_method, unavailable_accept, long_content_length]
for method in fingerprint_methods:
# logger.debug("processing %s", method.__name__, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
try:
logger.debug('applying method %s', method.__name__,
extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
method(host, host_index, fingerprint)
except ValueError as e:
logger.warning("%s", e,
extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
if method == default_get:
blacklist.insert(host)
logger.info('host added to blacklist',
extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
break
return fingerprint
def get_known_fingerprints(args):
if args.gather is False:
fingerprints = []
directories = [args.known, 'data/output']
for directory in directories:
for filepath in glob.glob(directory + '/*'):
logger.debug("loading fingerprint %s", filepath, extra=LOGNAME_START)
with open(filepath, 'r') as file_handler:
f_fingerprint = eval(file_handler.read())
fingerprints.append(f_fingerprint)
return fingerprints
def get_fingerprint_scores(args, subject, known_fingerprints):
scores = []
for known in known_fingerprints:
similarity = {
'matches': 0,
'mismatches': 0,
'unknowns': 0
}
header_match = subject[LEXICAL].has_key('SERVER_NAME_CLAIMED') \
and known[LEXICAL].has_key('SERVER_NAME_CLAIMED') \
and subject[LEXICAL]['SERVER_NAME_CLAIMED'] == known[LEXICAL]['SERVER_NAME_CLAIMED']
if header_match and args.lazy:
certainty = 1
else:
similarity = find_similar_lexical(known, similarity, subject)
similarity = find_similar_syntactic(known, similarity, subject)
similarity = find_similar_semantic(known, similarity, subject)
matches = similarity['matches']
total = float(similarity['matches'] + similarity['mismatches'])
certainty = matches / total if total > 0 else 0
scores.append([known, similarity, certainty])
return scores
def find_similar_lexical(known, similarity, subject):
# TODO select appropriate response codes, the more the better
response_codes = range(200, 220) + \
range(300, 320) + \
range(400, 420) + \
range(500, 520)
for code in response_codes:
if known[LEXICAL].has_key(code) and subject[LEXICAL].has_key(code):
known_text = known[LEXICAL][code]
subject_text = subject[LEXICAL][code]
if known_text == '' or subject_text == '':
similarity['unknowns'] += 1
elif known_text == subject_text:
similarity['matches'] += 1
else:
similarity['mismatches'] += 1
return similarity
def find_similar_syntactic(known, similarity, subject):
similarity = find_similar_allow_order(known, similarity, subject)
# similarity = find_similar_etag(known, similarity, subject)
return similarity
def find_similar_allow_order(known, similarity, subject):
known_allows = subject_allows = ''
if known[SYNTACTIC].has_key('ALLOW_ORDER'):
known_allows = known[SYNTACTIC]['ALLOW_ORDER']
else:
return similarity
if subject[SYNTACTIC].has_key('ALLOW_ORDER'):
subject_allows = subject[SYNTACTIC]['ALLOW_ORDER']
if known_allows and subject_allows:
if known_allows == subject_allows:
similarity['matches'] += 1
else:
similarity['mismatches'] += 1
else:
similarity['unknowns'] += 1
return similarity
def find_similar_etag(known, similarity, subject):
known_etag = subject_etag = ''
if known[SYNTACTIC].has_key('ETag'):
known_etag = known[SYNTACTIC]['ETag']
if subject[SYNTACTIC].has_key('ETag'):
subject_etag = subject[SYNTACTIC]['ETag']
if known_etag == '' or subject_etag == '':
similarity['unknowns'] += 1
elif known_etag == subject_etag:
similarity['matches'] += 1
else:
similarity['mismatches'] += 1
return similarity
def find_similar_semantic(known, similarity, subject):
for i in range(get_number_of_malformed_requests()):
malformed = 'malformed_' + str(i)
if known[SEMANTIC].has_key(malformed):
known_malformed = known[SEMANTIC][malformed]
subject_malformed = subject[SEMANTIC][malformed]
if known_malformed == subject_malformed:
similarity['matches'] += 1
else:
similarity['mismatches'] += 1
else:
similarity['unknowns'] += 1
return similarity
def score_compare(score_a, score_b):
server_a = score_a[0]
# matches_a = score_a[1]['matches']
matches_a = score_a[2]
server_b = score_b[0]
# matches_b = score_b[1]['matches']
matches_b = score_b[2]
compared = -cmp(matches_a, matches_b)
if compared != 0:
return compared
else:
return -cmp(server_a, server_b)
def sort_scores(scores):
if len(scores) == 1:
return scores
scores.sort(score_compare)
return scores
def print_scores(hostname, scores):
lint = "-" * 80
print '\n%s\n%-50s\n%-50s %4s (%4s : %3s : %3s)' % (
lint, hostname[:50], 'name', 'certainty', 'matches', 'mismatches', 'unknowns')
for score in scores:
a = score[0]
b = score[0][LEXICAL]
# c = score[0][LEXICAL]['SERVER_NAME_CLAIMED']
name = score[0][LEXICAL]['SERVER_NAME_CLAIMED'] if score[0][LEXICAL].has_key('SERVER_NAME_CLAIMED') else 'NO_BANNER'
matches = score[1]['matches']
mismatches = score[1]['mismatches']
unknowns = score[1]['unknowns']
certainty = score[2]
print '%-50s %.3f (%2d : %2d : %2d)' % (name, certainty, matches, mismatches, unknowns)
print lint
def get_hosts(args):
hosts = []
if args.input is not None:
hosts.append(args.input)
else:
hosts += [host.strip() for host in args.file.readlines()]
return hosts
def process_host(args, host, host_index, known_fingerprints, blacklist):
f = get_fingerprint(host, host_index, blacklist)
url_info = UrlInfo(host)
if SERVER_NAMES is True:
banner = f[LEXICAL]['SERVER_NAME_CLAIMED']
if isinstance(banner, basestring):
filename = banner.split()[0]
else:
filename = banner[0].split()[0]
filename = filename.replace('/', '_')
directory = KNOWN
else:
if url_info.port != 80:
filename = url_info.host + ':' + str(url_info.port)
else:
filename = url_info.host
directory = args.output
store_fingerprint(directory, f, filename)
if args.gather is False:
scores = get_fingerprint_scores(args, f, known_fingerprints)
scores = sort_scores(scores)
print_scores(host, scores)
def process_hosts(args, hosts, known_fingerprints, blacklist):
blacklist_hosts = blacklist.get_hosts()
for host_index, host in enumerate(hosts):
try:
host_index += 1
logger.info("processing host (%s/%s)", host_index, len(hosts),
extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
if host not in blacklist_hosts:
process_host(args, host, host_index, known_fingerprints, blacklist)
else:
logger.warning('host is blacklisted',
extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
except ValueError as e:
logger.error(e, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
if __name__ == '__main__':
try:
args = parse_arguments()
logger = setup_logger(args)
hosts = get_hosts(args)
variables.init(len(hosts))
blacklist = Blacklist()
# hosts = hosts[-500:]
# hosts = [hosts[1]]
variables.host_total = len(hosts)
known_fingerprints = get_known_fingerprints(args)
process_hosts(args, hosts, known_fingerprints, blacklist)
print "NOW!!!"
time.sleep(50)
# Request.exporter.generate_output_file()
except KeyboardInterrupt:
Request.exporter.generate_output_file()
sys.exit()
|
thomas-sterrenburg/fingerprinting-python
|
src/main.py
|
Python
|
mit
| 16,603
|
class Board(object):
"""This class defines the board"""
def __init__(self, board_size):
"""The initializer for the class"""
self.board_size = board_size
self.board = []
for index in range(0, self.board_size):
self.board.append(['0'] * self.board_size)
def is_on_board(self, x_coordinate, y_coordinate):
"""Is the piece on the board"""
return bool((0 <= x_coordinate < self.board_size) and (0 <= y_coordinate < self.board_size))
def place_piece(self, x_coordinate, y_coordinate, value):
"""Place a piece on the board"""
try:
if not self.is_on_board(x_coordinate, y_coordinate):
raise ValueError('not_on_board')
if self.is_piece_set(x_coordinate, y_coordinate):
raise ValueError('piece_is_set')
self.update_cell(x_coordinate, y_coordinate, value)
except ValueError as err:
print(err.args)
def update_cell(self, x_coordinate, y_coordinate, value):
"""Update the placement of the piece on the board"""
self.board[(y_coordinate-1)].insert((x_coordinate-1), str(value))
self.board[(y_coordinate-1)].pop(x_coordinate)
def print_board(self):
"""Print the board"""
for row in self.board:
print(" ".join(row))
def is_piece_set(self, x_coordinate, y_coordinate):
"""Check to see if a piece is set """
if self.is_on_board(x_coordinate, y_coordinate):
return bool(str(self.board[(y_coordinate-1)][(x_coordinate-1)]) != '0')
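# Usage sketch (hypothetical, not part of the original module):
#
#   board = Board(3)
#   board.place_piece(1, 1, 5)   # sets the top-left cell to '5'
#   board.print_board()          # prints: 5 0 0 / 0 0 0 / 0 0 0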
|
jonbrohauge/pySudokuSolver
|
board.py
|
Python
|
mit
| 1,589
|
# -*- coding: utf-8 -*-
import os.path
import time
import urllib
import json
import requests
from tencentyun import conf
from .auth import Auth
class ImageProcess(object):
def __init__(self, appid, secret_id, secret_key, bucket):
self.IMAGE_FILE_NOT_EXISTS = -1
self._secret_id,self._secret_key = secret_id,secret_key
conf.set_app_info(appid, secret_id, secret_key, bucket)
def porn_detect(self, porn_detect_url):
auth = Auth(self._secret_id, self._secret_key)
sign = auth.get_porn_detect_sign(porn_detect_url)
app_info = conf.get_app_info()
if False == sign:
return {
'code':9,
'message':'Secret id or key is empty.',
'data':{},
}
url = app_info['end_point_porndetect']
payload = {
'bucket':app_info['bucket'],
'appid':int(app_info['appid']),
'url':(porn_detect_url).encode("utf-8"),
}
header = {
'Authorization':sign,
'Content-Type':'application/json',
}
r = {}
r = requests.post(url, data=json.dumps(payload), headers=header)
ret = r.json()
return ret
def porn_detect_url(self, porn_url):
auth = Auth(self._secret_id, self._secret_key)
sign = auth.get_porn_detect_sign()
app_info = conf.get_app_info()
if False == sign:
return {
'code':9,
'message':'Secret id or key is empty.',
'data':{},
}
url = app_info['end_point_porndetect']
payload = {
'bucket':app_info['bucket'],
'appid':int(app_info['appid']),
'url_list':porn_url,
}
header = {
'Authorization':sign,
'Content-Type':'application/json',
}
r = {}
r = requests.post(url, data=json.dumps(payload), headers=header)
ret = r.json()
return ret
def porn_detect_file(self, porn_file):
auth = Auth(self._secret_id, self._secret_key)
sign = auth.get_porn_detect_sign()
app_info = conf.get_app_info()
if False == sign:
return {
'code':9,
'message':'Secret id or key is empty.',
'data':{},
}
url = app_info['end_point_porndetect']
header = {
'Authorization':sign,
}
files = {
'appid':(None,app_info['appid'],None),
'bucket':(None,app_info['bucket'],None),
}
i=0
for pfile in porn_file:
pfile = pfile.decode('utf-8')
local_path = os.path.abspath(pfile)
if not os.path.exists(local_path):
return {'httpcode':0, 'code':self.IMAGE_FILE_NOT_EXISTS, 'message':'file ' + pfile + ' not exists', 'data':{}}
i+=1
files['image['+str(i-1)+']']=(pfile, open(pfile,'rb'))
r = requests.post(url, headers=header, files=files)
ret = r.json()
return ret
|
tencentyun/python-sdk
|
python2/tencentyun/imageprocess.py
|
Python
|
mit
| 3,116
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'barnacoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
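# For example (illustrative only):
#   find_format_specifiers('%1 of %2')  ->  ['1', '2']   (Qt-style)
#   find_format_specifiers('%s (%d)')   ->  ['s', 'd']   (strprintf-style)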
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation '%s'" % sanitize_string(translation))
return False
else:
if source_f != translation_f:
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # override the encoding: the parser understands 'utf-8' but not 'utf8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
FinalHashLLC/Barnacoin
|
contrib/devtools/update-translations.py
|
Python
|
mit
| 6,783
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# iflytek.py ---
#
# Filename: iflytek.py
# Description:
# Author: Werther Zhang
# Maintainer:
# Created: Thu Sep 14 09:01:20 2017 (+0800)
#
# Change Log:
#
#
import time
from ctypes import *
from io import BytesIO
import wave
import platform
import logging
import os
import contextlib
logging.basicConfig(level=logging.DEBUG)
BASEPATH=os.path.split(os.path.realpath(__file__))[0]
def not_none_return(obj, defobj):
if obj:
return obj
else:
return defobj
class iflytekTTS():
def __init__(self, appid=None, voice_name=None, speed=None, volume=None, pitch=None):
self.__appid = not_none_return(appid, '59b4d5d4')
self.__voice_name = not_none_return(voice_name, 'xiaowanzi')
self.__speed = not_none_return(speed, 50)
self.__volume = not_none_return(volume, 50)
self.__pitch = not_none_return(pitch, 50)
self.__cur = cdll.LoadLibrary(os.path.join(BASEPATH, 'iflytek/libmsc.so'))
self.__iflytek_init()
def __save_file(self, raw_data, _tmpFile = '/tmp/test.wav'):
if os.path.exists(_tmpFile) :
return
tmpFile = _tmpFile + '.tmp'
with contextlib.closing(wave.open(tmpFile , 'w')) as f:
f.setparams((1, 2, 16000, 262720, 'NONE', 'not compressed'))
f.writeframesraw(raw_data)
os.rename(tmpFile, _tmpFile)
def __iflytek_init(self):
MSPLogin = self.__cur.MSPLogin
ret = MSPLogin(None,None,'appid = {}, work_dir = .'.format(self.__appid))
if ret != 0:
logging.error("MSPLogin failed, error code: {}".format(ret))
return False
return True
def get_tts_audio(self, src_text, filename, language='zh', options=None):
fname = os.path.join('/tmp/', filename + '.' + 'wav')
QTTSSessionBegin = self.__cur.QTTSSessionBegin
QTTSTextPut = self.__cur.QTTSTextPut
QTTSAudioGet = self.__cur.QTTSAudioGet
QTTSAudioGet.restype = c_void_p
QTTSSessionEnd = self.__cur.QTTSSessionEnd
ret_c = c_int(0)
ret = 0
session_begin_params="voice_name = {}, text_encoding = utf8, sample_rate = 16000, speed = {}, volume = {}, pitch = {}, rdn = 2".format(self.__voice_name, self.__speed, self.__volume, self.__pitch)
sessionID = QTTSSessionBegin(session_begin_params, byref(ret_c))
if ret_c.value == 10111: # not initialized
if self.__iflytek_init():
return self.get_tts_audio(src_text, filename)
if ret_c.value != 0:
logging.error("QTTSSessionBegin failed, error code: {}".format(ret_c.value))
return
ret = QTTSTextPut(sessionID, src_text, len(src_text), None)
if ret != 0:
logging.error("QTTSTextPut failed, error code:{}".format(ret))
QTTSSessionEnd(sessionID, "TextPutError")
return
logging.info("正在合成 [{}]...".format(src_text))
audio_len = c_uint(0)
synth_status = c_int(0)
f = BytesIO()
while True:
p = QTTSAudioGet(sessionID, byref(audio_len), byref(synth_status), byref(ret_c))
if ret_c.value != 0:
logging.error("QTTSAudioGet failed, error code: {}".format(ret_c))
QTTSSessionEnd(sessionID, "AudioGetError")
break
if p != None:
buf = (c_char * audio_len.value).from_address(p)
f.write(buf)
if synth_status.value == 2:
self.__save_file(f.getvalue(), fname)
break
time.sleep(0.5)
logging.info('Synthesis complete!')
ret = QTTSSessionEnd(sessionID, "Normal")
if ret != 0:
logging.error("QTTSSessionEnd failed, error code:{}".format(ret))
return ('wav', fname)
if __name__ == '__main__':
tts = iflytekTTS()
def md5sum(contents):
import hashlib
hash = hashlib.md5()
hash.update(contents)
return hash.hexdigest()
import sys
basename = md5sum(sys.argv[1])
t, f = tts.get_tts_audio(sys.argv[1], basename, 'zh');
def mplayer(f):
import commands
st, output = commands.getstatusoutput('mplayer -really-quiet -noconsolecontrols -volume 82 {}'.format(f))
mplayer(f)
import os
print f
basename = md5sum(sys.argv[1][:-1])
t, f = tts.get_tts_audio(sys.argv[1][:-1], basename, 'zh');
print f
#os.remove(f)
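# A minimal usage sketch (assumptions: libmsc.so sits next to this module,
# the bundled appid is still valid, and 'xiaoyan' is an available voice):
#
#   tts = iflytekTTS(voice_name='xiaoyan', speed=60)
#   fmt, path = tts.get_tts_audio(u'hello world', 'greeting', 'zh')
#   os.system('aplay {}'.format(path))  # any WAV-capable player works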
|
pengzhangdev/slackbot
|
slackbot/plugins/component/ttsdriver/iflytek.py
|
Python
|
mit
| 4,519
|
import unittest
from fam.tests.models.test01 import Dog, Cat, Person, JackRussell, Monarch
from fam.mapper import ClassMapper
class MapperTests(unittest.TestCase):
def setUp(self):
self.mapper = ClassMapper([Dog, Cat, Person, JackRussell, Monarch])
def tearDown(self):
pass
def test_sub_class_refs(self):
self.assertEqual(set(Monarch.fields.keys()), set(["name", "country", "cats", "dogs", "animals", "callbacks"]))
self.assertEqual(set(Monarch.cls_fields.keys()), {"country"})
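# Conventional entry point so the file can also be run directly with
# `python test_mapping.py` (otherwise a test runner is assumed to discover it):
if __name__ == '__main__':
    unittest.main()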
|
paulharter/fam
|
src/fam/tests/test_couchdb/test_mapping.py
|
Python
|
mit
| 535
|
# coding=utf-8
import random
lista = []
for x in range(10):
numero = random.randint(1, 100)
if x == 0:
maior, menor = numero, numero
elif numero > maior:
maior = numero
elif numero < menor:
menor = numero
lista.append(numero)
lista.sort()
print(lista)
print("Maior: %d" % maior)
print("Menor: %d" % menor)
|
renebentes/Python4Zumbis
|
Exercícios/Lista IV/questao01.py
|
Python
|
mit
| 350
|
import pyb
import micropython
micropython.alloc_emergency_exception_buf(100)
led1 = pyb.LED(4) # 4 = Blue
led2 = pyb.LED(3) # 3 = Yellow
pin = pyb.Pin('SW', pyb.Pin.IN, pull=pyb.Pin.PULL_UP)
def callback(line):
led1.toggle()
if pin.value(): # 1 = not pressed
led2.off()
else:
led2.on()
ext = pyb.ExtInt(pin, pyb.ExtInt.IRQ_RISING_FALLING, pyb.Pin.PULL_UP, callback)
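# At the REPL the interrupt can be paused and resumed while exercising the
# callback above (a sketch; assumes a pyboard prompt):
#   ext.disable()
#   ext.enable()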
|
dhylands/upy-examples
|
extint_toggle.py
|
Python
|
mit
| 401
|
from webalchemy import config
FREEZE_OUTPUT = 'webglearth.html'
class Earth:
def __init__(self):
self.width = window.innerWidth
self.height = window.innerHeight
# Earth params
self.radius = 0.5
self.segments = 64
self.rotation = 6
self.scene = new(THREE.Scene)
self.camera = new(THREE.PerspectiveCamera, 45, self.width / self.height, 0.01, 1000)
self.camera.position.z = 1.5
self.renderer = new(THREE.WebGLRenderer)
self.renderer.setSize(self.width, self.height)
self.scene.add(new(THREE.AmbientLight, 0x333333))
self.light = new(THREE.DirectionalLight, 0xffffff, 1)
self.light.position.set(5, 3, 5)
self.scene.add(self.light)
self.sphere = self.createSphere(self.radius, self.segments)
self.sphere.rotation.y = self.rotation
self.scene.add(self.sphere)
self.clouds = self.createClouds(self.radius, self.segments)
self.clouds.rotation.y = self.rotation
self.scene.add(self.clouds)
self.stars = self.createStars(90, 64)
self.scene.add(self.stars)
self.mx = 0
self.my = 0
self.mdx = 0
self.mdy = 0
self.angx = 0
self.angy = 0
self.renderer.domElement.onmouseup = self.wrap(self, self.mouseup)
self.renderer.domElement.onmousedown = self.wrap(self, self.mousedown)
def mousemove(self, e):
self.mdx += e.screenX - self.mx
self.mdy += e.screenY - self.my
self.mx = e.screenX
self.my = e.screenY
def mouseup(self, e):
self.renderer.domElement.onmousemove = None
def mousedown(self, e):
self.mx = e.screenX
self.my = e.screenY
self.renderer.domElement.onmousemove = self.wrap(self, self.mousemove)
def wrap(self, object, method):
def wrapper():
return method.apply(object, arguments)
return wrapper
def render(self):
if Math.abs(self.mdx) > 1.1 or Math.abs(self.mdy) > 1.1:
self.angx -= self.mdx/5000
self.mdx -= self.mdx/20
if Math.abs(self.angy + self.mdy/5000) < 3.14/2:
self.angy += self.mdy/10000
self.mdy -= self.mdy/20
self.camera.position.x = 1.5 *Math.sin(self.angx) *Math.cos(self.angy)
self.camera.position.z = 1.5 *Math.cos(self.angx) *Math.cos(self.angy)
self.camera.position.y = 1.5 *Math.sin(self.angy)
self.camera.lookAt(self.scene.position)
self.sphere.rotation.y += 0.0005
self.clouds.rotation.y += 0.0004
requestAnimationFrame(self.wrap(self, self.render))
self.renderer.render(self.scene, self.camera)
def createSphere(self, radius, segments):
geometry = new(THREE.SphereGeometry, radius, segments, segments)
material = new(THREE.MeshPhongMaterial, {
'map': THREE.ImageUtils.loadTexture('static/lowres_noclouds.jpg'),
'bumpMap': THREE.ImageUtils.loadTexture('static/lowres_elevbump.jpg'),
'bumpScale': 0.005,
'specularMap': THREE.ImageUtils.loadTexture('static/lowres_water.png'),
'specular': new(THREE.Color, 'grey')
})
return new(THREE.Mesh, geometry, material)
def createClouds(self, radius, segments):
geometry = new(THREE.SphereGeometry, radius + 0.005, segments, segments)
material = new(THREE.MeshPhongMaterial, {
'map': THREE.ImageUtils.loadTexture('static/lowres_fairclouds.png'),
'transparent': true
})
return new(THREE.Mesh, geometry, material)
def createStars(self, radius, segments):
geometry = new(THREE.SphereGeometry, radius, segments, segments)
material = new(THREE.MeshBasicMaterial, {
'map': THREE.ImageUtils.loadTexture('static/lowres_starfield.png'),
'side': THREE.BackSide
})
return new(THREE.Mesh, geometry, material)
class ThreeDEarth:
include = ['https://rawgithub.com/mrdoob/three.js/master/build/three.min.js']
config = config.from_object(__name__)
def initialize(self, **kwargs):
self.rdoc = kwargs['remote_document']
self.rdoc.body.style(
margin=0,
overflow='hidden',
backgroundColor='#000'
)
self.earth = self.rdoc.new(Earth)
self.rdoc.body.append(self.earth.renderer.domElement)
self.rdoc.stylesheet.rule('a').style(color='#FFF')
e = self.rdoc.body.element('p')
e.prop.innerHTML = "Powered by <a href='https://github.com/skariel/webalchemy'>Webalchemy</a><br/>" +\
"Adapted from <a href='https://github.com/turban/webgl-earth/blob/master/index.html'>this</a><br/>" +\
"Pure Python source is <a href='https://github.com/skariel/webalchemy/blob/master/examples/three_d_earth/three_d_earth.py'>here</a>"
e.style(
color='#FFF',
position='absolute',
left='10px', top='10px'
)
self.earth.render()
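# Serving the app is left to webalchemy's runner; other examples in the
# framework follow roughly this pattern (a sketch; the exact entry point
# may differ between webalchemy versions):
#   from webalchemy import server
#   server.run(ThreeDEarth)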
|
skariel/webalchemy
|
examples/three_d_earth/three_d_earth.py
|
Python
|
mit
| 5,235
|
"""cdbgui.py
Developers: Christina Hammer, Noelle Todd
Last Updated: August 19, 2014
This file contains a class version of the interface, in an effort to
make a program with no global variables.
"""
from datetime import datetime, timedelta, date
from tkinter import *
from tkinter import ttk, messagebox
from cdbifunc2 import *
import cdbvolunteer
class allobjects:
"""This class attempts to contain ALL labels, entries, etc.,
so that there are no global variables.
"""
def __init__(self, volunteerID, volunteerName, bgcolor):
"""This function declares all variables that are used by
more than one function.
"""
self.volID = volunteerID #the id of the volunteer who logged in
self.volunteerName = volunteerName
self.bgcolor = bgcolor
#Variables used later on
self.cursel = 0
self.selectedVisit = 0
self.id_list = []
self.mem_list = []
self.clientlist = list_people()
self.visitDict = {}
#holds entryboxes for family members
self.memDict = {}
self.info = {}
self.addmemberON = False #checks if member boxes have already been added
#dictionaries/lists used for date entry
self.month_li = ["January", "February", "March", "April",
"May", "June", "July", "August", "September",
"October", "November", "December"]
self.month_day_dict = {"January":31, "February":29, "March":31,
"April":30, "May":31, "June":30, "July":31,
"August":31, "September":30, "October":31,
"November":30, "December":31}
self.month_int = {1:"January", 2:"February", 3:"March",
4:"April", 5:"May", 6:"June", 7:"July",
8:"August", 9:"September", 10:"October",
11:"November", 12:"December"}
self.int_month = {"January":1, "February":2, "March":3,
"April":4, "May":5, "June":6, "July":7,
"August":8, "September":9, "October":10,
"November":11, "December":12}
#customize colors/fonts
#This will connect to the database itself,
#and retrieve the colors from there.
#self.bgcolor = 'light blue' #'lavender'
#self.labfont = 'Helvetica'
#self.labBGcolor = 'gray10'
#self.labFGcolor = 'white'
#self.cliSearLabBG = 'Coral'
#self.cliSearLabFG = 'white'
#configuring window
self.ciGui=Tk()
        self.gridframe = Frame(self.ciGui)
        self.gridframe.grid()
self.ciGui.configure(background=self.bgcolor)
self.ciGui.title('Food Pantry Database')
#CLIENT SEARCH SETUP
self.cslabel = Label(self.gridframe,text='Client Search',
font=("Helvetica", 16),fg='white',bg='gray10')\
.grid(row=0,column=0,columnspan=2, sticky=W)
self.csblank = Label(self.gridframe, text=' ',
font=('Helvetica',10), bg=self.bgcolor)\
.grid(row=0,column=2,sticky=E)
#Name Searchbox
self.ns = StringVar()
self.nameSearchEnt = Entry(self.gridframe, cursor = 'shuttle',
textvariable=self.ns)
self.nameSearchEnt.grid(row=2,column=0)
self.nameSearchEnt.bind('<Key>',self.nameSearch)
self.searchButton = Button(self.gridframe, text='Search Clients',
command=self.nameSearch)
self.searchButton.grid(row=2, column=1)
#Client Listbox
self.client_listbox = Listbox(self.gridframe,height=10,width=40)
self.client_listbox.bind('<<ListboxSelect>>', self.displayInfo )
self.client_listbox.config(exportselection=0)
self.scrollb = Scrollbar(self.gridframe)
self.client_listbox.bind('<<ListboxSelect>>',self.displayInfo )
self.client_listbox.config(yscrollcommand=self.scrollb.set)
self.scrollb.config(command=self.client_listbox.yview)
self.client_listbox.grid(row=3, column=0, rowspan=5, columnspan=2)
self.scrollb.grid(row=3, column=1, rowspan=5, sticky=E+N+S)
self.firstSep = ttk.Separator(self.gridframe, orient='vertical')\
.grid(row=1,column=2,rowspan=40,sticky=NS)
self.NCButton = Button(self.gridframe, text='New Client',
command=self.newClientDisplay, width=25)\
.grid(row=9, column=0, columnspan=2)
#CLIENT INFORMATION SETUP
self.secondSep = ttk.Separator(self.gridframe, orient='horizontal')\
.grid(row=0,column=3,columnspan=40,sticky=EW)
self.cilabel = Label(self.gridframe, text='Client Information',
font=("Helvetica", 16),fg='white',bg='gray10')\
.grid(row=0,column=3,columnspan=12, sticky=W)
self.ciblank = Label(self.gridframe, text=' ',font=('Helvetica',10),
bg=self.bgcolor).grid(row=1,column=3,sticky=E)
#First name
self.fnv = StringVar()
self.fnlabel = Label(self.gridframe, text="First Name: ",
font=('Helvetica',12),bg=self.bgcolor)\
.grid(row=2, column=3,rowspan=2,sticky=E)
self.fname = Entry(self.gridframe, textvariable=self.fnv,bd=4)
self.fname.grid(row=2, column=4, rowspan=2, columnspan=1, sticky=W)
#Last name
self.lnv = StringVar()
self.lnlabel = Label(self.gridframe, text='Last Name: ',
font=('Helvetica',12),bg=self.bgcolor)\
.grid(row=2,column=5,rowspan=2, sticky=W)
self.lname = Entry(self.gridframe, textvariable=self.lnv,bd=4)
self.lname.grid(row=2,column=6, rowspan=2, columnspan=1, sticky=W)
#Phone
self.phv = StringVar()
self.phlabel = Label(self.gridframe, text='Phone: ',
font=('Helvetica',12),bg=self.bgcolor)\
.grid(row=2, column=7,rowspan=2, sticky=E)
self.phone = Entry(self.gridframe, textvariable=self.phv, bd=4)
self.phone.grid(row=2, column=8, columnspan=2, rowspan=2, sticky=W)
#Date of Birth
self.doblabel = Label(self.gridframe, text='Date of Birth: ',
font=('Helvetica',12),bg=self.bgcolor)\
.grid(row=4,column=3, rowspan=2, sticky=E)
self.mv = StringVar()
self.dv = StringVar()
self.yv = StringVar()
#dob month combobox
self.mob = ttk.Combobox(self.gridframe, width=10, state='readonly',
values=self.month_li, textvariable=self.mv)
self.mob.bind('<<ComboboxSelected>>', self.monthbox_select)
#dob day spinbox
self.dob = Spinbox(self.gridframe, from_=0, to=0,
textvariable=self.dv, width=5, bd=4)
#dob year spinbox
self.yob = Spinbox(self.gridframe, from_=1900, to=2500,
textvariable=self.yv, width=7, bd=4)
self.mob.grid(row=4, column=4, rowspan=2, sticky=W)
self.dob.grid(row=4, column=4, rowspan=2, sticky=E)
self.yob.grid(row=4, column=5, rowspan=2)
#Age
self.agev = StringVar()
self.avallabel = Label(self.gridframe, textvariable=self.agev,
font=('Helvetica',12),bg=self.bgcolor)\
.grid(row=4,column=6, rowspan=2)
#Date Joined
self.datejoinv = StringVar()
self.djlabel = Label(self.gridframe, text="Date Joined:",
font=('Helvetica',12), bg=self.bgcolor)\
.grid(row=4,column=7,rowspan=2, sticky=E)
self.djEntry = Entry(self.gridframe, textvariable=self.datejoinv,
bd=4).grid(row=4, column=8, rowspan=2)
#VISIT INFORMATION SETUP
self.thirdSep = ttk.Separator(self.gridframe, orient='horizontal')\
.grid(row=6,column=3,columnspan=40,sticky=EW)
self.vilabel = Label(self.gridframe,text='Visit Information',
font=("Helvetica", 16),fg='white', bg='gray10')\
.grid(row=6,column=3,columnspan=12, sticky=W)
self.datelab = Label(self.gridframe, text='Date: ',
font=('Helvetica',14), bg=self.bgcolor)\
.grid(row=7,column=3)
self.notelab = Label(self.gridframe, text='Notes:',
font=('Helvetica',14), bg=self.bgcolor)\
.grid(row=7,column=4)
self.vislab = Label(self.gridframe, text='Visitor: ',
font=('Helvetica',14),bg=self.bgcolor)\
.grid(row=7,column=7, padx=10)
self.vollab = Label(self.gridframe, text='Volunteer: ',
font=('Helvetica',14),bg=self.bgcolor)\
.grid(row=9, column=7, padx=10)
self.visit_listbox = Listbox(self.gridframe,height=4,width=15,font=12, bd=4)
self.visit_listbox.bind('<<ListboxSelect>>', self.displayVisit)
self.visit_listbox.config(exportselection=0)
self.visit_scroll = Scrollbar(self.gridframe)
self.visit_listbox.config(yscrollcommand=self.visit_scroll.set)
self.visit_scroll.config(command=self.visit_listbox.yview)
self.visit_listbox.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=W)
self.visit_scroll.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=E+N+S)
#Entry box for visit (when new visit is added)
self.visdatev = StringVar()
self.visitdate = Entry(self.gridframe,textvariable=self.visdatev,bd=4)
#self.visitdate.grid(row=8, column=3)
#visit notes
self.notv = StringVar()
self.notescv = Text(self.gridframe, state='disabled', width=50, height=4, bd=4, font='Helvetica')
self.vnotes_scroll = Scrollbar(self.gridframe)
self.notescv.config(yscrollcommand=self.vnotes_scroll.set)
self.vnotes_scroll.config(command=self.notescv.yview)
#visit notes
self.notescv.grid(row=8, column=4, columnspan=3, rowspan=4, sticky=W, padx=10)
self.vnotes_scroll.grid(row=8, column=4, rowspan=4, columnspan=3, sticky=E+N+S)
#visit visitor
self.visv = StringVar()
self.visitor = Entry(self.gridframe,textvariable=self.visv,
state='readonly',bd=4)
self.visitor.grid(row=8, column=7, rowspan=1, sticky=E, padx=10)
#visit volunteer
self.volv = IntVar()
self.volun = Entry(self.gridframe,textvariable=self.volv,bd=4,
state='readonly')
self.volun.grid(row=10, column=7, rowspan=1, padx=10)
#Extra blank label
self.blankLab2 = Label(self.gridframe, text=' ',
font=('Helvetica',10), bg=self.bgcolor)\
.grid(row=13,column=3, rowspan=2, sticky=E)
#Visit buttons
self.newVisit = Button(self.gridframe, text='New Visit', width=15,
command=self.newvisitf)
self.newVisit.grid(row=8, column=8, sticky=W)
self.editVisit = Button(self.gridframe, text='Edit Visit', width=15,
command=self.editvisitf)
self.editVisit.grid(row=9, column=8, sticky=W)
self.deleteVisit = Button(self.gridframe, text='Delete Visit', width=15,
command=self.deletevisitf)
self.deleteVisit.grid(row=10, column=8, sticky=W)
#records/updates visit
self.saveVisit = Button(self.gridframe, text='Save Visit', width=15,
command=self.recordVisit)
self.saveVisitE = Button(self.gridframe, text='Save Visit', width=15,
command=self.savevisitf)
#self.saveVisit.grid(row=8,column=8,sticky=W)
self.cancelVisit = Button(self.gridframe, text='Cancel', width=15,
command=self.cancelvisitf)
#self.cancelVisit.grid(row=9, column=8, sticky=W)
#HOUSEHOLD INFORMATION SETUP
self.fourthSep = ttk.Separator(self.gridframe, orient='horizontal')\
.grid(row=15,column=3,columnspan=40,sticky=EW)
self.hilabel = Label(self.gridframe,text='Household Information',
font=("Helvetica", 16),fg='white', bg='gray10')\
.grid(row=15,column=3,columnspan=12, sticky=W)
#blank line
self.hiblank = Label(self.gridframe, text=' ',font=('Helvetica',10),
bg=self.bgcolor).grid(row=16,column=3,sticky=E)
#street address
self.adv = StringVar()
self.adlab = Label(self.gridframe, text='Address: ',
font=('Helvetica',12), bg=self.bgcolor)\
.grid(row=17,column=3, rowspan=2, sticky=E)
self.address = Entry(self.gridframe,textvariable=self.adv,
width=40,bd=4)
self.address.grid(row=17, column=4,columnspan=2, rowspan=2)
#apartment
self.apv = StringVar()
self.aplab = Label(self.gridframe, text='Apt: ',font=('Helvetica',12),
bg=self.bgcolor).grid(row=17,column=6,
rowspan=2, sticky=E)
self.aptn = Entry(self.gridframe,textvariable=self.apv,width=10,bd=4)
self.aptn.grid(row=17,column=7, rowspan=2, sticky=W)
#city
self.ctyv = StringVar()
self.cilab = Label(self.gridframe, text='City: ',font=('Helvetica',12),
bg=self.bgcolor).grid(row=17,column=8, rowspan=2, sticky=E)
self.city = Entry(self.gridframe,textvariable=self.ctyv,bd=4)
self.city.grid(row=17,column=9, rowspan=2, sticky=W)
#state
self.stav = StringVar()
self.stlab = Label(self.gridframe, text='State: ',
font=('Helvetica',12), bg=self.bgcolor)\
.grid(row=20,column=3, rowspan=2, sticky=E)
self.state = Entry(self.gridframe,textvariable=self.stav,bd=4)
self.state.grid(row=20,column=4, rowspan=2)
#zip
self.zpv = StringVar()
self.zilab = Label(self.gridframe, text='Zip Code: ',font=('Helvetica',12),
bg=self.bgcolor).grid(row=20, column=5, rowspan=2, sticky=E)
self.zipc = Entry(self.gridframe,textvariable=self.zpv,bd=4)
self.zipc.grid(row=20, column=6, rowspan=2)
#Date Verified
self.dverilabel = Label(self.gridframe, text='Last Verified: ',
font=('Helvetica',12),bg=self.bgcolor)\
.grid(row=20,column=7, rowspan=2, sticky=E)
self.mvv = StringVar()
self.dvv = StringVar()
self.yvv = StringVar()
self.mvv.set("")
self.dvv.set("")
self.yvv.set("")
#for month entry
self.mov = ttk.Combobox(self.gridframe, width=10, state='readonly',
values=self.month_li, textvariable=self.mvv)
#self.mob.bind('<<ComboboxSelected>>', self.monthbox_select)
#for day entry
self.dov = Spinbox(self.gridframe, from_=0, to=0,
textvariable=self.dvv, width=5, bd=4)
#for year entry
self.yov = Spinbox(self.gridframe, from_=1900, to=2500,
textvariable=self.yvv, width=9, bd=4)
self.mov.grid(row=20, column=8, rowspan=2, sticky=E, padx=10)
self.dov.grid(row=20, column=9, columnspan=2, rowspan=2, padx=10, sticky=W)
self.yov.grid(row=20, column=10, rowspan=2, padx=10, sticky=W)
#formatting labels/objects
self.blankLab5 = Label(self.gridframe, text=' ',
font=('Helvetica',12), bg=self.bgcolor)\
.grid(row=23,column=3,sticky=E)
self.blankLab6 = Label(self.gridframe, text=' ',
font=('Helvetica',10), bg=self.bgcolor)\
.grid(row=25,column=3,sticky=E)
self.fifthsep = ttk.Separator(self.gridframe, orient='horizontal')\
.grid(row=27,column=3,columnspan=40,sticky=EW, pady=10)
#The following variables will be removed and re-gridded
#as the function of the interface changes.
#
#HOUSEHOLD MEMBERS SETUP
#These variables appear on the updateClientDisplay only
#
#info display widgets
self.adl = StringVar()
self.dispad = Label(self.gridframe,textvariable=self.adl,
font=('Helvetica',12),bg=self.bgcolor)
self.chil = StringVar()
self.dischil = Label(self.gridframe,textvariable=self.chil,
font=('Helvetica',12),bg=self.bgcolor)
self.sen = StringVar()
self.dissen = Label(self.gridframe,textvariable=self.sen,
font=('Helvetica',12),bg=self.bgcolor)
self.inf = StringVar()
self.disinf = Label(self.gridframe,textvariable=self.inf,
font=('Helvetica',12),bg=self.bgcolor)
self.tot = StringVar()
self.distot = Label(self.gridframe, textvariable=self.tot,
bg=self.bgcolor,font=('Helvetica',12))
self.houseSep = ttk.Separator(self.gridframe, orient='horizontal')
self.houseSep.grid(row=23,column=3,columnspan=40,sticky=EW)
self.housetitle = Label(self.gridframe,text='Household Members',
font=("Helvetica", 16),fg='white',bg='gray10')
self.housetitle.grid(row=23,column=3,columnspan=12, sticky=W)
#listbox of family members
self.family_listbox = Listbox(self.gridframe,height=5,width=35,font=12)
self.family_listbox.config(exportselection=0)
self.fam_scroll = Scrollbar(self.gridframe)
self.family_listbox.config(yscrollcommand=self.fam_scroll.set)
self.fam_scroll.config(command=self.family_listbox.yview)
self.family_listbox.grid(row=24, column=3, rowspan=3, columnspan=2, sticky=W)
self.fam_scroll.grid(row=24, column=4, rowspan=3, columnspan=1, sticky=E+N+S)
#family member buttons
self.addmemb = Button(self.gridframe, text='Add Member', width=14,
command=self.addMemberEntryBoxes)
self.addmemb.grid(row=24,column=5,sticky=E+N+S)
self.removmemb = Button(self.gridframe, text='Remove Member',width=14,
command=self.removeMemberConfirm)
self.removmemb.grid(row=25,column=5,sticky=E+N+S)
self.viewmemb = Button(self.gridframe, text='View Member',width=14,
command=self.runViewMember)
self.viewmemb.grid(row=26,column=5,sticky=E+N+S)
#update save/cancel buttons
self.saveB = Button(self.gridframe, text='Save Changes',
command=self.updateInfo,width=20)
self.saveB.grid(row=28, column=3, columnspan=2)
self.cancelB = Button(self.gridframe, text='Cancel Changes',
command=self.cancel_changes,width=20)
self.cancelB.grid(row=28, column=5, columnspan=2)
#NEW CLIENT DISPLAY WIDGETS
#These variables appear on the newClientDisplay only
#
self.addhhsep = ttk.Separator(self.gridframe, orient='horizontal')
self.addhhtitle = Label(self.gridframe,text='Add Household Members',
font=("Helvetica", 16),fg='white',bg='gray10')
#add members to new household variable
self.q = IntVar()
self.famNum = Entry(self.gridframe, textvariable=self.q)
self.entNum = Label(self.gridframe,
text='Total Family Members: ',
font=('Helvetica',10),bg=self.bgcolor)
self.famname = Label(self.gridframe, text='Name:',
font=('Helvetica',10),bg=self.bgcolor)
self.famfn = Label(self.gridframe, text='First Name:',
font=('Helvetica',10),bg=self.bgcolor)
self.famln = Label(self.gridframe, text='Last Name:',
font=('Helvetica',10),bg=self.bgcolor)
self.famdob = Label(self.gridframe, text='Date of Birth:',
font=('Helvetica',10),bg=self.bgcolor)
self.famphone = Label(self.gridframe, text='Phone',
font=('Helvetica',10),bg=self.bgcolor)
self.fammon = Label(self.gridframe,text='mm',
font=('Helvetica',10),bg=self.bgcolor)
self.famday = Label(self.gridframe,text='dd',
font=('Helvetica',10),bg=self.bgcolor)
self.famyear = Label(self.gridframe,text='yyyy',
font=('Helvetica',10),bg=self.bgcolor)
self.newMembersB = Button(self.gridframe, text='Add Members',
command=self.familyEntryBoxes)
self.newClientSave = Button(self.gridframe, text='Save Client',
command=self.addNew)
self.cancelNewB = Button(self.gridframe, text='Cancel New Entry',
command=self.updateClientDisplay)
        #^Essentially re-selects client
        #MENU SETUP
        self.menubar = Menu(self.ciGui)
self.volmenu = Menu(self.menubar, tearoff=0)
self.volmenu.add_command(label='Log Off', command=self.logoff)
self.volmenu.add_command(label='Configure Color', command=self.configure_background)
self.menubar.add_cascade(label='Volunteers',menu=self.volmenu)
self.optionsmenu = Menu(self.menubar,tearoff=0)
self.optionsmenu.add_command(label='Quit', command=self.quitprogram)
self.optionsmenu.add_command(label='Change Instructions', command=self.edit_instructions)
self.menubar.add_cascade(label='Options',menu=self.optionsmenu)
#Reports Menu
self.reportmenu = Menu(self.menubar,tearoff=0)
self.reportmenu.add_command(label='View Weekly Report',
command=self.weeklyReport)
self.reportmenu.add_command(label='View Monthly Report',
command=self.monthlyReport)
self.reportmenu.add_command(label='View Yearly Report',
command=self.yearlyReport)
self.menubar.add_cascade(label='Reports',menu=self.reportmenu)
#add menubar to grid
self.ciGui.config(menu=self.menubar)
#instructive labels
self.instructions = Text(self.gridframe, bd=4, width=20, font=('Helvetica', 12), wrap=WORD)
self.instructions.insert('1.0', "Questions to Ask:\n" +\
"\n1. Has anything changed in your "+\
"family composition?\n"+\
"\nReminders:\n"+\
"\n1. If family has infants, mention"+\
" the Alight Care Center offers "+\
"clothing and diapers.\n"+\
"\n2. If family has children, see if"+\
" there are any milk cards in the "+\
"drawer, and offer them one if "+\
"there is."
)
self.i_scroll = Scrollbar(self.gridframe)
self.instructions.config(yscrollcommand=self.i_scroll.set)
self.i_scroll.config(command=self.instructions.yview)
self.instructions.grid(row=14, column=0, rowspan=20, columnspan=2, padx=10)
self.i_scroll.grid(row=14, column=0, rowspan=20, columnspan=2, sticky=E+N+S, padx=10)
#Sets some sizing stuff
for i in range(0, 10):
self.ciGui.columnconfigure(i, weight=1, minsize=10)
for i in range(0, 30):
self.ciGui.rowconfigure(i, weight=1, minsize=10)
for i in range(7, 11):
self.ciGui.rowconfigure(i, weight=1, minsize=20)
self.ciGui.rowconfigure(18, weight=1, minsize=25)
#mainloop
self.ciGui.mainloop()
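    # Constructing the class builds the window and blocks in mainloop();
    # a launch sketch (hypothetical values, normally supplied by the
    # volunteer login screen):
    #   allobjects(1, 'Jane Doe', 'light blue')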
#DISPLAY SCREENS
def newClientDisplay(self):
"""This function will clear all irrelevant widgets, and
grid all widgets necessary for the new client screen.
"""
#clear widgets
self.clearEntries()
#grid widgets
self.addhhsep.grid(row=23,column=3,columnspan=40,sticky=EW, pady=10)
self.addhhtitle.grid(row=23,column=3,columnspan=12, sticky=W, pady=10)
self.famNum.grid(row=24, column=4)
self.entNum.grid(row=24, column=3)
self.newMembersB.grid(row=24, column=5)
self.newClientSave.grid(row=40,column=3, columnspan=2)
self.cancelNewB.grid(row=40, column=5, columnspan=2)
self.newvisitf()
self.saveVisit.grid_forget()
self.cancelVisit.grid_forget()
return
def updateClientDisplay(self):
"""This function will clear all irrelevant widgets and
grid all widgets necessary for the updating-client screen.
"""
#clear widgets
self.clearEntries()
#grid widgets
self.family_listbox.grid(row=24, column=3, rowspan=3, columnspan=2, sticky=W)
self.fam_scroll.grid(row=24, column=4, rowspan=3, columnspan=1, sticky=E+N+S)
self.addmemb.grid(row=24,column=5,sticky=E+N+S)
self.removmemb.grid(row=25,column=5,sticky=E+N+S)
self.viewmemb.grid(row=26,column=5,sticky=E+N+S)
self.housetitle.grid(row=23,column=3,columnspan=12, sticky=W)
self.houseSep.grid(row=23,column=3,columnspan=40,sticky=EW)
self.saveB.grid(row=28, column=3, columnspan=2)
self.cancelB.grid(row=28, column=5, columnspan=2)
return
#DISPLAY FOR SELECTED CLIENTS
def displayInfo(self, *args):
"""This function displays the information for a client that
has been selected in the client_listbox.
"""
try:
self.cursel = int(self.id_list[self.client_listbox.curselection()[0]])
info = select_client(self.cursel)
self.info = info
self.updateClientDisplay()
self.displayHouseholdMem(info)
self.displayVisitInfo(info)
self.displayClientInfo(info)
self.displayHouseholdInfo(info)
except IndexError:
pass
return
def displayNewInfo(self, client_id):
"""This function displays the information for a specified
client whose id is client_id.
"""
cursel = client_id
info = select_client(cursel)
self.info = info
self.updateClientDisplay()
self.displayHouseholdMem(info)
self.displayVisitInfo(info)
self.displayClientInfo(info)
self.displayHouseholdInfo(info)
return
#DISPLAY INFORMATION FUNCTIONS
def displayClientInfo(self, info, *args):
"""This function displays the client information.
"""
#retrieve info from dictionary
visitor = info["visitor"]
#set variables
self.fnv.set(visitor.firstname)
self.lnv.set(visitor.lastname)
month = self.month_int[visitor.dob.month]
self.mv.set(month)
self.dv.set(visitor.dob.day)
self.yv.set(visitor.dob.year)
self.phv.set(visitor.phone)
#parse and set datejoined
joined = str(visitor.dateJoined.month) + "/" +\
str(visitor.dateJoined.day) + "/" +\
str(visitor.dateJoined.year)
self.datejoinv.set(joined)
#set age
        self.agev.set("Age: " + str(age(visitor.dob)))
return
def displayHouseholdInfo(self, info, *args):
"""This function displays the household information for
a client.
"""
#retrieve info from dictionary
house = info["household"]
#set variables
self.adv.set(house.street)
self.apv.set(house.apt)
self.ctyv.set(house.city)
self.stav.set(house.state)
self.zpv.set(house.zip)
#check dateVerified, and set variables accordingly
if house.dateVerified != None:
month = house.dateVerified.month
self.mvv.set(self.month_int[month])
self.dvv.set(house.dateVerified.day)
self.yvv.set(house.dateVerified.year)
        #parse and set label variables for all members
        agegroups = info["agegroup_dict"]
        self.adl.set("Adults: " + str(agegroups["adults"]))
        self.chil.set("Children: " + str(agegroups["children"]))
        self.sen.set("Seniors: " + str(agegroups["seniors"]))
        self.inf.set("Infants: " + str(agegroups["infants"]))
        self.tot.set("Total: " + str(agegroups["total"]))
#grid family member labels
self.dispad.grid(row=22,column=3,sticky=W, pady=10)
self.dischil.grid(row=22,column=4,sticky=W)
self.dissen.grid(row=22,column=5,sticky=W)
self.disinf.grid(row=22,column=6,sticky=W)
self.distot.grid(row=22,column=7,sticky=W)
return
def displayVisitInfo(self, info, *args):
"""This function display the visit information for a client.
"""
self.clearVisits()
self.visitDict = {}
visitor = info["visitor"]
name = str(visitor.firstname)+ " " +str(visitor.lastname)
self.visv.set(name)
#visit info
visits = info["visit_list"]
        if visits:
vdatelabs = []
vnlabs = []
vvisitors = []
vvols = []
vids = []
for v in visits:
d=str(v.date.month)+'/'+str(v.date.day)+'/'+str(v.date.year)
n=v.notes
vi=v.visitor
vol=v.volunteer
vid=v.visitID
vdatelabs.append(d)
vnlabs.append(n)
vvisitors.append(vi)
vvols.append(vol)
vids.append(vid)
#set variables to display first visit
self.visv.set(vvisitors[0])
self.volv.set(vvols[0])
self.notv.set(vnlabs[0])
self.notescv.config(state='normal')
self.notescv.insert('1.0', vnlabs[0])
self.notescv.config(state='disabled')
#save lists in dictionary
self.visitDict['dates'] = vdatelabs
self.visitDict['notes'] = vnlabs
self.visitDict['visitors'] = vvisitors
self.visitDict['volunteers'] = vvols
self.visitDict['ids'] = vids
for i in range(0, len(vdatelabs)):
self.visit_listbox.insert(i, vdatelabs[i])
self.visit_listbox.selection_set(0)
def displayVisit(self, *args):
"""This function will display the data for a visit when
a visit date is selected.
"""
try:
self.notescv.config(state='normal')
self.notescv.delete('1.0', END)
datev = int(self.visit_listbox.curselection()[0])
self.selectedVisit = datev
n = self.visitDict['notes']
vi = self.visitDict['visitors']
vol = self.visitDict['volunteers']
self.visv.set(vi[datev])
self.volv.set(vol[datev])
self.notv.set(n[datev])
notes = str(self.notv.get())
self.notescv.insert('1.0', notes)
self.notescv.config(state='disabled')
except IndexError:
pass
def displayHouseholdMem(self, info, *args):
"""This function displays the household information for a client.
"""
self.family_listbox.delete(0,END)
a=[]
del self.mem_list[:]
for member in info["member_list"]:
self.mem_list.append(member.id)
            s = 'Age: ' + str(age(member.dob))
            a.append((member.firstname, member.lastname, s))
for i in range(len(a)):
self.family_listbox.insert(i,a[i])
#DISPLAY EXTRA ENTRY BOXES FOR ADDITIONAL FAMILY MEMBERS
#BUG: WHEN Add Member IS PRESSED MORE THAN ONCE, EXTRA
#BOXES HANG AROUND, AND ARE NEVER CLEARED
def familyEntryBoxes(self, *args):
"""This function generates entry boxes for adding new family members.
The entry boxes are saved in list form and added to the dictionary
memDict.
"""
#clears any boxes already displayed
self.clearFamily()
try:
n = int(self.q.get())
except ValueError:
return
#add instructive labels to grid
self.famfn.grid(row=25,column=3)
self.famln.grid(row=25,column=4)
self.famdob.grid(row=25,column=5)
self.famphone.grid(row=25,column=8)
#create lists
fnames = []
lnames = []
mm = []
dd = []
yy = []
phnum = []
#create entry boxes, grid them, and append them to a list
for i in range(0, n):
fname = Entry(self.gridframe)
fname.grid(row=26+i, column=3)
fnames.append(fname)
lname = Entry(self.gridframe)
lname.grid(row=26+i, column=4)
lnames.append(lname)
month = ttk.Combobox(self.gridframe, width=12, state='readonly',
values=self.month_li)
#month.bind('<<ComboboxSelected>>', self.monthbox_select)
month.grid(row=26+i, column=5)
mm.append(month)
day = Spinbox(self.gridframe, from_=0, to=0, width=5)
day.grid(row=26+i, column=6)
dd.append(day)
year = Spinbox(self.gridframe, from_=1900, to=2500, width=7)
year.grid(row=26+i, column=7)
yy.append(year)
phone = Entry(self.gridframe)
phone.grid(row=26+i, column=8)
phnum.append(phone)
#add all lists to dictionary
self.memDict["first"] = fnames
self.memDict["last"] = lnames
self.memDict["mm"] = mm
self.memDict["dd"] = dd
self.memDict["yy"] = yy
self.memDict["phone"] = phnum
def addMemberEntryBoxes(self, *args):
"""This function generates entry boxes for adding new family members.
The entry boxes are saved in list form and added to the dictionary
memDict.
"""
if self.addmemberON == True:
pass
else:
#add instructive labels to grid
self.famfn.grid(row=24,column=6) #, sticky=NE)
self.famln.grid(row=24,column=8) #, sticky=NE)
self.famdob.grid(row=25,column=6)
self.famphone.grid(row=26,column=6)
#create entry boxes, grid them, and append them to a list
#first name
self.fname = Entry(self.gridframe)
self.fname.grid(row=24, column=7, sticky=W)
self.memDict["first"]=[self.fname]
#last name
self.lname = Entry(self.gridframe)
self.lname.grid(row=24, column=9, sticky=W)
self.memDict["last"]=[self.lname]
#dob: month
self.month = ttk.Combobox(self.gridframe, width=12, state='readonly',
values=self.month_li)
#self.month.bind('<<ComboboxSelected>>', self.monthbox_select)
self.month.grid(row=25, column=7, sticky=W)
self.memDict["mm"]=[self.month]
#dob: day
self.day = Spinbox(self.gridframe, from_=0, to=0, width=5)
self.day.grid(row=25, column=8, sticky=W)
self.memDict["dd"]=[self.day]
#dob: year
self.year = Spinbox(self.gridframe, from_=1900, to=2500, width=7)
self.year.grid(row=25, column=9, sticky=W)
self.memDict["yy"]=[self.year]
#phone
self.phone = Entry(self.gridframe)
self.phone.grid(row=26, column=7, sticky=W)
self.memDict["phone"]=[self.phone]
#self.addmemberON = True
#CLEAR WIDGETS FUNCTIONS
def clearVisits(self):
"""This function clears the entry boxes/visit notes
used for visits.
"""
self.visit_listbox.delete(0, END)
self.visv.set("")
self.volv.set("")
self.notv.set("")
self.notescv.config(state='normal')
self.notescv.delete('1.0', END)
self.notescv.config(state='disabled')
visitob = [self.visit_listbox, self.visit_scroll, self.visitdate,
self.newVisit, self.editVisit, self.deleteVisit,
self.saveVisit, self.saveVisitE, self.cancelVisit]
for ob in visitob:
ob.grid_forget()
self.visit_listbox.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=W)
self.visit_scroll.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=E+N+S)
self.newVisit.grid(row=8, column=8, sticky=W)
self.editVisit.grid(row=9, column=8, sticky=W)
self.deleteVisit.grid(row=10, column=8, sticky=W)
def clearFamily(self):
#forgets additional family members
self.family_listbox.delete(0, END)
try:
mfname = self.memDict["first"]
mlname = self.memDict["last"]
mm = self.memDict["mm"]
dd = self.memDict["dd"]
yy = self.memDict["yy"]
phnum = self.memDict["phone"]
easylist = [mfname, mlname, mm, dd,
yy, phnum]
for i in range(0, 6):
for j in range(0, len(easylist[i])):
easylist[i][j].grid_forget()
for i in range(0, 6):
easylist[i] = []
self.memDict = {}
except KeyError:
pass
def clearEntries(self):
"""This function clears the entry boxes that will never be
removed from the display.
"""
allvaries = [self.fnv, self.lnv, self.phv, self.mv, self.dv, self.yv,
self.adv, self.apv, self.q, self.agev,
self.notv, self.volv, self.visv, self.adl, self.chil,
self.sen, self.inf, self.tot, self.datejoinv, self.mvv,
self.dvv, self.yvv]
#Clears the entryboxes
for i in range(0, len(allvaries)):
allvaries[i].set("")
#sets defaulted entries
today = datetime.now()
todaystr = str(today.month)+'/'+str(today.day)+\
'/'+str(today.year)
#self.visdatev.set(todaystr)
self.datejoinv.set(todaystr)
self.ctyv.set("Troy")
self.stav.set("NY")
self.zpv.set(12180)
#new client stuff
allforgets = [self.family_listbox,
self.fam_scroll, self.addmemb, self.removmemb,
self.viewmemb, self.housetitle, self.houseSep, self.saveB,
self.cancelB, self.dispad, self.dischil, self.dissen,
self.disinf, self.distot, self.addhhsep, self.addhhtitle,
self.famNum, self.entNum, self.newMembersB,
self.newClientSave, self.cancelNewB, self.famname,
self.famfn, self.famln, self.famdob, self.famphone,
self.fammon, self.famday, self.famyear]
for i in range(0, len(allforgets)):
allforgets[i].forget()
allforgets[i].grid_forget()
#forgets additional family members
#self.family_listbox.delete(0, END)
self.clearFamily()
#forgets previous visit notes
self.clearVisits()
self.visitDict = {}
def monthbox_select(self, *args):
"""This function is called when a month is selected from the
month combobox. It will look up the month in the month_day_dict,
and assign the right number of days to the "dob" spinbox.
"""
month = self.mv.get()
days = self.month_day_dict[month]
self.dob.config(from_=1, to=days)
return
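        # e.g. selecting "February" re-ranges the day spinbox to 1..29,
        # since month_day_dict["February"] == 29.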
#visit buttons
def newvisitf(self):
"""This function will clear unnecessary widgets, add an entrybox
for the date, and prepopulate the date, volunteer, and visitor fields.
"""
#clear Notes, Vol, & Visitor
self.visit_listbox.grid_forget()
self.visit_scroll.grid_forget()
self.newVisit.grid_forget()
self.editVisit.grid_forget()
self.deleteVisit.grid_forget()
#set date of visit to today
today = datetime.now()
tstr = str(today.month) + "/" + str(today.day) + "/" + str(today.year)
self.visdatev.set(tstr)
self.visitdate.grid(row=8, column=3)
#prepopulate volunteer
self.volv.set(self.volunteerName)
#prepopulate visitor (add test to see if this exists, in case of newclient)
self.notescv.config(state='normal')
self.notescv.delete('1.0', END)
self.saveVisit.grid(row=8, column=8, sticky=W)
self.cancelVisit.grid(row=9, column=8, sticky=W)
def editvisitf(self):
"""This function sets up a display identical to the "new visit"
display, but the date, visitor, notes, and volunteer are all
prepopulated with information from the database.
"""
#gridding
self.visit_listbox.grid_forget()
self.visit_scroll.grid_forget()
self.newVisit.grid_forget()
self.editVisit.grid_forget()
self.deleteVisit.grid_forget()
#set volunteer from database
self.volv.set(self.visitDict['volunteers'][self.selectedVisit])
#set visitor from database
self.visv.set(self.visitDict['visitors'][self.selectedVisit])
#set visdatev to Visit Date from database
vdate = self.visitDict['dates'][self.selectedVisit]
self.visdatev.set(vdate)
self.visitdate.grid(row=8, column=3)
self.notescv.config(state='normal')
self.saveVisitE.grid(row=8, column=8, sticky=W)
self.cancelVisit.grid(row=9, column=8, sticky=W)
def cancelvisitf(self):
"""This function will cancel a visit/changes to a visit,
and return to the normal visit display.
"""
self.clearVisits()
d = self.visitDict["dates"]
for i in range(0, len(d)):
self.visit_listbox.insert(i, d[i])
self.visit_listbox.selection_set(0)
self.displayVisit()
def savevisitf(self):
"""this will connect to Update Visit"""
try:
notes = str(self.notescv.get('1.0', END))
d = str(self.visdatev.get())
da = d.split('/')
dat = date(month=int(da[0]), day=int(da[1]), year=int(da[2]))
        except (ValueError, IndexError):
            self.error_popup("Check the visit date!")
            return
idlist = self.visitDict['ids']
vid = idlist[self.selectedVisit]
update_vis(vid, dat, notes)
#refresh screen
self.clearVisits()
pid = self.cursel
info = select_client(pid)
self.displayVisitInfo(info)
def deletevisitf(self):
"""This function will delete the selected visit, first asking
the user to confirm the action, and will update the visit display
to reflect the change. This function connects to the "delete visit"
button.
"""
conf = messagebox.askquestion(
title='Confirm Delete',
message='Are you sure you want to delete this visit?')
if conf == 'yes':
idlist = self.visitDict['ids']
vid = idlist[self.selectedVisit]
remove_visit(vid)
#refresh screen
self.clearVisits()
pid = self.cursel
info = select_client(pid)
self.displayVisitInfo(info)
return
else:
return
def cancel_changes(self):
"""This function will clear the display and refill it with
the selected client's information from the database.
"""
self.updateClientDisplay()
self.displayInfo()
return
def quitprogram(self):
"""This function safely closes the database and
interface window.
"""
quit_session()
self.ciGui.destroy()
return
def logoff(self):
"""This function closes the database and interface window,
and returns to the volunteer login page.
"""
quit_session()
self.ciGui.destroy()
vo = cdbvolunteer.VolunteerDisplay()
return
def monthlyReport(self):
generate_monthly_report()
return
def yearlyReport(self):
generate_yearly_report()
return
def weeklyReport(self):
generate_weekly_report()
return
def error_popup(self, errmessage):
"""This function implements a simple pop-up window to warn user
about bad data entry.
"""
conf = messagebox.showerror(title='Error', message=errmessage)
def recordVisit(self):
"""This function will insert a new visit, clear old visit
display info, and reset the visit display.
"""
#inserts new visit
try:
vol_id = self.volID #int(self.volv.get())
except ValueError:
self.error_popup("Check volunteer id")
return
#get visit date
try:
dv = (str(self.visdatev.get())).split('/')
dvm = int(dv[0])
dvd = int(dv[1])
dvy = int(dv[2])
vdate = date(year=dvy, month=dvm, day=dvd)
except ValueError:
self.error_popup("Check visit date field!\n Enter: MM/DD/YYYY")
return
#get visit notes
try:
note = self.notescv.get("1.0", END)
except ValueError:
self.error_popup("Uh, oh! Better check the visit info!")
return
#create visitData object, and call function to record new visit
visitInfo = visitData(vol_id, visitDate=vdate, notes=note)
new_visit(self.cursel, visitInfo)
#clears old visit notes
self.clearVisits()
#refreshes visit note display
info = select_client(self.cursel)
self.displayVisitInfo(info)
#"Get All Input and Test It" functions
def getVisitorInput(self, ctype, cID=None):
"""This function tests all of the data for the visitor
entry boxes and returns an object.
"""
#Error checking for visitor's name and phone
try:
fname = str(self.fnv.get())
except ValueError:
self.error_popup("Check visitor's first name!")
return
try:
lname = str(self.lnv.get())
except ValueError:
self.error_popup("Check visitor's last name!")
return
try:
phnum = str(self.phv.get())
except ValueError:
self.error_popup("Check visitor's phone number!")
return
#Error checking for visitor's DOB
try:
month = str(self.mv.get())
dm = self.int_month[month]
        except (ValueError, KeyError):
self.error_popup("Check visitor's month of birth!")
return
try:
dd = int(self.dv.get())
except ValueError:
self.error_popup("Check visitor's day of birth!")
return
try:
dy = int(self.yv.get())
except ValueError:
self.error_popup("Check visitor's year of birth!")
return
try:
DOB = date(year=dy, month=dm, day=dd)
except ValueError:
self.error_popup("Was an invalid day of birth chosen?")
return
#Error checking for datejoined
try:
dj = (str(self.datejoinv.get())).split('/')
djm = int(dj[0])
djd = int(dj[1])
djy = int(dj[2])
datejoined = date(year=djy, month=djm, day=djd)
except ValueError:
self.error_popup("Check Date Joined field!\n Enter: MM/DD/YYYY")
return
if ctype == "old":
cd = oldClientData(cID, firstname=fname, lastname=lname,
dob=DOB, phone=phnum, dateJoined=datejoined)
elif ctype == "new":
cd = newClientData(firstname=fname, lastname=lname,
dob=DOB, phone=phnum, dateJoined=datejoined)
return cd
def getMemberInput(self, clist):
"""This function tests all of the input data for members
entry boxes and returns a data object.
"""
#Error checking for datejoined
try:
dj = (str(self.datejoinv.get())).split('/')
djm = int(dj[0])
djd = int(dj[1])
djy = int(dj[2])
datejoined = date(year=djy, month=djm, day=djd)
except ValueError:
self.error_popup("Check Date Joined field!\n Enter: MM/DD/YYYY")
return
        #Check to see if any family-member entry boxes were created
if self.memDict != {}:
mfname = self.memDict["first"]
mlname = self.memDict["last"]
mm = self.memDict["mm"]
dd = self.memDict["dd"]
yy = self.memDict["yy"]
phnum = self.memDict["phone"]
for i in range(0, len(mfname)):
try:
fname = str(mfname[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s first name!")
return
try:
lname = str(mlname[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s last name!")
return
try:
phn = str(phnum[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)+"'s phone!")
return
try:
month = str(mm[i].get())
dm = self.int_month[month]
                except (ValueError, KeyError):
self.error_popup("Check family member "+str(i)\
+"'s month of birth!")
return
try:
dday = int(dd[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)\
+"'s day of birth!")
return
try:
dy = int(yy[i].get())
except ValueError:
self.error_popup("Check family member "+str(i)\
+"'s year of birth!")
return
try:
DOB = date(year=dy, month=dm, day=dday)
except ValueError:
self.error_popup("Was an invalid day of birth chosen for"\
" family member "+str(i)+"?")
return
ncd = newClientData(firstname=fname, lastname=lname,
dob=DOB, phone=phn, dateJoined=datejoined)
clist.append(ncd)
return clist
def getHouseholdInput(self):
"""This function tests all input for households in the household
entry boxes, and returns a data object.
"""
#get street address
try:
streeta = str(self.adv.get())
except ValueError:
self.error_popup("Check street address!")
return
#get city
try:
citya = str(self.ctyv.get())
except ValueError:
self.error_popup("Check city!")
return
#get state
try:
statea = str(self.stav.get())
except ValueError:
self.error_popup("Check state!")
return
#get zip code
try:
zipa = int(self.zpv.get())
except ValueError:
self.error_popup("Check zip code!")
return
#get apartment number
try:
apta = str(self.apv.get())
except ValueError:
self.error_popup("Check apartment number!")
return
#get date verified
if self.mvv.get() == self.dvv.get() == self.yvv.get() == "":
datev = None
else:
#get month
try:
month = str(self.mvv.get())
vm = self.int_month[month]
            except (ValueError, KeyError):
self.error_popup("Check month of date verified!")
return
#get day
try:
vd = int(self.dvv.get())
except ValueError:
self.error_popup("Check day of date verified!")
return
#get year
try:
vy = int(self.yvv.get())
except ValueError:
self.error_popup("Check day of date verified!")
return
#final date testing
try:
datev = date(year=vy, month=vm, day=vd)
except ValueError:
self.error_popup("Was an invalid day for date"\
+" verified chosen?")
return
houseInfo = houseData(street=streeta, city=citya, state=statea,
zip=zipa, apt=apta, dateVerified=datev)
return houseInfo
def getVisitInput(self):
"""This function tests all visit input and returns an object.
"""
#IMPLEMENT get volunteer id
try:
v = str(self.visdatev.get())
vd = v.split('/')
vdate = date(year=int(vd[2]), month=int(vd[0]), day=int(vd[1]))
        except ValueError:
            self.error_popup("Check the visit date!")
            return
#get visit notes
try:
note = self.notescv.get("1.0", END)
except ValueError:
note = None
visitInfo = visitData(Vol_ID=self.volID, visitDate=vdate, notes=note)
return visitInfo
def addNew(self):
"""This function adds a new household to the database.
#NOTE: we need to check checkboxes for dummy addresses
#(domestic violence address, and homeless address)
"""
#Test all input and create newClientData object for visitor
cd = self.getVisitorInput("new")
clist = [cd]
newClientInfo_list = self.getMemberInput(clist)
houseInfo = self.getHouseholdInput()
visitInfo = self.getVisitInput()
#send all objects to new_household function
client_id = new_household(houseInfo, visitInfo, newClientInfo_list)
self.cursel = client_id
#refresh list of clients
self.clientlist = list_people()
#refresh screen
self.displayNewInfo(client_id)
def updateInfo(self, *args):
"""This function will update the visitor's information, the household
information, and the visit information. It will also add family members,
but it will NOT update the family members.
"""
sel_id = self.cursel
nclist = []
cd = self.getVisitorInput("old", cID=sel_id)
oldClientInfo_list = [cd]
houseInfo = self.getHouseholdInput()
newClientInfo_list = self.getMemberInput(nclist)
update_all(sel_id, houseInfo, oldClientInfo_list, newClientInfo_list)
#refresh list of clients
self.clientlist = list_people()
#refresh screen
#self.updateClientDisplay()
self.displayNewInfo(self.cursel)
def nameSearch(self, *args):
"""This function returns relevant results
"""
#removes old listbox contents
self.client_listbox.delete(0, END)
del self.id_list[:]
#get user input
name = str(self.ns.get())
nameC = name.capitalize()
#name = str(self.ns.get()).capitalize()
#NOTE:Get lowercase names, too
c = self.clientlist
#find matching names in list
found_clients = []
for i in range(len(c)):
if name in c[i][0] or nameC in c[i][0]:
found_clients.append(c[i])
found_clients.sort()
#listing just the names and addresses of the people
x=[]
for i in range(len(found_clients)):
dobstr=str(found_clients[i][1].month)+\
"/"+str(found_clients[i][1].day)+\
'/'+str(found_clients[i][1].year)
a=str(found_clients[i][0])+" --"+dobstr
x.append(a)
self.id_list.append(found_clients[i][2])
#insert results into listbox
for i in range(len(x)):
self.client_listbox.insert(i,x[i])
return
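        # e.g. typing "smi" still matches "Smith ..." entries because the
        # capitalized variant "Smi" is tested alongside the raw input.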
def runViewMember(self):
"""This function displays the information for a client that
has been selected in the family_listbox.
"""
try:
n = self.family_listbox.curselection()[0]
self.cursel = self.mem_list[n]
info = select_client(self.cursel)
self.displayHouseholdMem(info)
self.displayVisitInfo(info)
self.displayClientInfo(info)
self.displayHouseholdInfo(info)
except IndexError:
pass
return
def removeMemberConfirm(self):
n = self.family_listbox.curselection()[0]
tbd = self.mem_list[n]
conf = messagebox.askquestion(
title='Confirm Removal',
message='Are you sure you want to delete this client?')
if conf == 'yes':
remove_client(tbd)
self.updateInfo()
return
else:
return
def configure_background(self, *args):
"""This function takes in a string and, if it matches a
valid color, will set the color of the interface to
the new color.
"""
import tkinter.colorchooser as cc
color = cc.askcolor()
color_name = color[1]
self.bgcolor = color_name
self.ciGui.configure(background=self.bgcolor)
#for i in range(0, len(self.alllabs)):
# self.alllabs[i].configure(bg=self.bgcolor)
self.cslabel.configure(self.gridframe, bg=self.bgcolor)
#for lab in self.alllabs:
# lab.config(background=self.bgcolor)
return
#print(color_name)
def edit_instructions(self):
"""This function will allow a user to change the instructions
in the textbox on the side.
"""
|
ChristinaHammer/Client_Database
|
cdbgui.py
|
Python
|
mit
| 60,561
|
"""This module contains functions to :meth:`~reload` the database, load work and
citations from there, and operate BibTeX"""
import importlib
import re
import textwrap
import warnings
import subprocess
from copy import copy
from collections import OrderedDict
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
from .collection_helpers import oget, oset, dget, dset, dhas
from .collection_helpers import consume, setitem, callable_get
from .models import DB, Year
from .dbindex import parse_varname, year_file
from .utils import import_submodules
from .utils import parse_bibtex
from .rules import ConvertDict, ConvertWork, old_form_to_new
from . import config
WORK_CACHE = {}
CITATION_CACHE = {}
GROUP_CACHE = {}
def load_work():
"""Load a list of all work in the database"""
return list(DB.work())
def load_citations():
"""Load a list of all citations"""
return list(DB.citations())
def load_places_vars():
"""Load all places from the database
It generates tuples with variable name and Place object
Doctest:
.. doctest::
>>> 'arXiv' in [varname for varname, _ in load_places_vars()]
True
"""
places = config.MODULES["places"]
for varname, varvalue in places.__dict__.items():
if isinstance(varvalue, places.Place):
yield varname, varvalue
def load_work_map(year):
"""Load all work from a given year file
It generates tuples with variable name and Work object
Doctest:
.. doctest::
>>> reload()
>>> sorted([(work.year, key) for key, work in load_work_map(2015)])
[(2014, 'murta2014a'), (2015, 'pimentel2015a')]
(2014, 'murta2014a') appears because it has an alias in 2015
"""
module = "y{}.py".format(year) if isinstance(year, int) else year
if module not in WORK_CACHE:
module = "y9999.py"
worklist = WORK_CACHE[module]
for key, work in worklist.__dict__.items():
if isinstance(work, worklist.Work):
oset(work, "metakey", key)
yield key, work
def work_by_varname(varname, year=None):
"""Load work by varname
Doctest:
.. doctest::
>>> reload()
>>> work = work_by_varname('murta2014a')
>>> work.year
2014
"""
if year is None:
year = int(parse_varname(varname, 2) or -1)
module = "y{}.py".format(year) if isinstance(year, int) else year
if module not in WORK_CACHE:
return
worklist = WORK_CACHE[module]
return getattr(worklist, varname, None)
def load_work_map_all_years():
"""Load all work from all years
Doctest:
.. doctest::
>>> reload()
>>> sorted([(work.year, key) for key, work in load_work_map_all_years()])
[(2008, 'freire2008a'), (2014, 'murta2014a'), (2014, 'murta2014a'), (2015, 'pimentel2015a')]
(2014, 'murta2014a') appears twice because it has an alias in 2015
"""
years = reversed(sorted(WORK_CACHE.keys()))
for year in years:
yield from load_work_map(year)
def _clear_db():
"""Erase database"""
from .approaches import APPROACHES
APPROACHES.clear()
importlib.invalidate_caches()
DB.clear_places()
DB.clear_work()
DB.clear_citations()
def _reload_work():
"""Reload work and create WORD_CACHE"""
for key, module in import_submodules(config.MODULES["work"]).items():
yname = key.split(".")[-1]
fname = (yname + ".py")
WORK_CACHE[fname] = module
if not yname.startswith("y") or not yname[1:].isdigit():
warnings.warn(
"Invalid name for file {}. Year discovery may fail".format(key)
)
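        # e.g. "y2014.py" lands in WORK_CACHE["y2014.py"]; a stray "misc.py"
        # is still cached but triggers the warning above.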
def reload(work_func=None):
"""Reload all the database
Doctest:
..doctest::
>>> reload()
>>> from snowballing.example.database.work.y2014 import murta2014a
>>> murta2014a.metakey
'murta2014a'
>>> from snowballing.example.database.work.y2015 import murta2014a as alias
>>> alias is murta2014a
True
"""
_clear_db()
if config.MODULES["places"]:
importlib.reload(config.MODULES["places"])
_reload_work()
import_submodules(config.MODULES["citations"])
import_submodules(config.MODULES["groups"])
if getattr(config, "CHECK_DEPRECATION", True):
check_config_deprecation()
for key, work in load_work_map_all_years():
oset(work, "metakey", key)
if work_func:
work_func(work, key)
for alias in config.get_work_aliases(work):
year = config.get_alias_year(work, alias)
module = "y{}.py".format(year) if isinstance(year, int) else year
if module not in WORK_CACHE:
module = "y9999.py"
setattr(WORK_CACHE[module], key, work)
def bibtex_to_info(citation, rules=None):
"""Convert BibTeX dict from bibtexparse to info dict for adding a db entry
Doctest:
.. doctest::
>>> bibtex_to_info({'title': 'a', 'author': 'Pim, J'})
{'place1': '', 'year': 0, 'name': 'a', 'authors': 'Pim, J', 'display': 'pim', 'pyref': 'pim0a'}
>>> bibtex_to_info({'title': 'a', 'author': 'Pim, J', 'year': '2017'})
{'place1': '', 'year': 2017, 'name': 'a', 'authors': 'Pim, J', 'display': 'pim', 'pyref': 'pim2017a'}
>>> bibtex_to_info({'title': 'a', 'author': 'Pim, J', 'year': '2017 [in press]'})
{'place1': '', 'year': 2017, 'name': 'a', 'authors': 'Pim, J', 'note': 'in press', 'display': 'pim', 'pyref': 'pim2017a'}
>>> bibtex_to_info({'title': 'a', 'author': 'Pim, J', 'pages': '1--5'})
{'place1': '', 'year': 0, 'name': 'a', 'authors': 'Pim, J', 'pp': '1--5', 'display': 'pim', 'pyref': 'pim0a'}
>>> bibtex_to_info({'title': 'a', 'author': 'Pim, J', 'journal': 'CiSE'})
{'place1': 'CiSE', 'year': 0, 'name': 'a', 'authors': 'Pim, J', 'place': 'CiSE', 'display': 'pim', 'pyref': 'pim0a'}
>>> bibtex_to_info({'title': 'a', 'author': 'Pim, J', 'ENTRYTYPE': 'article'})
{'place1': '', 'year': 0, 'name': 'a', 'authors': 'Pim, J', 'entrytype': 'article', 'display': 'pim', 'pyref': 'pim0a'}
>>> bibtex_to_info({'title': 'a', 'author': 'Pim, J', 'other': 'a'})
{'place1': '', 'year': 0, 'name': 'a', 'authors': 'Pim, J', 'display': 'pim', 'pyref': 'pim0a', 'other': 'a'}
"""
rules = rules or config.BIBTEX_TO_INFO
return ConvertDict(rules).run(citation)
def extract_info(article, rules=None):
"""Extract info from google scholar article
Doctest:
.. doctest::
Mock:
>>> class Article: pass
>>> article = Article()
>>> article.as_citation = lambda: '''
... @inproceedings{murta2014noworkflow,
... title={noWorkflow: capturing and analyzing provenance of scripts},
... author={Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana},
... booktitle={International Provenance and Annotation Workshop},
... pages={71--83},
... year={2014},
... organization={Springer}
... }'''
>>> article.attrs = {
... 'excerpt': ['Abstract'],
... 'cluster_id': ['5458343950729529273'],
... 'url_citations': ['http://scholar.google.com/scholar?cites=5458343950729529273&as_sdt=2005&sciodt=0,5&hl=en'],
... }
>>> article.div = None
Test:
>>> reload() # Deterministic name
>>> extract_info(article)
{'place1': 'International Provenance and Annotation Workshop', 'year': 2014, 'pp': '71--83', 'authors': 'Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana', 'name': 'noWorkflow: capturing and analyzing provenance of scripts', 'entrytype': 'inproceedings', 'place': 'IPAW', 'display': 'murta', 'pyref': 'murta2014b', 'organization': 'Springer', 'ID': 'murta2014noworkflow', 'excerpt': 'Abstract', 'cluster_id': '5458343950729529273', 'scholar': 'http://scholar.google.com/scholar?cites=5458343950729529273&as_sdt=2005&sciodt=0,5&hl=en'}
"""
rules = rules or config.BIBTEX_TO_INFO
as_citation = article.as_citation()
if not isinstance(as_citation, str):
as_citation = as_citation.decode("utf-8")
citation = parse_bibtex(as_citation)[0]
converter = ConvertDict(rules)
return converter.run(citation, article=article)
def info_to_code(article, rules=None):
"""Convert info dict into code
Required attributes:
* pyref
* display
* year
* name
* place || place1
Doctest:
.. doctest::
>>> print(info_to_code({
... 'pyref': 'pimentel2017a',
... 'display': 'disp',
... 'year': 2017,
... 'name': 'snowballing',
... 'authors': 'Pimentel, Joao',
... 'place1': 'CACM'
... }))
<BLANKLINE>
pimentel2017a = DB(Work(
2017, "snowballing",
display="disp",
authors="Pimentel, Joao",
place1="CACM",
))
With place:
>>> print(info_to_code({
... 'pyref': 'murta2014a',
... 'display': 'noworkflow',
... 'year': 2014,
... 'name': 'noWorkflow: capturing and analyzing provenance of scripts',
... 'authors': 'Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana',
... 'place': config.MODULES['places'].IPAW,
... }))
<BLANKLINE>
murta2014a = DB(Work(
2014, "noWorkflow: capturing and analyzing provenance of scripts",
display="noworkflow",
authors="Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana",
place=IPAW,
))
With string place:
>>> print(info_to_code({
... 'pyref': 'murta2014a',
... 'display': 'noworkflow',
... 'year': 2014,
... 'name': 'noWorkflow: capturing and analyzing provenance of scripts',
... 'authors': 'Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana',
... 'place': 'IPAW',
... }))
<BLANKLINE>
murta2014a = DB(Work(
2014, "noWorkflow: capturing and analyzing provenance of scripts",
display="noworkflow",
authors="Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana",
place=IPAW,
))
With _work_type, due, excerpt, others:
>>> print(info_to_code({
... '_work_type': 'WorkSnowball',
... 'due': 'Unrelated to my snowballing',
... 'excerpt': 'Ignore excerpt',
... 'other': 'Do not ignore other fields',
... 'pyref': 'murta2014a',
... 'display': 'noworkflow',
... 'year': 2014,
... 'name': 'noWorkflow: capturing and analyzing provenance of scripts',
... 'authors': 'Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana',
... 'place': config.MODULES['places'].IPAW,
... }))
<BLANKLINE>
murta2014a = DB(WorkSnowball(
2014, "noWorkflow: capturing and analyzing provenance of scripts",
due="Unrelated to my snowballing",
display="noworkflow",
authors="Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana",
place=IPAW,
other='Do not ignore other fields',
))
"""
rules = rules or config.INFO_TO_INSERT
info = copy(article)
converter = ConvertDict(rules)
return converter.run(info)
def set_by_info(work, info, set_scholar=True, rules=None):
"""Find attributes that should be modified in a work object to make it match an info object"""
rules = rules or config.BIBTEX_TO_INFO
rules.get("<set_before>", lambda x, y: None)(work, info)
work_keys = {k for k in work.__dict__.keys() if not k.startswith("__")} - rules["<set_ignore_keys>"]
meta_keys = info.keys() - rules.get("<set_ignore_keys>", set())
show_result = OrderedDict(
(key, None) for key in rules.get("<set_order>", [])
)
set_result = {}
shared = meta_keys & work_keys
for key in shared:
value = info[key]
add = False
if key in rules.get("<set_ignore_but_show>", set()):
add = True
elif getattr(work, key) != value and key not in getattr(work, rules.get("<set_ignore_attr>", "ignoreattrs"), set()):
add = True
set_result[key] = (value, getattr(work, key))
elif key in rules.get("<set_always_show>", set()):
add = True
if add:
show_result[key] = (value, getattr(work, key))
for key in meta_keys - work_keys:
value = info[key]
set_result[key] = (value, None)
show_result[key] = (value, "")
if set_scholar and rules.get("<scholar_ok>") and not hasattr(work, rules["<scholar_ok>"]):
set_result[rules["<scholar_ok>"]] = (True, None)
result = {
"show": show_result,
"set": set_result,
}
if "<pos_diff>" in rules:
rules["<pos_diff>"](work, info, result)
return result
def changes_dict_to_set_attribute(metakey, changes_dict, end=";"):
"""Convert dictionart of changes to set_attribute instructions"""
result = []
for key, (value, old) in changes_dict.items():
result.append("set_attribute({!r}, {!r}, {!r}, old={!r})".format(metakey, key, value, old))
return "\n".join(result) + end
def citation_text(workref, cited, ref="", backward=False):
"""Create code for citation
Arguments:
* `workref` -- work varname that is cited (by default)
* `cited` -- work info dict that cites the work (by default)
Keyword arguments:
* `ref` -- citation number
* `backward` -- invert citation: `workref` cites `cited`
Doctest:
.. doctest::
>>> print(citation_text('freire2008a', {'pyref': 'murta2014a'}))
<BLANKLINE>
DB(Citation(
murta2014a, freire2008a, ref="",
contexts=[
<BLANKLINE>
],
))
<BLANKLINE>
>>> print(citation_text('pimentel2015a', {'pyref': 'murta2014a'}, backward=True, ref="[8]"))
<BLANKLINE>
DB(Citation(
pimentel2015a, murta2014a, ref="[8]",
contexts=[
<BLANKLINE>
],
))
<BLANKLINE>
"""
pyref = dget(cited, "pyref")
thepyref = pyref
if backward:
pyref, workref = workref, pyref
return textwrap.dedent("""
DB(Citation(
{pyref}, {workref}, ref="{ref}",
contexts=[
],
))
""".format(**locals()))
def compare_paper_to_work(letter, key, work, paper):
"""Compares paper info to work
Arguments:
* `letter` -- indicates last letter
* `key` -- indicates the key ID in BibTeX
* `work` -- work object
* `paper` -- paper info dict
Returns: work, letter
* If it doesn't match, work is None
Doctest:
.. doctest::
>>> reload()
>>> work = work_by_varname('murta2014a')
Fail:
>>> paper = {'pyref': 'pimentel2017a', 'authors': 'Pimentel, Joao', 'name': 'Other', 'year': 2017}
>>> compare_paper_to_work(ord("a") - 1, 'pimentel2017a', work, paper)
(None, 98)
>>> compare_paper_to_work(ord("a") - 1, 'other2017a', work, paper)
(None, 96)
Cluster ID:
>>> paper['cluster_id'] = '5458343950729529273'
>>> compare_paper_to_work(ord("a") - 1, 'other2017a', work, paper) == (work, 96)
True
Alias:
>>> paper = {'pyref': 'chirigati2015a', 'authors': 'Chirigati, Fernando and Koop, David and Freire, Juliana', 'name': 'noWorkflow: Capturing and Analyzing Provenance of Scripts', 'year': 2015}
>>> compare_paper_to_work(ord("a") - 1, 'other2017a', work, paper) == (work, 96)
True
Name:
>>> paper = {'pyref': 'murta2014a', 'authors': 'Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana', 'name': 'noWorkflow: capturing and analyzing provenance of scripts', 'year': 2014}
>>> compare_paper_to_work(ord("a") - 1, 'other2017a', work, paper) == (work, 96)
True
Similar Name fail:
>>> paper = {'authors': 'Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana', 'name': 'noWorkflow: capturing provenance of scripts', 'year': 2014}
>>> compare_paper_to_work(ord("a") - 1, 'other2017a', work, paper)
(None, 96)
Similar Name works due to same place:
>>> paper = {'pyref': 'murta2014a', 'authors': 'Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana', 'name': 'noWorkflow: capturing provenance of scripts', 'year': 2014, 'place': 'IPAW'}
>>> compare_paper_to_work(ord("a") - 1, 'other2017a', work, paper) == (work, 96)
True
"""
if work is None:
return None, letter
if key.startswith(dget(paper, "pyref", "<invalid>")[:-1]):
lastletter = key[-1] if key[-1].isalpha() else "a"
letter = max(ord(lastletter) + 1, letter)
if config.info_work_match(paper, work):
dset(paper, "pyref", key)
return work, letter
return None, letter
def find_work_by_info(paper, pyrefs=None, rules=None):
"""Find work by paper info dict
    Limits the search to a specific year (or all years, if year is 0)
Generates 'place' based on 'entrytype'
Converts 'school' -> 'local'
Tries to get varname from 'ID' in case the bibtex were generated from our db
If it finds the work, it returns it
Otherwise, it updates pyref and display to include a valid letter
Doctest:
.. doctest::
>>> reload()
>>> work = work_by_varname('murta2014a')
>>> paper = {'pyref': 'murta2014a', 'authors': 'Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana', 'name': 'noWorkflow: capturing and analyzing provenance of scripts', 'year': 2014}
>>> find_work_by_info(paper) == work
True
>>> paper = {'pyref': 'murta2014a', 'authors': 'Murta, Leonardo', 'name': 'Other', 'year': 2014, 'display': 'murta'}
>>> find_work_by_info(paper) is None
True
>>> paper['pyref']
'murta2014b'
>>> paper['display']
'murta b'
"""
rules = rules or config.FIND_INFO_WORK
def update_old(old, new, rules):
ignore = callable_get(rules, "<ignore>", [])
for key, value in new.items():
if key not in ignore:
old[key] = value
for key, value in rules.get("<skip>", []):
if paper.get(key, "") == value:
dset(paper, "pyref", "None")
return None
pyrefs = pyrefs or set()
letter = ord("a") - 1
convert = ConvertDict(rules)
new_paper = convert.run(paper)
old_paper, paper = paper, new_paper
worklist = load_work_map(paper["_year"])
if paper["_year"] == 0:
worklist = load_work_map_all_years()
if "_work" in paper:
key = paper["_key"]
work = paper["_work"]
work, letter = compare_paper_to_work(letter, key, work, paper)
if work:
update_old(old_paper, paper, rules)
return work
for key, work in worklist:
work, letter = compare_paper_to_work(letter, key, work, paper)
if work:
update_old(old_paper, paper, rules)
return work
for key in pyrefs:
if dhas(paper, "pyref") and key.startswith(dget(paper, "pyref")):
lastletter = key[-1] if key[-1].isalpha() else "a"
            letter = max(ord(lastletter) + 1, letter)
if letter != ord("a") - 1:
letter = chr(letter)
config.set_info_letter(paper, letter)
update_old(old_paper, paper, rules)
return None
def find_citation(citer, cited):
"""Find citation in the local database
Returns the citation if the `citer` work cites the `cited` work
Doctest:
.. doctest::
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> freire2008a = work_by_varname("freire2008a")
>>> pimentel2015a = work_by_varname("pimentel2015a")
>>> citation = find_citation(murta2014a, freire2008a)
>>> citation is None
False
>>> citation.ref
'5'
Not found:
>>> citation = find_citation(pimentel2015a, freire2008a)
>>> citation is None
True
"""
for citation in load_citations():
if citation.work == citer and citation.citation == cited:
return citation
return None
def find_global_local_citation(citer, cited, file=None):
"""Find citations locally and globally for the works
We use it to check if there is citation redefinition
Doctest:
.. doctest::
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> freire2008a = work_by_varname("freire2008a")
>>> pimentel2015a = work_by_varname("pimentel2015a")
>>> glo, loc = find_global_local_citation(murta2014a, freire2008a, "random")
>>> glo is None
False
>>> glo.ref
'5'
>>> loc is None
True
>>> fname = "murta2014a"
>>> glo, loc = find_global_local_citation(murta2014a, freire2008a, fname)
>>> glo is None
False
>>> glo.ref
'5'
>>> loc is None
False
>>> loc is glo
True
"""
glob, loc = None, None
for citation in load_citations():
if citation.work == citer and citation.citation == cited:
if file == citation._citations_file or not file:
glob = loc = citation
break
else:
glob = citation
return glob, loc
def find_local_citation(wo1, wo2, backward, citation_file=None, warning=None):
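    """Return the citation from `wo1` to `wo2` (swapped when `backward`) if it
    is defined in `citation_file`; warn when it exists only in another file."""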
if backward:
wo1, wo2 = wo2, wo1
global_citation, local_citation = find_global_local_citation(
wo1, wo2,
file=citation_file
)
if global_citation and not local_citation and warning:
warning("Duplicate citation: {} -> {}".format(
oget(wo1, "metakey"),
oget(wo2, "metakey"),
))
return local_citation
def work_to_bibtex_entry(work, name=None, homogeneize=True, acronym=False, rules=None):
"""Convert work to BibTeX entry dict for bibtexparser
Doctest:
.. doctest::
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> result = work_to_bibtex_entry(murta2014a)
>>> list(result)
['ID', 'address', 'publisher', 'pages', 'author', 'title', 'ENTRYTYPE', 'booktitle', 'year']
>>> result['ID']
'murta2014a'
>>> result['address']
'Cologne, Germany'
>>> result['publisher']
'Springer'
>>> result['pages']
'71--83'
>>> result['booktitle']
'International Provenance and Annotation Workshop'
>>> result['author'] # doctest: +ELLIPSIS
'Murta, Leonardo and Braganholo, Vanessa and ... and Freire, Juliana'
>>> result['title']
'no{W}orkflow: capturing and analyzing provenance of scripts'
>>> result['year']
'2014'
>>> result['ENTRYTYPE']
'inproceedings'
Custom name:
>>> result = work_to_bibtex_entry(murta2014a, name="other")
>>> list(result)
['ID', 'address', 'publisher', 'pages', 'author', 'title', 'ENTRYTYPE', 'booktitle', 'year']
>>> result['ID']
'other'
Use acronym for place name:
>>> result = work_to_bibtex_entry(murta2014a, acronym=True)
>>> list(result)
['ID', 'address', 'publisher', 'pages', 'author', 'title', 'ENTRYTYPE', 'booktitle', 'year']
>>> result['booktitle']
'IPAW'
"""
converter = ConvertWork(rules or config.WORK_TO_BIBTEX)
return converter.run(work, new=OrderedDict({
"_name": name,
"_acronym": acronym,
"_homogeneize": homogeneize,
}))
def work_to_bibtex(work, name=None, acronym=False, rules=None):
"""Convert work to bibtex text
Doctest:
.. doctest::
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> print(work_to_bibtex(murta2014a))
@inproceedings{murta2014a,
address = {Cologne, Germany},
author = {Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana},
booktitle = {International Provenance and Annotation Workshop},
pages = {71--83},
publisher = {Springer},
title = {no{W}orkflow: capturing and analyzing provenance of scripts},
year = {2014}
}
<BLANKLINE>
<BLANKLINE>
Custom name:
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> print(work_to_bibtex(murta2014a, name="other"))
@inproceedings{other,
address = {Cologne, Germany},
author = {Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana},
booktitle = {International Provenance and Annotation Workshop},
pages = {71--83},
publisher = {Springer},
title = {no{W}orkflow: capturing and analyzing provenance of scripts},
year = {2014}
}
<BLANKLINE>
<BLANKLINE>
Use acronym for place name:
>>> print(work_to_bibtex(murta2014a, acronym=True))
@inproceedings{murta2014a,
address = {Cologne, Germany},
author = {Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana},
booktitle = {IPAW},
pages = {71--83},
publisher = {Springer},
title = {no{W}orkflow: capturing and analyzing provenance of scripts},
year = {2014}
}
<BLANKLINE>
<BLANKLINE>
"""
result = work_to_bibtex_entry(work, name=name, acronym=acronym, rules=rules)
db = BibDatabase()
db.entries = [result]
writer = BibTexWriter()
writer.indent = " "
return writer.write(db)
def match_bibtex_to_work(bibtex_str):
"""Find works by bibtex entries
Returns a list of matches: (entry, work)
Doctest:
.. doctest::
>>> reload()
>>> bibtex = ''' @inproceedings{murta2014a,
... address = {Cologne, Germany},
... author = {Murta, Leonardo and Braganholo, Vanessa and Chirigati, Fernando and Koop, David and Freire, Juliana},
... booktitle = {IPAW},
... pages = {71--83},
... publisher = {Springer},
... title = {no{W}orkflow: capturing and analyzing provenance of scripts},
... year = {2014}
... } '''
>>> works = match_bibtex_to_work(bibtex)
>>> murta2014a = work_by_varname("murta2014a")
>>> works[0][1] is murta2014a
True
"""
entries = parse_bibtex(bibtex_str)
return [
(entry, find_work_by_info(bibtex_to_info(copy(entry))))
for entry in entries
]
def find(text):
"""Find work by text in any of its attributes"""
words = text.split()
for work in load_work():
match = True
for word in words:
if not any(word.lower() in str(getattr(work, attr)).lower() for attr in dir(work) if not attr.startswith("_")):
match = False
break
if match:
yield work
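# A minimal usage sketch (the query text is hypothetical):
#   list(find("provenance scripts"))
# yields every work whose attributes contain all of the words, ignoring case.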
def find_line(work):
"""Find work position in file
Arguments:
* `work` -- work object
Doctest:
.. doctest::
>>> from .operations import reload, work_by_varname
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> find_line(murta2014a)
6
"""
import re
with open(year_file(oget(work, "year")), "rb") as f:
return [
index
for index, line in enumerate(f)
if re.findall("(^{}\\s=)".format(oget(work, "metakey")).encode(), line)
][0] + 1
def invoke_editor(work):
"""Open work in a given line with the configured editor"""
if not config.TEXT_EDITOR or not config.LINE_PARAMS:
warnings.warn("You must set the config.TEXT_EDITOR and config.LINE_PARAMS to use this function")
return
subprocess.call((
config.TEXT_EDITOR + " " +
config.LINE_PARAMS.format(
year_path=year_file(oget(work, "year")),
line=find_line(work)
)
), shell=True)
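# A hedged configuration sketch (the editor command and argument format below
# are assumptions, not defaults shipped with snowballing):
#   config.TEXT_EDITOR = "subl"
#   config.LINE_PARAMS = "{year_path}:{line}"
# With these values, invoke_editor(work) runs, e.g., `subl .../y2014.py:6`.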
def create_info_code(nwork, info, citation_var, citation_file, should_add, ref=""):
"""Create insertion code with both code and citation"""
citations = ""
text = "insert('''"
if nwork is None:
text += info_to_code(info) + "\n"
if should_add["citation"] and citation_var:
text += citation_text(
citation_var, info,
ref=ref, backward=should_add["backward"]
) + "\n"
citations = ", citations='{}'".format(citation_file)
text += "'''{});".format(citations)
if text == "insert('''''');":
text = ""
if nwork and should_add["set"] and "(" not in dget(info, "pyref"):
text += "\n" + changes_dict_to_set_attribute(dget(info, "pyref"), should_add["set"])
return {
"code": text.strip(),
"extra": config.check_insertion(
nwork, info, citation_var, citation_file, should_add, ref=""
)
}
def should_add_info(
info, citation, article=None, backward=False, citation_file=None,
warning=lambda x: None, set_scholar=False,
article_rules=None, bibtex_rules=None,
add_citation=True
):
"""Check if there is anything to add for this info"""
convert = ConvertDict(article_rules or config.ARTICLE_TO_INFO)
info = convert.run(info, article=article)
nwork = consume(info, "_nwork")
should_add = {
"add": False,
"citation": citation,
"set": {},
"backward": backward,
}
if not nwork or (not citation and add_citation):
should_add["add"] = True
should_add["citation"] = citation
return should_add, nwork, info
changes = set_by_info(nwork, info, set_scholar=set_scholar, rules=bibtex_rules or config.BIBTEX_TO_INFO)
should_add["set"] = changes["set"]
if should_add["set"]:
should_add["add"] = True
if add_citation:
local_citation = find_local_citation(
nwork, citation, backward,
citation_file=citation_file, warning=warning
)
if local_citation:
should_add["citation"] = None
else:
should_add["add"] = True
return should_add, nwork, info
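# For reference, the `should_add` dict built above has the shape
#   {"add": bool, "citation": <citation or None>,
#    "set": {attr: (new, old)}, "backward": bool}.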
class Metakey(object):
"""Convert work or list of work to metakey
.. doctest::
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> murta2014a @ Metakey()
'murta2014a'
>>> [murta2014a] @ Metakey()
['murta2014a']
"""
def __rmatmul__(self, x):
if hasattr(x, "__iter__"):
return [y @ self for y in x]
return oget(x, "metakey")
class MetakeyTitle(object):
"""Convert work or list of work to metakey - title
.. doctest::
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> murta2014a @ MetakeyTitle()
'murta2014a - noWorkflow: capturing and analyzing provenance of scripts'
>>> [murta2014a] @ MetakeyTitle()
['murta2014a - noWorkflow: capturing and analyzing provenance of scripts']
"""
def __rmatmul__(self, x):
if hasattr(x, "__iter__"):
return [y @ self for y in x]
return "{} - {}".format(
oget(x, "metakey"),
oget(x, "name"),
)
class WDisplay(object):
"""Convert work or list of work to display
.. doctest::
>>> reload()
>>> murta2014a = work_by_varname("murta2014a")
>>> murta2014a @ WDisplay()
'no Work flow'
>>> [murta2014a] @ WDisplay()
['no Work flow']
"""
def __rmatmul__(self, x):
if hasattr(x, "__iter__"):
return [y @ self for y in x]
return config.work_display(x)
metakey = Metakey()
metakey_title = MetakeyTitle()
wdisplay = WDisplay()
def check_config_deprecation():
if hasattr(config, "WORK_BIBTEX_MAP"):
warnings.warn(textwrap.dedent("""The configuration config.WORK_BIBTEX_MAP is not supported anymore.
It was replaced by config.WORK_TO_BIBTEX, which is more complete.
Please, modify it according to your needs
"""))
if hasattr(config, "FORM_BUTTONS"):
old_form_to_new(show_deprecation=True)
|
JoaoFelipe/snowballing
|
snowballing/operations.py
|
Python
|
mit
| 33,262
|
#!/usr/bin/env python3
import sys
sys.path.insert(0, '/Users/neo/workspace/devops')
from netkiller.docker import *
# from environment.experiment import experiment
# from environment.development import development
# from environment.production import production
from compose.devops import devops
from compose.demo import demo
# from libexec.portainer import portainer
# print(test)
# exit()
if __name__ == "__main__":
try:
docker = Docker()
# docker.env({'DOCKER_HOST':'ssh://root@192.168.30.13','COMPOSE_PROJECT_NAME':'experiment'})
# docker.sysctl({"vm.max_map_count": "262144"})
# docker.environment(experiment)
# docker.environment(development)
# docker.environment(logging)
docker.environment(devops)
# docker.environment(portainer)
docker.environment(demo)
docker.main()
except KeyboardInterrupt:
print("Crtl+C Pressed. Shutting down.")
|
oscm/devops
|
docker/docker.py
|
Python
|
mit
| 942
|
from setuptools import setup
setup(
name='nspawn-api',
packages=['nspawn'],
include_package_data=True,
install_requires=[
'gunicorn',
'nsenter',
'flask',
'pydbus',
'supervisor'
],
)
|
dincamihai/nspawn-api
|
setup.py
|
Python
|
mit
| 243
|
#coding:utf-8
LIST_NUM = [(1,4),(5,1),(2,3),(6,9),(7,1)]
'''
Use max() to find the largest element of each tuple, then bubble-sort the list by that value.
'''
for j in range(len(LIST_NUM) - 1):
    for i in range(len(LIST_NUM) - 1):
        if max(LIST_NUM[i]) > max(LIST_NUM[i + 1]):
            LIST_NUM[i], LIST_NUM[i + 1] = LIST_NUM[i + 1], LIST_NUM[i]
print(LIST_NUM)
|
51reboot/actual_09_homework
|
04/guantao/list.py
|
Python
|
mit
| 382
|
from psyrun.backend import DistributeBackend, LoadBalancingBackend
from psyrun.mapper import (
map_pspace,
map_pspace_parallel,
map_pspace_hdd_backed)
from psyrun.pspace import Param
from psyrun.scheduler import ImmediateRun, Sqsub
from psyrun.store import DefaultStore, PickleStore
from psyrun.version import version as __version__
|
jgosmann/psyrun
|
psyrun/__init__.py
|
Python
|
mit
| 345
|
""" discover and run doctests in modules and test files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import platform
import sys
import traceback
from contextlib import contextmanager
import pytest
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import TerminalRepr
from _pytest.compat import safe_getattr
from _pytest.fixtures import FixtureRequest
DOCTEST_REPORT_CHOICE_NONE = "none"
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
DOCTEST_REPORT_CHOICES = (
DOCTEST_REPORT_CHOICE_NONE,
DOCTEST_REPORT_CHOICE_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF,
DOCTEST_REPORT_CHOICE_UDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
)
# Lazy definition of runner class
RUNNER_CLASS = None
def pytest_addoption(parser):
parser.addini(
"doctest_optionflags",
"option flags for doctests",
type="args",
default=["ELLIPSIS"],
)
parser.addini(
"doctest_encoding", "encoding used for doctest files", default="utf-8"
)
group = parser.getgroup("collect")
group.addoption(
"--doctest-modules",
action="store_true",
default=False,
help="run doctests in all .py modules",
dest="doctestmodules",
)
group.addoption(
"--doctest-report",
type=str.lower,
default="udiff",
help="choose another output format for diffs on doctest failure",
choices=DOCTEST_REPORT_CHOICES,
dest="doctestreport",
)
group.addoption(
"--doctest-glob",
action="append",
default=[],
metavar="pat",
help="doctests file matching pattern, default: test*.txt",
dest="doctestglob",
)
group.addoption(
"--doctest-ignore-import-errors",
action="store_true",
default=False,
help="ignore doctest ImportErrors",
dest="doctest_ignore_import_errors",
)
group.addoption(
"--doctest-continue-on-failure",
action="store_true",
default=False,
help="for a given doctest, continue to run after the first failure",
dest="doctest_continue_on_failure",
)
def pytest_collect_file(path, parent):
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules and not _is_setup_py(config, path, parent):
return DoctestModule(path, parent)
elif _is_doctest(config, path, parent):
return DoctestTextfile(path, parent)
def _is_setup_py(config, path, parent):
if path.basename != "setup.py":
return False
contents = path.read()
return "setuptools" in contents or "distutils" in contents
def _is_doctest(config, path, parent):
if path.ext in (".txt", ".rst") and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ["test*.txt"]
for glob in globs:
if path.check(fnmatch=glob):
return True
return False
class ReprFailDoctest(TerminalRepr):
def __init__(self, reprlocation_lines):
# List of (reprlocation, lines) tuples
self.reprlocation_lines = reprlocation_lines
def toterminal(self, tw):
for reprlocation, lines in self.reprlocation_lines:
for line in lines:
tw.line(line)
reprlocation.toterminal(tw)
class MultipleDoctestFailures(Exception):
def __init__(self, failures):
super(MultipleDoctestFailures, self).__init__()
self.failures = failures
def _init_runner_class():
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
"""
Runner to collect failures. Note that the out variable in this case is
a list instead of a stdout-like object
"""
def __init__(
self, checker=None, verbose=None, optionflags=0, continue_on_failure=True
):
doctest.DebugRunner.__init__(
self, checker=checker, verbose=verbose, optionflags=optionflags
)
self.continue_on_failure = continue_on_failure
def report_failure(self, out, test, example, got):
failure = doctest.DocTestFailure(test, example, got)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
def report_unexpected_exception(self, out, test, example, exc_info):
failure = doctest.UnexpectedException(test, example, exc_info)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
return PytestDoctestRunner
def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True):
# We need this in order to do a lazy import on doctest
global RUNNER_CLASS
if RUNNER_CLASS is None:
RUNNER_CLASS = _init_runner_class()
return RUNNER_CLASS(
checker=checker,
verbose=verbose,
optionflags=optionflags,
continue_on_failure=continue_on_failure,
)
class DoctestItem(pytest.Item):
def __init__(self, name, parent, runner=None, dtest=None):
super(DoctestItem, self).__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request = None
def setup(self):
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfixturevalue)
for name, value in self.fixture_request.getfixturevalue(
"doctest_namespace"
).items():
globs[name] = value
self.dtest.globs.update(globs)
def runtest(self):
_check_all_skipped(self.dtest)
self._disable_output_capturing_for_darwin()
failures = []
self.runner.run(self.dtest, out=failures)
if failures:
raise MultipleDoctestFailures(failures)
def _disable_output_capturing_for_darwin(self):
"""
Disable output capturing. Otherwise, stdout is lost to doctest (#985)
"""
if platform.system() != "Darwin":
return
capman = self.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
def repr_failure(self, excinfo):
import doctest
failures = None
if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)):
failures = [excinfo.value]
elif excinfo.errisinstance(MultipleDoctestFailures):
failures = excinfo.value.failures
if failures is not None:
reprlocation_lines = []
for failure in failures:
example = failure.example
test = failure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = type(failure).__name__
reprlocation = ReprFileLocation(filename, lineno, message)
checker = _get_checker()
report_choice = _get_report_choice(
self.config.getoption("doctestreport")
)
if lineno is not None:
lines = failure.test.docstring.splitlines(False)
# add line numbers to the left of the error message
lines = [
"%03d %s" % (i + test.lineno + 1, x)
for (i, x) in enumerate(lines)
]
# trim docstring error lines to 10
lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
else:
lines = [
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
]
indent = ">>>"
for line in example.source.splitlines():
lines.append("??? %s %s" % (indent, line))
indent = "..."
if isinstance(failure, doctest.DocTestFailure):
lines += checker.output_difference(
example, failure.got, report_choice
).split("\n")
else:
inner_excinfo = ExceptionInfo(failure.exc_info)
lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
lines += traceback.format_exception(*failure.exc_info)
reprlocation_lines.append((reprlocation, lines))
return ReprFailDoctest(reprlocation_lines)
else:
return super(DoctestItem, self).repr_failure(excinfo)
def reportinfo(self):
return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name
def _get_flag_lookup():
import doctest
return dict(
DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
ELLIPSIS=doctest.ELLIPSIS,
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
ALLOW_UNICODE=_get_allow_unicode_flag(),
ALLOW_BYTES=_get_allow_bytes_flag(),
)
def get_optionflags(parent):
optionflags_str = parent.config.getini("doctest_optionflags")
flag_lookup_table = _get_flag_lookup()
flag_acc = 0
for flag in optionflags_str:
flag_acc |= flag_lookup_table[flag]
return flag_acc
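# For example, `doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE` in the
# ini file accumulates to doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE.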
def _get_continue_on_failure(config):
continue_on_failure = config.getvalue("doctest_continue_on_failure")
if continue_on_failure:
# We need to turn off this if we use pdb since we should stop at
# the first failure
if config.getvalue("usepdb"):
continue_on_failure = False
return continue_on_failure
class DoctestTextfile(pytest.Module):
obj = None
def collect(self):
import doctest
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
encoding = self.config.getini("doctest_encoding")
text = self.fspath.read_text(encoding)
filename = str(self.fspath)
name = self.fspath.basename
globs = {"__name__": "__main__"}
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=0,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
_fix_spoof_python2(runner, encoding)
parser = doctest.DocTestParser()
test = parser.get_doctest(text, globs, name, filename, 0)
if test.examples:
yield DoctestItem(test.name, self, runner, test)
def _check_all_skipped(test):
"""raises pytest.skip() if all examples in the given DocTest have the SKIP
option set.
"""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
if all_skipped:
pytest.skip("all tests skipped by +SKIP option")
def _is_mocked(obj):
"""
    Return whether an object is possibly a mock object, by checking for the existence of a highly improbable attribute
"""
return (
safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
is not None
)
@contextmanager
def _patch_unwrap_mock_aware():
"""
contextmanager which replaces ``inspect.unwrap`` with a version
that's aware of mock objects and doesn't recurse on them
"""
real_unwrap = getattr(inspect, "unwrap", None)
if real_unwrap is None:
yield
else:
def _mock_aware_unwrap(obj, stop=None):
if stop is None:
return real_unwrap(obj, stop=_is_mocked)
else:
return real_unwrap(obj, stop=lambda obj: _is_mocked(obj) or stop(obj))
inspect.unwrap = _mock_aware_unwrap
try:
yield
finally:
inspect.unwrap = real_unwrap
class DoctestModule(pytest.Module):
def collect(self):
import doctest
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""
a hackish doctest finder that overrides stdlib internals to fix a stdlib bug
https://github.com/pytest-dev/pytest/issues/3456
https://bugs.python.org/issue25532
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
doctest.DocTestFinder._find(
self, tests, obj, name, module, source_lines, globs, seen
)
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(self.fspath)
else:
try:
module = self.fspath.pyimport()
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
pytest.skip("unable to import module %r" % self.fspath)
else:
raise
# uses internal doctest module parsing mechanism
finder = MockAwareDocTestFinder()
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=0,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests
yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
"""
    Used by DoctestTextfile and DoctestItem to set up fixture information.
"""
def func():
pass
doctest_item.funcargs = {}
fm = doctest_item.session._fixturemanager
doctest_item._fixtureinfo = fm.getfixtureinfo(
node=doctest_item, func=func, cls=None, funcargs=False
)
fixture_request = FixtureRequest(doctest_item)
fixture_request._fillfixtures()
return fixture_request
def _get_checker():
"""
    Returns a doctest.OutputChecker subclass that takes into account the
ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
to strip b'' prefixes.
Useful when the same doctest should run in Python 2 and Python 3.
An inner class is used to avoid importing "doctest" at the module
level.
"""
if hasattr(_get_checker, "LiteralsOutputChecker"):
return _get_checker.LiteralsOutputChecker()
import doctest
import re
class LiteralsOutputChecker(doctest.OutputChecker):
"""
Copied from doctest_nose_plugin.py from the nltk project:
https://github.com/nltk/nltk
Further extended to also support byte literals.
"""
_unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
if res:
return True
allow_unicode = optionflags & _get_allow_unicode_flag()
allow_bytes = optionflags & _get_allow_bytes_flag()
if not allow_unicode and not allow_bytes:
return False
else: # pragma: no cover
def remove_prefixes(regex, txt):
return re.sub(regex, r"\1\2", txt)
if allow_unicode:
want = remove_prefixes(self._unicode_literal_re, want)
got = remove_prefixes(self._unicode_literal_re, got)
if allow_bytes:
want = remove_prefixes(self._bytes_literal_re, want)
got = remove_prefixes(self._bytes_literal_re, got)
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
return res
_get_checker.LiteralsOutputChecker = LiteralsOutputChecker
return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_UNICODE")
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_BYTES")
def _get_report_choice(key):
"""
    This function returns the actual `doctest` module flag value; we want to do it as late as possible to avoid
    importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.
"""
import doctest
return {
DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
DOCTEST_REPORT_CHOICE_NONE: 0,
}[key]
def _fix_spoof_python2(runner, encoding):
"""
Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This
should patch only doctests for text files because they don't have a way to declare their
encoding. Doctests in docstrings from Python modules don't have the same problem given that
Python already decoded the strings.
This fixes the problem related in issue #2434.
"""
from _pytest.compat import _PY2
if not _PY2:
return
from doctest import _SpoofOut
class UnicodeSpoof(_SpoofOut):
def getvalue(self):
result = _SpoofOut.getvalue(self)
if encoding and isinstance(result, bytes):
result = result.decode(encoding)
return result
runner._fakeout = UnicodeSpoof()
@pytest.fixture(scope="session")
def doctest_namespace():
"""
Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
"""
return dict()
|
hackebrot/pytest
|
src/_pytest/doctest.py
|
Python
|
mit
| 18,770
|
# -*- coding: UTF-8 -*-
import ldap
class Connection:
def __init__(self, url='ldap://annuaire.math.univ-paris-diderot.fr:389', \
base='ou=users,dc=chevaleret,dc=univ-paris-diderot,dc=fr'):
self.base = base
self.url = url
self.con = ldap.initialize(self.url)
self.con.bind_s('', '')
def __del__(self):
self.con.unbind()
def search(self, kw, field='cn'):
"""
Search someone in the LDAP directory using a keyword
"""
qry = "%s=*%s*" % (field, kw)
return self.con.search_s(self.base, ldap.SCOPE_SUBTREE, qry, None)
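# A minimal usage sketch (assumes the directory above is reachable; the
# keyword is hypothetical):
#   con = Connection()
#   results = con.search('dupont', field='sn')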
|
bfontaine/p7ldap
|
p7ldap/connection.py
|
Python
|
mit
| 619
|
"""Approximating spectral functions with tensor networks.
"""
import numpy as np
import random
import quimb as qu
from .tensor_gen import MPO_rand, MPO_zeros_like
def construct_lanczos_tridiag_MPO(A, K, v0=None, initial_bond_dim=None,
beta_tol=1e-6, max_bond=None, seed=False,
v0_opts=None, k_min=10):
"""
"""
if initial_bond_dim is None:
initial_bond_dim = 8
if max_bond is None:
max_bond = 8
if v0 is None:
if seed:
# needs to be truly random so MPI processes don't overlap
qu.seed_rand(random.SystemRandom().randint(0, 2**32 - 1))
V = MPO_rand(A.nsites, initial_bond_dim,
phys_dim=A.phys_dim(), dtype=A.dtype)
else: # normalize
V = v0 / (v0.H @ v0)**0.5
Vm1 = MPO_zeros_like(V)
alpha = np.zeros(K + 1)
beta = np.zeros(K + 2)
bsz = A.phys_dim()**A.nsites
beta[1] = bsz # == sqrt(prod(A.shape))
compress_kws = {'max_bond': max_bond, 'method': 'svd'}
for j in range(1, K + 1):
Vt = A.apply(V, compress=True, **compress_kws)
Vt.add_MPO(-beta[j] * Vm1, inplace=True, compress=True, **compress_kws)
alpha[j] = (V.H @ Vt).real
Vt.add_MPO(-alpha[j] * V, inplace=True, compress=True, **compress_kws)
beta[j + 1] = (Vt.H @ Vt)**0.5
# check for convergence
if abs(beta[j + 1]) < beta_tol:
yield alpha[1:j + 1], beta[2:j + 2], beta[1]**2 / bsz
break
Vm1 = V.copy()
V = Vt / beta[j + 1]
if j >= k_min:
yield (np.copy(alpha[1:j + 1]),
np.copy(beta[2:j + 2]),
np.copy(beta[1])**2 / bsz)
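# A minimal usage sketch (`A` is a hypothetical MPO; each yield is a
# convergence checkpoint of tridiagonal coefficients):
#   for alpha, beta, scaling in construct_lanczos_tridiag_MPO(A, K=40):
#       pass  # e.g. feed into a spectral density estimator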
|
jcmgray/quijy
|
quimb/tensor/tensor_approx_spectral.py
|
Python
|
mit
| 1,751
|
import sublime
import sublime_plugin
from ..core import oa_syntax, decorate_pkg_name
from ..core import ReportGenerationThread
from ...lib.packages import PackageList
###----------------------------------------------------------------------------
class PackageReportThread(ReportGenerationThread):
"""
Generate a tabular report of all installed packages and their state.
"""
def _process(self):
pkg_list = PackageList()
pkg_counts = pkg_list.package_counts()
title = "{} Total Packages".format(len(pkg_list))
t_sep = "=" * len(title)
fmt = '{{:>{}}}'.format(len(str(max(pkg_counts))))
stats = ("{0} [S]hipped with Sublime\n"
"{0} [I]nstalled (user) sublime-package files\n"
"{0} [U]npacked in Packages\\ directory\n"
"{0} Currently in ignored_packages\n"
"{0} Installed Dependencies\n").format(fmt).format(*pkg_counts)
row = "| {:<40} | {:3} | {:3} | {:<3} |".format("", "", "", "")
r_sep = "+------------------------------------------+-----+-----+-----+"
packages = {}
result = [title, t_sep, "", self._generation_time(), stats, r_sep]
for pkg_name, pkg_info in pkg_list:
packages[pkg_name] = pkg_info.status(detailed=False)
result.append(
"| {:<40} | [{:1}] | [{:1}] | [{:1}] |".format(
decorate_pkg_name(pkg_info, name_only=True),
"S" if pkg_info.shipped_path is not None else " ",
"I" if pkg_info.installed_path is not None else " ",
"U" if pkg_info.unpacked_path is not None else " "))
result.extend([r_sep, ""])
self._set_content("OverrideAudit: Package Report", result, ":packages",
oa_syntax("OA-PkgReport"), {
"override_audit_report_packages": packages,
"context_menu": "OverrideAuditReport.sublime-menu"
})
###----------------------------------------------------------------------------
class OverrideAuditPackageReportCommand(sublime_plugin.WindowCommand):
"""
Generate a tabular report of all installed packages and their state.
"""
def run(self, force_reuse=False):
PackageReportThread(self.window, "Generating Package Report",
self.window.active_view(),
force_reuse=force_reuse).start()
###----------------------------------------------------------------------------
#
|
OdatNurd/OverrideAudit
|
src/commands/package_report.py
|
Python
|
mit
| 2,593
|
import numpy as np
import cv2
from sys import argv
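# Usage: python test.py <path-to-image>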
class Test:
def __init__(self, name, image):
self.image = image
self.name = name
self.list = []
def add(self, function):
self.list.append(function)
def run(self):
cv2.imshow(self.name, self.image)
for function in self.list:
cv2.waitKey()
self.image = function(self.image)
cv2.imshow(self.name, self.image)
cv2.waitKey()
def grayscale(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return image
def median(image):
cv2.medianBlur(image, 9, image)
return image
def unsharp(image):
image2 = cv2.GaussianBlur(image, (21,21), 21)
    image = cv2.addWeighted(image, 1.5, image2, -0.5, 0, image)
return image
def harris(image):
    x33 = image.shape[1] // 3
    x66 = image.shape[1] // 3 * 2
dst1 = cv2.goodFeaturesToTrack(image[:,:x33], 10, 0.1, 5)
mean1 = np.uint8(cv2.mean(dst1))
cv2.circle(image, (mean1[0], mean1[1]), 2, 255)
dst2 = cv2.goodFeaturesToTrack(image[:,x66:], 10, 0.1, 5)
dst2 += [x66, 0]
mean2 = np.uint8(cv2.mean(dst2))
cv2.circle(image, (mean2[0], mean2[1]), 2, 255)
return image
if __name__ == '__main__':
image = cv2.imread(argv[1])
test = Test('Test', image)
test.add(grayscale)
test.add(median)
test.add(harris)
test.run()
|
JacobMiki/Emotionnaise-python
|
test.py
|
Python
|
mit
| 1,452
|
"""
Component that performs TensorFlow classification on images.
For a quick start, pick a pre-trained COCO model from:
https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.tensorflow/
"""
import logging
import sys
import os
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_CONFIDENCE, CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE, PLATFORM_SCHEMA,
ImageProcessingEntity)
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['numpy==1.15.3', 'pillow==5.2.0', 'protobuf==3.6.1']
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = 'matches'
ATTR_SUMMARY = 'summary'
ATTR_TOTAL_MATCHES = 'total_matches'
CONF_FILE_OUT = 'file_out'
CONF_MODEL = 'model'
CONF_GRAPH = 'graph'
CONF_LABELS = 'labels'
CONF_MODEL_DIR = 'model_dir'
CONF_CATEGORIES = 'categories'
CONF_CATEGORY = 'category'
CONF_AREA = 'area'
CONF_TOP = 'top'
CONF_LEFT = 'left'
CONF_BOTTOM = 'bottom'
CONF_RIGHT = 'right'
AREA_SCHEMA = vol.Schema({
vol.Optional(CONF_TOP, default=0): cv.small_float,
vol.Optional(CONF_LEFT, default=0): cv.small_float,
vol.Optional(CONF_BOTTOM, default=1): cv.small_float,
vol.Optional(CONF_RIGHT, default=1): cv.small_float
})
CATEGORY_SCHEMA = vol.Schema({
vol.Required(CONF_CATEGORY): cv.string,
vol.Optional(CONF_AREA): AREA_SCHEMA
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FILE_OUT, default=[]):
vol.All(cv.ensure_list, [cv.template]),
vol.Required(CONF_MODEL): vol.Schema({
vol.Required(CONF_GRAPH): cv.isfile,
vol.Optional(CONF_LABELS): cv.isfile,
vol.Optional(CONF_MODEL_DIR): cv.isdir,
vol.Optional(CONF_AREA): AREA_SCHEMA,
vol.Optional(CONF_CATEGORIES, default=[]):
vol.All(cv.ensure_list, [vol.Any(
cv.string,
CATEGORY_SCHEMA
)])
})
})
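# A minimal configuration sketch for configuration.yaml (the paths and
# entity ids below are hypothetical):
#
#   image_processing:
#     - platform: tensorflow
#       source:
#         - entity_id: camera.front_door
#       model:
#         graph: /config/tensorflow/frozen_inference_graph.pb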
def draw_box(draw, box, img_width,
img_height, text='', color=(255, 255, 0)):
"""Draw bounding box on image."""
ymin, xmin, ymax, xmax = box
(left, right, top, bottom) = (xmin * img_width, xmax * img_width,
ymin * img_height, ymax * img_height)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=5, fill=color)
if text:
draw.text((left, abs(top-15)), text, fill=color)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the TensorFlow image processing platform."""
model_config = config.get(CONF_MODEL)
model_dir = model_config.get(CONF_MODEL_DIR) \
or hass.config.path('tensorflow')
labels = model_config.get(CONF_LABELS) \
or hass.config.path('tensorflow', 'object_detection',
'data', 'mscoco_label_map.pbtxt')
# Make sure locations exist
if not os.path.isdir(model_dir) or not os.path.exists(labels):
_LOGGER.error("Unable to locate tensorflow models or label map.")
return
# append custom model path to sys.path
sys.path.append(model_dir)
try:
# Verify that the TensorFlow Object Detection API is pre-installed
# pylint: disable=unused-import,unused-variable
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf # noqa
from object_detection.utils import label_map_util # noqa
except ImportError:
# pylint: disable=line-too-long
_LOGGER.error(
"No TensorFlow Object Detection library found! Install or compile "
"for your system following instructions here: "
"https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md") # noqa
return
try:
# Display warning that PIL will be used if no OpenCV is found.
# pylint: disable=unused-import,unused-variable
import cv2 # noqa
except ImportError:
_LOGGER.warning("No OpenCV library found. "
"TensorFlow will process image with "
"PIL at reduced resolution.")
# setup tensorflow graph, session, and label map to pass to processor
# pylint: disable=no-member
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_config.get(CONF_GRAPH), 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
session = tf.Session(graph=detection_graph)
label_map = label_map_util.load_labelmap(labels)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=90, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
entities = []
for camera in config[CONF_SOURCE]:
entities.append(TensorFlowImageProcessor(
hass, camera[CONF_ENTITY_ID], camera.get(CONF_NAME),
session, detection_graph, category_index, config))
add_entities(entities)
class TensorFlowImageProcessor(ImageProcessingEntity):
"""Representation of an TensorFlow image processor."""
def __init__(self, hass, camera_entity, name, session, detection_graph,
category_index, config):
"""Initialize the TensorFlow entity."""
model_config = config.get(CONF_MODEL)
self.hass = hass
self._camera_entity = camera_entity
if name:
self._name = name
else:
self._name = "TensorFlow {0}".format(
split_entity_id(camera_entity)[1])
self._session = session
self._graph = detection_graph
self._category_index = category_index
self._min_confidence = config.get(CONF_CONFIDENCE)
self._file_out = config.get(CONF_FILE_OUT)
# handle categories and specific detection areas
categories = model_config.get(CONF_CATEGORIES)
self._include_categories = []
self._category_areas = {}
for category in categories:
if isinstance(category, dict):
category_name = category.get(CONF_CATEGORY)
category_area = category.get(CONF_AREA)
self._include_categories.append(category_name)
self._category_areas[category_name] = [0, 0, 1, 1]
if category_area:
self._category_areas[category_name] = [
category_area.get(CONF_TOP),
category_area.get(CONF_LEFT),
category_area.get(CONF_BOTTOM),
category_area.get(CONF_RIGHT)
]
else:
self._include_categories.append(category)
self._category_areas[category] = [0, 0, 1, 1]
# Handle global detection area
self._area = [0, 0, 1, 1]
area_config = model_config.get(CONF_AREA)
if area_config:
self._area = [
area_config.get(CONF_TOP),
area_config.get(CONF_LEFT),
area_config.get(CONF_BOTTOM),
area_config.get(CONF_RIGHT)
]
template.attach(hass, self._file_out)
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_MATCHES: self._matches,
ATTR_SUMMARY: {category: len(values)
for category, values in self._matches.items()},
ATTR_TOTAL_MATCHES: self._total_matches
}
def _save_image(self, image, matches, paths):
from PIL import Image, ImageDraw
import io
img = Image.open(io.BytesIO(bytearray(image))).convert('RGB')
img_width, img_height = img.size
draw = ImageDraw.Draw(img)
# Draw custom global region/area
if self._area != [0, 0, 1, 1]:
draw_box(draw, self._area,
img_width, img_height,
"Detection Area", (0, 255, 255))
for category, values in matches.items():
# Draw custom category regions/areas
if (category in self._category_areas
and self._category_areas[category] != [0, 0, 1, 1]):
label = "{} Detection Area".format(category.capitalize())
draw_box(draw, self._category_areas[category], img_width,
img_height, label, (0, 255, 0))
# Draw detected objects
for instance in values:
label = "{0} {1:.1f}%".format(category, instance['score'])
draw_box(draw, instance['box'],
img_width, img_height,
label, (255, 255, 0))
for path in paths:
_LOGGER.info("Saving results image to %s", path)
img.save(path)
def process_image(self, image):
"""Process the image."""
import numpy as np
try:
import cv2 # pylint: disable=import-error
img = cv2.imdecode(
np.asarray(bytearray(image)), cv2.IMREAD_UNCHANGED)
inp = img[:, :, [2, 1, 0]] # BGR->RGB
inp_expanded = inp.reshape(1, inp.shape[0], inp.shape[1], 3)
except ImportError:
from PIL import Image
import io
img = Image.open(io.BytesIO(bytearray(image))).convert('RGB')
img.thumbnail((460, 460), Image.ANTIALIAS)
img_width, img_height = img.size
inp = np.array(img.getdata()).reshape(
(img_height, img_width, 3)).astype(np.uint8)
inp_expanded = np.expand_dims(inp, axis=0)
image_tensor = self._graph.get_tensor_by_name('image_tensor:0')
boxes = self._graph.get_tensor_by_name('detection_boxes:0')
scores = self._graph.get_tensor_by_name('detection_scores:0')
classes = self._graph.get_tensor_by_name('detection_classes:0')
boxes, scores, classes = self._session.run(
[boxes, scores, classes],
feed_dict={image_tensor: inp_expanded})
boxes, scores, classes = map(np.squeeze, [boxes, scores, classes])
classes = classes.astype(int)
matches = {}
total_matches = 0
for box, score, obj_class in zip(boxes, scores, classes):
score = score * 100
boxes = box.tolist()
# Exclude matches below min confidence value
if score < self._min_confidence:
continue
# Exclude matches outside global area definition
if (boxes[0] < self._area[0] or boxes[1] < self._area[1]
or boxes[2] > self._area[2] or boxes[3] > self._area[3]):
continue
category = self._category_index[obj_class]['name']
# Exclude unlisted categories
if (self._include_categories
and category not in self._include_categories):
continue
# Exclude matches outside category specific area definition
if (self._category_areas
and (boxes[0] < self._category_areas[category][0]
or boxes[1] < self._category_areas[category][1]
or boxes[2] > self._category_areas[category][2]
or boxes[3] > self._category_areas[category][3])):
continue
# If we got here, we should include it
if category not in matches.keys():
matches[category] = []
matches[category].append({
'score': float(score),
'box': boxes
})
total_matches += 1
# Save Images
if total_matches and self._file_out:
paths = []
for path_template in self._file_out:
if isinstance(path_template, template.Template):
paths.append(path_template.render(
camera_entity=self._camera_entity))
else:
paths.append(path_template)
self._save_image(image, matches, paths)
self._matches = matches
self._total_matches = total_matches
|
skalavala/smarthome
|
custom_components/image_processing/tensorflow.py
|
Python
|
mit
| 13,028
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-17 01:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('address', models.CharField(max_length=100)),
],
),
]
|
fernandolobato/balarco
|
clients/migrations/0001_initial.py
|
Python
|
mit
| 614
|
import subprocess
import sys
import os
# This code is meant to manage running multiple instances of my KMCLib codes at the same time,
# in the name of time efficiency
numLambda = 512
numStepsEquilib = 1600000
numStepsAnal = 16000
numStepsSnapshot = 1000
numStepsReq = 16000
sysWidth = 32
sysLength = 32
analInterval = 1
numPasses = 100
timeInterval = 1.0
dataLocation = "dim2Runs/lambdaScan1/"
lambdaMin = 0.05
lambdaMax = 1.25
rateStepSize = (lambdaMax-lambdaMin)/float(numLambda-1)
runningJobs = []
jobIndex = 0
for rateIndex in range(0, numLambda):
currentRate = lambdaMin + rateStepSize*rateIndex
botConc = 0.99
topConc = 0.01
jobInput = "2dSteadyFlow.py "+str(botConc)+" "+str(topConc)+" "+str(currentRate)+" "+str(sysWidth)+" "+str(sysLength)+" "+str(analInterval)+" "+str(numStepsEquilib)+" "+str(numStepsSnapshot)+" "+str(numStepsAnal)+" "+str(numStepsReq)+" "+str(numPasses)+" "+str(timeInterval)+" "+dataLocation+str(rateIndex)+"\n"
with open("jobInputs/testInput."+str(jobIndex), 'w') as f:
f.write(jobInput)
jobIndex += 1
|
joshuahellier/PhDStuff
|
codes/kmc/2Dim/general2d/lambdaFluc2dCreator.py
|
Python
|
mit
| 1,068
|
# -*- coding:utf8 -*-
from __future__ import division
import codecs
import re
def calWordProbability(infile, outfile):
    '''
    Compute word probabilities: the probability of a source-language word
    translating into a given target-language word.
    A source word may map to several target words; each mapping is assigned
    the uniform average probability.
    infile: input file, format: source word \t target word
    outfile: source word \t target word \t probability
    '''
with codecs.open(infile, 'r', 'utf8') as fin:
        # data structure: source word -> {target word: 1}
wordDic = {}
line = fin.readline()
linNum = 1
while line:
linNum += 1
if linNum % 10001 == 1:
print(linNum, line.encode('utf8'))
            line = line.strip()  # strip whitespace from both ends
wArr = re.split('[ |\t]', line)
if len(wArr) >= 2:
                key = wArr[0]  # source-language word
                val = wArr[1]  # target-language word
if key in wordDic:
wordDic[key][val] = 1
else:
valMap = dict()
valMap[val] = 1
wordDic[key] = valMap
line = fin.readline()
with codecs.open(outfile, 'w', 'utf8') as fout:
print('start write')
wCount = 0
for key in wordDic.keys():
wCount += 1
if(wCount % 1001 == 0):
print('writing', wCount)
if len(key.split(' ')) > 1:
continue
valMap = wordDic[key]
valLen = len(valMap)
for val in valMap.keys():
fout.write(key)
fout.write('\t')
fout.write(val)
fout.write('\t')
fout.write(str(1/valLen))
fout.write('\n')
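# Hypothetical usage sketch (file names are placeholders):
#   calWordProbability('word_pairs.txt', 'word_probs.txt')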
|
kfeiWang/pythonUtils
|
wordProbability.py
|
Python
|
mit
| 1,782
|
#Working with variables
import pyaudiogame
spk = pyaudiogame.speak
MyApp = pyaudiogame.App("My Application")
#Here are some variables
#Let's first write one line of text
my_name = "Frastlin"
#Now let's write a number
my_age = 42
#Now let's write several lines of text
my_song = """
My application tis to be,
the coolest you've ever seen!
"""
#Magic time!
def logic(actions):
key = actions['key']
if key == "a":
#Here is our one line of text, it will speak when we press a
spk(my_name)
elif key == "s":
#Here is our number, it will speak when we press s
spk(my_age)
elif key == "d":
#Here is our multiline text example. It will speak when we press d
spk(my_song)
MyApp.logic = logic
MyApp.run()
|
frastlin/PyAudioGame
|
examples/basic_tutorial/ex1.py
|
Python
|
mit
| 712
|
import numpy as np
import pandas as pd
from scipy.optimize import least_squares
import re
import lmfit
class Calibrate:
def __init__(self, model):
"""initialize Calibration class
Parameters
----------
model : ttim.Model
model to calibrate
"""
self.model = model
self.parameters = pd.DataFrame(columns=[
'optimal', 'std', 'perc_std', 'pmin', 'pmax', 'initial', 'parray'])
self.seriesdict = {}
self.seriesinwelldict = {}
def set_parameter(self, name=None, initial=0, pmin=-np.inf, pmax=np.inf):
"""set parameter to be optimized
Parameters
----------
name : str
parameter name, can include layer information.
name can be 'kaq', 'Saq' or 'c'. A number after the parameter
name denotes the layer number, i.e. 'kaq0' refers to the hydraulic
conductivity of layer 0.
name also supports layer ranges, entered by adding a '_' and a
layer number, i.e. 'kaq0_3' denotes conductivity for layers 0 up to
and including 3.
initial : np.float, optional
initial value for the parameter (the default is 0)
pmin : np.float, optional
lower bound for parameter value (the default is -np.inf)
pmax : np.float, optional
            upper bound for parameter value (the default is np.inf)
"""
assert type(name) == str, "Error: name must be string"
# find numbers in name str for support layer ranges
layers_from_name = re.findall(r'\d+', name)
p = None
if "_" in name:
            fromlay, tolay = [int(i) for i in layers_from_name]  # np.int is deprecated; use builtin int
if name[:3] == 'kaq':
p = self.model.aq.kaq[fromlay:tolay+1]
elif name[:3] == 'Saq':
p = self.model.aq.Saq[fromlay:tolay+1]
elif name[0] == 'c':
p = self.model.aq.c[fromlay:tolay+1]
# TODO: set Sll
else:
            layer = int(layers_from_name[0])
# Set, kaq, Saq, c
if name[:3] == 'kaq':
p = self.model.aq.kaq[layer:layer + 1]
elif name[:3] == 'Saq':
p = self.model.aq.Saq[layer:layer + 1]
elif name[0] == 'c':
p = self.model.aq.c[layer:layer + 1]
# TODO: set Sll
if p is None: # no parameter set
print('parameter name not recognized or no parameter ref supplied')
return
self.parameters.loc[name] = {'optimal':initial, 'std':None,
'perc_std':None, 'pmin':pmin, 'pmax':pmax,
'initial':initial, 'parray':p[:]}
def set_parameter_by_reference(self, name=None, parameter=None, initial=0,
pmin=-np.inf, pmax=np.inf):
"""set parameter to be optimized
Parameters
----------
name : str
parameter name
parameter : np.array
array reference containing the parameter to be optimized. must be
specified as reference, i.e. w.rc[0:]
initial : np.float, optional
initial value for the parameter (the default is 0)
pmin : np.float, optional
lower bound for parameter value (the default is -np.inf)
pmax : np.float, optional
            upper bound for parameter value (the default is np.inf)
"""
assert type(name) == str, "Error: name must be string"
if parameter is not None:
assert isinstance(parameter, np.ndarray), \
"Error: parameter needs to be numpy array"
p = parameter
self.parameters.loc[name] = {'optimal':initial, 'std':None,
'perc_std':None, 'pmin':pmin, 'pmax':pmax,
'initial':initial, 'parray':p[:]}
def series(self, name, x, y, layer, t, h):
"""method to add observations to Calibration object
Parameters
----------
name : str
name of series
x : np.float
x-coordinate
y : np.float
y-coordinate
layer : int
layer number, 0-indexed
t : np.array
array containing timestamps of timeseries
h : np.array
array containing timeseries values, i.e. head observations
"""
s = Series(x, y, layer, t, h)
self.seriesdict[name] = s
def seriesinwell(self, name, element, t, h):
"""method to add observations to Calibration object
Parameters
----------
name : str
name of series
element: element object with headinside function
t : np.array
array containing timestamps of timeseries
h : np.array
array containing timeseries values, i.e. head observations
"""
e = SeriesInWell(element, t, h)
self.seriesinwelldict[name] = e
def residuals(self, p, printdot=False):
"""method to calculate residuals given certain parameters
Parameters
----------
p : np.array
array containing parameter values
printdot : bool, optional
print dot for each function call
Returns
-------
np.array
array containing all residuals
"""
if printdot:
print('.', end='')
# set the values of the variables
for i, k in enumerate(self.parameters.index):
# [:] needed to do set value in array
self.parameters.loc[k, 'parray'][:] = p[i]
self.model.solve(silent=True)
rv = np.empty(0)
for key in self.seriesdict:
s = self.seriesdict[key]
h = self.model.head(s.x, s.y, s.t, layers=s.layer)
rv = np.append(rv, s.h - h)
for key in self.seriesinwelldict:
s = self.seriesinwelldict[key]
h = s.element.headinside(s.t)[0]
rv = np.append(rv, s.h - h)
return rv
def residuals_lmfit(self, lmfitparams, printdot=False):
vals = lmfitparams.valuesdict()
p = np.array([vals[k] for k in self.parameters.index])
#p = np.array([vals[k] for k in vals])
return self.residuals(p, printdot)
def fit_least_squares(self, report=True, diff_step=1e-4, xtol=1e-8,
method='lm'):
self.fitresult = least_squares(
self.residuals, self.parameters.initial.values, args=(True,),
bounds=(self.parameters.pmin.values, self.parameters.pmax.values),
method=method, diff_step=diff_step, xtol=xtol, x_scale="jac")
print('', flush=True)
# Call residuals to specify optimal values for model
res = self.residuals(self.fitresult.x)
for ipar in self.parameters.index:
self.parameters.loc[ipar, 'optimal'] = \
self.parameters.loc[ipar, 'parray'][0]
nparam = len(self.fitresult.x)
H = self.fitresult.jac.T @ self.fitresult.jac
sigsq = np.var(res, ddof=nparam)
self.covmat = np.linalg.inv(H) * sigsq
self.sig = np.sqrt(np.diag(self.covmat))
D = np.diag(1 / self.sig)
self.cormat = D @ self.covmat @ D
self.parameters['std'] = self.sig
self.parameters['perc_std'] = self.sig / \
self.parameters['optimal'] * 100
if report:
print(self.parameters)
print(self.sig)
print(self.covmat)
print(self.cormat)
def fit_lmfit(self, report=True, printdot=True):
import lmfit
self.lmfitparams = lmfit.Parameters()
for name in self.parameters.index:
p = self.parameters.loc[name]
self.lmfitparams.add(name, value=p['initial'], min=p['pmin'],
max=p['pmax'])
fit_kws = {"epsfcn": 1e-4}
self.fitresult = lmfit.minimize(self.residuals_lmfit, self.lmfitparams,
method="leastsq",
kws={"printdot":printdot}, **fit_kws)
print('', flush=True)
print(self.fitresult.message)
if self.fitresult.success:
for name in self.parameters.index:
self.parameters.loc[name, 'optimal'] = \
self.fitresult.params.valuesdict()[name]
if hasattr(self.fitresult, 'covar'):
self.parameters['std'] = np.sqrt(np.diag(self.fitresult.covar))
self.parameters['perc_std'] = 100 * self.parameters['std'] / \
np.abs(self.parameters['optimal'])
else:
self.parameters['std'] = np.nan
self.parameters['perc_std'] = np.nan
if report:
print(lmfit.fit_report(self.fitresult))
def fit(self, report=True, printdot=True):
# current default fitting routine
return self.fit_lmfit(report, printdot)
def rmse(self):
"""calculate root-mean-squared-error
Returns
-------
np.float
return rmse value
"""
r = self.residuals(self.parameters['optimal'].values)
return np.sqrt(np.mean(r ** 2))
class Series:
def __init__(self, x, y, layer, t, h):
self.x = x
self.y = y
self.layer = layer
self.t = t
self.h = h
class SeriesInWell:
def __init__(self, element, t, h):
self.element = element
self.t = t
self.h = h
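# Illustrative calibration workflow (a sketch only; the model `ml` and the
# observation arrays t_obs/h_obs are hypothetical, not part of this module):
#
#   cal = Calibrate(ml)
#   cal.set_parameter('kaq0_1', initial=10, pmin=0.1, pmax=100)  # layers 0-1
#   cal.series('piezometer1', x=50, y=0, layer=0, t=t_obs, h=h_obs)
#   cal.fit(report=True)
#   print(cal.rmse())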
|
mbakker7/ttim
|
ttim/fit.py
|
Python
|
mit
| 10,043
|
from app import db, GenericRecord
class Case(GenericRecord):
__collection__ = 'cases'
db.register([Case])
|
michaelnetbiz/mistt-solution
|
app/models/cases.py
|
Python
|
mit
| 114
|
from __future__ import absolute_import
from __future__ import print_function
import argparse
import distutils.spawn
import subprocess
import sys
import virtualenv
def main(argv=None):
parser = argparse.ArgumentParser(description=(
'A wrapper around virtualenv that avoids sys.path sadness. '
'Any additional arguments are passed directly to `virtualenv`.'
))
parser.add_argument('-p', '--python', default=sys.executable)
args, rest_argv = parser.parse_known_args(argv)
# The way virtualenv deals with -ppython is to exec itself with the right
# interpreter.
# The problem with this is the following:
# - python foo/bar/baz.py puts foo/bar on the path
# - in an environment where virtualenv and future (or others that shadow
# stdlib module names) this puts lib/.../site-packages on the path
# - So for example, consider a python2.7 venv calling
# `virtualenv -ppython3.4 venv`
# - This'll call something like:
# `/usr/bin/python3.4 .../lib/python2.7/site-packages/virtualenv.py venv`
# - This'll put .../lib/python2.7/site-packages on the path *first*
# - This'll make the py27 site-packages override the stdlib site-packages
# - If you have python-future installed you'll get something like:
# File "/usr/lib/python3.4/re.py", line 324, in <module>
# import copyreg
# File ".../lib/python2.7/site-packages/copyreg/__init__.py", line 7, in
# <module>
# raise ImportError('This package should not be accessible on
# Python 3. '
# ImportError: This package should not be accessible on Python 3. Either
# you are trying to run from the python-future src folder or your
# installation of python-future is corrupted.
exe = distutils.spawn.find_executable(args.python)
venv_file = virtualenv.__file__.rstrip('co')
hax_script = (
'import sys\n'
'sys.argv[1:] = {rest_argv!r}\n'
'__file__ = {venv_file!r}\n'
'__name__ = "__main__"\n'
'exec(open({venv_file!r}, "rb").read())\n'
).format(rest_argv=rest_argv, venv_file=venv_file)
subprocess.check_call((exe, '-c', hax_script))
if __name__ == '__main__':
exit(main())
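# Invocation sketch (interpreter and target directory are illustrative);
# any extra arguments are forwarded to virtualenv unchanged:
#   python virtualenv_hax.py -p python3.4 venv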
|
asottile/virtualenv-hax
|
virtualenv_hax.py
|
Python
|
mit
| 2,216
|
def run():
import sys, os
try:
uri = sys.argv[1]
except IndexError:
uri = os.getcwd()
import gtk
from .app import App
from uxie.utils import idle
application = App()
idle(application.open, uri)
gtk.main()
|
baverman/fmd
|
fmd/run.py
|
Python
|
mit
| 262
|
#!/usr/bin/env python
# coding: utf-8
from django.shortcuts import render
from table.views import FeedDataView
from app.tables import (
ModelTable, AjaxTable, AjaxSourceTable,
CalendarColumnTable, SequenceColumnTable,
LinkColumnTable, CheckboxColumnTable,
ButtonsExtensionTable
)
def base(request):
table = ModelTable()
return render(request, "index.html", {'people': table})
def ajax(request):
table = AjaxTable()
return render(request, "index.html", {'people': table})
def ajax_source(request):
table = AjaxSourceTable()
return render(request, "index.html", {'people': table})
class Foo(object):
def __init__(self, id, name, calendar):
self.id = id
self.name = name
self.calendar = calendar
def sequence_column(request):
data = [
Foo(1, 'A', [1, 2, 3, 4, 5]),
Foo(2, 'B', [1, 2, 3, 4, 5]),
Foo(3, 'C', [1, 2, 3, 4, 5])
]
table = SequenceColumnTable(data)
return render(request, "index.html", {'people': table})
def calendar_column(request):
data = [
Foo(1, 'A', range(1, 14)),
Foo(2, 'B', range(1, 14)),
Foo(3, 'C', range(1, 14))
]
table = CalendarColumnTable(data)
return render(request, "index.html", {'people': table})
def link_column(request):
table = LinkColumnTable()
return render(request, "index.html", {'people': table})
def checkbox_column(request):
table = CheckboxColumnTable()
return render(request, "index.html", {'people': table})
def buttons_extension(request):
table = ButtonsExtensionTable()
return render(request, "index.html", {'people': table})
def user_profile(request, uid):
from app.models import Person
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
person = get_object_or_404(Person, pk=uid)
return HttpResponse("User %s" % person.name)
class MyDataView(FeedDataView):
token = AjaxSourceTable.token
def get_queryset(self):
return super(MyDataView, self).get_queryset().filter(id__gt=5)
|
shymonk/django-datatable
|
example/app/views.py
|
Python
|
mit
| 2,085
|
from . import common
import hglib
class test_branches(common.basetest):
def test_empty(self):
self.assertEquals(self.client.branches(), [])
def test_basic(self):
self.append('a', 'a')
rev0 = self.client.commit('first', addremove=True)
self.client.branch('foo')
self.append('a', 'a')
rev1 = self.client.commit('second')
branches = self.client.branches()
expected = []
for r, n in (rev1, rev0):
r = self.client.log(r)[0]
expected.append((r.branch, int(r.rev), r.node[:12]))
self.assertEquals(branches, expected)
def test_active_closed(self):
pass
|
beckjake/python3-hglib
|
tests/test-branches.py
|
Python
|
mit
| 674
|
from .utils import Serialize
###{standalone
class Symbol(Serialize):
__slots__ = ('name',)
is_term = NotImplemented
def __init__(self, name):
self.name = name
def __eq__(self, other):
assert isinstance(other, Symbol), other
return self.is_term == other.is_term and self.name == other.name
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.name)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.name)
fullrepr = property(__repr__)
class Terminal(Symbol):
__serialize_fields__ = 'name', 'filter_out'
is_term = True
def __init__(self, name, filter_out=False):
self.name = name
self.filter_out = filter_out
@property
def fullrepr(self):
return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out)
class NonTerminal(Symbol):
__serialize_fields__ = 'name',
is_term = False
class RuleOptions(Serialize):
__serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices'
def __init__(self, keep_all_tokens=False, expand1=False, priority=None, template_source=None, empty_indices=()):
self.keep_all_tokens = keep_all_tokens
self.expand1 = expand1
self.priority = priority
self.template_source = template_source
self.empty_indices = empty_indices
def __repr__(self):
return 'RuleOptions(%r, %r, %r, %r)' % (
self.keep_all_tokens,
self.expand1,
self.priority,
self.template_source
)
class Rule(Serialize):
"""
origin : a symbol
expansion : a list of symbols
order : index of this expansion amongst all rules of the same name
"""
__slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash')
__serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options'
__serialize_namespace__ = Terminal, NonTerminal, RuleOptions
def __init__(self, origin, expansion, order=0, alias=None, options=None):
self.origin = origin
self.expansion = expansion
self.alias = alias
self.order = order
self.options = options or RuleOptions()
self._hash = hash((self.origin, tuple(self.expansion)))
def _deserialize(self):
self._hash = hash((self.origin, tuple(self.expansion)))
def __str__(self):
return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion))
def __repr__(self):
return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options)
def __hash__(self):
return self._hash
def __eq__(self, other):
if not isinstance(other, Rule):
return False
return self.origin == other.origin and self.expansion == other.expansion
###}
|
python-poetry/poetry-core
|
src/poetry/core/_vendor/lark/grammar.py
|
Python
|
mit
| 2,918
|
from datetime import datetime
from flask import Blueprint, jsonify, request
from app.dao.fact_notification_status_dao import (
get_total_notifications_for_date_range,
)
from app.dao.fact_processing_time_dao import (
get_processing_time_percentage_for_date_range,
)
from app.dao.services_dao import get_live_services_with_organisation
from app.errors import register_errors
from app.performance_dashboard.performance_dashboard_schema import (
performance_dashboard_request,
)
from app.schema_validation import validate
performance_dashboard_blueprint = Blueprint('performance_dashboard', __name__, url_prefix='/performance-dashboard')
register_errors(performance_dashboard_blueprint)
@performance_dashboard_blueprint.route('')
def get_performance_dashboard():
# All statistics are as of last night this matches the existing performance platform
# and avoids the need to query notifications.
if request.args:
# Is it ok to reuse this? - should probably create a new one
validate(request.args, performance_dashboard_request)
# If start and end date are not set, we are expecting today's stats.
today = str(datetime.utcnow().date())
start_date = datetime.strptime(request.args.get('start_date', today), '%Y-%m-%d').date()
end_date = datetime.strptime(request.args.get('end_date', today), '%Y-%m-%d').date()
total_for_all_time = get_total_notifications_for_date_range(start_date=None, end_date=None)
total_notifications, emails, sms, letters = transform_results_into_totals(total_for_all_time)
totals_for_date_range = get_total_notifications_for_date_range(start_date=start_date, end_date=end_date)
processing_time_results = get_processing_time_percentage_for_date_range(start_date=start_date, end_date=end_date)
services = get_live_services_with_organisation()
stats = {
"total_notifications": total_notifications,
"email_notifications": emails,
"sms_notifications": sms,
"letter_notifications": letters,
"notifications_by_type": transform_into_notification_by_type_json(totals_for_date_range),
"processing_time": transform_processing_time_results_to_json(processing_time_results),
"live_service_count": len(services),
"services_using_notify": transform_services_to_json(services)
}
return jsonify(stats)
def transform_results_into_totals(total_notifications_results):
total_notifications = 0
emails = 0
sms = 0
letters = 0
for x in total_notifications_results:
total_notifications += x.emails
total_notifications += x.sms
total_notifications += x.letters
emails += x.emails
sms += x.sms
letters += x.letters
return total_notifications, emails, sms, letters
def transform_into_notification_by_type_json(total_notifications):
j = []
for x in total_notifications:
j.append({"date": x.bst_date, "emails": x.emails, "sms": x.sms, "letters": x.letters})
return j
def transform_processing_time_results_to_json(processing_time_results):
j = []
for x in processing_time_results:
j.append({"date": x.date, "percentage_under_10_seconds": x.percentage})
return j
def transform_services_to_json(services_results):
j = []
for x in services_results:
j.append({"service_id": x.service_id, "service_name": x.service_name,
"organisation_id": x.organisation_id, "organisation_name": x.organisation_name}
)
return j
|
alphagov/notifications-api
|
app/performance_dashboard/rest.py
|
Python
|
mit
| 3,531
|
#!/usr/bin/python3
import os
import requests
import sys
structure_template = 'src/{}/kotlin/solutions/day{}'
solver_template = """
package solutions.{package}
import solutions.Solver
class {clazz}: Solver {{
override fun solve(input: List<String>, partTwo: Boolean): String {{
TODO("not implemented")
}}
}}
"""
test_template = """
package solutions.{package}
import solutions.AbstractDayTest
class {clazz}Test: AbstractDayTest({clazz}()) {{
override fun getPart1Data(): List<TestData> = listOf(
TestData(listOf(""), "")
)
override fun getPart2Data(): List<TestData> = listOf(
TestData(listOf(""), "")
)
}}
"""
def generate_files(day, kind, template):
day_as_string = str(day).zfill(2)
clazz = 'Day{}'.format(day)
package = 'day{}'.format(day_as_string)
folder = structure_template.format(kind, day_as_string)
os.makedirs(folder)
suffix = ''
if kind == 'test':
suffix = 'Test'
path = "{}/{}.{}".format(folder, clazz+suffix, 'kt')
content = template.format(package=package, clazz=clazz)
with open(path, 'w') as f:
f.write(content)
return folder
def fetch_input(day, token, folder):
url = 'https://adventofcode.com/2017/day/{}/input'.format(day)
res = requests.get(url, cookies={'session': token})
input_file = '{}/input'.format(folder)
with open(input_file, 'wb') as f:
for chunk in res.iter_content(chunk_size=128):
f.write(chunk)
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Usage: setup.py token day")
exit(1)
token = sys.argv[1]
day = sys.argv[2]
day_int = -1
try:
day_int = int(day)
except ValueError:
print("Could not parse day as number: [{}]".format(day))
exit(1)
folder = generate_files(day_int, 'main', solver_template)
generate_files(day_int, 'test', test_template)
fetch_input(day_int, token, folder)
|
Dr-Horv/Advent-of-Code-2017
|
setup.py
|
Python
|
mit
| 1,971
|
import logging
from sanic.signals import RESERVED_NAMESPACES
from sanic.touchup import TouchUp
def test_touchup_methods(app):
assert len(TouchUp._registry) == 9
async def test_ode_removes_dispatch_events(app, caplog):
with caplog.at_level(logging.DEBUG, logger="sanic.root"):
await app._startup()
logs = caplog.record_tuples
for signal in RESERVED_NAMESPACES["http"]:
assert (
"sanic.root",
logging.DEBUG,
f"Disabling event: {signal}",
) in logs
|
channelcat/sanic
|
tests/test_touchup.py
|
Python
|
mit
| 528
|
# by amounra 0216 : http://www.aumhaa.com
"""
Codec_Map.py
Created by amounra on 2010-10-05.
Copyright (c) 2010 __artisia__. All rights reserved.
This file allows the reassignment of the controls from their default arrangement. The order is from left to right;
Buttons are Note #'s and Faders/Rotaries are Controller #'s
"""
CHANNEL = 0 #main channel (0 - 15)
CODE_BUTTONS = [[1, 5, 9, 13, 17, 21, 25, 29],
[2, 6, 10, 14, 18, 22, 26, 30],
[3, 7, 11, 15, 19, 23, 27, 31],
[4, 8, 12, 16, 20, 24, 28, 32]]
CODE_DIALS = [[1, 5, 9, 13, 17, 21, 25, 29],
[2, 6, 10, 14, 18, 22, 26, 30],
[3, 7, 11, 15, 19, 23, 27, 31],
[4, 8, 12, 16, 20, 24, 28, 32]]
CODE_COLUMN_BUTTONS = [38, 39, 40, 41, 42, 43, 44, 45]
CODE_ROW_BUTTONS = [33, 34, 35, 36]
LIVID = 37 #single
FOLLOW = True #this sets whether or not the last selected device on a track is selected for editing when you select a new track
COLOR_MAP = [127, 127, 127, 127, 127, 127, 127]
USE_DEVICE_SELECTOR = True
FACTORY_RESET = False
SHIFT_LATCHING = True
from aumhaa.v2.livid.colors import *
class CodecColors:
ResetSendsColor = LividRGB.WHITE
class Mod:
ShiftOff = LividRGB.OFF
ShiftOn = LividRGB.WHITE
class Mode:
Main = LividRGB.WHITE
Main_shifted = LividRGB.BlinkFast.WHITE
class ShiftMode:
Enabled = LividRGB.BlinkFast.WHITE
Disabled = LividRGB.OFF
class DefaultButton:
On = LividRGB.WHITE
Off = LividRGB.OFF
Disabled = LividRGB.OFF
Alert = LividRGB.BlinkFast.WHITE
class Session:
StopClipTriggered = LividRGB.BlinkFast.BLUE
StopClip = LividRGB.BLUE
Scene = LividRGB.CYAN
NoScene = LividRGB.OFF
SceneTriggered = LividRGB.BlinkFast.BLUE
ClipTriggeredPlay = LividRGB.BlinkFast.GREEN
ClipTriggeredRecord = LividRGB.BlinkFast.RED
RecordButton = LividRGB.OFF
ClipStopped = LividRGB.WHITE
ClipStarted = LividRGB.GREEN
ClipRecording = LividRGB.RED
NavigationButtonOn = LividRGB.BLUE
class Mixer:
SoloOn = LividRGB.CYAN
SoloOff = LividRGB.OFF
MuteOn = LividRGB.YELLOW
MuteOff = LividRGB.OFF
ArmSelected = LividRGB.GREEN
ArmUnselected = LividRGB.RED
ArmOff = LividRGB.OFF
StopClip = LividRGB.BLUE
SelectedOn = LividRGB.BLUE
SelectedOff = LividRGB.OFF
class Recording:
Transition = LividRGB.BlinkSlow.GREEN
class Recorder:
On = LividRGB.WHITE
Off = LividRGB.BLUE
NewOn = LividRGB.BlinkFast.YELLOW
NewOff = LividRGB.YELLOW
FixedOn = LividRGB.BlinkFast.CYAN
FixedOff = LividRGB.CYAN
RecordOn = LividRGB.BlinkFast.GREEN
RecordOff = LividRGB.GREEN
FixedAssigned = LividRGB.MAGENTA
FixedNotAssigned = LividRGB.OFF
class Transport:
OverdubOn = LividRGB.BlinkFast.RED
OverdubOff = LividRGB.RED
StopOn = LividRGB.BLUE
StopOff = LividRGB.BLUE
class Device:
NavOn = LividRGB.MAGENTA
NavOff = LividRGB.OFF
BankOn = LividRGB.YELLOW
BankOff = LividRGB.OFF
ChainNavOn = LividRGB.RED
ChainNavOff = LividRGB.OFF
ContainNavOn = LividRGB.CYAN
ContainNavOff = LividRGB.OFF
|
LividInstruments/LiveRemoteScripts
|
Codec_v2/Map.py
|
Python
|
mit
| 3,024
|
#!/usr/bin/env python3
from __future__ import print_function
import datetime
import calendar
import logging
import time
import re
import os
import os.path
from abc import ABCMeta
from abc import abstractmethod
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from PIL import Image
import PortalProperties
__version__ = "1.0.1"
__author__ = "Jose Miguel Colella"
__email__ = "jose.colella@dynatrace.com"
__license__ = "MIT"
logger = logging.getLogger(__name__)
logging.basicConfig(
filename="portal.log", format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
class AbstractPortal(object, metaclass=ABCMeta):
"""AbstractPortal is an abstract class that encapsulates all the
common attributes and methods of the Synthetic Portal such as
username, password, login
Attributes:
driver (selenium.webdriver.phantomjs.webdriver.WebDriver): The webdriver instance
"""
screenshotDebugDir = "screenshotDebug"
def __init__(self, username, password):
        assert isinstance(username, str), "username must be a string"
        assert isinstance(password, str), "password must be a string"
# If the operating system is windows or *nix.
self._osNull = {
"nt": "NUL",
"posix": "/dev/null"
}
self.driver = webdriver.PhantomJS(
service_log_path=self._osNull[os.name], service_args=["--ignore-ssl-errors=true"])
self.driver.maximize_window()
self.windowSize = self.driver.get_window_size()
self._username = username
self._password = password
self._screenshotDebugDumpDirPath = "{path}/{directory}".format(
path=os.getcwd(), directory=AbstractPortal.screenshotDebugDir)
self._checkDumpDirIsCreated()
@property
@abstractmethod
def homePage(self):
"""
str: The url of the portal
"""
pass
@property
@abstractmethod
def usernameInputIdentifier(self):
"""
str: The DOM attribute to fetch the username <input>
"""
pass
@property
@abstractmethod
def passwordInputIdentifier(self):
"""
str: The DOM attribute to fetch the password <input>
"""
pass
@property
@abstractmethod
def submitButtonIdentifier(self):
"""
str: The DOM attribute to fetch the log in button
"""
pass
@property
def username(self):
"""
str: The username used to login. (Read-Only)
"""
return self._username
@property
def password(self):
"""
str: The password used to login.(Read-Only)
"""
return self._password
def _checkDumpDirIsCreated(self):
if not os.path.isdir(self.screenshotDebugDir):
os.mkdir(self._screenshotDebugDumpDirPath)
def _saveDebugScreenshot(self, screenshotName):
self.driver.save_screenshot(
"{}/{}-{}.png".format(AbstractPortal.screenshotDebugDir, datetime.datetime.today(), screenshotName))
def login(self):
"""login() inputs the username and password into the corresponding DOM elements
of the home page and establishes a session that allows for the interaction
"""
logging.debug("Fetching Dynatrace Login Page")
self.driver.get(self.homePage)
logging.debug("Finish fetching page")
self._saveDebugScreenshot("Home")
usernameInput = self.driver.find_element_by_name(
self.usernameInputIdentifier)
passwordInput = self.driver.find_element_by_name(
self.passwordInputIdentifier)
logging.debug("Sending username credentials")
usernameInput.send_keys(self.username)
logging.debug("Sending password credentials")
passwordInput.send_keys(self.password)
submitButton = self.driver.find_element_by_id(
self.submitButtonIdentifier)
logging.debug("Sending button click")
submitButton.click()
logging.debug("Waiting for page to load")
def close(self):
"""Closes the driver session and the phantomjs process.
"""
self.driver.quit()
class GPNPortal(AbstractPortal):
tableId = "ctl00_Content_XFSummaryTable"
endOfMonthProjectionIdentifier = "ctl00_Content_XFProjectedUsage"
accountsListIdentifier = "identity-btn-name"
accountsListDropdownIdentifier = "divIdentityList"
def __init__(self, username, password):
super(GPNPortal, self).__init__(username, password)
self.accountsList = set()
self.accountNameRegex = re.compile(r":(?P<accountName>.+):")
@property
def homePage(self):
return "https://www.gomeznetworks.com/index.asp?g=1"
@property
def usernameInputIdentifier(self):
return "username"
@property
def passwordInputIdentifier(self):
return "pwd"
@property
def submitButtonIdentifier(self):
return "loginbutton"
def _getCurrentAccountName(self):
currentAccountName = self.driver.find_element_by_id(
"identity-btn-name").text
return currentAccountName
def login(self):
super(GPNPortal, self).login()
try:
WebDriverWait(self.driver, 30).until(
EC.visibility_of_element_located((By.CLASS_NAME, "black-1")))
except Exception:
logging.warning("The page could not load")
time.sleep(5)
self.portalWindow = self.driver.current_window_handle
self._saveDebugScreenshot("Login")
def getXFMeasurement(self, startDay=1, endDay=calendar.monthrange(datetime.date.today().year, datetime.date.today().month)[1], startMonth=datetime.date.today().month, endMonth=datetime.date.today().month):
"""getXFMeasurement(startDay, endDay, startMonth, endMonth) returns the XF consumption for the current account
calculating the monthly offset, end of month projection, and the sum of the xf measurements from `startDay` to
`endDay`
Args:
startDay (Optional[int]): The initial day to get the XF measurements. Defaults to 1
endDay (Optional[int]): The last day to get the XF measurements. Defaults to the last day of the month
startMonth (Optional[int]): The starting month from which to fetch the XF measurements. Defaults to current month
            endMonth (Optional[int]): The ending month from which to fetch the XF measurements. Defaults to current month
Returns:
XFMeasurement: an instance of the XFMeasurement class initialized with the monthly offset, end of month projection, and the sum of the
XF measurements from `startDay` to `endDay`
Raises:
AssertionError: If `startMonth` is not equal to `endMonth`. The GPN Portal will only show XF consumption
measurement one month at a time
"""
assert startMonth == endMonth, "Expected startMonth to be equal to endMonth. {} is not equal to {}".format(
startMonth, endMonth)
currentYear = datetime.date.today().year
xfConsumptionPage = "https://www.gomeznetworks.com/reports/flexReport.aspx?x=&startdate={startYear}/{startMonth}/{startDay}&enddate={endYear}/{endMonth}/{endDay}".format(
startYear=currentYear,
startMonth=startMonth,
startDay=startDay,
endYear=currentYear,
endMonth=endMonth,
endDay=endDay
)
self.driver.execute_script(
"window.open('{}')" .format(xfConsumptionPage))
self.driver.switch_to_window(self.driver.window_handles[1])
try:
WebDriverWait(self.driver, 30).until(
EC.visibility_of_element_located((By.ID, "ctl00$Content$Chart")))
except Exception:
logging.warning("The page could not load")
print("Account: {}".format(
self.driver.find_element_by_class_name("black-1").text))
xfConsumption = PortalProperties.XFMeasurement()
xfConsumption.setEndOfMonthProjection(
self.driver.find_element_by_id(GPNPortal.endOfMonthProjectionIdentifier).text)
summaryTable = self.driver.find_element_by_id(GPNPortal.tableId)
xfMeasurementHtmlTable = summaryTable.find_elements_by_tag_name("td")
xfConsumption.setXFTable(xfMeasurementHtmlTable)
xfConsumption.setSumXFMeasurement(startDay, endDay)
xfConsumption.setMonthlyOffset(endDay)
return xfConsumption
def switchAccount(self):
if len(self.driver.window_handles) > 1:
self.driver.execute_script("window.close()")
self.driver.switch_to_window(self.driver.window_handles[0])
cleanAccountName = lambda account: (
re.search(self.accountNameRegex, account).group("accountName")).strip()
self.accountsList.add(cleanAccountName(self._getCurrentAccountName()))
# Button needs to be clicked in order to see other accounts
self.driver.find_element_by_id(
GPNPortal.accountsListIdentifier).click()
accountList = self.driver.find_element_by_id(
GPNPortal.accountsListDropdownIdentifier)
        # Skip the first and last rows: the first is the "Switch accounts"
        # header row and the last has an empty name
accountListRows = accountList.find_elements_by_tag_name("tr")[1:-1]
accounts = [{"name": cleanAccountName(accountListRow.text), "node": accountListRow}
for accountListRow in accountListRows if cleanAccountName(accountListRow.text) not in self.accountsList]
logging.info(accounts)
# Click the first account in the dropdown
accounts[0]["node"].click()
try:
WebDriverWait(self.driver, 30).until(
EC.visibility_of_element_located((By.CLASS_NAME, "black-1")))
except Exception:
logging.warning("The page could not load")
time.sleep(5)
logging.info("Current Account: {}".format(
cleanAccountName(self._getCurrentAccountName())))
self._saveDebugScreenshot("SwitchAccount.png")
logging.info(self.accountsList)
class DynatracePortal(AbstractPortal):
monitorAnalyzeId = "monitoranalyze"
interactiveChartId = "apmInteractiveChart"
chartsClass = "apm-btn-link"
logoutId = "sign-out"
iframeName = "apmframe"
chartsUrl = "http://cr02.dynatrace.com/en_US/group/guest/interactive-charts"
@property
def homePage(self):
return "https://www.gomezapm.com"
@property
def usernameInputIdentifier(self):
return "username"
@property
def passwordInputIdentifier(self):
return "pw"
@property
def submitButtonIdentifier(self):
return "signIn"
def __init__(self, username, password):
super(DynatracePortal, self).__init__(username, password)
# Sets the driver to wait 10 seconds to poll the DOM. Very useful for
# sites like Dynatrace Portal that take a while to load elements
self.driver.implicitly_wait(10)
self.chartsCaptured = set()
self.currentAccountName = self.username
self.croppingChartsDimension = {
"left": 675,
"up": 270,
"right": 675,
"down": 150
}
self.performanceMapDimension = {
"right": 600,
"up": 285,
"left": 600,
"down": 400
}
def _cropElement(self, selectorType, selector, sourceFile, destinationFile="output.png"):
"""Allows for cropping elements from an image given a selectorType, and
selector as well as a destination file to save the element to.
Args:
selectorType (str): The selector type for the DOM element, e.g "id", "class", "name"
            selector (str): The selector used to locate the element(s) to crop
            sourceFile (str): The name of the file to crop
            destinationFile (str[optional]): The file to save the cropped element to. Defaults to "output.png"
"""
assert selectorType in {"id", "class", "name", "tag"}
try:
if selectorType is "id":
elements = self.driver.find_elements_by_id(selector)
elif selectorType is "class":
elements = self.driver.find_elements_by_class_name(selector)
elif selectorType is "name":
elements = self.driver.find_elements_by_name(selector)
elif selectorType is "tag":
elements = self.driver.find_elements_by_tag_name(selector)
else:
pass
chartImage = Image.open(sourceFile)
for element in elements:
                if sum(element.location.values()) != 0 and sum(element.size.values()) != 0:
left = element.location["x"]
top = element.location["y"]
right = element.location["x"] + element.size["width"]
bottom = element.location["y"] + element.size["height"]
croppedImage = chartImage.crop((left, top, right, bottom))
croppedImage.save(destinationFile)
chartImage.close()
except NoSuchElementException:
raise NoSuchElementException
def login(self):
super(DynatracePortal, self).login()
try:
WebDriverWait(self.driver, 30).until(
EC.presence_of_element_located((By.ID, DynatracePortal.monitorAnalyzeId)))
logging.info(
"Successfully logged in with user: {}".format(self.username))
except Exception:
logging.warning("WARNING: The login page could not load")
logging.debug("Sleeping for 15 seconds")
time.sleep(15)
self._saveDebugScreenshot("Login")
def getInteractiveCharts(self):
logging.debug("navigating to charts URL")
self.driver.get(DynatracePortal.chartsUrl)
try:
WebDriverWait(self.driver, 60).until(
EC.invisibility_of_element_located((By.CLASS_NAME, "gwt-Image")))
except Exception:
            logging.warning(
                "WARNING: Element could not be found within the time frame")
logging.debug("Sleeping for 15 seconds")
time.sleep(15)
self._saveDebugScreenshot("ChartsAvailable")
def getChartPage(self, chartName):
self.getInteractiveCharts()
chartTimeoutSeconds = 60
availableCharts = self.driver.find_elements_by_class_name(
DynatracePortal.chartsClass)
try:
chartNodes = filter(
lambda node: node.text == chartName and node.text != "", availableCharts)
chartNode = next(chartNodes)
# Click on chart node
chartNode.click()
try:
wait = WebDriverWait(self.driver, chartTimeoutSeconds)
wait.until(EC.visibility_of_element_located((By.TAG_NAME, "svg")))
except Exception:
raise Exception("No chart element was found during {}".format(chartTimeoutSeconds))
logging.debug("Sleeping for 20 seconds")
time.sleep(20)
except Exception:
raise Exception("Expected valid chart name. Available charts are: {}".format(
[elem.text for elem in availableCharts if elem.text != ""]))
def saveChartToScreenshot(self, chartName, specificElements=[], saveDir="."):
"""saveChartToScreenshot saves a screenshot of the `chartName` provided
as a parameter.
Args:
chartName (str): The name of the chart to get the screenshot
            specificElements (list): Alternating selector types and selectors (e.g. ["tag", "svg"]) identifying the elements to crop
saveDir (Optional[str]): The directory to save the screenshot. Defaults to '.'
"""
self.getChartPage(chartName)
imageName = "{}/{}-uncropped.png".format(saveDir, chartName)
self.driver.save_screenshot(imageName)
if specificElements:
typeSelectorList = [(specificElements[element], specificElements[
element + 1]) for element in range(0, len(specificElements), 2)]
try:
for specificElement in typeSelectorList:
saveFileName = "{}/{}-{}.png".format(saveDir, chartName, specificElement[1])
self._cropElement(specificElement[0], specificElement[1], imageName, saveFileName)
logging.info("Finished saving {destination} screenshot to {directory} directory".format(
destination=saveFileName, directory=saveDir))
if os.path.isfile(imageName):
os.remove(imageName)
except SystemError:
pass
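# Example session (credentials, chart name and selectors are placeholders):
#
#   portal = DynatracePortal('user@example.com', 'secret')
#   portal.login()
#   portal.saveChartToScreenshot('My Chart', ['tag', 'svg'], saveDir='charts')
#   portal.close()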
|
josecolella/Dynatrace-Resources
|
portal/Portal.py
|
Python
|
mit
| 17,063
|
import os
from cauldron.test import support
from cauldron.test.support import scaffolds
class TestAlias(scaffolds.ResultsTest):
"""..."""
def test_unknown_command(self):
"""Should fail if the command is not recognized."""
r = support.run_command('alias fake')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'UNKNOWN_COMMAND')
def test_list(self):
"""..."""
r = support.run_command('alias list')
self.assertFalse(r.failed, 'should not have failed')
def test_add(self):
"""..."""
p = self.get_temp_path('aliaser')
r = support.run_command('alias add test "{}" --temporary'.format(p))
self.assertFalse(r.failed, 'should not have failed')
def test_remove(self):
"""..."""
directory = self.get_temp_path('aliaser')
path = os.path.join(directory, 'test.text')
with open(path, 'w+') as f:
f.write('This is a test')
support.run_command('alias add test "{}" --temporary'.format(path))
r = support.run_command('alias remove test --temporary')
        self.assertFalse(r.failed, 'should not have failed')
def test_empty(self):
"""..."""
r = support.run_command('alias add')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'MISSING_ARG')
def test_autocomplete_command(self):
"""..."""
result = support.autocomplete('alias ad')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], 'add')
def test_autocomplete_alias(self):
"""..."""
result = support.autocomplete('alias add fake-alias-not-real')
self.assertEqual(len(result), 0)
def test_autocomplete_path(self):
"""..."""
path = os.path.dirname(os.path.realpath(__file__))
result = support.autocomplete('alias add test {}'.format(path))
self.assertIsNotNone(result)
|
sernst/cauldron
|
cauldron/test/cli/commands/test_alias.py
|
Python
|
mit
| 2,067
|
def test1():
SINK(SOURCE)
def test2():
s = SOURCE
SINK(s)
def source():
return SOURCE
def sink(arg):
SINK(arg)
def test3():
t = source()
SINK(t)
def test4():
t = SOURCE
sink(t)
def test5():
t = source()
sink(t)
def test6(cond):
if cond:
t = "Safe"
else:
t = SOURCE
if cond:
SINK(t)
def test7(cond):
if cond:
t = SOURCE
else:
t = "Safe"
if cond:
SINK(t)
def source2(arg):
return source(arg)
def sink2(arg):
sink(arg)
def sink3(cond, arg):
if cond:
sink(arg)
def test8(cond):
t = source2()
sink2(t)
#False positive
def test9(cond):
if cond:
t = "Safe"
else:
t = SOURCE
sink3(cond, t)
def test10(cond):
if cond:
t = SOURCE
else:
t = "Safe"
sink3(cond, t)
def hub(arg):
return arg
def test11():
t = SOURCE
t = hub(t)
SINK(t)
def test12():
t = "safe"
t = hub(t)
SINK(t)
import module
def test13():
t = module.dangerous
SINK(t)
def test14():
t = module.safe
SINK(t)
def test15():
t = module.safe2
SINK(t)
def test16():
t = module.dangerous_func()
SINK(t)
def test20(cond):
if cond:
t = CUSTOM_SOURCE
else:
t = SOURCE
if cond:
CUSTOM_SINK(t)
else:
SINK(t)
def test21(cond):
if cond:
t = CUSTOM_SOURCE
else:
t = SOURCE
if not cond:
CUSTOM_SINK(t)
else:
SINK(t)
def test22(cond):
if cond:
t = CUSTOM_SOURCE
else:
t = SOURCE
t = TAINT_FROM_ARG(t)
if cond:
CUSTOM_SINK(t)
else:
SINK(t)
from module import dangerous as unsafe
SINK(unsafe)
def test23():
with SOURCE as t:
SINK(t)
def test24():
s = SOURCE
SANITIZE(s)
SINK(s)
def test_update_extend(x, y):
l = [SOURCE]
d = {"key" : SOURCE}
x.extend(l)
y.update(d)
SINK(x[0])
SINK(y["key"])
l2 = list(l)
d2 = dict(d)
def test_truth():
t = SOURCE
if t:
SINK(t)
else:
SINK(t)
if not t:
SINK(t)
else:
SINK(t)
|
github/codeql
|
python/ql/test/library-tests/taint/dataflow/test.py
|
Python
|
mit
| 2,191
|
"""
This module contains a single class that manages the scraping of data
from one or more supermarkets on mysupermarket.co.uk
"""
from datetime import datetime
from os import remove
from os.path import isfile, getmtime
from time import time
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.utils.project import get_project_settings
from app_config import supermarket_names, supermarket_url, supermarket_filename
from .reactor_control import ReactorControl
from .spiders.mysupermarket import MySupermarketSpider
class CachingScraper():
"""
A "crawler manager" that manages scraping mysupermarket.co.uk for one or
more supermarkets. For each supermarket, it checks the cache file then
creates and starts a crawler if appropriate.
"""
def __init__(self, supermarkets=supermarket_names(), force_refresh=False):
"""Create a CachingScraper for the given supermarket(s).
Keyword arguments:
supermarkets -- a list of supermarkets to scrape
force_refresh -- if True, cachefiles will not be used
"""
self.force_refresh = force_refresh
self.supermarkets = supermarkets
self.reactor_control = ReactorControl()
def cache_exists(self, supermarket):
"""Check whether a JSON file already exists for data scraped from
the given supermarket, and if so, whether it was created today.
Note that 'created today' is not the same as 'age < 24 hours'. Prices
are assumed to change overnight so a cachefile created at 9pm
yesterday is considered out of date at 9am today (but a cachefile
created at 9am is not out of date at 9pm).
Keyword arguments:
supermarket -- the supermarket whose cachefile should be checked
"""
cachefile = supermarket_filename(supermarket)
if not isfile(cachefile):
return False
mtime = datetime.fromtimestamp(getmtime(cachefile))
now = datetime.fromtimestamp(time())
        return mtime.date() == now.date()  # compare full dates, not just day-of-month
def setup_crawler(self, supermarket, reactor_control):
"""Set up the Scrapy crawler.
See http://doc.scrapy.org/en/latest/topics/practices.html#run-scrapy-from-a-script.
        Keyword arguments:
        supermarket -- the supermarket whose crawler should be set up
        reactor_control -- the ReactorControl instance tracking live crawlers
        """
cachefile = supermarket_filename(supermarket)
if isfile(cachefile):
remove(cachefile)
settings = get_project_settings()
url = supermarket_url(supermarket)
settings.set('FEED_URI', supermarket_filename(supermarket))
spider = MySupermarketSpider(url)
crawler = Crawler(settings)
crawler.signals.connect(reactor_control.remove_crawler, signal=signals.spider_closed)
crawler.configure()
crawler.crawl(spider)
crawler.start()
reactor_control.add_crawler()
def get_data(self):
"""Main entry point for the scraper class. Crawl or get data from cache
for the configured supermarkets. Supermarkets are set in __init__.
"""
if self.force_refresh:
supermarkets_to_crawl = self.supermarkets
else:
supermarkets_to_crawl = [x for x in self.supermarkets if not self.cache_exists(x)]
if supermarkets_to_crawl:
reactor_control = ReactorControl()
for supermarket in supermarkets_to_crawl:
self.setup_crawler(supermarket, reactor_control)
reactor_control.start_crawling()
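# Usage sketch (the supermarket names are placeholders; real names come from
# app_config.supermarket_names()):
#
#   scraper = CachingScraper(['tesco', 'asda'], force_refresh=False)
#   scraper.get_data()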
|
hmcc/price-search
|
scraper/scraper.py
|
Python
|
mit
| 3,591
|
from django.contrib.sites.models import Site
from django.db import models
def _get_current_site(request):
# Attempts to use monodjango.middleware.SiteProviderMiddleware
try:
return Site.objects.get_current(request)
except TypeError:
pass
return Site.objects.get_current()
class ThemeManager(models.Manager):
def get_current_by_request(self, request=None):
""" Gets the current """
return self.get_current(_get_current_site(request))
def get_current(self, site=None):
""" Gets the current system theme. """
if site is None:
site = Site.objects.get_current()
return self.get(sites_enabled=site)
def get_list_by_request(self, request=None):
""" Gets a list of themes that are available for the given request. """
return self.get_list(_get_current_site(request))
def get_list(self, site=None):
""" Get a list of themes available on a specific site. """
if site is None:
site = Site.objects.get_current()
return self.filter(sites_available=site)
def get_default(self):
return self.get(default=True)
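# Illustrative attachment to a model (the Theme model itself is defined
# elsewhere in the app; this sketch only shows where the manager plugs in):
#
#   class Theme(models.Model):
#       default = models.BooleanField(default=False)
#       objects = ThemeManager()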
|
LimpidTech/django-themes
|
themes/managers.py
|
Python
|
mit
| 1,188
|
# AMDG
import unittest
from datetime import datetime
from balance import BasicLoader, RepayLoader
from base_test import BaseTest
class LoaderTests(BaseTest, unittest.TestCase):
def test_basic_loader(self):
loader = BasicLoader('tests/data/basic_loader')
entries, errors = loader.load(return_errors=True)
self.assertEquals(1, len(entries))
entry = entries[0]
self.assertEquals(-5.00, entry.amount)
self.assertEquals(2, len(errors))
self.assertEquals(errors[0]['entry'], '\n')
self.assertTrue(errors[0]['error'].message.startswith('Not a valid entry'))
self.assertEquals(errors[1]['entry'], 'this is a bad line:\n')
self.assertTrue(errors[1]['error'].message.startswith('Not a valid entry'))
def test_repay_loader(self):
loader = RepayLoader('tests/data/repay_loader')
entries, errors = loader.load(return_errors=True)
self.assertEquals(4, len(entries))
entry = entries.pop()
self.assertEquals(-11.00, entry.amount)
self.assertEquals('repay', entry.category)
self.assertEquals('#2', entry.description)
self.assertEquals('Joe', entry.vendor)
self.assertEquals('cash', entry.method)
self.assertEquals(datetime(2014,10,3), entry.date)
for e in entries:
self.assertTrue(e.method in RepayLoader.methods)
self.assertEquals(2, len(errors))
self.assertEquals(errors[0]['entry'], '#hello\n')
self.assertTrue(errors[0]['error'].message.startswith('Not a valid entry'))
self.assertEquals(errors[1]['entry'], 'bad line\n')
self.assertTrue(errors[1]['error'].message.startswith('Not a valid entry'))
if __name__ == '__main__':
unittest.main()
|
pilliq/balance
|
tests/test_loaders.py
|
Python
|
mit
| 1,764
|
# -*- coding: utf-8 -*-
# Django-style choices tuple; 男 = male, 女 = female
GENDER = ((u'男',u'男'),(u'女',u'女'))
|
youtaya/knight
|
fuzzybee/utils/constant.py
|
Python
|
mit
| 67
|
#!/usr/bin/python
"""
Package constants
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
## THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright 2017'
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = '0.1.0'
__maintainer__ = 'krishna bhogaonker'
__email__ = 'cyclotomiq@gmail.com'
__status__ = 'pre-alpha'
from aenum import Enum
class RequestTypes(Enum):
SIMPLEPOINTIMAGERY = 1
DIVAGIS = 2
COMPOSITEDPOINTIMAGERY = 3
class RequestStatusCodes(Enum):
CLOSED = 0
CREATED = 1
QUEUED = 2
PROCESSING = 3
COMPLETED = 4
REJECTED = 5
ERROR = 6
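# `ImageCollection` is neither defined nor imported in this file; a minimal
# stand-in matching the positional usage below (collection id, bands, start
# date, end date, resolution in meters) could look like this -- the real
# class presumably lives elsewhere in the package:
from collections import namedtuple
ImageCollection = namedtuple(
    'ImageCollection', ['collection', 'bands', 'start', 'end', 'resolution'])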
imgCollections = {'Landsat8' : ImageCollection('LANDSAT/LC08/C01/T1',
['B1','B2','B3','B4','B5','B6','B7','B8','B9','B10','B11','BQA'],
'04/13/2011',
'10/07/2017',
30),
'Landsat7' : ImageCollection('LANDSAT/LE07/C01/T1',
['B1','B2','B3','B4','B5','B6','B7'],
'01/01/1999',
'09/17/2017',
30),
'Landsat5' : ImageCollection('LANDSAT/LT05/C01/T1',
['B1','B2','B3','B4','B5','B6','B7'],
'01/01/1984',
'05/05/2012',
30),
'Sentinel2msi' : ImageCollection('COPERNICUS/S2',
['B1','B2','B3','B4','B5','B6','B7','B8','B8A','B9','B10','B11','QA10','QA20','QA60'],
'01/23/2015',
'10/20/2017',
30),
'Sentinel2sar' : ImageCollection('COPERNICUS/S1_GRD',
['VV', 'HH',['VV', 'VH'], ['HH','HV']],
'10/03/2014',
'10/20/2017',
30),
'ModisThermalAnomalies' : ImageCollection('MODIS/006/MOD14A1',
['FireMask', 'MaxFRP','sample', 'QA'],
'02/18/2000',
'10/23/2017',
30)
}
if __name__ == "__main__":
print('set of package constants.')
|
krishnab-datakind/mining-data-acquisition
|
data_gather/PackageConstants.py
|
Python
|
mit
| 4,082
|
from Models.Submission import Submission
from Core.Database import Database
from Core.Scorer import Score
from sqlalchemy import func, desc
class Ranking():
@staticmethod
def get_all():
session = Database.session()
scores = session.query(Score).order_by(desc(Score.score)).all()
return [{"student_id": s.student_id,
"submissions": session.query(func.count(Submission.id))
.filter(Submission.student_id == s.student_id).scalar(),
"score": s.score}
for s in scores]
|
brnomendes/grader-edx
|
Core/Ranking.py
|
Python
|
mit
| 564
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from gaegraph.model import Node, Arc
class Escravo(Node):
name = ndb.StringProperty(required=True)
age = ndb.IntegerProperty()
birth = ndb.DateProperty(auto_now=True)
price = ndb.FloatProperty()
estatura = ndb.FloatProperty()
class DonoArco(Arc):
destination = ndb.KeyProperty(Escravo, required=True)
|
renzon/fatec-script-2
|
backend/apps/escravo_app/modelos.py
|
Python
|
mit
| 446
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 09 14:51:02 2015
@author: Methinee
"""
import pandas as pd
import numpy as np
from collections import defaultdict
from astropy.table import Table, Column
df = pd.read_csv('../data/CS_table_No2_No4_new.csv',delimiter=";", skip_blank_lines = True,
error_bad_lines=False)
headers=list(df.columns.values)
subjects = {'courseId':[]}
students = {'studentId':[]}
years = [52,53,54,55,56]
semester = [1,2]
key_sub = defaultdict(list)
key_std = defaultdict(list)
key=[]
countSub = 0
countStd = 0
#Create dictionary of list subjects
for sub in df[headers[4]]:
if sub not in subjects['courseId']:
subjects['courseId'].append(sub)
countSub = countSub+1
# number each subject (the original reused the final countSub for every key)
for i, keyCol in enumerate(subjects['courseId'], start=1):
    key_sub[i] = keyCol
#print subjects["courseId"]
#print "number of subjects are ",countSub
print "-----------------------------------------------"
print key_sub
print "-----------------------------------------------"
#Create dictionary of list students
for std in df[headers[0]]:
if std not in students['studentId']:
students['studentId'].append(std)
countStd = countStd+1
# for keyRow in students['studentId']:
# for y in years:
# students['studentId'].append(y)
#print students['studentId']
#print "number of students are ",countStd
print "-----------------------------------------------"
#create table row are stdId+years+semester, column is key of subjects
column = key_sub
t = Table(column , names=(subjects['courseId']))
firstCol = students
t = Table(firstCol, names=(firstCol))
print t
"""table_No2_No4_out = pd.DataFrame(subjects)
writer = pd.ExcelWriter("table_No2_No4_fomat.xlsx")
table_No2_No4_out.to_excel(writer,"grade")
writer.save()"""
|
wasit7/book_pae
|
pae/forcast/src/csv/CS_table_No2_No4.py
|
Python
|
mit
| 1,865
|
import os
import json
from subprocess import check_output, CalledProcessError
#Constants
_TREE_PATH="data/graph/"
def renderGraph(query):
"""
    Returns the contents of the svg file that
    contains the graph render of the query.
Creates the svg file itself if it
does not already exist.
"""
#Compute the hash of the query string
qhash = hashFunc(query)
if (not os.path.exists(_TREE_PATH+str(qhash))):
#Create bucket if it doesn't already exist.
os.makedirs(_TREE_PATH+str(qhash))
#Create the lookup table for the bucket.
bucketTableFile=open(_TREE_PATH+str(qhash)+"/lookup.json",'w')
bucketTableFile.write("{}")
bucketTableFile.close()
#Load bucketTable
bucketTableFile=open(_TREE_PATH+str(qhash)+"/lookup.json",'r+')
bucketTable = json.loads(bucketTableFile.read())
if query not in bucketTable.keys():
#File is not cache! Create PNG in bucket.
filename=str(len(os.listdir(_TREE_PATH+str(qhash))))+".svg"
fn=query.split(",")[0]
rest=query.split(",")[1:]
myParams={i[0]:i[1] for i in map(lambda x:x.split("="),rest)}
if not TeXToGraph(fn,_TREE_PATH+str(qhash),filename,myParams):
#An error has occurred while rendering the LaTeX.
            return open(handleTeXRenderError("An error has occurred while rendering LaTeX.")).read()
#Update bucketTable
bucketTable[query]=filename
#Write back to bucketTableFile
bucketTableFile.seek(0)
bucketTableFile.write(json.dumps(bucketTable))
bucketTableFile.close()
#Return path to newly created/existing file
return open(_TREE_PATH+str(qhash)+"/"+bucketTable[query]).read()
def hashFunc(s):
"""
Call some hashfunc and return the result.
Goes "hashy hashy".
"""
return abs(hash(s))
def TeXToGraph(fn,targetDir,name,paramsIn):
"""
    Renders the function fn as a graph svg in targetDir named name. Returns True if successful, False if not.
"""
params={
'xmin':-10,
'xmax':10,
'ymin':-10,
'ymax':10,
'xlabel':"x",
'ylabel':"y",
}
for i in paramsIn:
if i!='xlabel' and i !='ylabel':
params[i]=int(paramsIn[i])
else:
params[i]=paramsIn[i]
print params
print fn
try:
check_output("./to_graph.sh {0} {1} {2} {3} {4} {5} {6} {7} {8}".format(fn,params['xmin'],params['xmax'],params['ymin'],params['ymax'],params['xlabel'],params['ylabel'],targetDir,name).split())
except CalledProcessError:
return False
return True
def handleTeXRenderError(errorMsg):
"""
Handles an error encountered while attempting to render a TeX string
"""
print errorMsg
return "assets/img/error.png"
|
texit/texit
|
graph.py
|
Python
|
mit
| 2,827
|
def quicksort(A, lo, hi):
    if lo < hi:
        p = partition(A, lo, hi)
        quicksort(A, lo, p - 1)
        quicksort(A, p + 1, hi)
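# partition is left undefined in the original snippet; a standard Lomuto
# partition (an assumption, since no scheme is specified) makes it runnable:
def partition(A, lo, hi):
    pivot = A[hi]
    i = lo
    for j in range(lo, hi):
        if A[j] < pivot:
            A[i], A[j] = A[j], A[i]
            i += 1
    A[i], A[hi] = A[hi], A[i]  # move the pivot into its final position
    return i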
|
Chasego/codi
|
util/basic/quicksort.py
|
Python
|
mit
| 120
|
# Basic command-line interface to manage docker containers which will use an
# image stored in a dockerhub registry - 'pokeybill/bftest'
import click
from click.testing import CliRunner
import docker
import sys
import time
import requests
this = sys.modules[__name__]
BASE_URL = 'unix://var/run/docker.sock'
REGISTRY = 'pokeybill/bftest'
DIGEST = 'sha256:79215d32e5896c1ccd3f57d22ee6aaa7c9d79c9c87737f2b96673186de6ab060'
@click.group()
def default():
""" A basic docker container management wrapper """
pass
@click.command()
@click.argument('container')
def run(container):
""" attempts to start the docker container specified """
try:
fetch_client()
this.client.pull(REGISTRY)
start_container(container)
result = health_check(container)
except docker.errors.APIError as e:
click.echo('[!] Docker API Error: {}'.format(e))
sys.exit(1)
    except (KeyboardInterrupt, SystemExit):
click.echo('[!] Aborting')
@click.command()
@click.argument('container')
def stop(container):
""" attempts to stop the docker container specified """
try:
fetch_client()
this.client.stop(container)
this.client.remove_container(container)
except docker.errors.APIError as e:
click.echo('[!] Error stopping container: {}'.format(e))
sys.exit(1)
    except (KeyboardInterrupt, SystemExit):
click.echo('[!] Aborting')
@click.command()
def test():
""" basic functional test to ensure containers can be managed """
click.echo('[*] Testing docker container creation/removal')
cont_name = 'funky_aardvark'
try:
runner = CliRunner()
# Test the RUN command
result = runner.invoke(run, [cont_name])
result_txt = result.output.strip('\n')
assert result.exit_code == 0, '[!] Application START failed: {}'.format(result_txt)
assert 'Your app is running on' in result.output, \
'[!] Unexpected output: {}'.format(result.output)
click.echo(result_txt)
# Test container access
click.echo('[*] Ensuring we can communicate with the containerized application')
result = requests.get('http://127.0.0.1:8888/hello')
assert result.status_code == 200, \
'[!] Unexpected HTTP response: {}'.format(result.status_code)
click.echo('\t{}'.format(result.text))
# Test the STOP command
result = runner.invoke(stop, [cont_name])
result_txt = result.output.strip('\n')
assert result.exit_code == 0, '[!] Application STOP failed: {}'.format(result_txt)
click.echo('[*] Container {} stopped'.format(cont_name))
except requests.exceptions.ConnectionError as e:
    click.echo('[!] Failed to communicate with the application')
    click.echo(e.args[0])
except AssertionError as e:
click.echo('[*] Test failed - {}'.format(e))
except (KeyboardInterrupt, SystemExit):
click.echo('[!] Aborting')
else:
click.echo('[*] Test succeeded')
default.add_command(run)
default.add_command(stop)
default.add_command(test)
# Functions start here
def health_check(inst_name):
def __check_state():
cont_state = this.client.inspect_container(inst_name)['State']
if cont_state['Status']=='running':
return cont_state['Health']['Status']
else:
click.echo('[!] Container is not running!')
repeat = 0
while True:
cont_status = __check_state()
if cont_status == 'healthy':
click.echo('[*] Your app is running on http://127.0.0.1:8888')
return True
elif cont_status == 'starting':
if repeat > 6:
return
time.sleep(1)
repeat += 1
else:
click.echo('[!] Container status: {}'.format(cont_status))
return
def start_container(inst_name):
this.client.create_container(
REGISTRY,
detach=False,
name=inst_name,
ports=[8888],
host_config=this.client.create_host_config(
port_bindings={8888: ('127.0.0.1',8888)}
),
)
this.client.start(inst_name)
def fetch_client(base_url=BASE_URL):
this.client = docker.APIClient(base_url=base_url, version='1.24')
try:
this.client.version()
except requests.exceptions.ConnectionError as e:
click.echo('[!] Unable to connect to Docker daemon @ {}'.format(BASE_URL))
sys.exit(1)
if __name__=="__main__":
default()
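# Example session (assumes a local Docker daemon listening on
# unix://var/run/docker.sock and pull access to the pokeybill/bftest image):
#   python dockcli.py run <container-name>
#   python dockcli.py stop <container-name>
#   python dockcli.py test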
|
wnormandin/bftest_cli
|
cli/dockcli.py
|
Python
|
mit
| 4,675
|
# Author:
# Ross Sbriscia, April 2016
import random
import sys
import traceback
import os
import math
import argparse
import time
import random_connected_graph
# Parses Arguments
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='Simulates a P2P network with random dynamic connectivity in order to examine runtime \
and space complexity of search algorithms.',
epilog="Examples:\n\
\n\
python SimulateP2PNetwork.py 30 randomwalk -o output\n\
This will simulate a network of 30 vertices and use the random walk algorithm, outputs in output.csv\n\
\n\
python SimulateP2PNetwork.py 500 bfs -e 20\n\
This will simulate a network of 500 vertices, using the BFS algorithm, and run a \
new experiment (assign new start and end nodes) on each graph 20 times.\n\
\n\
python SimulateP2PNetwork.py 350 randomwalk -e 30 -t 200\n\
This will simulate a network of 350 vertices, using the randomwalk algorithm, run a \
new trial (assign new start and end nodes) on each graph 30 times and re-build (assign new edges) the graph 200 times.\n\
\n\
Output: a csv in the following form (one line per experiment);\n\
num vertices, num edges, algorithm used, average length of path found, if file NEVER found, average data per hop (bytes), running time (seconds)\n\
Ex:\n\
250,10898,randomwalk,32373,False,32,3.237650\n\
250,10898,randomwalk,25520,False,32,2.553203\n\
250,10898,randomwalk,28501,False,32,2.851121\n\
.\n\
.\n\
.")
parser.add_argument('vertices', type=int,
help='Number of vertices in the simulated network (Recommend <= 1000)')
parser.add_argument('algorithm', choices=['randomwalk', 'bfs', 'lazyrandomwalk', 'rw', 'lrw'],
help='Choose an algorithm to use in the simulation')
parser.add_argument('-r', type=int,
help='(Default 10) Number of RUNS per EXPERIMENTS (exact same start and end nodes, on network with same edges)')
parser.add_argument('-e', type=int,
help='(Default 50) Number of EXPERIMENTS per TRIAL (new start and end nodes, on network with same edges)')
parser.add_argument('-t', type=int,
help='(Default 100) Number of TRIALS (times graph will be re-built with new edges)')
parser.add_argument('-o',
help='Specify output filename')
args = parser.parse_args()
numberOfVertices = args.vertices
algorithm = args.algorithm
numberOfFailures = 0
pofEdgeFail = 0.001
maxPathLength = 4 * (math.pow(numberOfVertices, 3))
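# Added note: 4*n^3 looks like a loose upper bound on the expected cover time
# of a random walk on a connected graph (which is O(n^3)); walks that exceed
# it are treated as failures below. This rationale is inferred, not stated by
# the original author.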
if args.t:
    numberOfTrials = args.t
else:
    numberOfTrials = 100
if (algorithm == "rw"):
    algorithm = "randomwalk"
if (algorithm == "lrw"):
    algorithm = "lazyrandomwalk"
if args.o:
outfileName = "./Data/" + args.o
else:
if (algorithm == "randomwalk"):
outfileName = "./Data/RandomWalkSimulation.csv"
if (algorithm == "bfs"):
outfileName = "./Data/BFSSimulation.csv"
if (algorithm == "lazyrandomwalk"):
outfileName = "./Data/LazyRandomWalkSimulation.csv"
if args.e:
numberOfExperiments = args.e
else:
numberOfExperiments = 50
if args.r:
numberOfRuns = args.r
else:
numberOfRuns = 10
# Code Starts Here!
# Returns the maximum possible number of edges of an undirected graph with
# n vertices
def maxEdges(n):
    return (n * (n - 1)) // 2  # floor division keeps the result an int for randrange()
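# e.g. maxEdges(4) == 6: the complete graph on 4 vertices has 6 edges.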
# Runs the algorithm and collects data
def runAlgorithm(graph, startHost, endHost):
# Algorithm sends a constant ammount of data per hop. 8 bytes of data.
if (algorithm == "randomwalk"):
hops = []
currHost = random.choice(graph.neighborSet[startHost])
start_Per_Host_Computation_Time = time.time()*1000
while (len(hops) <= maxPathLength and currHost != endHost):
deadset = []
for neighbor in graph.neighborSet[currHost]:  # Calculates random edge failure
if (pofEdgeFail > random.random()):
deadset.append(neighbor)
activeneighbors = list(set(graph.neighborSet[currHost])-set(deadset));
if not activeneighbors: # if every edge dies
currHost = random.choice(graph.neighborSet[currHost]);
else:
currHost = random.choice(activeneighbors)
hops.append(currHost)
finish_Per_Host_Computation_Time = time.time()*1000
return hops, (finish_Per_Host_Computation_Time - start_Per_Host_Computation_Time)
if (algorithm == "bfs"):
# maintain a queue of paths
queue = []
# push the first path into the queue
queue.append([startHost])
starttime = time.time()*1000
while queue:
# get the first path from the queue
path = queue.pop(0)
# get the last node from the path
currHost = path[-1]
# path found
if currHost == endHost:
finishtime = time.time()*1000
return path, (finishtime - starttime)
# enumerate all adjacent nodes, construct a new path and push it
# into the queue
for adjacent in graph.neighborSet[currHost]:
if (pofEdgeFail > random.random()):
continue;
new_path = list(path)
new_path.append(adjacent)
queue.append(new_path)
finishtime = time.time()*1000
return path, (finishtime - starttime);
if (algorithm == "lazyrandomwalk"):
hops = []
currHost = random.choice(graph.neighborSet[startHost])
start_Per_Host_Computation_Time = time.time()*1000
while (len(hops) <= maxPathLength and currHost != endHost):
takeSelfLoop = random.random();
deadset = []
for neighbor in graph.neighborSet[currHost]:  # Calculates random edge failure
if (pofEdgeFail > random.random()):
deadset.append(neighbor)
activeneighbors = list(set(graph.neighborSet[currHost])-set(deadset));
if not activeneighbors:
currHost = random.choice(graph.neighborSet[currHost]);
else:
if (takeSelfLoop < .5): # If we do NOT take a self-loop
currHost = random.choice(activeneighbors)
hops.append(currHost)
finish_Per_Host_Computation_Time = time.time()*1000
return hops, (finish_Per_Host_Computation_Time - start_Per_Host_Computation_Time)
# Returns a connected graph with randomized edges.
# This simulates the reality of real p2p networks,
# as hosts very often come online and go offline.
def shuffleConnections():
edges = random.randrange(numberOfVertices - 1, maxEdges(numberOfVertices))
verts = list(range(int(numberOfVertices)))  # range() works on both Python 2 and 3
network = random_connected_graph.random_walk(verts, edges)
network.sort_edges()
# print "Generated network containing:\n\
#%d hosts (vertices)\n\
#%d connections (edges)" % (len(network.nodes), len(network.edges));
return network
# Shuffles node looking for the file, and node which has the file
def shuffleHostsOfInterest():
    # randrange's upper bound is exclusive, so use numberOfVertices here;
    # the original bound of numberOfVertices - 1 could never pick the last vertex
    startNode = random.randrange(0, numberOfVertices)
    endNode = random.randrange(0, numberOfVertices)
    if (startNode == endNode):
        return shuffleHostsOfInterest()
    else:
        return startNode, endNode
# setup loading bar
print("\n\nRunning Simulations...")
trialRatio = math.ceil(numberOfTrials * 2 / 100)
# sys.stdout.write("[%s]" % (" " * 50))
# sys.stdout.flush()
# sys.stdout.write("\b" * (50 + 1)) # return to start of line, after '['
# Run the expirement
outputCSV = open(outfileName, 'w')
sys.stdout.write("\033[92m0\033[0m")
for currentTrial in range(numberOfTrials):
    network = shuffleConnections()
    for currentExperiment in range(numberOfExperiments):
startHost, endHost = shuffleHostsOfInterest()
hops = []
runtime = []
spacePerHost = 32 # Estimated 32 bytes of data for the base request.
for currentRun in range(numberOfRuns):
numhops, searchtime = runAlgorithm(network, startHost, endHost)
runtime.append(searchtime)
hops.append(len(numhops))  # path length is the hop count; summing node IDs was a bug
averageRunTime = sum(runtime) / len(runtime)
averageHopLength = sum(hops) / len(hops)
# Adds link latency into computation, estimating 0.0001 second
# transmission delay/hop
averageRunTime += (averageHopLength * 0.1)
if algorithm == "bfs":
spacePerHost += averageHopLength * 32 # Each new host IP needs to be enqueued into the datastructure
includedFailure = False
# Allows for a 10Mbs (average) upload speed bottleneck on all hosts
averageRunTime += (spacePerHost / 1250)
# Processing Time For Algorithm
averageRunTime += (spacePerHost / 100)
if any(h > maxPathLength for h in hops):  # a run that hit the cap never found the file
    includedFailure = True
outputCSV.write("%d,%d,%s,%d,%r,%d,%.6f\n" % (numberOfVertices, len(
    network.edges), algorithm, averageHopLength, includedFailure, spacePerHost, averageRunTime))
# Progress
number_of_chars_to_erase = len(str(currentTrial)) + 11 + len(str(numberOfTrials))
print("\033[92mTrial:\t%d/%d\033[0m " % (currentTrial+1,numberOfTrials))
sys.stdout.write('\n')
outputCSV.close()
|
rsbrisci/Math455RandomWalkProject
|
Python Solution v1.0/SimulateP2PNetwork.py
|
Python
|
mit
| 9,332
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NumbersInternational(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
    and, for var_name, this is (var_name,). The value is a dict
    with a capitalized key describing the allowed value and an allowed
    value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
    and, for var_name, this is (var_name,). The value is a dict
    that stores validations for max_length, min_length, max_items,
    min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
    inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'account_id': (str,), # noqa: E501
'iban': (str,), # noqa: E501
'bic': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'account_id': 'account_id', # noqa: E501
'iban': 'iban', # noqa: E501
'bic': 'bic', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, account_id, iban, bic, *args, **kwargs): # noqa: E501
"""NumbersInternational - a model defined in OpenAPI
Args:
account_id (str): The Plaid account ID associated with the account numbers
iban (str): The International Bank Account Number (IBAN) for the account
bic (str): The Bank Identifier Code (BIC) for the account
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.account_id = account_id
self.iban = iban
self.bic = bic
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
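# Hypothetical usage sketch (all three values are invented for illustration):
# numbers = NumbersInternational(
#     account_id="vzeNDwK7KQIm4yEog683uElbp9GRLEFXGK98D",
#     iban="GB29NWBK60161331926819",
#     bic="NWBKGB2L",
# )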
|
plaid/plaid-python
|
plaid/model/numbers_international.py
|
Python
|
mit
| 7,184
|
import pendulum
locale = "nb"
def test_diff_for_humans():
with pendulum.test(pendulum.datetime(2016, 8, 29)):
diff_for_humans()
def diff_for_humans():
d = pendulum.now().subtract(seconds=1)
assert d.diff_for_humans(locale=locale) == "for 1 sekund siden"
d = pendulum.now().subtract(seconds=2)
assert d.diff_for_humans(locale=locale) == "for 2 sekunder siden"
d = pendulum.now().subtract(minutes=1)
assert d.diff_for_humans(locale=locale) == "for 1 minutt siden"
d = pendulum.now().subtract(minutes=2)
assert d.diff_for_humans(locale=locale) == "for 2 minutter siden"
d = pendulum.now().subtract(hours=1)
assert d.diff_for_humans(locale=locale) == "for 1 time siden"
d = pendulum.now().subtract(hours=2)
assert d.diff_for_humans(locale=locale) == "for 2 timer siden"
d = pendulum.now().subtract(days=1)
assert d.diff_for_humans(locale=locale) == "for 1 dag siden"
d = pendulum.now().subtract(days=2)
assert d.diff_for_humans(locale=locale) == "for 2 dager siden"
d = pendulum.now().subtract(weeks=1)
assert d.diff_for_humans(locale=locale) == "for 1 uke siden"
d = pendulum.now().subtract(weeks=2)
assert d.diff_for_humans(locale=locale) == "for 2 uker siden"
d = pendulum.now().subtract(months=1)
assert d.diff_for_humans(locale=locale) == "for 1 måned siden"
d = pendulum.now().subtract(months=2)
assert d.diff_for_humans(locale=locale) == "for 2 måneder siden"
d = pendulum.now().subtract(years=1)
assert d.diff_for_humans(locale=locale) == "for 1 år siden"
d = pendulum.now().subtract(years=2)
assert d.diff_for_humans(locale=locale) == "for 2 år siden"
d = pendulum.now().add(seconds=1)
assert d.diff_for_humans(locale=locale) == "om 1 sekund"
d = pendulum.now().add(seconds=1)
d2 = pendulum.now()
assert d.diff_for_humans(d2, locale=locale) == "1 sekund etter"
assert d2.diff_for_humans(d, locale=locale) == "1 sekund før"
assert d.diff_for_humans(d2, True, locale=locale) == "1 sekund"
assert d2.diff_for_humans(d.add(seconds=1), True, locale=locale) == "2 sekunder"
def test_format():
d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123456)
assert d.format("dddd", locale=locale) == "søndag"
assert d.format("ddd", locale=locale) == "søn."
assert d.format("MMMM", locale=locale) == "august"
assert d.format("MMM", locale=locale) == "aug."
assert d.format("A", locale=locale) == "a.m."
assert d.format("Qo", locale=locale) == "3."
assert d.format("Mo", locale=locale) == "8."
assert d.format("Do", locale=locale) == "28."
assert d.format("LT", locale=locale) == "07:03"
assert d.format("LTS", locale=locale) == "07:03:06"
assert d.format("L", locale=locale) == "28.08.2016"
assert d.format("LL", locale=locale) == "28. august 2016"
assert d.format("LLL", locale=locale) == "28. august 2016 07:03"
assert d.format("LLLL", locale=locale) == "søndag 28. august 2016 07:03"
|
sdispater/pendulum
|
tests/localization/test_nb.py
|
Python
|
mit
| 3,021
|
import django
django.setup()
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.test import APIClient
from customers.models import Customer, Email
class CustomersTest(APITestCase, TestCase):
def test_api_should_accept_multiple_emails_for_new_customers(self):
customer = self.get_default_customer()
customer['emails'].append({
"address": "jhondoe@stackoverflow.com",
"description": "Professional"
})
customer['emails'].append({
"address": "jhondoe@proto.io",
"description": "Personal"
})
response = self.client.post('/api/customers', customer, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Customer.objects.count(), 1)
self.assertEqual(Email.objects.count(), 2)
def test_api_should_not_accept_empty_email(self):
customer = self.get_default_customer()
response = self.client.post('/api/customers', customer, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_should_not_accept_equal_emails(self):
customer = self.get_default_customer()
customer['emails'].append({
"address": "jhondoe@stackoverflow.com",
"description": "Professional"
})
customer['emails'].append({
"address": "jhondoe@stackoverflow.com",
"description": "Personal"
})
response = self.client.post('/api/customers', customer, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def get_default_customer(self):
return {
"name": "Jhon Doe",
"date_of_birth": "1990-3-3",
"gender": "M",
"emails": []
}
|
guilatrova/customercontrol-api
|
customers/tests.py
|
Python
|
mit
| 1,951
|
import os
import sys
import fnmatch
directory = os.path.dirname(os.path.realpath(sys.argv[0])) #get the directory of your script
for subdir, dirs, files in os.walk(directory):
print(files)
for filename in files:
if fnmatch.fnmatch(filename, 'mpsdk_*'):  # fnmatch returns a bool; comparing > 0 was redundant
subdirectoryPath = os.path.relpath(subdir, directory) #get the path to your subdirectory
filePath = os.path.join(subdirectoryPath, filename) #get the path to your file
newFilePath = filePath.replace("mpsdk_","px_") #create the new name
os.rename(filePath, newFilePath) #rename your file
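# Note: the walk is rooted at this script's own directory (derived from
# sys.argv[0]), so the mpsdk_* resources must live under the directory that
# contains this script.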
|
mercadopago/px-android
|
scripts/rename_resources.py
|
Python
|
mit
| 559
|
import asyncio
import inspect
import itertools
import string
import typing
from .. import helpers, utils, hints
from ..requestiter import RequestIter
from ..tl import types, functions, custom
if typing.TYPE_CHECKING:
from .telegramclient import TelegramClient
_MAX_PARTICIPANTS_CHUNK_SIZE = 200
_MAX_ADMIN_LOG_CHUNK_SIZE = 100
_MAX_PROFILE_PHOTO_CHUNK_SIZE = 100
class _ChatAction:
_str_mapping = {
'typing': types.SendMessageTypingAction(),
'contact': types.SendMessageChooseContactAction(),
'game': types.SendMessageGamePlayAction(),
'location': types.SendMessageGeoLocationAction(),
'record-audio': types.SendMessageRecordAudioAction(),
'record-voice': types.SendMessageRecordAudioAction(), # alias
'record-round': types.SendMessageRecordRoundAction(),
'record-video': types.SendMessageRecordVideoAction(),
'audio': types.SendMessageUploadAudioAction(1),
'voice': types.SendMessageUploadAudioAction(1), # alias
'song': types.SendMessageUploadAudioAction(1), # alias
'round': types.SendMessageUploadRoundAction(1),
'video': types.SendMessageUploadVideoAction(1),
'photo': types.SendMessageUploadPhotoAction(1),
'document': types.SendMessageUploadDocumentAction(1),
'file': types.SendMessageUploadDocumentAction(1), # alias
'cancel': types.SendMessageCancelAction()
}
def __init__(self, client, chat, action, *, delay, auto_cancel):
self._client = client
self._chat = chat
self._action = action
self._delay = delay
self._auto_cancel = auto_cancel
self._request = None
self._task = None
self._running = False
async def __aenter__(self):
self._chat = await self._client.get_input_entity(self._chat)
# Since `self._action` is passed by reference we can avoid
# recreating the request all the time and still modify
# `self._action.progress` directly in `progress`.
self._request = functions.messages.SetTypingRequest(
self._chat, self._action)
self._running = True
self._task = self._client.loop.create_task(self._update())
return self
async def __aexit__(self, *args):
self._running = False
if self._task:
self._task.cancel()
try:
await self._task
except asyncio.CancelledError:
pass
self._task = None
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
async def _update(self):
try:
while self._running:
await self._client(self._request)
await asyncio.sleep(self._delay)
except ConnectionError:
pass
except asyncio.CancelledError:
if self._auto_cancel:
await self._client(functions.messages.SetTypingRequest(
self._chat, types.SendMessageCancelAction()))
def progress(self, current, total):
if hasattr(self._action, 'progress'):
self._action.progress = round(100 * current / total)  # rounding the ratio first would only ever yield 0 or 100
class _ParticipantsIter(RequestIter):
async def _init(self, entity, filter, search, aggressive):
if isinstance(filter, type):
if filter in (types.ChannelParticipantsBanned,
types.ChannelParticipantsKicked,
types.ChannelParticipantsSearch,
types.ChannelParticipantsContacts):
# These require a `q` parameter (support types for convenience)
filter = filter('')
else:
filter = filter()
entity = await self.client.get_input_entity(entity)
ty = helpers._entity_type(entity)
if search and (filter or ty != helpers._EntityType.CHANNEL):
# We need to 'search' ourselves unless we have a PeerChannel
search = search.casefold()
self.filter_entity = lambda ent: (
search in utils.get_display_name(ent).casefold() or
search in (getattr(ent, 'username', None) or '').casefold()
)
else:
self.filter_entity = lambda ent: True
# Only used for channels, but we should always set the attribute
self.requests = []
if ty == helpers._EntityType.CHANNEL:
self.total = (await self.client(
functions.channels.GetFullChannelRequest(entity)
)).full_chat.participants_count
if self.limit <= 0:
raise StopAsyncIteration
self.seen = set()
if aggressive and not filter:
self.requests.extend(functions.channels.GetParticipantsRequest(
channel=entity,
filter=types.ChannelParticipantsSearch(x),
offset=0,
limit=_MAX_PARTICIPANTS_CHUNK_SIZE,
hash=0
) for x in (search or string.ascii_lowercase))
else:
self.requests.append(functions.channels.GetParticipantsRequest(
channel=entity,
filter=filter or types.ChannelParticipantsSearch(search),
offset=0,
limit=_MAX_PARTICIPANTS_CHUNK_SIZE,
hash=0
))
elif ty == helpers._EntityType.CHAT:
full = await self.client(
functions.messages.GetFullChatRequest(entity.chat_id))
if not isinstance(
full.full_chat.participants, types.ChatParticipants):
# ChatParticipantsForbidden won't have ``.participants``
self.total = 0
raise StopAsyncIteration
self.total = len(full.full_chat.participants.participants)
users = {user.id: user for user in full.users}
for participant in full.full_chat.participants.participants:
    user = users[participant.user_id]
    if not self.filter_entity(user):
        continue
    user.participant = participant
    self.buffer.append(user)
return True
else:
self.total = 1
if self.limit != 0:
user = await self.client.get_entity(entity)
if self.filter_entity(user):
user.participant = None
self.buffer.append(user)
return True
async def _load_next_chunk(self):
if not self.requests:
return True
# Only care about the limit for the first request
# (small amount of people, won't be aggressive).
#
# Most people won't care about getting exactly 12,345
# members so it doesn't really matter not to be 100%
# precise with being out of the offset/limit here.
self.requests[0].limit = min(
self.limit - self.requests[0].offset, _MAX_PARTICIPANTS_CHUNK_SIZE)
if self.requests[0].offset > self.limit:
return True
results = await self.client(self.requests)
for i in reversed(range(len(self.requests))):
participants = results[i]
if not participants.users:
self.requests.pop(i)
continue
self.requests[i].offset += len(participants.participants)
users = {user.id: user for user in participants.users}
for participant in participants.participants:
    user = users[participant.user_id]
    if not self.filter_entity(user) or user.id in self.seen:
        continue
    self.seen.add(participant.user_id)
    user.participant = participant
    self.buffer.append(user)
class _AdminLogIter(RequestIter):
async def _init(
self, entity, admins, search, min_id, max_id,
join, leave, invite, restrict, unrestrict, ban, unban,
promote, demote, info, settings, pinned, edit, delete
):
if any((join, leave, invite, restrict, unrestrict, ban, unban,
promote, demote, info, settings, pinned, edit, delete)):
events_filter = types.ChannelAdminLogEventsFilter(
join=join, leave=leave, invite=invite, ban=restrict,
unban=unrestrict, kick=ban, unkick=unban, promote=promote,
demote=demote, info=info, settings=settings, pinned=pinned,
edit=edit, delete=delete
)
else:
events_filter = None
self.entity = await self.client.get_input_entity(entity)
admin_list = []
if admins:
if not utils.is_list_like(admins):
admins = (admins,)
for admin in admins:
admin_list.append(await self.client.get_input_entity(admin))
self.request = functions.channels.GetAdminLogRequest(
self.entity, q=search or '', min_id=min_id, max_id=max_id,
limit=0, events_filter=events_filter, admins=admin_list or None
)
async def _load_next_chunk(self):
self.request.limit = min(self.left, _MAX_ADMIN_LOG_CHUNK_SIZE)
r = await self.client(self.request)
entities = {utils.get_peer_id(x): x
for x in itertools.chain(r.users, r.chats)}
self.request.max_id = min((e.id for e in r.events), default=0)
for ev in r.events:
if isinstance(ev.action,
types.ChannelAdminLogEventActionEditMessage):
ev.action.prev_message._finish_init(
self.client, entities, self.entity)
ev.action.new_message._finish_init(
self.client, entities, self.entity)
elif isinstance(ev.action,
types.ChannelAdminLogEventActionDeleteMessage):
ev.action.message._finish_init(
self.client, entities, self.entity)
self.buffer.append(custom.AdminLogEvent(ev, entities))
if len(r.events) < self.request.limit:
return True
class _ProfilePhotoIter(RequestIter):
async def _init(
self, entity, offset, max_id
):
entity = await self.client.get_input_entity(entity)
ty = helpers._entity_type(entity)
if ty == helpers._EntityType.USER:
self.request = functions.photos.GetUserPhotosRequest(
entity,
offset=offset,
max_id=max_id,
limit=1
)
else:
self.request = functions.messages.SearchRequest(
peer=entity,
q='',
filter=types.InputMessagesFilterChatPhotos(),
min_date=None,
max_date=None,
offset_id=0,
add_offset=offset,
limit=1,
max_id=max_id,
min_id=0,
hash=0
)
if self.limit == 0:
self.request.limit = 1
result = await self.client(self.request)
if isinstance(result, types.photos.Photos):
self.total = len(result.photos)
elif isinstance(result, types.messages.Messages):
self.total = len(result.messages)
else:
# Luckily both photosSlice and messages have a count for total
self.total = getattr(result, 'count', None)
async def _load_next_chunk(self):
self.request.limit = min(self.left, _MAX_PROFILE_PHOTO_CHUNK_SIZE)
result = await self.client(self.request)
if isinstance(result, types.photos.Photos):
self.buffer = result.photos
self.left = len(self.buffer)
self.total = len(self.buffer)
elif isinstance(result, types.messages.Messages):
self.buffer = [x.action.photo for x in result.messages
if isinstance(x.action, types.MessageActionChatEditPhoto)]
self.left = len(self.buffer)
self.total = len(self.buffer)
elif isinstance(result, types.photos.PhotosSlice):
self.buffer = result.photos
self.total = result.count
if len(self.buffer) < self.request.limit:
self.left = len(self.buffer)
else:
self.request.offset += len(result.photos)
else:
self.buffer = [x.action.photo for x in result.messages
if isinstance(x.action, types.MessageActionChatEditPhoto)]
self.total = getattr(result, 'count', None)
if len(result.messages) < self.request.limit:
self.left = len(self.buffer)
elif result.messages:
self.request.add_offset = 0
self.request.offset_id = result.messages[-1].id
class ChatMethods:
# region Public methods
def iter_participants(
self: 'TelegramClient',
entity: 'hints.EntityLike',
limit: float = None,
*,
search: str = '',
filter: 'types.TypeChannelParticipantsFilter' = None,
aggressive: bool = False) -> _ParticipantsIter:
"""
Iterator over the participants belonging to the specified chat.
The order is unspecified.
Arguments
entity (`entity`):
The entity from which to retrieve the participants list.
limit (`int`):
Limits amount of participants fetched.
search (`str`, optional):
Look for participants with this string in name/username.
If ``aggressive is True``, the symbols from this string will
be used.
filter (:tl:`ChannelParticipantsFilter`, optional):
The filter to be used, if you want e.g. only admins
Note that you might not have permissions for some filter.
This has no effect for normal chats or users.
.. note::
The filter :tl:`ChannelParticipantsBanned` will return
*restricted* users. If you want *banned* users you should
use :tl:`ChannelParticipantsKicked` instead.
aggressive (`bool`, optional):
Aggressively looks for all participants in the chat.
This is useful for channels: since 20 July 2018,
Telegram enforces a server-side limit where only the
first 200 members can be retrieved. With this flag
set, often more than 200 will be retrieved.
This has no effect if a ``filter`` is given.
Yields
The :tl:`User` objects returned by :tl:`GetParticipantsRequest`
with an additional ``.participant`` attribute which is the
matched :tl:`ChannelParticipant` type for channels/megagroups
or :tl:`ChatParticipants` for normal chats.
Example
.. code-block:: python
# Show all user IDs in a chat
async for user in client.iter_participants(chat):
print(user.id)
# Search by name
async for user in client.iter_participants(chat, search='name'):
print(user.username)
# Filter by admins
from telethon.tl.types import ChannelParticipantsAdmins
async for user in client.iter_participants(chat, filter=ChannelParticipantsAdmins):
print(user.first_name)
"""
return _ParticipantsIter(
self,
limit,
entity=entity,
filter=filter,
search=search,
aggressive=aggressive
)
async def get_participants(
self: 'TelegramClient',
*args,
**kwargs) -> 'hints.TotalList':
"""
Same as `iter_participants()`, but returns a
`TotalList <telethon.helpers.TotalList>` instead.
Example
.. code-block:: python
users = await client.get_participants(chat)
print(users[0].first_name)
for user in users:
if user.username is not None:
print(user.username)
"""
return await self.iter_participants(*args, **kwargs).collect()
get_participants.__signature__ = inspect.signature(iter_participants)
def iter_admin_log(
self: 'TelegramClient',
entity: 'hints.EntityLike',
limit: float = None,
*,
max_id: int = 0,
min_id: int = 0,
search: str = None,
admins: 'hints.EntitiesLike' = None,
join: bool = None,
leave: bool = None,
invite: bool = None,
restrict: bool = None,
unrestrict: bool = None,
ban: bool = None,
unban: bool = None,
promote: bool = None,
demote: bool = None,
info: bool = None,
settings: bool = None,
pinned: bool = None,
edit: bool = None,
delete: bool = None) -> _AdminLogIter:
"""
Iterator over the admin log for the specified channel.
The default order is from the most recent event to the oldest.
Note that you must be an administrator of it to use this method.
If none of the filters are present (i.e. they all are `None`),
*all* event types will be returned. If at least one of them is
`True`, only those that are true will be returned.
Arguments
entity (`entity`):
The channel entity from which to get its admin log.
limit (`int` | `None`, optional):
Number of events to be retrieved.
The limit may also be `None`, which would eventually return
the whole history.
max_id (`int`):
All the events with a higher (newer) ID or equal to this will
be excluded.
min_id (`int`):
All the events with a lower (older) ID or equal to this will
be excluded.
search (`str`):
The string to be used as a search query.
admins (`entity` | `list`):
If present, the events will be filtered by these admins
(or single admin) and only those caused by them will be
returned.
join (`bool`):
If `True`, events for when a user joined will be returned.
leave (`bool`):
If `True`, events for when a user leaves will be returned.
invite (`bool`):
If `True`, events for when a user joins through an invite
link will be returned.
restrict (`bool`):
If `True`, events with partial restrictions will be
returned. This is what the API calls "ban".
unrestrict (`bool`):
If `True`, events removing restrictions will be returned.
This is what the API calls "unban".
ban (`bool`):
If `True`, events applying or removing all restrictions will
be returned. This is what the API calls "kick" (restricting
all permissions removed is a ban, which kicks the user).
unban (`bool`):
If `True`, events removing all restrictions will be
returned. This is what the API calls "unkick".
promote (`bool`):
If `True`, events with admin promotions will be returned.
demote (`bool`):
If `True`, events with admin demotions will be returned.
info (`bool`):
If `True`, events changing the group info will be returned.
settings (`bool`):
If `True`, events changing the group settings will be
returned.
pinned (`bool`):
If `True`, events of new pinned messages will be returned.
edit (`bool`):
If `True`, events of message edits will be returned.
delete (`bool`):
If `True`, events of message deletions will be returned.
Yields
Instances of `AdminLogEvent <telethon.tl.custom.adminlogevent.AdminLogEvent>`.
Example
.. code-block:: python
async for event in client.iter_admin_log(channel):
if event.changed_title:
print('The title changed from', event.old, 'to', event.new)
"""
return _AdminLogIter(
self,
limit,
entity=entity,
admins=admins,
search=search,
min_id=min_id,
max_id=max_id,
join=join,
leave=leave,
invite=invite,
restrict=restrict,
unrestrict=unrestrict,
ban=ban,
unban=unban,
promote=promote,
demote=demote,
info=info,
settings=settings,
pinned=pinned,
edit=edit,
delete=delete
)
async def get_admin_log(
self: 'TelegramClient',
*args,
**kwargs) -> 'hints.TotalList':
"""
Same as `iter_admin_log()`, but returns a ``list`` instead.
Example
.. code-block:: python
# Get a list of deleted message events which said "heck"
events = await client.get_admin_log(channel, search='heck', delete=True)
# Print the old message before it was deleted
print(events[0].old)
"""
return await self.iter_admin_log(*args, **kwargs).collect()
get_admin_log.__signature__ = inspect.signature(iter_admin_log)
def iter_profile_photos(
self: 'TelegramClient',
entity: 'hints.EntityLike',
limit: int = None,
*,
offset: int = 0,
max_id: int = 0) -> _ProfilePhotoIter:
"""
Iterator over a user's profile photos or a chat's photos.
The order is from the most recent photo to the oldest.
Arguments
entity (`entity`):
The entity from which to get the profile or chat photos.
limit (`int` | `None`, optional):
Number of photos to be retrieved.
The limit may also be `None`, which would eventually return
all the photos that are still available.
offset (`int`):
How many photos should be skipped before returning the first one.
max_id (`int`):
The maximum ID allowed when fetching photos.
Yields
Instances of :tl:`Photo`.
Example
.. code-block:: python
# Download all the profile photos of some user
async for photo in client.iter_profile_photos(user):
await client.download_media(photo)
"""
return _ProfilePhotoIter(
self,
limit,
entity=entity,
offset=offset,
max_id=max_id
)
async def get_profile_photos(
self: 'TelegramClient',
*args,
**kwargs) -> 'hints.TotalList':
"""
Same as `iter_profile_photos()`, but returns a
`TotalList <telethon.helpers.TotalList>` instead.
Example
.. code-block:: python
# Get the photos of a channel
photos = await client.get_profile_photos(channel)
# Download the oldest photo
await client.download_media(photos[-1])
"""
return await self.iter_profile_photos(*args, **kwargs).collect()
get_profile_photos.__signature__ = inspect.signature(iter_profile_photos)
def action(
self: 'TelegramClient',
entity: 'hints.EntityLike',
action: 'typing.Union[str, types.TypeSendMessageAction]',
*,
delay: float = 4,
auto_cancel: bool = True) -> 'typing.Union[_ChatAction, typing.Coroutine]':
"""
Returns a context-manager object to represent a "chat action".
Chat actions indicate things like "user is typing", "user is
uploading a photo", etc.
If the action is ``'cancel'``, you should just ``await`` the result,
since it makes no sense to use a context-manager for it.
See the example below for intended usage.
Arguments
entity (`entity`):
The entity where the action should be shown.
action (`str` | :tl:`SendMessageAction`):
The action to show. You can either pass an instance of
:tl:`SendMessageAction` or, better, one of the following strings:
* ``'typing'``: typing a text message.
* ``'contact'``: choosing a contact.
* ``'game'``: playing a game.
* ``'location'``: choosing a geo location.
* ``'record-audio'``: recording a voice note.
You may use ``'record-voice'`` as alias.
* ``'record-round'``: recording a round video.
* ``'record-video'``: recording a normal video.
* ``'audio'``: sending an audio file (voice note or song).
You may use ``'voice'`` and ``'song'`` as aliases.
* ``'round'``: uploading a round video.
* ``'video'``: uploading a video file.
* ``'photo'``: uploading a photo.
* ``'document'``: uploading a document file.
You may use ``'file'`` as alias.
* ``'cancel'``: cancel any pending action in this chat.
Invalid strings will raise a ``ValueError``.
delay (`int` | `float`):
The delay, in seconds, to wait between sending actions.
For example, if the delay is 5 and it takes 7 seconds to
do something, three requests will be made at 0s, 5s, and
7s to cancel the action.
auto_cancel (`bool`):
Whether the action should be cancelled once the context
manager exits. The default is `True`, since
you don't want progress to be shown when it has already
completed.
Returns
Either a context-manager object or a coroutine.
Example
.. code-block:: python
# Type for 2 seconds, then send a message
async with client.action(chat, 'typing'):
await asyncio.sleep(2)
await client.send_message(chat, 'Hello world! I type slow ^^')
# Cancel any previous action
await client.action(chat, 'cancel')
# Upload a document, showing its progress (most clients ignore this)
async with client.action(chat, 'document') as action:
await client.send_file(chat, zip_file, progress_callback=action.progress)
"""
if isinstance(action, str):
try:
action = _ChatAction._str_mapping[action.lower()]
except KeyError:
raise ValueError('No such action "{}"'.format(action)) from None
elif not isinstance(action, types.TLObject) or action.SUBCLASS_OF_ID != 0x20b2cc21:
# 0x20b2cc21 = crc32(b'SendMessageAction')
if isinstance(action, type):
raise ValueError('You must pass an instance, not the class')
else:
raise ValueError('Cannot use {} as action'.format(action))
if isinstance(action, types.SendMessageCancelAction):
# ``SetTypingRequest.resolve`` will get input peer of ``entity``.
return self(functions.messages.SetTypingRequest(
entity, types.SendMessageCancelAction()))
return _ChatAction(
self, entity, action, delay=delay, auto_cancel=auto_cancel)
async def edit_admin(
self: 'TelegramClient',
entity: 'hints.EntityLike',
user: 'hints.EntityLike',
*,
change_info: bool = None,
post_messages: bool = None,
edit_messages: bool = None,
delete_messages: bool = None,
ban_users: bool = None,
invite_users: bool = None,
pin_messages: bool = None,
add_admins: bool = None,
is_admin: bool = None,
title: str = None) -> types.Updates:
"""
Edits admin permissions for someone in a chat.
Raises an error if a wrong combination of rights are given
(e.g. you don't have enough permissions to grant one).
Unless otherwise stated, permissions will work in channels and megagroups.
Arguments
entity (`entity`):
The channel, megagroup or chat where the promotion should happen.
user (`entity`):
The user to be promoted.
change_info (`bool`, optional):
Whether the user will be able to change info.
post_messages (`bool`, optional):
Whether the user will be able to post in the channel.
This will only work in broadcast channels.
edit_messages (`bool`, optional):
Whether the user will be able to edit messages in the channel.
This will only work in broadcast channels.
delete_messages (`bool`, optional):
Whether the user will be able to delete messages.
ban_users (`bool`, optional):
Whether the user will be able to ban users.
invite_users (`bool`, optional):
Whether the user will be able to invite users. Needs some testing.
pin_messages (`bool`, optional):
Whether the user will be able to pin messages.
add_admins (`bool`, optional):
Whether the user will be able to add admins.
is_admin (`bool`, optional):
Whether the user will be an admin in the chat. This is the
only permission available in small group chats, and when
used in megagroups, all non-explicitly set permissions will
have this value.
Essentially, only passing ``is_admin=True`` will grant all
permissions, but you can still disable those you need.
title (`str`, optional):
The custom title (also known as "rank") to show for this admin.
This text will be shown instead of the "admin" badge.
This will only work in channels and megagroups.
When left unspecified or empty, the default localized "admin"
badge will be shown.
Returns
The resulting :tl:`Updates` object.
Example
.. code-block:: python
# Allowing `user` to pin messages in `chat`
await client.edit_admin(chat, user, pin_messages=True)
# Granting all permissions except for `add_admins`
await client.edit_admin(chat, user, is_admin=True, add_admins=False)
"""
entity = await self.get_input_entity(entity)
user = await self.get_input_entity(user)
ty = helpers._entity_type(user)
if ty != helpers._EntityType.USER:
raise ValueError('You must pass a user entity')
perm_names = (
'change_info', 'post_messages', 'edit_messages', 'delete_messages',
'ban_users', 'invite_users', 'pin_messages', 'add_admins'
)
ty = helpers._entity_type(entity)
if ty == helpers._EntityType.CHANNEL:
# If we try to set these permissions in a megagroup, we
# would get a RIGHT_FORBIDDEN. However, it makes sense
# that an admin can post messages, so we want to avoid the error
if post_messages or edit_messages:
# TODO get rid of this once sessions cache this information
if entity.channel_id not in self._megagroup_cache:
full_entity = await self.get_entity(entity)
self._megagroup_cache[entity.channel_id] = full_entity.megagroup
if self._megagroup_cache[entity.channel_id]:
post_messages = None
edit_messages = None
perms = locals()
return await self(functions.channels.EditAdminRequest(entity, user, types.ChatAdminRights(**{
# A permission is its explicit (not-None) value or `is_admin`.
# This essentially makes `is_admin` be the default value.
name: perms[name] if perms[name] is not None else is_admin
for name in perm_names
}), rank=title or ''))
elif ty == helpers._EntityType.CHAT:
# If the user passed any permission in a small
# group chat, they must be a full admin to have it.
if is_admin is None:
is_admin = any(locals()[x] for x in perm_names)
return await self(functions.messages.EditChatAdminRequest(
entity, user, is_admin=is_admin))
else:
raise ValueError('You can only edit permissions in groups and channels')
async def edit_permissions(
self: 'TelegramClient',
entity: 'hints.EntityLike',
user: 'typing.Optional[hints.EntityLike]' = None,
until_date: 'hints.DateLike' = None,
*,
view_messages: bool = True,
send_messages: bool = True,
send_media: bool = True,
send_stickers: bool = True,
send_gifs: bool = True,
send_games: bool = True,
send_inline: bool = True,
send_polls: bool = True,
change_info: bool = True,
invite_users: bool = True,
pin_messages: bool = True) -> types.Updates:
"""
Edits user restrictions in a chat.
Set an argument to `False` to apply a restriction (i.e. remove
the permission), or omit them to use the default `True` (i.e.
don't apply a restriction).
Raises an error if a wrong combination of rights are given
(e.g. you don't have enough permissions to revoke one).
By default, each boolean argument is `True`, meaning that it
is true that the user has access to the default permission
and may be able to make use of it.
If you set an argument to `False`, then a restriction is applied
regardless of the default permissions.
It is important to note that `True` does *not* mean grant, only
"don't restrict", and this is where the default permissions come
in. A user may have not been revoked the ``pin_messages`` permission
(it is `True`) but they won't be able to use it if the default
permissions don't allow it either.
Arguments
entity (`entity`):
The channel or megagroup where the restriction should happen.
user (`entity`, optional):
If specified, the permission will be changed for the specific user.
If left as `None`, the default chat permissions will be updated.
until_date (`DateLike`, optional):
When the user will be unbanned.
If the due date or duration is longer than 366 days or shorter than
30 seconds, the ban will be forever. Defaults to ``0`` (ban forever).
view_messages (`bool`, optional):
Whether the user is able to view messages or not.
Forbidding someone from viewing messages equals to banning them.
This will only work if ``user`` is set.
send_messages (`bool`, optional):
Whether the user is able to send messages or not.
send_media (`bool`, optional):
Whether the user is able to send media or not.
send_stickers (`bool`, optional):
Whether the user is able to send stickers or not.
send_gifs (`bool`, optional):
Whether the user is able to send animated gifs or not.
send_games (`bool`, optional):
Whether the user is able to send games or not.
send_inline (`bool`, optional):
Whether the user is able to use inline bots or not.
send_polls (`bool`, optional):
Whether the user is able to send polls or not.
change_info (`bool`, optional):
Whether the user is able to change info or not.
invite_users (`bool`, optional):
Whether the user is able to invite other users or not.
pin_messages (`bool`, optional):
Whether the user is able to pin messages or not.
Returns
The resulting :tl:`Updates` object.
Example
.. code-block:: python
from datetime import timedelta
# Banning `user` from `chat` for 1 minute
await client.edit_permissions(chat, user, timedelta(minutes=1),
view_messages=False)
# Banning `user` from `chat` forever
await client.edit_permissions(chat, user, view_messages=False)
# Kicking someone (ban + un-ban)
await client.edit_permissions(chat, user, view_messages=False)
await client.edit_permissions(chat, user)
"""
entity = await self.get_input_entity(entity)
ty = helpers._entity_type(entity)
if ty != helpers._EntityType.CHANNEL:
raise ValueError('You must pass either a channel or a supergroup')
rights = types.ChatBannedRights(
until_date=until_date,
view_messages=not view_messages,
send_messages=not send_messages,
send_media=not send_media,
send_stickers=not send_stickers,
send_gifs=not send_gifs,
send_games=not send_games,
send_inline=not send_inline,
send_polls=not send_polls,
change_info=not change_info,
invite_users=not invite_users,
pin_messages=not pin_messages
)
if user is None:
return await self(functions.messages.EditChatDefaultBannedRightsRequest(
peer=entity,
banned_rights=rights
))
user = await self.get_input_entity(user)
ty = helpers._entity_type(user)
if ty != helpers._EntityType.USER:
raise ValueError('You must pass a user entity')
if isinstance(user, types.InputPeerSelf):
raise ValueError('You cannot restrict yourself')
return await self(functions.channels.EditBannedRequest(
channel=entity,
user_id=user,
banned_rights=rights
))
async def kick_participant(
self: 'TelegramClient',
entity: 'hints.EntityLike',
user: 'typing.Optional[hints.EntityLike]'
):
"""
Kicks a user from a chat.
Kicking yourself (``'me'``) will result in leaving the chat.
.. note::
Attempting to kick someone who was banned will remove their
restrictions (and thus unbanning them), since kicking is just
ban + unban.
Arguments
entity (`entity`):
The channel or chat where the user should be kicked from.
user (`entity`, optional):
The user to kick.
Example
.. code-block:: python
# Kick some user from some chat
await client.kick_participant(chat, user)
# Leaving chat
await client.kick_participant(chat, 'me')
"""
entity = await self.get_input_entity(entity)
user = await self.get_input_entity(user)
if helpers._entity_type(user) != helpers._EntityType.USER:
raise ValueError('You must pass a user entity')
ty = helpers._entity_type(entity)
if ty == helpers._EntityType.CHAT:
await self(functions.messages.DeleteChatUserRequest(entity.chat_id, user))
elif ty == helpers._EntityType.CHANNEL:
if isinstance(user, types.InputPeerSelf):
await self(functions.channels.LeaveChannelRequest(entity))
else:
await self(functions.channels.EditBannedRequest(
channel=entity,
user_id=user,
banned_rights=types.ChatBannedRights(until_date=None, view_messages=True)
))
await asyncio.sleep(0.5)
await self(functions.channels.EditBannedRequest(
channel=entity,
user_id=user,
banned_rights=types.ChatBannedRights(until_date=None)
))
else:
raise ValueError('You must pass either a channel or a chat')
# endregion
|
expectocode/Telethon
|
telethon/client/chats.py
|
Python
|
mit
| 42,127
|
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^profiles/', include('easy_profiles.urls')),
(r'^admin/', include(admin.site.urls)),
)
|
pydanny/django-easy-profiles
|
test_project/urls.py
|
Python
|
mit
| 223
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 14:09:59 2017
@author: SaintlyVi
"""
import pandas as pd
import numpy as np
from support import writeLog
def uncertaintyStats(submodel):
"""
Creates a dict with statistics for observed hourly profiles for a given year.
Use evaluation.evalhelpers.observedHourlyProfiles() to generate the input dataframe.
"""
allstats = list()
for c in submodel['class'].unique():
stats = submodel[submodel['class']==c].describe()
stats['customer_class'] = c
stats.reset_index(inplace=True)
stats.set_index(['customer_class','index'], inplace=True)
allstats.append(stats)
df = pd.concat(allstats)
return df[['AnswerID_count','valid_obs_ratio']]
def dataIntegrity(submodels, min_answerid, min_obsratio):
"""
This function returns the slice of submodels that meet the specified minimum uncertainty requirements. Submodels must form part of the same experiment (eg demand summary and hourly profiles).
"""
if isinstance(submodels, list):
models = submodels
else:
models = [submodels]
validmodels = pd.DataFrame(columns = ['submodel_name','valid_data','uncertainty_index',
'valid_unit_count', 'unit'])
for m in models:
name = m.name
valid_data = m[(m.AnswerID_count>=min_answerid) & (m.valid_obs_ratio>=min_obsratio)]
uix = len(valid_data) / len(m)
try:
    valid_unit_count = valid_data['valid_hours'].sum()
    unit = 'total_valid_hours'
except KeyError:  # presumably the demand_summary submodel has no 'valid_hours' column
    valid_unit_count = valid_data['AnswerID_count'].sum()
    unit = 'valid_AnswerID_count'
validmodels = validmodels.append({'submodel_name':name,
'valid_data':valid_data,
'uncertainty_index':uix,
'valid_unit_count':valid_unit_count,
'unit':unit}, ignore_index=True)
validmodels.set_index('submodel_name', drop=True, inplace=True)
return validmodels
def modelSimilarity(ex_submodel, ex_ts, valid_new_submodel, new_ts, submod_type):
"""
This function calculates the evaluation measure for the run.
ex_submodel = (DataFrame) either existing/expert demand_summary or hourly_profiles submodel
valid_new_submodel = (DataFrame) output from dataIntegrity function
-> only want to compare valid data
submod_type = (str) one of [ds, hp]
-> ds=demand_summary, hp=hourly_profiles
"""
if submod_type == 'ds':
index_cols = ['class','YearsElectrified']
elif submod_type == 'hp':
index_cols = ['class','YearsElectrified','month','daytype','hour']
else:
raise ValueError('Valid submod_type is one of [ds, hp] -> ds=demand_summary, hp=hourly_profiles.')
merged_sub = ex_submodel.merge(valid_new_submodel, how='left', on=index_cols)
simvec = merged_sub[new_ts] - merged_sub[ex_ts]
simvec.dropna(inplace=True)
simveccount = len(simvec)
eucliddist = np.sqrt(sum(simvec**2))
return eucliddist, simveccount, merged_sub
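# Worked example (illustrative numbers only): if the matched new/existing
# values differ by 1, 2 and 2 across three rows, then simvec = [1, 2, 2],
# simveccount = 3 and eucliddist = sqrt(1 + 4 + 4) = 3.0.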
def logCalibration(bm_model, year, exp_model, min_answerid = 2, min_obsratio = 0.85):
"""
This function logs the evaluation results of the run.
ex_model = [demand_summary, hourly_profiles, ds_val_col_name, hp_val_col_name]
"""
#Generate data model
ods = pd.read_csv('data/experimental_model/'+exp_model+'/demand_summary_'+year+'.csv')
ohp = pd.read_csv('data/experimental_model/'+exp_model+'/hourly_profiles_'+year+'.csv')
#Check data integrity
ods.name = 'demand_summary'
ohp.name = 'hourly_profiles'
validmodels = dataIntegrity([ods, ohp], min_answerid, min_obsratio)
valid_new_ds = validmodels.at['demand_summary','valid_data']
valid_new_hp = validmodels.at['hourly_profiles','valid_data']
new_dsts = 'M_kw_mean'
new_hpts = 'kva_mean'
#Fetch benchmark model
bm_ds = bm_model[0]
bm_hp = bm_model[1]
bm_dsts = bm_model[2]
bm_hpts = bm_model[3]
#Calculate model similarity
euclid_ds, count_ds, slice_ex_ds = modelSimilarity(bm_ds, bm_dsts, valid_new_ds, new_dsts, 'ds')
euclid_hp, count_hp, sliced_ex_hp = modelSimilarity(bm_hp, bm_hpts, valid_new_hp, new_hpts, 'hp')
#Prepare and write logs
ds_uix = validmodels.at['demand_summary','uncertainty_index']
ds_vuc = validmodels.at['demand_summary','valid_unit_count']
ds_unit = validmodels.at['demand_summary','unit']
hp_uix = validmodels.at['hourly_profiles','uncertainty_index']
hp_vuc = validmodels.at['hourly_profiles','valid_unit_count']
hp_unit = validmodels.at['hourly_profiles','unit']
loglineds = [year, exp_model, ods.name, min_answerid, min_obsratio, ds_uix, ds_vuc,
ds_unit, euclid_ds, count_ds]
loglinehp = [year, exp_model, ohp.name, min_answerid, min_obsratio, hp_uix, hp_vuc,
hp_unit, euclid_hp, count_hp]
log_lines = pd.DataFrame([loglineds, loglinehp], columns = ['year','experiment',
'submodel','min_answerid_count','min_valid_obsratio',
'uncertainty_ix','valid_unit_count','unit','sim_eucliddist','sim_count'])
writeLog(log_lines,'log_calibration')
|
SaintlyVi/DLR_DB
|
evaluation/calibration.py
|
Python
|
mit
| 5,511
|
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^img1x1$', 'ses_analytics.views.img1x1', name='img1x1'), # used to trace email opening
# TODO: unsubscription and SNS feedback notifications
)
|
startup-guide/django-ses-analytics
|
ses_analytics/urls.py
|
Python
|
mit
| 228
|
import pickle
import pytest
from doit.cmdparse import DefaultUpdate, CmdParseError, CmdOption, CmdParse
class TestDefaultUpdate(object):
def test(self):
du = DefaultUpdate()
du.set_default('a', 0)
du.set_default('b', 0)
assert 0 == du['a']
assert 0 == du['b']
du['b'] = 1
du.update_defaults({'a':2, 'b':2})
assert 2 == du['a']
assert 1 == du['b']
def test_add_defaults(self):
du = DefaultUpdate()
du.add_defaults({'a': 0, 'b':1})
du['c'] = 5
du.add_defaults({'a':2, 'c':2})
assert 0 == du['a']
assert 1 == du['b']
assert 5 == du['c']
# http://bugs.python.org/issue826897
def test_pickle(self):
du = DefaultUpdate()
du.set_default('x', 0)
dump = pickle.dumps(du,2)
pickle.loads(dump)
class TestCmdOption(object):
def test_repr(self):
opt = CmdOption({'name':'opt1', 'default':'',
'short':'o', 'long':'other'})
assert "CmdOption(" in repr(opt)
assert "'name':'opt1'" in repr(opt)
assert "'short':'o'" in repr(opt)
assert "'long':'other'" in repr(opt)
def test_non_required_fields(self):
opt1 = CmdOption({'name':'op1', 'default':''})
assert '' == opt1.long
def test_invalid_field(self):
opt_dict = {'name':'op1', 'default':'', 'non_existent':''}
pytest.raises(CmdParseError, CmdOption, opt_dict)
def test_missing_field(self):
opt_dict = {'name':'op1', 'long':'abc'}
pytest.raises(CmdParseError, CmdOption, opt_dict)
class TestCmdOption_help_param(object):
def test_bool_param(self):
opt1 = CmdOption({'name':'op1', 'default':'', 'type':bool,
'short':'b', 'long': 'bobo'})
assert '-b, --bobo' == opt1.help_param()
def test_non_bool_param(self):
opt1 = CmdOption({'name':'op1', 'default':'', 'type':str,
'short':'s', 'long': 'susu'})
assert '-s ARG, --susu=ARG' == opt1.help_param()
def test_no_long(self):
opt1 = CmdOption({'name':'op1', 'default':'', 'type':str,
'short':'s'})
assert '-s ARG' == opt1.help_param()
opt_bool = {'name': 'flag',
'short':'f',
'long': 'flag',
'inverse':'no-flag',
'type': bool,
'default': False,
'help': 'help for opt1'}
opt_rare = {'name': 'rare',
'long': 'rare-bool',
'type': bool,
'default': False,
'help': 'help for opt2'}
opt_int = {'name': 'num',
'short':'n',
'long': 'number',
'type': int,
'default': 5,
'help': 'help for opt3'}
opt_no = {'name': 'no',
'short':'',
'long': '',
'type': int,
'default': 5,
'help': 'user cant modify me'}
class TestCmdOption_help_doc(object):
def test_param(self):
opt1 = CmdOption(opt_bool)
got = opt1.help_doc()
assert '-f, --flag' in got[0]
assert 'help for opt1' in got[0]
assert '--no-flag' in got[1]
assert 2 == len(got)
def test_no_doc_param(self):
opt1 = CmdOption(opt_no)
assert 0 == len(opt1.help_doc())
class TestCommand(object):
@pytest.fixture
def cmd(self, request):
opt_list = (opt_bool, opt_rare, opt_int, opt_no)
options = [CmdOption(o) for o in opt_list]
cmd = CmdParse(options)
return cmd
def test_short(self, cmd):
assert "fn:" == cmd.get_short(), cmd.get_short()
def test_long(self, cmd):
assert ["flag", "no-flag", "rare-bool", "number="] == cmd.get_long()
def test_getOption(self, cmd):
# short
opt, is_inverse = cmd.get_option('-f')
assert (opt_bool['name'], False) == (opt.name, is_inverse)
# long
opt, is_inverse = cmd.get_option('--rare-bool')
assert (opt_rare['name'], False) == (opt.name, is_inverse)
# inverse
opt, is_inverse = cmd.get_option('--no-flag')
assert (opt_bool['name'], True) == (opt.name, is_inverse)
# not found
opt, is_inverse = cmd.get_option('not-there')
assert (None, None) == (opt, is_inverse)
def test_parseDefaults(self, cmd):
params, args = cmd.parse([])
assert False == params['flag']
assert 5 == params['num']
def test_parseShortValues(self, cmd):
params, args = cmd.parse(['-n','89','-f'])
assert True == params['flag']
assert 89 == params['num']
def test_parseLongValues(self, cmd):
params, args = cmd.parse(['--rare-bool','--num','89', '--no-flag'])
assert True == params['rare']
assert False == params['flag']
assert 89 == params['num']
def test_parsePositionalArgs(self, cmd):
params, args = cmd.parse(['-f','p1','p2','--sub-arg'])
assert ['p1','p2','--sub-arg'] == args
def test_parseError(self, cmd):
pytest.raises(CmdParseError, cmd.parse, ['--not-exist-param'])
def test_parseWrongType(self, cmd):
pytest.raises(CmdParseError, cmd.parse, ['--num','oi'])
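
if __name__ == '__main__':
    # Illustrative sketch (editor's addition): driving CmdParse outside pytest,
    # reusing the option dicts defined above.
    options = [CmdOption(opt_bool), CmdOption(opt_int)]
    params, pos_args = CmdParse(options).parse(['-f', '-n', '7', 'taskname'])
    assert params['flag'] == True and params['num'] == 7
    assert pos_args == ['taskname']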
|
lelit/doit
|
tests/test_cmdparse.py
|
Python
|
mit
| 5,264
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Created on Jan 24, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jan 24, 2012"
import os
import unittest
from pymatgen.core.structure import Structure
from pymatgen.io.cssr import Cssr
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.util.testing import PymatgenTest
class CssrTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.cssr = Cssr(p.structure)
def test_str(self):
expected_string = """10.4118 6.0672 4.7595
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.2187 0.7500 0.4749
2 Fe 0.2813 0.2500 0.9749
3 Fe 0.7187 0.7500 0.0251
4 Fe 0.7813 0.2500 0.5251
5 P 0.0946 0.2500 0.4182
6 P 0.4054 0.7500 0.9182
7 P 0.5946 0.2500 0.0818
8 P 0.9054 0.7500 0.5818
9 O 0.0434 0.7500 0.7071
10 O 0.0966 0.2500 0.7413
11 O 0.1657 0.0461 0.2854
12 O 0.1657 0.4539 0.2854
13 O 0.3343 0.5461 0.7854
14 O 0.3343 0.9539 0.7854
15 O 0.4034 0.7500 0.2413
16 O 0.4566 0.2500 0.2071
17 O 0.5434 0.7500 0.7929
18 O 0.5966 0.2500 0.7587
19 O 0.6657 0.0461 0.2146
20 O 0.6657 0.4539 0.2146
21 O 0.8343 0.5461 0.7146
22 O 0.8343 0.9539 0.7146
23 O 0.9034 0.7500 0.2587
24 O 0.9566 0.2500 0.2929"""
self.assertEqual(str(self.cssr), expected_string)
def test_from_file(self):
filename = os.path.join(PymatgenTest.TEST_FILES_DIR, "Si.cssr")
cssr = Cssr.from_file(filename)
self.assertIsInstance(cssr.structure, Structure)
if __name__ == "__main__":
unittest.main()
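# Editor's note, a minimal usage sketch beyond the tests (file names are
# illustrative):
#
#   from pymatgen.core.structure import Structure
#   from pymatgen.io.cssr import Cssr
#   s = Structure.from_file("POSCAR")
#   Cssr(s).write_file("out.cssr")
#   s2 = Cssr.from_file("out.cssr").structure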
|
materialsproject/pymatgen
|
pymatgen/io/tests/test_cssr.py
|
Python
|
mit
| 1,796
|
#encoding:utf-8
subreddit = 'comedynecrophilia'
t_channel = '@comedynecrophilia'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
Fillll/reddit2telegram
|
reddit2telegram/channels/~inactive/comedynecrophilia/app.py
|
Python
|
mit
| 155
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Topological Sorting
from collections import defaultdict
def topsort(graph):
if not graph:
return []
# 1. Count every node's dependencies
count = defaultdict(int)
for node in graph:
for dependency in graph[node]:
count[dependency] += 1
# 2. Find initial nodes - The ones with no incoming edges, so the ones that
# no dependency points at
initial_nodes = [node for node in graph if count[node] == 0]
if graph and not initial_nodes:
raise Exception("Circular depenency detected")
    # 3. Process each node in the order found in initial_nodes. Add a processed
    #    node's dependencies to initial_nodes once no unprocessed node still
    #    references them.
result = []
while initial_nodes:
node = initial_nodes.pop()
result.append(node)
for dependency in graph[node]:
count[dependency] -= 1
if count[dependency] == 0:
initial_nodes.append(dependency)
if len(result) != len(graph):
raise Exception("Circular depenency detected")
return result[::-1]
def test():
from simpletest import _assert, _assert_raises
a, b, c, d, e, f = 'abcdef'
graph = {}
_assert(topsort(graph), [])
graph = {
a: set([]),
}
_assert(topsort(graph), [a])
graph = {
a: set([d, b]),
b: set([d, c]),
c: set([d]),
d: set([]),
e: set([d]),
f: set([d, e]),
}
_assert(topsort(graph), [d, c, b, a, e, f])
graph = {
a: set([b]),
b: set([a]),
}
_assert_raises(Exception, topsort, graph)
if __name__ == '__main__':
test()
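# Editor's example: with edges pointing from a node to its dependencies,
# dependencies come out before the nodes that require them:
#
#   >>> topsort({'app': {'lib'}, 'lib': set()})
#   ['lib', 'app']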
|
pgularski/snippets
|
python/algorithms/topsort.py
|
Python
|
mit
| 1,733
|
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from acq4.util.DataManager import *
#import acq4.Manager as Manager
import acq4.pyqtgraph as pg
#from acq4.pyqtgraph.MultiPlotWidget import MultiPlotWidget
#from acq4.pyqtgraph.ImageView import ImageView
from acq4.util.DictView import *
import acq4.util.metaarray as metaarray
import weakref
class FileDataView(QtGui.QSplitter):
def __init__(self, parent):
QtGui.QSplitter.__init__(self, parent)
#self.manager = Manager.getManager()
self.setOrientation(QtCore.Qt.Vertical)
self.current = None
self.currentType = None
self.widgets = []
self.dictWidget = None
#self.plots = []
def setCurrentFile(self, file):
#print "=============== set current file ============"
if file is self.current:
return
## What if we just want to update the data display?
#self.clear()
if file is None:
self.current = None
return
if file.isDir():
## Sequence or not?
return
else:
typ = file.fileType()
if typ is None:
return
else:
image = False
with pg.BusyCursor():
data = file.read()
if typ == 'ImageFile':
image = True
elif typ == 'MetaArray':
if data.ndim == 2 and not data.axisHasColumns(0) and not data.axisHasColumns(1):
image = True
elif data.ndim > 2:
image = True
else:
return
with pg.BusyCursor():
if image:
if self.currentType == 'image' and len(self.widgets) > 0:
try:
self.widgets[0].setImage(data, autoRange=False)
except:
print "widget types:", map(type, self.widgets)
raise
else:
self.clear()
w = pg.ImageView(self)
#print "add image:", w.ui.roiPlot.plotItem
#self.plots = [weakref.ref(w.ui.roiPlot.plotItem)]
self.addWidget(w)
w.setImage(data)
self.widgets.append(w)
self.currentType = 'image'
else:
self.clear()
w = pg.MultiPlotWidget(self)
self.addWidget(w)
w.plot(data)
self.currentType = 'plot'
self.widgets.append(w)
#print "add mplot:", w.mPlotItem.plots
#self.plots = [weakref.ref(p[0]) for p in w.mPlotItem.plots]
if (hasattr(data, 'implements') and data.implements('MetaArray')):
if self.dictWidget is None:
w = DictView(data._info)
self.dictWidget = w
#w.setText(str(data._info[-1]))
self.addWidget(w)
self.widgets.append(w)
h = self.size().height()
self.setSizes([h*0.8, h*0.2])
else:
self.dictWidget.setData(data._info)
def clear(self):
for w in self.widgets:
w.close()
w.setParent(None)
self.widgets = []
self.dictWidget = None
|
mgraupe/acq4
|
acq4/modules/DataManager/FileDataView.py
|
Python
|
mit
| 3,567
|
# coding=utf-8
"""
CritSend test proyect urls.
Copyright (C) 2013 Nicolas Valcárcel Scerpella
Authors:
Nicolas Valcárcel Scerpella <nvalcarcel@gmail.com>
"""
# Standard library imports
# Framework imports
from django.conf.urls import patterns, include, url
from django.contrib import admin
# 3rd party imports
# Local imports
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^', include('upload.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
nxvl/critsend_test
|
test_proyect/urls.py
|
Python
|
mit
| 482
|
"""The IPython kernel implementation"""
import getpass
import sys
import traceback
from IPython.core import release
from IPython.html.widgets import Widget
from IPython.utils.py3compat import builtin_mod, PY3
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from IPython.utils.traitlets import Instance, Type, Any
from IPython.utils.decorators import undoc
from ..comm import CommManager
from .kernelbase import Kernel as KernelBase
from .serialize import serialize_object, unpack_apply_message
from .zmqshell import ZMQInteractiveShell
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(ZMQInteractiveShell)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir=self.profile_dir,
user_module=self.user_module,
user_ns=self.user_ns,
kernel=self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.shell.data_pub.session = self.session
self.shell.data_pub.pub_socket = self.iopub_socket
# TMP - hack while developing
self.shell._reply_content = None
self.comm_manager = CommManager(shell=self.shell, parent=self,
kernel=self)
self.comm_manager.register_target(
'ipython.widget', Widget.handle_comm_opened)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(
self.comm_manager, msg_type)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {'name': 'ipython',
'version': sys.version_info[0]},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
        # Ignore the incrementing done by KernelBase, in favour of our shell's
        # execution counter.
pass
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
# FIXME: the shell calls the exception handler itself.
shell._reply_content = None
try:
shell.run_cell(code, store_history=store_history, silent=silent)
except:
status = u'error'
# FIXME: this code right now isn't being used yet by default,
# because the run_cell() call above directly fires off exception
# reporting. This code, therefore, is only active in the scenario
# where runlines itself has an unhandled exception. We need to
# uniformize this, for all exception construction to come from a
            # single location in the codebase.
etype, evalue, tb = sys.exc_info()
tb_list = traceback.format_exception(etype, evalue, tb)
reply_content.update(shell._showtraceback(etype, evalue, tb_list))
else:
status = u'ok'
finally:
self._restore_input()
reply_content[u'status'] = status
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
# FIXME - fish exception info out of shell, possibly left there by
# runlines. We'll need to clean up this logic later.
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(
engine_uuid=self.ident, engine_id=self.int_id, method='execute')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
if 'traceback' in reply_content:
self.log.info(
"Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and clear the payload system always.
reply_content[u'payload'] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches': matches,
'cursor_end': cursor_pos,
'cursor_start': cursor_pos - len(txt),
'metadata': {},
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
info = self.shell.object_inspect(name)
reply_content = {'status': 'ok'}
reply_content['data'] = data = {}
reply_content['metadata'] = {}
reply_content['found'] = info['found']
if info['found']:
info_text = self.shell.object_inspect_text(
name,
detail_level=detail_level,
)
data['text/plain'] = info_text
return reply_content
def do_history(self, hist_access_type, output, raw, session=None, start=None,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {'history': list(hist)}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
status, indent_spaces = self.shell.input_transformer_manager.check_complete(
code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
shell = self.shell
try:
working = shell.user_ns
prefix = "_" + str(msg_id).replace("-", "") + "_"
f, args, kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix + "f"
argname = prefix + "args"
kwargname = prefix + "kwargs"
resultname = prefix + "result"
ns = {fname: f, argname: args, kwargname: kwargs, resultname: None}
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname,
fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except:
# invoke IPython traceback formatting
shell.showtraceback()
# FIXME - fish exception info out of shell, possibly left there by
# run_code. We'll need to clean up this logic later.
reply_content = {}
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(
engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info(
"Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
if reply_content['ename'] == 'UnmetDependency':
reply_metadata['dependencies_met'] = False
else:
reply_content = {'status': 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
@undoc
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of IPython.kernel.zmq.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)
|
mattvonrocketstein/smash
|
smashlib/ipy3x/kernel/zmq/ipkernel.py
|
Python
|
mit
| 13,151
|
"""Test cases for JSON lws_logger module, assumes Pytest."""
from jsonutils.lws import lws_logger
class TestDictToTreeHelpers:
"""Test the helper functions for dict_to_tree."""
def test_flatten_list(self):
"""Test flattening of nested lists."""
f = lws_logger.flatten_list
nested = [1, [2, 3, [[4], 5]]]
assert list(f(nested)) == [1, 2, 3, 4, 5]
nested = [[[1]]]
assert list(f(nested)) == [1]
flat = [1, 2]
assert list(f(flat)) == [1, 2]
def test_filter_errors(self):
"""Test error filtering (helper function to filter_keys)."""
f = lws_logger.filter_errors
errors = {'key': 99,
'key_str': 'key error',
'val': -99,
'val_str': 'val error'}
seq = [100, 99, 99, 99]
assert f(seq, errors) == [100]
seq = [99]
assert f(seq, errors) == ['key error']
seq = [-99, -99, 100]
assert f(seq, errors) == [100]
seq = [-99, -99]
assert f(seq, errors) == ['val error']
def test_filter_errors_single(self):
"""Test list error term filtering, single error."""
f = lws_logger.filter_keys
errors = {'key': 99,
'key_str': 'key error',
'val': -99,
'val_str': 'val error'}
pairs = [('a', 'hi'), ('a', 99), ('b', 'hi')]
filtered = [('a', 'hi'), ('b', 'hi')]
assert f(pairs, errors) == filtered
def test_filter_errors_multiple(self):
"""Test list error term filtering, multiple errors."""
f = lws_logger.filter_keys
errors = {'key': 99,
'key_str': 'key error',
'val': -99,
'val_str': 'val error'}
pairs = [('a', 'hi'), ('a', 99), ('a', 99),
('b', 'hi'), ('b', -99)]
filtered = [('a', 'hi'), ('b', 'hi')]
assert f(pairs, errors) == filtered
def test_filter_errors_only(self):
"""Test list error term filtering, only errors."""
f = lws_logger.filter_keys
errors = {'key': 99,
'key_str': 'key error',
'val': -99,
'val_str': 'val error'}
pairs = [('a', 99), ('b', -99)]
filtered = [('a', 'key error'), ('b', 'val error')]
assert f(pairs, errors) == filtered
class TestLoggerHelpers:
"""Test the helper functions for logger."""
def test_dict_to_tree_simple(self):
"""Test dict_to_tree simple dicts."""
f = lws_logger.dict_to_tree
simple_d = {'root': ['a', 'b']}
flat_list = [('root', 0), [('a', 1)], [('b', 1)]]
assert f(simple_d, 'root', [('root', 0)]) == flat_list
nested_d = {'root': ['a', 'b'], 'a': ['one', 'two']}
nested_list = [('root', 0), [('a', 1), [('one', 2)], [('two', 2)]],
[('b', 1)]]
assert f(nested_d, 'root', [('root', 0)]) == nested_list
def test_parse_errors_one(self):
"""Test scenario with one type of error."""
f = lws_logger.parse_errors
errors = {'key_str': 'key error',
'val_str': 'val error'}
nodes = [('one', 'key error'), ('two', 3), ('three', 3)]
output = 'Key Errors:\t1\nValue Errors:\t0'
assert f(nodes, errors) == (1, 0, output)
def test_parse_errors_both(self):
"""Test scenario with two types of errors."""
f = lws_logger.parse_errors
errors = {'key_str': 'key error',
'val_str': 'val error'}
nodes = [('one', 'key error'), ('two', 3), ('three', 3),
('four', 'val error')]
output = 'Key Errors:\t1\nValue Errors:\t1'
assert f(nodes, errors) == (1, 1, output)
def test_format_node(self):
"""Test node to string function."""
f = lws_logger.format_node
assert f('a', '----', 1) == '|----a'
assert f('a', '----', 2) == ' |----a'
|
tkuriyama/jsonutils
|
jsonutils/lws/test/test_lws_logger.py
|
Python
|
mit
| 3,990
|
import os
import unittest
import random
import xmlrunner
host = os.environ['FALKONRY_HOST_URL'] # host url
token = os.environ['FALKONRY_TOKEN'] # auth token
class TestDatastream(unittest.TestCase):
def setUp(self):
self.created_datastreams = []
self.fclient = FClient(host=host, token=token, options=None)
# Create datastream without any signals
def test_create_standalone_datastream(self):
datastream = Schemas.Datastream()
datastream.set_name('Motor Health' + str(random.random()))
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
time.set_zone("GMT")
time.set_identifier("time")
time.set_format("iso_8601")
field.set_signal(signal)
datasource.set_type("STANDALONE")
field.set_time(time)
datastream.set_datasource(datasource)
datastream.set_field(field)
try:
response = self.fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),response.get_name(),'Invalid entity name object after creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Create Datastream for narrow/historian style data from a single entity
def test_create_datastream_narrow_style_single_entity(self):
datastream = Schemas.Datastream()
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
datastream.set_name('Motor Health' + str(random.random())) # set name of the Datastream
time.set_zone("GMT") # set timezone of the datastream
time.set_identifier("time") # set time identifier of the datastream
time.set_format("iso_8601") # set time format of the datastream
field.set_time(time)
signal.set_valueIdentifier("value")
signal.set_signalIdentifier("signal")
field.set_signal(signal) # set signal in field
datasource.set_type("STANDALONE") # set datastource type in datastream
datastream.set_datasource(datasource)
datastream.set_field(field)
try:
# create Datastream
response = self.fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),response.get_name(),'Invalid entity name object after creation')
signalResponse = fieldResponse.get_signal()
self.assertEqual(signalResponse.get_valueIdentifier(),signal.get_valueIdentifier(), 'Invalid value identifier after object creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Create Datastream for narrow/historian style data from a multiple entities
def test_create_datastream_narrow_style_multiple_entity(self):
datastream = Schemas.Datastream()
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
datastream.set_name('Motor Health' + str(random.random())) # set name of the Datastream
time.set_zone("GMT") # set timezone of the datastream
time.set_identifier("time") # set time identifier of the datastream
time.set_format("iso_8601") # set time format of the datastream
field.set_time(time)
signal.set_signalIdentifier("signal") # set signal identifier
signal.set_valueIdentifier("value") # set value identifier
field.set_entityIdentifier("entity") # set entity identifier
field.set_signal(signal) # set signal in field
datasource.set_type("STANDALONE") # set datastource type in datastream
datastream.set_datasource(datasource)
datastream.set_field(field)
try:
# create Datastream
response = self.fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityName(),None,'Invalid entity name object after creation')
signalResponse = fieldResponse.get_signal()
self.assertEqual(signalResponse.get_valueIdentifier(),signal.get_valueIdentifier(), 'Invalid value identifier after object creation')
self.assertEqual(signalResponse.get_signalIdentifier(), signal.get_signalIdentifier(), 'Invalid signal identifier after object creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Create Datastream for wide style data from a single entity
def test_create_datastream_wide_style_single_entity(self):
datastream = Schemas.Datastream()
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
input1 = Schemas.Input()
input2 = Schemas.Input()
input3 = Schemas.Input()
datastream.set_name('Motor Health' + str(random.random())) # set name of the Datastream
input1.set_name("Signal1") # set name of input signal
input1.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input1.set_event_type("Samples") # set event type of input signal
input2.set_name("Signal2") # set name of input signal
input2.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input2.set_event_type("Samples") # set event type of input signal
input3.set_name("Signal3") # set name of input signal
input3.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input3.set_event_type("Samples") # set event type of input signal
inputs = []
inputs.append(input1)
inputs.append(input2)
inputs.append(input3)
time.set_zone("GMT") # set timezone of the datastream
time.set_identifier("time") # set time identifier of the datastream
time.set_format("iso_8601") # set time format of the datastream
field.set_time(time)
field.set_signal(signal) # set signal in field
datasource.set_type("STANDALONE") # set datastource type in datastream
datastream.set_datasource(datasource)
datastream.set_field(field)
datastream.set_inputs(inputs)
try:
# create Datastream
response = self.fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),response.get_name(),'Invalid entity name object after creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
inputs = response.get_inputs()
self.assertEqual(isinstance(inputs, list), True, 'Invalid inputs object after creation')
self.assertEqual(len(inputs), 3, 'Invalid inputs object after creation')
            inputResp1 = inputs[0]
            inputResp2 = inputs[1]
            inputResp3 = inputs[2]
self.assertEqual(inputResp1.get_name(), input1.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp1.get_value_type(), input1.get_value_type(),'Invalid input value type after object creation')
self.assertEqual(inputResp2.get_name(), input2.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp2.get_value_type(), input2.get_value_type(),'Invalid input value type after object creation')
self.assertEqual(inputResp3.get_name(), input3.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp3.get_value_type(), input3.get_value_type(),'Invalid input value type after object creation')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Create Datastream for wide style data from a multiple entities
def test_create_datastream_wide_style_multiple_entity(self):
datastream = Schemas.Datastream()
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
input1 = Schemas.Input()
input2 = Schemas.Input()
input3 = Schemas.Input()
datastream.set_name('Motor Health' + str(random.random())) # set name of the Datastream
input1.set_name("Signal1") # set name of input signal
input1.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input1.set_event_type("Samples") # set event type of input signal
input2.set_name("Signal2") # set name of input signal
input2.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input2.set_event_type("Samples") # set event type of input signal
input3.set_name("Signal3") # set name of input signal
input3.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input3.set_event_type("Samples") # set event type of input signal
inputs = []
inputs.append(input1)
inputs.append(input2)
inputs.append(input3)
time.set_zone("GMT") # set timezone of the datastream
time.set_identifier("time") # set time identifier of the datastream
time.set_format("iso_8601") # set time format of the datastream
field.set_time(time)
field.set_signal(signal) # set signal in field
field.set_entityIdentifier("entity")
datasource.set_type("STANDALONE") # set datastource type in datastream
datastream.set_datasource(datasource)
datastream.set_field(field)
datastream.set_inputs(inputs)
try:
# create Datastream
response = self.fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),None,'Invalid entity name object after creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
inputs = response.get_inputs()
self.assertEqual(isinstance(inputs, list), True, 'Invalid inputs object after creation')
self.assertEqual(len(inputs), 3, 'Invalid inputs object after creation')
            inputResp1 = inputs[0]
            inputResp2 = inputs[1]
            inputResp3 = inputs[2]
self.assertEqual(inputResp1.get_name(), input1.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp1.get_value_type(), input1.get_value_type(),'Invalid input value type after object creation')
self.assertEqual(inputResp2.get_name(), input2.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp2.get_value_type(), input2.get_value_type(),'Invalid input value type after object creation')
self.assertEqual(inputResp3.get_name(), input3.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp3.get_value_type(), input3.get_value_type(),'Invalid input value type after object creation')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Retrieve Datastreams
def test_get_datastream_list(self):
datastream = Schemas.Datastream()
datastream.set_name('Motor Health' + str(random.random()))
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
time.set_zone("GMT")
time.set_identifier("time")
time.set_format("iso_8601")
field.set_signal(signal)
datasource.set_type("STANDALONE")
field.set_time(time)
datastream.set_datasource(datasource)
datastream.set_field(field)
try:
response = self.fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),response.get_name(),'Invalid entity name object after creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
# get datastream list
datastreamList = self.fclient.get_datastreams()
self.assertEqual(isinstance(datastreamList, list), True, 'Invalid datastreamlist in response')
self.assertEqual(len(datastreamList) > 0, True, 'No datastreams in get response')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Retrieve Datastream by Id
def test_get_datastream_by_id(self):
datastream = Schemas.Datastream()
datastream.set_name('Motor Health' + str(random.random()))
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
time.set_zone("GMT")
time.set_identifier("time")
time.set_format("iso_8601")
signal.set_signalIdentifier("signal")
signal.set_valueIdentifier("value")
field.set_signal(signal)
datasource.set_type("STANDALONE")
field.set_time(time)
datastream.set_datasource(datasource)
datastream.set_field(field)
try:
response = self.fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),response.get_name(),'Invalid entity name object after creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
            # get datastream by id
            datastreamResp = self.fclient.get_datastream(response.get_id())
            self.assertEqual(isinstance(datastreamResp, Schemas.Datastream), True, 'Invalid Datastream object in get response')
self.assertEqual(response.get_id(), datastreamResp.get_id(), 'Invalid id of datastream after creation')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Delete Datastream
def test_delete_datastream_by_id(self):
datastream = Schemas.Datastream()
datastream.set_name('Motor Health' + str(random.random()))
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
time.set_zone("GMT")
time.set_identifier("time")
time.set_format("iso_8601")
field.set_signal(signal)
datasource.set_type("STANDALONE")
field.set_time(time)
datastream.set_datasource(datasource)
datastream.set_field(field)
try:
response = self.fclient.create_datastream(datastream)
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),response.get_name(),'Invalid entity name object after creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
# delete datastream
try:
self.fclient.delete_datastream(response.get_id())
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot delete datastream')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Create Datastream microseconds precision
def test_create_datastream_micro_second_precision(self):
datastream = Schemas.Datastream()
datastream.set_name('Motor Health' + str(random.random()))
datastream.set_time_precision('micro') # set 'micro' for microseconds precision
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
time.set_zone("GMT")
time.set_identifier("time")
time.set_format("iso_8601")
signal.set_signalIdentifier("signal")
signal.set_valueIdentifier("value")
field.set_entityIdentifier("entity")
field.set_signal(signal)
datasource.set_type("STANDALONE")
field.set_time(time)
datastream.set_datasource(datasource)
datastream.set_field(field)
try:
# create Datastream
response = self.fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),None,'Invalid entity name object after creation')
signalResponse = fieldResponse.get_signal()
self.assertEqual(signalResponse.get_signalIdentifier(), "signal", 'Invalid signal identifier object after creation')
self.assertEqual(signalResponse.get_valueIdentifier(),signal.get_valueIdentifier(), 'Invalid value identifier after object creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
self.assertEqual(response.get_time_precision(), datastream.get_time_precision(), 'Invalid time precision after creation')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
# Create Datastream for batch identifier
def test_create_datastream_with_batch_identifier(self):
fclient = FClient(host=host, token=token,options=None)
datastream = Schemas.Datastream()
datasource = Schemas.Datasource()
field = Schemas.Field()
time = Schemas.Time()
signal = Schemas.Signal()
input1 = Schemas.Input()
input2 = Schemas.Input()
input3 = Schemas.Input()
datastream.set_name('Motor Health' + str(random.random())) # set name of the Datastream
input1.set_name("Signal1") # set name of input signal
input1.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input1.set_event_type("Samples") # set event type of input signal
input2.set_name("Signal2") # set name of input signal
input2.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input2.set_event_type("Samples") # set event type of input signal
input3.set_name("Signal3") # set name of input signal
input3.set_value_type("Numeric") # set value type of input signal (Numeric for number, Categorical for string type)
input3.set_event_type("Samples") # set event type of input signal
inputs = []
inputs.append(input1)
inputs.append(input2)
inputs.append(input3)
time.set_zone("GMT") # set timezone of the datastream
time.set_identifier("time") # set time identifier of the datastream
time.set_format("iso_8601") # set time format of the datastream
field.set_time(time)
field.set_signal(signal) # set signal in field
field.set_batchIdentifier("batch") # set batchIdentifier in field
datasource.set_type("STANDALONE") # set datastource type in datastream
datastream.set_datasource(datasource)
datastream.set_field(field)
datastream.set_inputs(inputs)
try:
# create Datastream
response = fclient.create_datastream(datastream)
self.created_datastreams.append(response.get_id())
self.assertEqual(isinstance(response, Schemas.Datastream), True, 'Invalid Datastream object after creation')
self.assertEqual(isinstance(response.get_id(), str), True, 'Invalid id of datastream after creation')
self.assertEqual(response.get_name(), datastream.get_name(), 'Invalid name of Datastream after creation')
fieldResponse = response.get_field()
self.assertEqual(isinstance(fieldResponse, Schemas.Field), True, 'Invalid field in Datastream object after creation')
self.assertEqual(fieldResponse.get_entityIdentifier(),"entity",'Invalid entity identifier object after creation')
self.assertEqual(fieldResponse.get_entityName(),response.get_name(),'Invalid entity name object after creation')
self.assertEqual(fieldResponse.get_batchIdentifier(),"batch",'Invalid batchIdentifier after creation')
timeResponse = fieldResponse.get_time()
self.assertEqual(isinstance(timeResponse, Schemas.Time), True, 'Invalid time object after creation')
self.assertEqual(timeResponse.get_zone(), time.get_zone(), 'Invalid zone object after creation')
self.assertEqual(timeResponse.get_identifier(), time.get_identifier(), 'Invalid time identifier object after creation')
self.assertEqual(timeResponse.get_format(), time.get_format(), 'Invalid time format object after creation')
inputs = response.get_inputs()
self.assertEqual(isinstance(inputs, list), True, 'Invalid inputs object after creation')
self.assertEqual(len(inputs), 3, 'Invalid inputs object after creation')
            inputResp1 = inputs[0]
            inputResp2 = inputs[1]
            inputResp3 = inputs[2]
self.assertEqual(inputResp1.get_name(), input1.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp1.get_value_type(), input1.get_value_type(),'Invalid input value type after object creation')
self.assertEqual(inputResp2.get_name(), input2.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp2.get_value_type(), input2.get_value_type(),'Invalid input value type after object creation')
self.assertEqual(inputResp3.get_name(), input3.get_name(),'Invalid input after object creation')
self.assertEqual(inputResp3.get_value_type(), input3.get_value_type(),'Invalid input value type after object creation')
except Exception as e:
print(exception_handler(e))
self.assertEqual(0, 1, 'Cannot create datastream')
def tearDown(self): # teardown
for ds in self.created_datastreams:
try:
self.fclient.delete_datastream(ds)
except Exception as e:
print(exception_handler(e))
pass
if __name__ == '__main__':
if __package__ is None:
import sys
from os import path
sys.path.append(
path.dirname(
path.dirname(
path.abspath(__file__)
)
)
)
from falkonryclient import schemas as Schemas
from falkonryclient import client as FClient
from falkonryclient.helper.utils import exception_handler
else:
from ..falkonryclient import schemas as Schemas
from ..falkonryclient import client as FClient
from ..falkonryclient.helper.utils import exception_handler
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output='out'),
failfast=False, buffer=False, catchbreak=False)
else:
from falkonryclient import schemas as Schemas
from falkonryclient import client as FClient
from falkonryclient.helper.utils import exception_handler
|
Falkonry/falkonry-python-client
|
test/TestDatastream.py
|
Python
|
mit
| 35,686
|
# Assignment:
#############
#
# The task is to implement text compression using Huffman coding.
#
# Input:
# * two command-line arguments: the name of the input file and the name
#   of the output file
#
# The input file is a text file containing lines of varying length; the text
# contains only letters of the alphabet (lower and upper case) and digits.
#
# Assume reasonably long lines (e.g. at most 1000 characters per line).
#
# The program reads the input file, counts the frequency of the individual
# characters and computes their compressed image with the Huffman coding
# algorithm.
#
# This coding is stored in the output file in the form: character code
#
# Each line holds one character and one code.
# The number of output lines therefore equals the size of the alphabet (the
# number of distinct characters) contained in the input file.
# Build the code only for upper- and lower-case letters and for digits; ignore
# white-space characters.
#
# The order of the characters in the output file does not matter.
# Letter case matters (e.g. the characters 'a' and 'A' get different codes).
# Submit to the submission system as huffman.py, assignment HW10.
#
# Example:
"""
aaaaaa bbbb cc dddd ee
d
aa d d
d a b c
"""
# Example invocation:
#
# > python3 huffman.py input.txt output.txt
#
# The output in output.txt will contain:
"""
c 100
e 101
a 00
d 01
b 11
"""
###############################################################################
import sys   # Needed for reading the command-line arguments
import copy  # Just for my peace of mind
# The following import is very handy for debugging; after uncommenting you can
# pretty-print dictionaries and lists with:
#
# pp = pprint.PrettyPrinter( indent = 4)
# pp.pprint( <insert_here> )
#
# import pprint
def countCharactersInFile( filename ):
    """
    Counts the occurrences of English-alphabet letters and digits. It
    distinguishes between upper- and lower-case letters.
    Parameters:
    ----------
    filename: string
        Name of the file in which to count the characters.
    Returns:
    ------
    list
        A list of dictionaries:
        {
            "name": The digit or letter character
            "count": Number of occurrences in the file
        }
        The dictionaries are sorted in ascending order by the count value.
    """
    characters = []
    # Open the file for reading
    with open( filename ) as file:
        # Walk through every line
        for line in file:
            # Walk through every character on the line
            for char in line:
                # If it is a letter or a digit
                if char.isalpha() or char.isdecimal():
                    # Try to find the character in the characters list;
                    # returns None if it does not exist.
                    char_data = next( (n for n in characters if n["name"] == char), None )
                    # If this is the first occurrence of this character
                    if char_data == None:
                        # Create its representation and append it to the list
                        char_data = {"name": char, "count": 0 }
                        characters.append( char_data )
                    # Increase the counter by 1
                    char_data["count"] += 1
    # Return the list of dictionaries sorted in ascending order by the
    # character frequency in the file
    return sorted( characters, key = lambda elem: elem["count"] )
def buildHuffmanTree( probability ):
    """
    Builds the tree for further processing by Huffman coding.
    Parameters:
    ----------
    probability: list
        A list of dictionaries in the format:
        {
            "name": The digit or letter character
            "count": Number of occurrences in the file
        }
        The list must be sorted from the lowest to the highest count.
    Returns:
    ------
    dictionary
        A dictionary in the format:
        {
            "name": Character/digit (for a counted character) or an empty
                    string (for a placeholder node)
            "count": Value of the node
            "left": Present only when "name" is empty. Contains a
                    dictionary.
            "right": Present only when "name" is empty. Contains a
                    dictionary.
        }
    """
    # I do not want to modify the original parameter.
    # Just to be safe; this may well be unnecessary :(
    tree = copy.deepcopy( probability )
    # Until only a single element remains
    while len( tree ) > 1:
        # Take the two nodes with the lowest rank (the lowest count).
        # The list is sorted from the lowest to the highest, so pop suffices.
        right = tree.pop(0) # Smaller numbers go to the right so it works out
        left = tree.pop(0)
        # Create the parent node
        parent = {
            "name": "", # An empty name marks a "parent" node
            "count": left["count"] + right["count"],
            "left": left,
            "right": right,
        }
        # Add the parent node back
        tree.append( parent )
        # Re-sort the list
        tree = sorted( tree, key = lambda elem: elem["count"] )
    # Return only the remaining dictionary; the list wrapper is redundant
    return tree[0]
def getHuffmanCoding( tree, prefix="", code=None ):
    """
    Recursively walks the given tree and labels the individual characters
    according to the rules of Huffman coding.
    That is, an edge going left from its parent is labelled 0 and an edge
    going right is labelled 1.
    The code of a leaf is the sequence of edge labels on the path to it.
    Parameters:
    ----------
    tree: dictionary
        Dictionary representing the tree to traverse
    prefix: string
        String containing the code of the path to this node
    code: dictionary
        Dictionary of letter/digit (key) and code pairs.
    Returns:
    ------
    dictionary
        Dictionary of letter/digit (key) and code pairs.
    """
    # A mutable default argument would leak state between calls, so the
    # dictionary is created here instead.
    if code is None:
        code = {}
    # If the left child is just a "parent node"
    if tree["left"]["name"] == "":
        getHuffmanCoding( tree["left"], prefix+"0", code )
    else:
        # The node is a character/digit; create its code right away
        code[ tree["left"]["name"] ] = prefix+"0"
    # If the right child is just a "parent node"
    if tree["right"]["name"] == "":
        getHuffmanCoding( tree["right"], prefix+"1", code )
    else:
        # The node is a character/digit; create its code right away
        code[ tree["right"]["name"] ] = prefix+"1"
    return code
# Number of command-line arguments
argc = len( sys.argv )
# We need three arguments
if argc != 3:
    print("Usage:")
    print("huffman.py input_file_name output_file_name")
    exit()
# Frequencies of the individual characters
frequency = countCharactersInFile( sys.argv[1] )
# Tree representation
tree = buildHuffmanTree( frequency )
# Huffman coding
codes = getHuffmanCoding( tree )
# Write to the output file
with open( sys.argv[2], "w") as f:
    for key in codes:
        print( key, codes[key], file=f )
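# Optional sanity check (an editorial addition, not part of the original
# assignment): a valid Huffman code is prefix-free, i.e. no character's code
# is a prefix of another character's code.
def isPrefixFree( code_map ):
    values = list( code_map.values() )
    return not any(
        a != b and b.startswith( a )
        for a in values for b in values
    )
assert isPrefixFree( codes ), "generated code is not prefix-free"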
|
malja/cvut-python
|
cviceni11/huffman.py
|
Python
|
mit
| 7,419
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ErpInventory(IdentifiedObject):
"""Utility inventory-related information about an item or part (and not for description of the item and its attributes). It is used by ERP applications to enable the synchronization of Inventory data that exists on separate Item Master databases. This data is not the master data that describes the attributes of the item such as dimensions, weight, or unit of measure - it describes the item as it exists at a specific location.Utility inventory-related information about an item or part (and not for description of the item and its attributes). It is used by ERP applications to enable the synchronization of Inventory data that exists on separate Item Master databases. This data is not the master data that describes the attributes of the item such as dimensions, weight, or unit of measure - it describes the item as it exists at a specific location.
"""
def __init__(self, Asset=None, status=None, *args, **kw_args):
"""Initialises a new 'ErpInventory' instance.
@param Asset:
@param status:
"""
self._Asset = None
self.Asset = Asset
self.status = status
super(ErpInventory, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["Asset", "status"]
_many_refs = []
def getAsset(self):
return self._Asset
def setAsset(self, value):
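        # Maintain the bidirectional one-to-one link: detach the previously
        # assigned Asset (if any) before pointing the new Asset back here.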
if self._Asset is not None:
self._Asset._ErpInventory = None
self._Asset = value
if self._Asset is not None:
self._Asset.ErpInventory = None
self._Asset._ErpInventory = self
Asset = property(getAsset, setAsset)
status = None
|
rwl/PyCIM
|
CIM15/IEC61970/Informative/InfERPSupport/ErpInventory.py
|
Python
|
mit
| 2,914
|
#!/usr/bin/python
from TimeWorked import *
import os
files = [TIME_WORKED]
print get_total_time_by_day_files(files)
raw_input()
|
JasonGross/time-worked
|
pastWeekTime.py
|
Python
|
mit
| 128
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, re
from frappe.utils import touch_file, encode, cstr
def make_boilerplate(dest, app_name):
if not os.path.exists(dest):
print "Destination directory does not exist"
return
# app_name should be in snake_case
app_name = frappe.scrub(app_name)
hooks = frappe._dict()
hooks.app_name = app_name
app_title = hooks.app_name.replace("_", " ").title()
for key in ("App Title (default: {0})".format(app_title),
"App Description", "App Publisher", "App Email",
"App Icon (default 'octicon octicon-file-directory')",
"App Color (default 'grey')",
"App License (default 'MIT')"):
hook_key = key.split(" (")[0].lower().replace(" ", "_")
hook_val = None
while not hook_val:
hook_val = cstr(raw_input(key + ": "))
if not hook_val:
defaults = {
"app_title": app_title,
"app_icon": "octicon octicon-file-directory",
"app_color": "grey",
"app_license": "MIT"
}
if hook_key in defaults:
hook_val = defaults[hook_key]
if hook_key=="app_name" and hook_val.lower().replace(" ", "_") != hook_val:
print "App Name must be all lowercase and without spaces"
hook_val = ""
elif hook_key=="app_title" and not re.match("^(?![\W])[^\d_\s][\w -]+$", hook_val, re.UNICODE):
print "App Title should start with a letter and it can only consist of letters, numbers, spaces and underscores"
hook_val = ""
hooks[hook_key] = hook_val
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, frappe.scrub(hooks.app_title)),
with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "www"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"pages"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"includes"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "config"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"css"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"js"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "__init__.py"), "w") as f:
f.write(encode(init_template))
with open(os.path.join(dest, hooks.app_name, "MANIFEST.in"), "w") as f:
f.write(encode(manifest_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, ".gitignore"), "w") as f:
f.write(encode(gitignore_template.format(app_name = hooks.app_name)))
with open(os.path.join(dest, hooks.app_name, "setup.py"), "w") as f:
f.write(encode(setup_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, "requirements.txt"), "w") as f:
f.write("frappe")
with open(os.path.join(dest, hooks.app_name, "README.md"), "w") as f:
f.write(encode("## {0}\n\n{1}\n\n#### License\n\n{2}".format(hooks.app_title,
hooks.app_description, hooks.app_license)))
with open(os.path.join(dest, hooks.app_name, "license.txt"), "w") as f:
f.write(encode("License: " + hooks.app_license))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "modules.txt"), "w") as f:
f.write(encode(hooks.app_title))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "hooks.py"), "w") as f:
f.write(encode(hooks_template.format(**hooks)))
touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, "patches.txt"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "desktop.py"), "w") as f:
f.write(encode(desktop_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "docs.py"), "w") as f:
f.write(encode(docs_template.format(**hooks)))
print "'{app}' created at {path}".format(app=app_name, path=os.path.join(dest, app_name))
manifest_template = """include MANIFEST.in
include requirements.txt
include *.json
include *.md
include *.py
include *.txt
recursive-include {app_name} *.css
recursive-include {app_name} *.csv
recursive-include {app_name} *.html
recursive-include {app_name} *.ico
recursive-include {app_name} *.js
recursive-include {app_name} *.json
recursive-include {app_name} *.md
recursive-include {app_name} *.png
recursive-include {app_name} *.py
recursive-include {app_name} *.svg
recursive-include {app_name} *.txt
recursive-exclude {app_name} *.pyc"""
init_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '0.0.1'
"""
hooks_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "{app_name}"
app_title = "{app_title}"
app_publisher = "{app_publisher}"
app_description = "{app_description}"
app_icon = "{app_icon}"
app_color = "{app_color}"
app_email = "{app_email}"
app_license = "{app_license}"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/{app_name}/css/{app_name}.css"
# app_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js, css files in header of web template
# web_include_css = "/assets/{app_name}/css/{app_name}.css"
# web_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js in page
# page_js = {{"page" : "public/js/file.js"}}
# include js in doctype views
# doctype_js = {{"doctype" : "public/js/doctype.js"}}
# doctype_list_js = {{"doctype" : "public/js/doctype_list.js"}}
# doctype_tree_js = {{"doctype" : "public/js/doctype_tree.js"}}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {{
# "Role": "home_page"
# }}
# Website user home page (by function)
# get_website_user_home_page = "{app_name}.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "{app_name}.install.before_install"
# after_install = "{app_name}.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "{app_name}.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {{
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }}
#
# has_permission = {{
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {{
# "*": {{
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }}
# }}
# Scheduled Tasks
# ---------------
# scheduler_events = {{
# "all": [
# "{app_name}.tasks.all"
# ],
# "daily": [
# "{app_name}.tasks.daily"
# ],
# "hourly": [
# "{app_name}.tasks.hourly"
# ],
# "weekly": [
# "{app_name}.tasks.weekly"
# ]
# "monthly": [
# "{app_name}.tasks.monthly"
# ]
# }}
# Testing
# -------
# before_tests = "{app_name}.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {{
# "frappe.desk.doctype.event.event.get_events": "{app_name}.event.get_events"
# }}
"""
desktop_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{{
"module_name": "{app_title}",
"color": "{app_color}",
"icon": "{app_icon}",
"type": "module",
"label": _("{app_title}")
}}
]
"""
setup_template = """# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in {app_name}/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('{app_name}/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='{app_name}',
version=version,
description='{app_description}',
author='{app_publisher}',
author_email='{app_email}',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link]
)
"""
gitignore_template = """.DS_Store
*.pyc
*.egg-info
*.swp
tags
{app_name}/docs/current"""
docs_template = '''"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/{app_name}"
# docs_base_url = "https://[org_name].github.io/{app_name}"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "{app_title}"
'''
|
rohitwaghchaure/frappe
|
frappe/utils/boilerplate.py
|
Python
|
mit
| 9,035
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script adds a missing references section to pages.
It goes over multiple pages, searches for pages where <references />
is missing although a <ref> tag is present, and in that case adds a new
references section.
These command line parameters can be used to specify which pages to work on:
¶ms;
-xml Retrieve information from a local XML dump (pages-articles
or pages-meta-current, see https://download.wikimedia.org).
Argument can also be given as "-xml:filename".
-namespace:n Number or name of namespace to process. The parameter can be
used multiple times. It works in combination with all other
parameters, except for the -start parameter. If you e.g.
want to iterate over all categories starting at M, use
-start:Category:M.
-always Don't prompt you for each replacement.
-quiet Use this option to get less output
If neither a page title nor a page generator is given, it takes all pages from
the default maintenance category.
It is strongly recommended not to run this script over the entire article
namespace (using the -start parameter), as that would consume too much
bandwidth. Instead, use the -xml parameter, or use another way to generate
a list of affected articles.
"""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import re
import pywikibot
from pywikibot import i18n, pagegenerators, textlib, Bot
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
}
# References sections are usually placed before further reading / external
# link sections. This dictionary defines these sections, sorted by priority.
# For example, on an English wiki, the script would place the "References"
# section in front of the "Further reading" section, if that existed.
# Otherwise, it would try to put it in front of the "External links" section,
# or if that fails, the "See also" section, etc.
placeBeforeSections = {
'ar': [ # no explicit policy on where to put the references
u'وصلات خارجية',
u'انظر أيضا',
u'ملاحظات'
],
'ca': [
u'Bibliografia',
u'Bibliografia complementària',
u'Vegeu també',
u'Enllaços externs',
u'Enllaços',
],
'cs': [
u'Externí odkazy',
u'Poznámky',
],
'da': [ # no explicit policy on where to put the references
u'Eksterne links'
],
'de': [ # no explicit policy on where to put the references
u'Literatur',
u'Weblinks',
u'Siehe auch',
u'Weblink', # bad, but common singular form of Weblinks
],
'dsb': [
u'Nožki',
],
'en': [ # no explicit policy on where to put the references
u'Further reading',
u'External links',
u'See also',
u'Notes'
],
'ru': [
u'Ссылки',
u'Литература',
],
'eo': [
u'Eksteraj ligiloj',
u'Ekstera ligilo',
u'Eksteraj ligoj',
u'Ekstera ligo',
u'Rete'
],
'es': [
u'Enlaces externos',
u'Véase también',
u'Notas',
],
'fa': [
u'پیوند به بیرون',
u'پانویس',
u'جستارهای وابسته'
],
'fi': [
u'Kirjallisuutta',
u'Aiheesta muualla',
u'Ulkoiset linkit',
u'Linkkejä',
],
'fr': [
u'Liens externes',
u'Voir aussi',
u'Notes'
],
'he': [
u'ראו גם',
u'לקריאה נוספת',
u'קישורים חיצוניים',
u'הערות שוליים',
],
'hsb': [
u'Nóžki',
],
'hu': [
u'Külső hivatkozások',
u'Lásd még',
],
'it': [
u'Bibliografia',
u'Voci correlate',
u'Altri progetti',
u'Collegamenti esterni',
u'Vedi anche',
],
'ja': [
u'関連項目',
u'参考文献',
u'外部リンク',
],
'ko': [ # no explicit policy on where to put the references
u'외부 링크',
u'외부링크',
u'바깥 고리',
u'바깥고리',
u'바깥 링크',
        u'바깥링크',
u'외부 고리',
u'외부고리'
],
'lt': [ # no explicit policy on where to put the references
u'Nuorodos'
],
'nl': [ # no explicit policy on where to put the references
u'Literatuur',
u'Zie ook',
u'Externe verwijzingen',
u'Externe verwijzing',
],
'pdc': [
u'Beweisunge',
u'Quelle unn Literatur',
u'Gwelle',
u'Gwuelle',
u'Auswenniche Gleecher',
u'Gewebbgleecher',
u'Guckt mol aa',
u'Seh aa',
],
'pl': [
u'Źródła',
u'Bibliografia',
u'Zobacz też',
u'Linki zewnętrzne',
],
'pt': [
u'Ligações externas',
u'Veja também',
u'Ver também',
u'Notas',
],
'sk': [
u'Pozri aj',
],
'szl': [
u'Przipisy',
u'Připisy',
],
'th': [
u'อ่านเพิ่มเติม',
u'แหล่งข้อมูลอื่น',
u'ดูเพิ่ม',
u'หมายเหตุ',
],
'zh': [
u'外部链接',
u'外部連结',
u'外部連結',
u'外部连接',
],
}
# Titles of sections where a reference tag would fit into.
# The first title should be the preferred one: It's the one that
# will be used when a new section has to be created.
referencesSections = {
'ar': [ # not sure about which ones are preferred.
u'مراجع',
u'المراجع',
u'مصادر',
u'المصادر',
u'مراجع ومصادر',
u'مصادر ومراجع',
u'المراجع والمصادر',
u'المصادر والمراجع',
],
'ca': [
u'Referències',
],
'cs': [
u'Reference',
u'Poznámky',
],
'da': [
u'Noter',
],
'de': [ # see [[de:WP:REF]]
u'Einzelnachweise',
u'Anmerkungen',
u'Belege',
u'Endnoten',
u'Fußnoten',
u'Fuß-/Endnoten',
u'Quellen',
u'Quellenangaben',
],
'dsb': [
u'Nožki',
],
'en': [ # not sure about which ones are preferred.
u'References',
u'Footnotes',
u'Notes',
],
'ru': [
u'Примечания',
u'Сноски',
u'Источники',
],
'eo': [
u'Referencoj',
],
'es': [
u'Referencias',
u'Notas',
],
'fa': [
u'منابع',
u'منبع'
],
'fi': [
u'Lähteet',
u'Viitteet',
],
'fr': [ # [[fr:Aide:Note]]
u'Notes et références',
u'Références',
u'References',
u'Notes'
],
'he': [
u'הערות שוליים',
],
'hsb': [
u'Nóžki',
],
'hu': [
u'Források és jegyzetek',
u'Források',
u'Jegyzetek',
u'Hivatkozások',
u'Megjegyzések',
],
'is': [
u'Heimildir',
u'Tilvísanir',
],
'it': [
u'Note',
u'Riferimenti',
],
'ja': [
u'脚注',
u'脚注欄',
u'脚注・出典',
u'出典',
u'注釈',
u'註',
],
'ko': [
u'주석',
        u'각주',
        u'주석 및 참고 자료',
u'주석 및 참고자료',
u'주석 및 참고 출처'
],
'lt': [ # not sure about which ones are preferred.
u'Šaltiniai',
u'Literatūra',
],
'nl': [ # not sure about which ones are preferred.
u'Voetnoten',
u'Voetnoot',
u'Referenties',
u'Noten',
u'Bronvermelding',
],
'pdc': [
u'Aamarrickunge',
],
'pl': [
u'Przypisy',
u'Uwagi',
],
'pt': [
u'Referências',
],
'sk': [
u'Referencie',
],
'szl': [
u'Przipisy',
u'Připisy',
],
'th': [
u'อ้างอิง',
u'เชิงอรรถ',
u'หมายเหตุ',
],
'zh': [
u'參考資料',
u'参考资料',
u'參考文獻',
u'参考文献',
u'資料來源',
u'资料来源',
],
}
# Templates which include a <references /> tag. If there is no such template
# on your wiki, you don't have to enter anything here.
referencesTemplates = {
'wikipedia': {
'ar': ['Reflist', 'مراجع', 'ثبت المراجع', 'ثبت_المراجع',
'بداية المراجع', 'نهاية المراجع'],
'be': [u'Зноскі', u'Примечания', u'Reflist', u'Спіс заўваг',
u'Заўвагі'],
'be-tarask': [u'Зноскі'],
'ca': [u'Referències', u'Reflist', u'Listaref', u'Referència',
u'Referencies', u'Referències2',
u'Amaga', u'Amaga ref', u'Amaga Ref', u'Amaga Ref2', u'Apèndix'],
'da': [u'Reflist'],
'dsb': [u'Referency'],
'en': [u'Reflist', u'Refs', u'FootnotesSmall', u'Reference',
u'Ref-list', u'Reference list', u'References-small', u'Reflink',
u'Footnotes', u'FootnotesSmall'],
'eo': [u'Referencoj'],
'es': ['Listaref', 'Reflist', 'muchasref'],
'fa': [u'Reflist', u'Refs', u'FootnotesSmall', u'Reference',
u'پانویس', u'پانویسها ', u'پانویس ۲', u'پانویس۲',
u'فهرست منابع'],
'fi': [u'Viitteet', u'Reflist'],
'fr': [u'Références', u'Notes', u'References', u'Reflist'],
'he': [u'הערות שוליים', u'הערה'],
'hsb': [u'Referency'],
'hu': [u'reflist', u'források', u'references', u'megjegyzések'],
'is': [u'reflist'],
'it': [u'References'],
'ja': [u'Reflist', u'脚注リスト'],
'ko': [u'주석', u'Reflist'],
'lt': [u'Reflist', u'Ref', u'Litref'],
'nl': [u'Reflist', u'Refs', u'FootnotesSmall', u'Reference',
u'Ref-list', u'Reference list', u'References-small', u'Reflink',
u'Referenties', u'Bron', u'Bronnen/noten/referenties', u'Bron2',
u'Bron3', u'ref', u'references', u'appendix',
u'Noot', u'FootnotesSmall'],
'pl': [u'Przypisy', u'Przypisy-lista', u'Uwagi'],
'pt': [u'Notas', u'ref-section', u'Referências', u'Reflist'],
'ru': [u'Reflist', u'Ref-list', u'Refs', u'Sources',
u'Примечания', u'Список примечаний',
u'Сноска', u'Сноски'],
'szl': [u'Przipisy', u'Připisy'],
'th': [u'รายการอ้างอิง'],
'zh': [u'Reflist', u'RefFoot', u'NoteFoot'],
},
}
# Text to be added instead of the <references /> tag.
# Define this only if required by your wiki.
referencesSubstitute = {
'wikipedia': {
'ar': u'{{مراجع}}',
'be': u'{{зноскі}}',
'da': u'{{reflist}}',
'dsb': u'{{referency}}',
'fa': u'{{پانویس}}',
'fi': u'{{viitteet}}',
'he': u'{{הערות שוליים}}',
'hsb': u'{{referency}}',
'hu': u'{{Források}}',
'pl': u'{{Przypisy}}',
'ru': u'{{примечания}}',
'szl': u'{{Przipisy}}',
'th': u'{{รายการอ้างอิง}}',
'zh': u'{{reflist}}',
},
}
# Sites where no title is required for references template
# as it is already included there
# like pl.wiki where {{Przypisy}} generates
# == Przypisy ==
# <references />
noTitleRequired = [u'pl', u'be', u'szl']
maintenance_category = 'cite_error_refs_without_references_category'
class XmlDumpNoReferencesPageGenerator:
"""
Generator which will yield Pages that might lack a references tag.
These pages will be retrieved from a local XML dump file
(pages-articles or pages-meta-current).
"""
def __init__(self, xmlFilename):
"""
Constructor.
Arguments:
* xmlFilename - The dump's path, either absolute or relative
"""
self.xmlFilename = xmlFilename
self.refR = re.compile('</ref>', re.IGNORECASE)
# The references tab can contain additional spaces and a group
# attribute.
self.referencesR = re.compile('<references.*?/>', re.IGNORECASE)
def __iter__(self):
"""XML iterator."""
from pywikibot import xmlreader
dump = xmlreader.XmlDump(self.xmlFilename)
for entry in dump.parse():
text = textlib.removeDisabledParts(entry.text)
if self.refR.search(text) and not self.referencesR.search(text):
yield pywikibot.Page(pywikibot.Site(), entry.title)
class NoReferencesBot(Bot):
"""References section bot."""
def __init__(self, generator, **kwargs):
"""Constructor."""
self.availableOptions.update({
'verbose': True,
})
super(NoReferencesBot, self).__init__(**kwargs)
self.generator = pagegenerators.PreloadingGenerator(generator)
self.site = pywikibot.Site()
self.comment = i18n.twtranslate(self.site, 'noreferences-add-tag')
self.refR = re.compile('</ref>', re.IGNORECASE)
self.referencesR = re.compile('<references.*?/>', re.IGNORECASE)
self.referencesTagR = re.compile('<references>.*?</references>',
re.IGNORECASE | re.DOTALL)
try:
self.referencesTemplates = referencesTemplates[
self.site.family.name][self.site.code]
except KeyError:
self.referencesTemplates = []
try:
self.referencesText = referencesSubstitute[
self.site.family.name][self.site.code]
except KeyError:
self.referencesText = u'<references />'
def lacksReferences(self, text):
"""Check whether or not the page is lacking a references tag."""
oldTextCleaned = textlib.removeDisabledParts(text)
if self.referencesR.search(oldTextCleaned) or \
self.referencesTagR.search(oldTextCleaned):
if self.getOption('verbose'):
pywikibot.output(u'No changes necessary: references tag found.')
return False
elif self.referencesTemplates:
templateR = u'{{(' + u'|'.join(self.referencesTemplates) + ')'
if re.search(templateR, oldTextCleaned, re.IGNORECASE | re.UNICODE):
if self.getOption('verbose'):
pywikibot.output(
u'No changes necessary: references template found.')
return False
if not self.refR.search(oldTextCleaned):
if self.getOption('verbose'):
pywikibot.output(u'No changes necessary: no ref tags found.')
return False
else:
if self.getOption('verbose'):
pywikibot.output(u'Found ref without references.')
return True
def addReferences(self, oldText):
"""
Add a references tag into an existing section where it fits into.
If there is no such section, creates a new section containing
the references tag.
* Returns : The modified pagetext
"""
        # Do we have a malformed <references> tag which could be repaired?
        # Repair two opening tags, or an opening and an empty tag
        pattern = re.compile(r'< *references *>(.*?)'
                             r'< */?\s*references */? *>', re.DOTALL)
        if pattern.search(oldText):
            pywikibot.output('Repairing references tag')
            return re.sub(pattern, r'<references>\1</references>', oldText)
# Repair single unclosed references tag
pattern = re.compile(r'< *references *>')
if pattern.search(oldText):
pywikibot.output('Repairing references tag')
return re.sub(pattern, '<references />', oldText)
# Is there an existing section where we can add the references tag?
for section in i18n.translate(self.site, referencesSections):
sectionR = re.compile(r'\r?\n=+ *%s *=+ *\r?\n' % section)
index = 0
while index < len(oldText):
match = sectionR.search(oldText, index)
if match:
if textlib.isDisabled(oldText, match.start()):
pywikibot.output(
'Existing %s section is commented out, skipping.'
% section)
index = match.end()
else:
pywikibot.output(
'Adding references tag to existing %s section...\n'
% section)
newText = (
oldText[:match.end()] + u'\n' +
self.referencesText + u'\n' +
oldText[match.end():]
)
return newText
else:
break
# Create a new section for the references tag
for section in i18n.translate(self.site, placeBeforeSections):
# Find out where to place the new section
sectionR = re.compile(r'\r?\n(?P<ident>=+) *%s *(?P=ident) *\r?\n'
% section)
index = 0
while index < len(oldText):
match = sectionR.search(oldText, index)
if match:
if textlib.isDisabled(oldText, match.start()):
pywikibot.output(
'Existing %s section is commented out, won\'t add '
'the references in front of it.' % section)
index = match.end()
else:
pywikibot.output(
u'Adding references section before %s section...\n'
% section)
index = match.start()
ident = match.group('ident')
return self.createReferenceSection(oldText, index,
ident)
else:
break
# This gets complicated: we want to place the new references
# section over the interwiki links and categories, but also
# over all navigation bars, persondata, and other templates
# that are at the bottom of the page. So we need some advanced
# regex magic.
# The strategy is: create a temporary copy of the text. From that,
# keep removing interwiki links, templates etc. from the bottom.
# At the end, look at the length of the temp text. That's the position
# where we'll insert the references section.
catNamespaces = '|'.join(self.site.namespaces.CATEGORY)
categoryPattern = r'\[\[\s*(%s)\s*:[^\n]*\]\]\s*' % catNamespaces
interwikiPattern = r'\[\[([a-zA-Z\-]+)\s?:([^\[\]\n]*)\]\]\s*'
# won't work with nested templates
# the negative lookahead assures that we'll match the last template
# occurrence in the temp text.
# FIXME:
# {{commons}} or {{commonscat}} are part of Weblinks section
# * {{template}} is mostly part of a section
# so templatePattern must be fixed
templatePattern = r'\r?\n{{((?!}}).)+?}}\s*'
commentPattern = r'<!--((?!-->).)*?-->\s*'
metadataR = re.compile(r'(\r?\n)?(%s|%s|%s|%s)$'
% (categoryPattern, interwikiPattern,
templatePattern, commentPattern), re.DOTALL)
tmpText = oldText
while True:
match = metadataR.search(tmpText)
if match:
tmpText = tmpText[:match.start()]
else:
break
        pywikibot.output(
            u'Found no section that can be preceded by a new references '
            u'section.\nPlacing it before interwiki links, categories, and '
            u'bottom templates.')
index = len(tmpText)
return self.createReferenceSection(oldText, index)
def createReferenceSection(self, oldText, index, ident='=='):
"""Create a reference section and insert it into the given text."""
if self.site.code in noTitleRequired:
newSection = u'\n%s\n' % (self.referencesText)
else:
newSection = u'\n%s %s %s\n%s\n' % (ident,
i18n.translate(
self.site,
referencesSections)[0],
ident, self.referencesText)
return oldText[:index] + newSection + oldText[index:]
def run(self):
"""Run the bot."""
for page in self.generator:
self.current_page = page
try:
text = page.text
except pywikibot.NoPage:
pywikibot.output(u"Page %s does not exist?!"
% page.title(asLink=True))
continue
except pywikibot.IsRedirectPage:
pywikibot.output(u"Page %s is a redirect; skipping."
% page.title(asLink=True))
continue
except pywikibot.LockedPage:
pywikibot.output(u"Page %s is locked?!"
% page.title(asLink=True))
continue
if page.isDisambig():
pywikibot.output(u"Page %s is a disambig; skipping."
% page.title(asLink=True))
continue
if self.site.sitename == 'wikipedia:en' and page.isIpEdit():
                pywikibot.output(
                    u"Page %s was edited by an IP; possibly vandalized. Skipping."
                    % page.title(asLink=True))
continue
if self.lacksReferences(text):
newText = self.addReferences(text)
try:
self.userPut(page, page.text, newText, summary=self.comment)
except pywikibot.EditConflict:
pywikibot.output(u'Skipping %s because of edit conflict'
% page.title())
except pywikibot.SpamfilterError as e:
pywikibot.output(
u'Cannot change %s because of blacklist entry %s'
% (page.title(), e.url))
except pywikibot.LockedPage:
pywikibot.output(u'Skipping %s (locked page)' % page.title())
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
for arg in local_args:
if arg.startswith('-xml'):
if len(arg) == 4:
xmlFilename = i18n.input('pywikibot-enter-xml-filename')
else:
xmlFilename = arg[5:]
genFactory.gens.append(XmlDumpNoReferencesPageGenerator(xmlFilename))
elif arg == '-always':
options['always'] = True
elif arg == '-quiet':
options['verbose'] = False
else:
genFactory.handleArg(arg)
gen = genFactory.getCombinedGenerator()
if not gen:
site = pywikibot.Site()
try:
cat = site.expand_text(
site.mediawiki_message(maintenance_category))
except:
pass
else:
cat = pywikibot.Category(site, "%s:%s" % (
site.namespaces.CATEGORY, cat))
gen = cat.articles(namespaces=genFactory.namespaces or [0])
if gen:
bot = NoReferencesBot(gen, **options)
bot.run()
return True
else:
pywikibot.bot.suggest_help(missing_generator=True)
return False
if __name__ == "__main__":
main()
|
icyflame/batman
|
scripts/noreferences.py
|
Python
|
mit
| 25,133
|
import os
import module
from flask import Flask, render_template, request, session, redirect, url_for, send_from_directory
from werkzeug import secure_filename
from functools import wraps
app = Flask(__name__)
# Configure upload locations
app.config['UPLOAD_FOLDER'] = 'uploads/'
app.config['ALLOWED_EXTENSIONS'] = set(['chessley']) # Change this to whatever filetype to accept
# Checks if uploaded file is a valid file
def allowed_file(filename):
"""
Checks if 'filename' is allowed to be uploaded to the server
Params:
filename - String containing the name of the uploaded file
Returns:
True if the file is allowed, False otherwise
"""
return '.' in filename and filename.rsplit('.',1)[1] in app.config['ALLOWED_EXTENSIONS']
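# For example (given the config above): allowed_file("v1_bot.chessley") is
# True, while allowed_file("bot.py") and allowed_file("chessley") are False.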
# Wraps for login requirements on certain app.routes
def login_required(f):
"""
Python function wrapper, used on functions that require being logged in to
view. Run before a function's body is run.
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if "authenticated" not in session or not session["authenticated"] or \
"username" not in session:
session.clear()
return redirect(url_for("login"))
return f(*args, **kwargs)
return decorated_function
def redirect_if_logged_in(f):
"""
Python function wrapper, used on functions to redirect to other pages if
the user is already logged in. Run before a function's body is run.
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if "authenticated" in session and session["authenticated"]:
return redirect(url_for("profile"))
return f(*args, **kwargs)
return decorated_function
############### APPLICATION SITE ROUTES ###############
@app.route("/")
@app.route("/home")
@app.route("/home/")
@redirect_if_logged_in
def home():
return render_template("home.html")
@app.route("/login", methods=["GET","POST"])
@app.route("/login/", methods=["GET","POST"])
@redirect_if_logged_in
def login():
if request.method == "POST":
REQUIRED = ["username", "pass"]
for form_elem in REQUIRED:
if form_elem not in request.form:
return render_template("login.html")
if module.authenticate(request.form['username'], request.form['pass']):
session["authenticated"] = True
session["username"] = request.form['username']
return redirect(url_for("profile"))
return render_template("login.html")
@app.route("/logout")
@app.route("/logout/")
@login_required
def logout():
session.clear()
return redirect(url_for("login"))
@app.route("/register", methods=["POST"])
@app.route("/register/", methods=["POST"])
@redirect_if_logged_in
def register():
REQUIRED = ["username", "pass", "pass2"]
for form_elem in REQUIRED:
if form_elem not in request.form:
return redirect(url_for("home"))
if request.form["pass"] != request.form["pass2"]:
return redirect(url_for("home"))
if module.newUser(request.form["username"], request.form["pass"]):
session['authenticated'] = True
session['username'] = request.form['username']
return redirect(url_for("profile"))
else:
return redirect(url_for("home"))
@app.route("/about")
@app.route("/about/")
def about():
LOGGED_IN = "authenticated" in session and session["authenticated"]
return render_template("about.html", AUTH=LOGGED_IN)
@app.route("/download", methods=["GET", "POST"])
@app.route("/download/", methods=["GET", "POST"])
@login_required
def download():
return render_template('download.html', USERNAME=session['username']) # For when the Jinja is configured
@app.route("/upload", methods=["GET","POST"])
@app.route("/upload/", methods=["GET","POST"])
@login_required
def upload():
if request.method == "POST":
file = request.files["upload_bot"]
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename + session["username"] + "_bot.chessley"))
return render_template("upload.html")
@app.route("/leaderboards", methods=["GET", "POST"])
@app.route("/leaderboards/", methods=["GET", "POST"])
def leaderboards():
LOGGED_IN = "authenticated" in session and session["authenticated"]
table = module.getRankedUsers()
return render_template("leaderboards.html", table=table, AUTH=LOGGED_IN)
@app.route("/profile", methods=["GET","POST"])
@app.route("/profile/", methods=["GET","POST"])
@login_required
def profile():
if 'username' in session and session['username']!=0:
#retrieve user data here
dict = module.getUser(session['username'])
#dict = {"rank":1,"elo":1400,"wins":100,"losses":50,"stalemates":0}
return render_template("profile.html", USERNAME=session['username'], DICT=dict)
return render_template("home.html")
app.secret_key = str(os.urandom(24))
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0", port=5000)
|
elc1798/chessley-tan
|
app.py
|
Python
|
mit
| 5,117
|
"""
* Created by Synerty Pty Ltd
*
* This software is open source, the MIT license applies.
*
* Website : http://www.synerty.com
* Support : support@synerty.com
"""
RPC_PRIORITY = 10
DEFAULT_PRIORITY = 100
|
Synerty/vortexpy
|
vortex/PayloadPriority.py
|
Python
|
mit
| 213
|
from collections.abc import Sequence
from numbers import Number
from . import Validator, Length, Range, Instance
from .compound import All
class Latitude(All):
"""Validate the given value as a number between -90 and +90 in decimal degrees, representing latitude."""
validators = [
Instance(Number),
Range(-90, 90)
]
latitude = Latitude()
class Longitude(All):
"""Validate the given value as a number between -180 and +180 in decimal degrees, representing longitude."""
validators = [
Instance(Number),
Range(-180, 180)
]
longitude = Longitude()
class Position(All):
"""Validate the given value as any sequence of exactly two elements representing latitude and longitude."""
validators = [
Instance(Sequence),
Length(slice(2, 3)) # exactly two elements long
]
def validate(self, value, context=None):
value = super().validate(value, context)
_lat, _long = value
latitude.validate(_lat)
longitude.validate(_long)
return value
position = Position()
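# A hedged usage sketch: the module-level instances above validate values
# directly; the error type raised on failure is whatever marrow.schema's
# validators use, so only passing calls are shown here.
#
#   latitude.validate(51.5)            # OK: within [-90, 90]
#   position.validate((51.5, -0.12))   # OK: two-element (lat, long) pair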
|
marrow/schema
|
marrow/schema/validate/geo.py
|
Python
|
mit
| 1,013
|
import _plotly_utils.basevalidators
class LegendrankValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="legendrank", parent_name="scatter", **kwargs):
super(LegendrankValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scatter/_legendrank.py
|
Python
|
mit
| 406
|
from havocbot.common import catch_exceptions
from havocbot.plugin import HavocBotPlugin
import imp
import logging
import os
logger = logging.getLogger(__name__)
class StatefulPlugin:
def __init__(self, havocbot, name, path):
self.path = path
self.name = name
self.handler = None
self.is_validated = False
self.init(havocbot)
# Load a havocbot plugin
# The handler is set to the exported class instance of the plugin
@catch_exceptions
def init(self, havocbot):
# Get the settings bundle for the plugin
plugin_settings = havocbot.get_settings_for_plugin(self.name)
# Look for any dependencies listed in the settings bundle
dependencies_string = next((obj[1] for obj in plugin_settings if obj[0] == 'dependencies'), None)
if dependencies_string is not None:
# Do dependency work
if did_process_dependencies_for_plugin(self.name, dependencies_string, havocbot) is True:
self.load_plugin(plugin_settings, havocbot)
else:
self.load_plugin(plugin_settings, havocbot)
def load_plugin(self, plugin_settings, havocbot):
try:
plugin = imp.load_source(self.name, self.path)
self.handler = plugin.havocbot_handler
# Check if plugin is valid. Returns a tuple of format (True/False, None/'error message string')
result_tuple = self.handler.is_valid()
if result_tuple[0] is True:
logger.debug("%s plugin passed validation" % self.name)
# Call the init method in the plugin
self.handler.init(havocbot)
if self.handler.configure(plugin_settings):
logger.debug("%s was configured successfully. Registering plugin triggers" % self.name)
# Register the triggers for the plugin
havocbot.register_triggers(self.handler.plugin_triggers)
# Confirm that the plugin has now been validated
self.is_validated = True
else:
logger.error("%s was unable to be configured. Check your settings and try again" % self.name)
else:
logger.error("%s plugin failed validation and was not loaded - %s" % (self.name, result_tuple[1]))
except ImportError as e:
logger.error("%s plugin failed to import. Install any third party dependencies and try again - %s" % (
self.name, e))
# Determines if the object at a path is a havocbot plugin
@staticmethod
def is_havocbot_file(path):
if not path.endswith(".py"):
return False
f = open(path, "rb")
data = f.read(16)
f.close()
if data.startswith(b"#!/havocbot"):
return True
return False
def did_process_dependencies_for_plugin(plugin_name, dependencies_string, havocbot):
result = False
if dependencies_string is not None:
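        # Expected format, inferred from the parsing below: a comma-separated
        # list of "name:version_spec" pairs, e.g. "requests:==2.9.1,six:>=1.10"
        # (the example module names are illustrative only).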
dependency_tuple_list = [(x[0], x[1]) for x in (x.split(':') for x in dependencies_string.split(','))]
if dependency_tuple_list is not None and dependency_tuple_list:
dependencies_formatted = ', '.join("%s (%s)" % (t[0], t[1]) for t in dependency_tuple_list)
logger.info("%s plugin requires third party dependencies prior to startup - %s" % (
plugin_name, dependencies_formatted))
# Get setting from havocbot
plugins_can_install_modules = havocbot.get_havocbot_setting_by_name('plugins_can_install_modules')
if plugins_can_install_modules.lower() == 'true':
logger.info("global setting 'plugins_can_install_modules' is set to True. "
"Installing plugin dependencies")
result = install_dependencies(plugin_name, dependency_tuple_list, havocbot)
else:
result = True
return result
def install_dependencies(plugin_name, dependency_tuple_list, havocbot):
if dependency_tuple_list is not None and dependency_tuple_list:
import pip
arg_list = ['install']
for (pip_module_name, pip_module_version) in dependency_tuple_list:
arg_list.append("%s%s" % (pip_module_name, pip_module_version))
try:
return_code = pip.main(arg_list)
# Fix for pip leaking root handlers. See https://github.com/pypa/pip/issues/3043
havocbot.reset_logging()
logger.info("install_dependencies - return_code is '%s'" % return_code)
if return_code == 0:
logger.debug("%s plugin dependencies installed successfully or requirements already satisfied" %
plugin_name)
return True
else:
# logger.error(output.splitlines()[-1].decode('ascii')) # decode for python3 compatibility
logger.error("%s plugin dependencies were unable to be installed" % plugin_name)
# Catch pip not being installed
except OSError as e:
logger.error("Is pip installed? Unable to install plugin dependencies - %s" % e)
return False
return False
# Load a plugin by name
def load_plugin(havocbot, name, path):
if StatefulPlugin.is_havocbot_file(path):
logger.debug("%s is a havocbot file and passed first round of validation" % name)
return StatefulPlugin(havocbot, name, path)
return None
# Load all core plugins and attaches them to the bot
def load_plugins_core(havocbot):
# Trigger shutdown of any running plugins
unload_plugins_of_type(havocbot, 'core')
return load_plugins_of_type(havocbot, 'core')
# Load all custom plugins and attaches them to the bot
def load_plugins_custom(havocbot):
# Trigger shutdown of any running plugins
unload_plugins_of_type(havocbot, 'custom')
return load_plugins_of_type(havocbot, 'custom')
# Load plugins of a type
def load_plugins_of_type(havocbot, plugin_type):
plugins = []
if plugin_type == "core":
# Must load through pkg_resources since the bot may have been setup through pip and
# full filepaths for resources may not exist
# http://peak.telecommunity.com/DevCenter/PkgResources#resource-extraction
import pkg_resources
core_package = 'havocbot.core'
resources_list = pkg_resources.resource_listdir(core_package, '')
for f in resources_list:
# Remove file extension
name, ext = os.path.splitext(f)
if ext == '.py':
# TODO - Optimize this. resource_filename is slow
resource_filename = pkg_resources.resource_filename(core_package, f)
plugin = load_plugin(havocbot, name, resource_filename)
if plugin and isinstance(plugin.handler, HavocBotPlugin) and plugin.is_validated is True:
logger.info("%s core plugin loaded" % name)
plugins.append(plugin)
elif plugin_type == "custom":
for listing in havocbot.plugin_dirs:
folder = os.path.abspath(listing)
if os.path.isdir(folder):
for f in os.listdir(folder):
fpath = os.path.join(folder, f)
# Remove file extension
body, ext = os.path.splitext(f)
plugin = load_plugin(havocbot, body, fpath)
if plugin and isinstance(plugin.handler, HavocBotPlugin) and plugin.is_validated is True:
logger.info("%s custom plugin loaded" % body)
plugins.append(plugin)
else:
logger.error("Plugin directory '%s' was listed in the settings file but does not exist" % listing)
return plugins
# Triggers the shutdown method in all loaded plugins
def unload_plugins_of_type(havocbot, plugin_type):
if plugin_type == "core":
if havocbot.plugins_core is not None:
for plugin in havocbot.plugins_core:
# Unregister the triggers set for the plugin
havocbot.unregister_triggers(plugin.handler.plugin_triggers)
plugin.handler.shutdown()
elif plugin_type == "custom":
if havocbot.plugins_custom is not None:
for plugin in havocbot.plugins_custom:
# Unregister the triggers set for the plugin
havocbot.unregister_triggers(plugin.handler.plugin_triggers)
plugin.handler.shutdown()
else:
return
|
markperdue/havocbot
|
src/havocbot/pluginmanager.py
|
Python
|
mit
| 8,630
|
from django.core.management.base import BaseCommand
from stream_analysis.utils import cleanup
class Command(BaseCommand):
"""
Removes streaming data we no longer need.
"""
help = "Removes streaming data we no longer need."
def handle(self, *args, **options):
cleanup()
|
michaelbrooks/django-stream-analysis
|
stream_analysis/management/commands/cleanup_streams.py
|
Python
|
mit
| 300
|
# -*- coding: utf8 -*-
"""
@author: Matthias Feys (matthiasfeys@gmail.com)
@date: %(date)
"""
import theano
class Layer(object):
    def __init__(self, name, params=None):
        self.name = name
        self.params = []
        if params is not None:
            self.setParams(params=params.__dict__.get(name))
        else:
            self.initParams()
    def __getstate__(self):
        params = {}
        for param in self.params:
            params[param.name] = param.get_value()
        return params
    def setParams(self, params):
        # Strip the "_<layer name>" suffix from each stored parameter name and
        # rebuild the corresponding theano shared variable on this instance.
        for pname, param in params.__dict__.iteritems():
            self.__dict__[pname[:-(len(self.name) + 1)]] = theano.shared(
                param, name=pname[:-(len(self.name) + 1)] + '_' + self.name, borrow=True)
    def initParams(self):
        raise NotImplementedError
class Network():
def __getstate__(self):
return dict([(layer.name,layer) for layer in self.layers])
|
codeAshu/nn_ner
|
nn/interfaces.py
|
Python
|
mit
| 939
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
def xavier_init(fan_in, fan_out, constant=1):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)
def xavier_std(fan_in, fan_out, constant=1):
return constant * math.sqrt(3.0 / (fan_in + fan_out))
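# A minimal usage sketch (an addition; the 784 and 256 fan sizes are
# illustrative): creating a weight variable with the Xavier-uniform draw
# above, using the TF1-style API this module is written against.
#
#   W = tf.Variable(xavier_init(784, 256))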
|
stefanwebb/tensorflow-models
|
tensorflow_models/initializations.py
|
Python
|
mit
| 1,768
|
"""Basic contact management functions.
Contacts are linked to monitors and are used to determine where to send
alerts for monitors.
Contacts are basic name/email/phone sets.
Contacts are only stored in the database and not in memory, they are loaded
from the database each time an alert is sent.
"""
from typing import Dict, Iterable, Optional, Any, Set
from irisett.sql import DBConnection, Cursor
from irisett import (
errors,
object_models,
)
from irisett.object_exists import (
contact_exists,
active_monitor_exists,
contact_group_exists,
)
async def create_contact(dbcon: DBConnection, name: Optional[str], email: Optional[str],
phone: Optional[str], active: bool) -> str:
"""Add a contact to the database."""
q = """insert into contacts (name, email, phone, active) values (%s, %s, %s, %s)"""
q_args = (name, email, phone, active)
contact_id = await dbcon.operation(q, q_args)
return contact_id
async def update_contact(dbcon: DBConnection, contact_id: int, data: Dict[str, str]) -> None:
"""Update a contacts information in the database.
Data is a dict with name/email/phone/active values that
will be updated.
"""
async def _run(cur: Cursor) -> None:
for key, value in data.items():
if key not in ['name', 'email', 'phone', 'active']:
raise errors.IrisettError('invalid contact key %s' % key)
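            # Interpolating `key` into the query is safe only because it was
            # whitelisted against the fixed column names above.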
q = """update contacts set %s=%%s where id=%%s""" % key
q_args = (value, contact_id)
await cur.execute(q, q_args)
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
await dbcon.transact(_run)
async def delete_contact(dbcon: DBConnection, contact_id: int) -> None:
"""Remove a contact from the database."""
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
q = """delete from contacts where id=%s"""
await dbcon.operation(q, (contact_id,))
async def create_contact_group(dbcon: DBConnection, name: str, active: bool) -> str:
"""Add a contact group to the database."""
q = """insert into contact_groups (name, active) values (%s, %s)"""
q_args = (name, active)
contact_group_id = await dbcon.operation(q, q_args)
return contact_group_id
async def update_contact_group(dbcon: DBConnection, contact_group_id: int, data: Dict[str, str]) -> None:
"""Update a contact groups information in the database.
Data is a dict with name/active values that will be updated.
"""
async def _run(cur: Cursor) -> None:
for key, value in data.items():
if key not in ['name', 'active']:
raise errors.IrisettError('invalid contact key %s' % key)
q = """update contact_groups set %s=%%s where id=%%s""" % key
q_args = (value, contact_group_id)
await cur.execute(q, q_args)
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
await dbcon.transact(_run)
async def delete_contact_group(dbcon: DBConnection, contact_group_id: int) -> None:
"""Remove a contact group from the database."""
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
q = """delete from contact_groups where id=%s"""
await dbcon.operation(q, (contact_group_id,))
async def get_all_contacts_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.Contact]:
"""Get a list of all contacts for an active monitor.
This includes directly attached contacts, contacts from contact groups,
monitor groups etc.
"""
contacts = set()
contacts.update(await _active_monitor_contacts(dbcon, monitor_id))
contacts.update(await _active_monitor_contact_groups(dbcon, monitor_id))
contacts.update(await _active_monitor_monitor_group_contacts(dbcon, monitor_id))
contacts.update(await _active_monitor_monitor_group_contact_groups(dbcon, monitor_id))
return list(contacts)
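# A hedged usage sketch (assumes an initialized DBConnection and an existing
# monitor id; both values below are placeholders):
#
#   contacts = await get_all_contacts_for_active_monitor(dbcon, 42)
#   emails = {c.email for c in contacts if c.email}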
async def _active_monitor_contacts(dbcon: DBConnection, monitor_id: int) -> Set[object_models.Contact]:
# Get contacts directly connected to the monitor.
q = """select
contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from active_monitor_contacts, contacts
where active_monitor_contacts.active_monitor_id = %s
and active_monitor_contacts.contact_id = contacts.id
and contacts.active = true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def _active_monitor_contact_groups(dbcon: DBConnection, monitor_id: int) -> Set[object_models.Contact]:
# Get contacts connected to the monitor via a contact group.
q = """select contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from active_monitor_contact_groups, contact_groups, contact_group_contacts, contacts
where active_monitor_contact_groups.active_monitor_id = %s
and active_monitor_contact_groups.contact_group_id = contact_groups.id
and contact_groups.active = true
and contact_groups.id = contact_group_contacts.contact_group_id
and contact_group_contacts.contact_id = contacts.id
and contacts.active = true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def _active_monitor_monitor_group_contacts(dbcon: DBConnection, monitor_id: int) -> Set[object_models.Contact]:
# Get contacts connected to the monitor via monitor group -> contacts
q = """select contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from monitor_group_active_monitors
left join monitor_groups on monitor_group_active_monitors.monitor_group_id=monitor_groups.id
left join monitor_group_contacts on monitor_group_contacts.monitor_group_id=monitor_groups.id
left join contacts on contacts.id=monitor_group_contacts.contact_id
where monitor_group_active_monitors.active_monitor_id=%s and contacts.active = true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def _active_monitor_monitor_group_contact_groups(
dbcon: DBConnection, monitor_id: int) -> Set[object_models.Contact]:
# Get contacts connected to the monitor via monitor group -> contact group -> contacts
q = """select contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from monitor_group_active_monitors
left join monitor_groups on monitor_group_active_monitors.monitor_group_id=monitor_groups.id
left join monitor_group_contact_groups on monitor_group_contact_groups.monitor_group_id=monitor_groups.id
left join contact_groups on contact_groups.id=monitor_group_contact_groups.contact_group_id
left join contact_group_contacts on contact_group_contacts.contact_group_id=contact_groups.id
left join contacts on contacts.id=contact_group_contacts.contact_id
where monitor_group_active_monitors.active_monitor_id=%s
and contact_groups.active=true
and contacts.active=true"""
return {object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))}
async def get_contact_dict_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Dict[str, set]:
"""Get all contact addresses/numbers for a specific active monitor.
Return: Dict[str, Set(str)] for 'email' and 'phone'.
"""
ret = {
'email': set(),
'phone': set(),
} # type: Dict[str, set]
contacts = await get_all_contacts_for_active_monitor(dbcon, monitor_id)
for contact in contacts:
if contact.email:
ret['email'].add(contact.email)
if contact.phone:
ret['phone'].add(contact.phone)
return ret
async def add_contact_to_active_monitor(dbcon: DBConnection, contact_id: int, monitor_id: int) -> None:
"""Connect a contact and an active monitor."""
if not await active_monitor_exists(dbcon, monitor_id):
raise errors.InvalidArguments('monitor does not exist')
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
q = """replace into active_monitor_contacts (active_monitor_id, contact_id) values (%s, %s)"""
q_args = (monitor_id, contact_id)
await dbcon.operation(q, q_args)
async def delete_contact_from_active_monitor(dbcon: DBConnection, contact_id: int, monitor_id: int) -> None:
"""Disconnect a contact and an active monitor."""
q = """delete from active_monitor_contacts where active_monitor_id=%s and contact_id=%s"""
q_args = (monitor_id, contact_id)
await dbcon.operation(q, q_args)
async def set_active_monitor_contacts(dbcon: DBConnection,
                                      contact_ids: Iterable[int], monitor_id: int) -> None:
"""(Re-)set contacts for an active monitor.
Delete existing contacts for an active monitor and set the given new
contacts.
"""
async def _run(cur: Cursor) -> None:
q = """delete from active_monitor_contacts where active_monitor_id=%s"""
await cur.execute(q, (monitor_id,))
for contact_id in contact_ids:
q = """insert into active_monitor_contacts (active_monitor_id, contact_id) values (%s, %s)"""
q_args = (monitor_id, contact_id)
await cur.execute(q, q_args)
if not await active_monitor_exists(dbcon, monitor_id):
raise errors.InvalidArguments('monitor does not exist')
await dbcon.transact(_run)
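# Editor's note (hedged): the delete-then-insert reset above runs as a single
# transaction via dbcon.transact(), so concurrent readers never observe a monitor
# stripped of its contacts mid-update; the same pattern repeats for contact groups
# and contact group contacts below.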
async def get_contacts_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.Contact]:
"""Get contacts for an active monitor.
    Return a list of Contact objects, one per directly connected contact.
"""
q = """select
contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from active_monitor_contacts, contacts
where active_monitor_contacts.active_monitor_id = %s
and active_monitor_contacts.contact_id = contacts.id"""
contacts = [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))]
return contacts
async def add_contact_group_to_active_monitor(dbcon: DBConnection, contact_group_id: int, monitor_id: int) -> None:
"""Connect a contact group and an active monitor."""
if not await active_monitor_exists(dbcon, monitor_id):
raise errors.InvalidArguments('monitor does not exist')
if not await contact_group_exists(dbcon, contact_group_id):
        raise errors.InvalidArguments('contact group does not exist')
q = """replace into active_monitor_contact_groups (active_monitor_id, contact_group_id) values (%s, %s)"""
q_args = (monitor_id, contact_group_id)
await dbcon.operation(q, q_args)
async def delete_contact_group_from_active_monitor(dbcon: DBConnection, contact_group_id: int, monitor_id: int) -> None:
"""Disconnect a contact group and an active monitor."""
q = """delete from active_monitor_contact_groups where active_monitor_id=%s and contact_group_id=%s"""
q_args = (monitor_id, contact_group_id)
await dbcon.operation(q, q_args)
async def set_active_monitor_contact_groups(dbcon: DBConnection,
contact_group_ids: Iterable[int], monitor_id: int) -> None:
"""(Re-)set contact_groups for an active monitor.
Delete existing contact groups for an active monitor and set the given new
contact groups.
"""
async def _run(cur: Cursor) -> None:
q = """delete from active_monitor_contact_groups where active_monitor_id=%s"""
await cur.execute(q, (monitor_id,))
for contact_group_id in contact_group_ids:
q = """insert into active_monitor_contact_groups (active_monitor_id, contact_group_id) values (%s, %s)"""
q_args = (monitor_id, contact_group_id)
await cur.execute(q, q_args)
if not await active_monitor_exists(dbcon, monitor_id):
raise errors.InvalidArguments('monitor does not exist')
await dbcon.transact(_run)
async def get_contact_groups_for_active_monitor(
dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.ContactGroup]:
"""Get contact groups for an active monitor."""
q = """select
contact_groups.id, contact_groups.name, contact_groups.active
from active_monitor_contact_groups, contact_groups
where active_monitor_contact_groups.active_monitor_id = %s
and active_monitor_contact_groups.contact_group_id = contact_groups.id"""
return [object_models.ContactGroup(*row) for row in await dbcon.fetch_all(q, (monitor_id,))]
async def get_all_contacts(dbcon: DBConnection) -> Iterable[object_models.Contact]:
"""Get all contacts"""
q = """select id, name, email, phone, active from contacts"""
return [object_models.Contact(*row) for row in await dbcon.fetch_all(q)]
async def get_contact(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.
"""Get a single contact if it exists."""
q = """select id, name, email, phone, active from contacts where id=%s"""
q_args = (id,)
row = await dbcon.fetch_row(q, q_args)
contact = None
if row:
contact = object_models.Contact(*row)
return contact
async def get_contacts_for_metadata(
dbcon: DBConnection, meta_key: str, meta_value: str) -> Iterable[object_models.Contact]:
q = """select c.id, c.name, c.email, c.phone, c.active
from contacts as c, object_metadata as meta
where meta.key=%s and meta.value=%s and meta.object_type="contact" and meta.object_id=c.id"""
q_args = (meta_key, meta_value)
return [object_models.Contact(*row) for row in await dbcon.fetch_all(q, q_args)]
async def add_contact_to_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:
"""Connect a contact and a contact group."""
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
if not await contact_exists(dbcon, contact_id):
raise errors.InvalidArguments('contact does not exist')
q = """replace into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)"""
q_args = (contact_group_id, contact_id)
await dbcon.operation(q, q_args)
async def delete_contact_from_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:
"""Disconnect a contact and a contact_group."""
q = """delete from contact_group_contacts where contact_group_id=%s and contact_id=%s"""
q_args = (contact_group_id, contact_id)
await dbcon.operation(q, q_args)
async def set_contact_group_contacts(dbcon: DBConnection,
contact_group_id: int, contact_ids: Iterable[int]) -> None:
"""(Re-)set contacts for a contact group.
Delete existing contacts for a contact group and set the given new
contacts.
"""
async def _run(cur: Cursor) -> None:
q = """delete from contact_group_contacts where contact_group_id=%s"""
await cur.execute(q, (contact_group_id,))
for contact_id in contact_ids:
q = """insert into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)"""
q_args = (contact_group_id, contact_id)
await cur.execute(q, q_args)
if not await contact_group_exists(dbcon, contact_group_id):
raise errors.InvalidArguments('contact group does not exist')
await dbcon.transact(_run)
async def get_contacts_for_contact_group(dbcon: DBConnection, contact_group_id: int) -> Iterable[object_models.Contact]:
"""Get contacts for a contact group."""
q = """select
contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
from contact_group_contacts, contacts
where contact_group_contacts.contact_group_id = %s
and contact_group_contacts.contact_id = contacts.id"""
return [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (contact_group_id,))]
async def get_all_contact_groups(dbcon: DBConnection) -> Iterable[object_models.ContactGroup]:
q = """select id, name, active from contact_groups"""
contact_groups = [object_models.ContactGroup(*row) for row in await dbcon.fetch_all(q)]
return contact_groups
async def get_contact_group(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.
"""Get a single contact if it exists.
Return a list of dicts, one dict describing each contacts information.
"""
q = """select id, name, active from contact_groups where id=%s"""
row = await dbcon.fetch_row(q, (id,))
contact = None
if row:
contact = object_models.ContactGroup(*row)
return contact
async def get_contact_groups_for_metadata(
dbcon: DBConnection, meta_key: str, meta_value: str) -> Iterable[object_models.ContactGroup]:
q = """select cg.id, cg.name, cg.active
from contact_groups as cg, object_metadata as meta
where meta.key=%s and meta.value=%s and meta.object_type="contact_group" and meta.object_id=cg.id"""
q_args = (meta_key, meta_value)
return [object_models.ContactGroup(*row) for row in await dbcon.fetch_all(q, q_args)]
|
beebyte/irisett
|
irisett/contact.py
|
Python
|
mit
| 17,565
|
#!/usr/bin/env python
# bootstrap.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 1.0.0-alpha6
from app import db
# create all tables defined in the application's models
db.create_all()
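# Editor's note (hedged, not in the original): newer Flask-SQLAlchemy releases
# require an application context around create_all(); under that assumption the
# call becomes:
#     with app.app_context():
#         db.create_all()
# where `app` is the Flask instance that `db` was initialized with.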
|
ecsnavarretemit/sarai-interactive-maps-backend
|
bootstrap.py
|
Python
|
mit
| 251
|
#!/usr/bin/env python
import os
import numpy as np
from cereal import car
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot
from selfdrive.swaglog import cloudlog
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET, get_events
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.honda.carstate import CarState, get_can_parser
from selfdrive.car.honda.values import CruiseButtons, CM, BP, AH, CAR, HONDA_BOSCH
from selfdrive.controls.lib.planner import A_ACC_MAX
try:
from selfdrive.car.honda.carcontroller import CarController
except ImportError:
CarController = None
# Messages sent for the steering controller by the camera module on CAN 0.
# These messages are mutually exclusive on CRV and non-CRV cars.
CAMERA_MSGS = [0xe4, 0x194]
def compute_gb_honda(accel, speed):
creep_brake = 0.0
creep_speed = 2.3
creep_brake_value = 0.15
if speed < creep_speed:
creep_brake = (creep_speed - speed) / creep_speed * creep_brake_value
return float(accel) / 4.8 - creep_brake
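# Editor's worked example (illustrative, not in the original): below creep_speed the
# creep brake ramps linearly from creep_brake_value at standstill down to zero:
#   compute_gb_honda(0.0, 0.0) -> 0.0 / 4.8 - 0.15 = -0.15  (full creep brake)
#   compute_gb_honda(0.0, 2.3) -> 0.0                       (no creep brake)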
def get_compute_gb_acura():
# generate a function that takes in [desired_accel, current_speed] -> [-1.0, 1.0]
# where -1.0 is max brake and 1.0 is max gas
# see debug/dump_accel_from_fiber.py to see how those parameters were generated
w0 = np.array([[ 1.22056961, -0.39625418, 0.67952657],
[ 1.03691769, 0.78210306, -0.41343188]])
b0 = np.array([ 0.01536703, -0.14335321, -0.26932889])
w2 = np.array([[-0.59124422, 0.42899439, 0.38660881],
[ 0.79973811, 0.13178682, 0.08550351],
[-0.15651935, -0.44360259, 0.76910877]])
b2 = np.array([ 0.15624429, 0.02294923, -0.0341086 ])
w4 = np.array([[-0.31521443],
[-0.38626176],
[ 0.52667892]])
b4 = np.array([-0.02922216])
def compute_output(dat, w0, b0, w2, b2, w4, b4):
m0 = np.dot(dat, w0) + b0
m0 = leakyrelu(m0, 0.1)
m2 = np.dot(m0, w2) + b2
m2 = leakyrelu(m2, 0.1)
m4 = np.dot(m2, w4) + b4
return m4
def leakyrelu(x, alpha):
return np.maximum(x, alpha * x)
def _compute_gb_acura(accel, speed):
    # linearly extrapolate below v1 using v1 and v2 data
v1 = 5.
v2 = 10.
dat = np.array([accel, speed])
if speed > 5.:
m4 = compute_output(dat, w0, b0, w2, b2, w4, b4)
else:
dat[1] = v1
m4v1 = compute_output(dat, w0, b0, w2, b2, w4, b4)
dat[1] = v2
m4v2 = compute_output(dat, w0, b0, w2, b2, w4, b4)
m4 = (speed - v1) * (m4v2 - m4v1) / (v2 - v1) + m4v1
return float(m4)
return _compute_gb_acura
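# Editor's sketch (illustrative, not in the original): the closure returned above is
# a small fully-connected net (2 -> 3 -> 3 -> 1) with leaky-ReLU hidden activations;
# calling it maps two scalars to one scalar:
def _example_compute_gb_acura_call():
  compute_gb = get_compute_gb_acura()
  return compute_gb(0.5, 12.0)  # desired accel [m/s^2], speed [m/s] -> gas/brake in [-1, 1]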
class CarInterface(object):
def __init__(self, CP, sendcan=None):
self.CP = CP
self.frame = 0
self.last_enable_pressed = 0
self.last_enable_sent = 0
self.gas_pressed_prev = False
self.brake_pressed_prev = False
self.can_invalid_count = 0
self.cp = get_can_parser(CP)
# *** init the major players ***
self.CS = CarState(CP)
self.VM = VehicleModel(CP)
# sending if read only is False
if sendcan is not None:
self.sendcan = sendcan
self.CC = CarController(self.cp.dbc_name, CP.enableCamera)
if self.CS.CP.carFingerprint == CAR.ACURA_ILX:
self.compute_gb = get_compute_gb_acura()
else:
self.compute_gb = compute_gb_honda
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
# limit the pcm accel cmd if:
# - v_ego exceeds v_target, or
# - a_ego exceeds a_target and v_ego is close to v_target
eA = a_ego - a_target
valuesA = [1.0, 0.1]
bpA = [0.3, 1.1]
eV = v_ego - v_target
valuesV = [1.0, 0.1]
bpV = [0.0, 0.5]
valuesRangeV = [1., 0.]
bpRangeV = [-1., 0.]
# only limit if v_ego is close to v_target
speedLimiter = interp(eV, bpV, valuesV)
accelLimiter = max(interp(eA, bpA, valuesA), interp(eV, bpRangeV, valuesRangeV))
# accelOverride is more or less the max throttle allowed to pcm: usually set to a constant
    # unless aTargetMax is very high and then we scale with it; this helps with a quicker restart
return float(max(0.714, a_target / A_ACC_MAX)) * min(speedLimiter, accelLimiter)
@staticmethod
def get_params(candidate, fingerprint):
ret = car.CarParams.new_message()
ret.carName = "honda"
ret.carFingerprint = candidate
if candidate in HONDA_BOSCH:
ret.safetyModel = car.CarParams.SafetyModels.hondaBosch
ret.enableCamera = True
ret.radarOffCan = True
else:
ret.safetyModel = car.CarParams.SafetyModels.honda
ret.enableCamera = not any(x for x in CAMERA_MSGS if x in fingerprint)
ret.enableGasInterceptor = 0x201 in fingerprint
cloudlog.warn("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warn("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
ret.enableCruise = not ret.enableGasInterceptor
    # kg of standard extra cargo to account for driver, gas, etc.
std_cargo = 136
# FIXME: hardcoding honda civic 2016 touring params so they can be used to
# scale unknown params for other cars
mass_civic = 2923 * CV.LB_TO_KG + std_cargo
wheelbase_civic = 2.70
centerToFront_civic = wheelbase_civic * 0.4
centerToRear_civic = wheelbase_civic - centerToFront_civic
rotationalInertia_civic = 2500
tireStiffnessFront_civic = 192150
tireStiffnessRear_civic = 202500
# Optimized car params: tire_stiffness_factor and steerRatio are a result of a vehicle
# model optimization process. Certain Hondas have an extra steering sensor at the bottom
# of the steering rack, which improves controls quality as it removes the steering column
# torsion from feedback.
    # The tire stiffness factor is fictitiously lower when it includes the steering column torsion effect.
# For modeling details, see p.198-200 in "The Science of Vehicle Dynamics (2014), M. Guiggiani"
ret.steerKiBP, ret.steerKpBP = [[0.], [0.]]
ret.steerKf = 0.00006 # conservative feed-forward
if candidate == CAR.CIVIC:
stop_and_go = True
ret.mass = mass_civic
ret.wheelbase = wheelbase_civic
ret.centerToFront = centerToFront_civic
ret.steerRatio = 14.63 # 10.93 is end-to-end spec
tire_stiffness_factor = 1.
# Civic at comma has modified steering FW, so different tuning for the Neo in that car
is_fw_modified = os.getenv("DONGLE_ID") in ['99c94dc769b5d96e']
ret.steerKpV, ret.steerKiV = [[0.33], [0.10]] if is_fw_modified else [[0.8], [0.24]]
if is_fw_modified:
ret.steerKf = 0.00003
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [3.6, 2.4, 1.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.54, 0.36]
elif candidate == CAR.CIVIC_HATCH:
stop_and_go = True
ret.mass = 2916. * CV.LB_TO_KG + std_cargo
ret.wheelbase = wheelbase_civic
ret.centerToFront = centerToFront_civic
ret.steerRatio = 14.63 # 10.93 is spec end-to-end
tire_stiffness_factor = 1.
ret.steerKpV, ret.steerKiV = [[0.8], [0.24]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.ACCORD:
stop_and_go = True
ret.safetyParam = 1 # Accord and CRV 5G use an alternate user brake msg
ret.mass = 3279. * CV.LB_TO_KG + std_cargo
ret.wheelbase = 2.83
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 15.96 # 11.82 is spec end-to-end
tire_stiffness_factor = 0.8467
ret.steerKpV, ret.steerKiV = [[0.6], [0.18]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.ACURA_ILX:
stop_and_go = False
ret.mass = 3095 * CV.LB_TO_KG + std_cargo
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.37
ret.steerRatio = 18.61 # 15.3 is spec end-to-end
tire_stiffness_factor = 0.72
# Acura at comma has modified steering FW, so different tuning for the Neo in that car
is_fw_modified = os.getenv("DONGLE_ID") in ['ff83f397542ab647']
ret.steerKpV, ret.steerKiV = [[0.45], [0.00]] if is_fw_modified else [[0.8], [0.24]]
if is_fw_modified:
ret.steerKf = 0.00003
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.CRV:
stop_and_go = False
ret.mass = 3572 * CV.LB_TO_KG + std_cargo
ret.wheelbase = 2.62
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.3 # as spec
tire_stiffness_factor = 0.444 # not optimized yet
ret.steerKpV, ret.steerKiV = [[0.8], [0.24]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.CRV_5G:
stop_and_go = True
ret.safetyParam = 1 # Accord and CRV 5G use an alternate user brake msg
ret.mass = 3410. * CV.LB_TO_KG + std_cargo
ret.wheelbase = 2.66
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0 # 12.3 is spec end-to-end
tire_stiffness_factor = 0.677
ret.steerKpV, ret.steerKiV = [[0.6], [0.18]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.ACURA_RDX:
stop_and_go = False
ret.mass = 3935 * CV.LB_TO_KG + std_cargo
ret.wheelbase = 2.68
ret.centerToFront = ret.wheelbase * 0.38
ret.steerRatio = 15.0 # as spec
tire_stiffness_factor = 0.444 # not optimized yet
ret.steerKpV, ret.steerKiV = [[0.8], [0.24]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.ODYSSEY:
stop_and_go = False
ret.mass = 4354 * CV.LB_TO_KG + std_cargo
ret.wheelbase = 3.00
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 14.35 # as spec
tire_stiffness_factor = 0.444 # not optimized yet
ret.steerKpV, ret.steerKiV = [[0.6], [0.18]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.PILOT:
stop_and_go = False
ret.mass = 4303 * CV.LB_TO_KG + std_cargo
ret.wheelbase = 2.81
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0 # as spec
tire_stiffness_factor = 0.444 # not optimized yet
ret.steerKpV, ret.steerKiV = [[0.38], [0.11]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.RIDGELINE:
stop_and_go = False
ret.mass = 4515 * CV.LB_TO_KG + std_cargo
ret.wheelbase = 3.18
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.59 # as spec
tire_stiffness_factor = 0.444 # not optimized yet
ret.steerKpV, ret.steerKiV = [[0.38], [0.11]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
else:
raise ValueError("unsupported car %s" % candidate)
ret.steerControlType = car.CarParams.SteerControlType.torque
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter. Otherwise, add 0.5 mph margin to not
# conflict with PCM acc
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 25.5 * CV.MPH_TO_MS
centerToRear = ret.wheelbase - ret.centerToFront
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = rotationalInertia_civic * \
ret.mass * ret.wheelbase**2 / (mass_civic * wheelbase_civic**2)
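    # Editor's worked example (illustrative): for the Accord above (~1623 kg mass,
    # 2.83 m wheelbase) this scaling gives roughly
    # 2500 * (1623 * 2.83**2) / (1462 * 2.70**2) ~= 3050 kg*m^2.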
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront = (tireStiffnessFront_civic * tire_stiffness_factor) * \
ret.mass / mass_civic * \
(centerToRear / ret.wheelbase) / (centerToRear_civic / wheelbase_civic)
ret.tireStiffnessRear = (tireStiffnessRear_civic * tire_stiffness_factor) * \
ret.mass / mass_civic * \
(ret.centerToFront / ret.wheelbase) / (centerToFront_civic / wheelbase_civic)
# no rear steering, at least on the listed cars above
ret.steerRatioRear = 0.
# no max steer limit VS speed
ret.steerMaxBP = [0.] # m/s
ret.steerMaxV = [1.] # max steer allowed
ret.gasMaxBP = [0.] # m/s
ret.gasMaxV = [0.6] if ret.enableGasInterceptor else [0.] # max gas allowed
ret.brakeMaxBP = [5., 20.] # m/s
ret.brakeMaxV = [1., 0.8] # max brake allowed
ret.longPidDeadzoneBP = [0.]
ret.longPidDeadzoneV = [0.]
ret.stoppingControl = True
ret.steerLimitAlert = True
ret.startAccel = 0.5
ret.steerActuatorDelay = 0.1
ret.steerRateCost = 0.5
return ret
# returns a car.CarState
def update(self, c):
# ******************* do can recv *******************
canMonoTimes = []
self.cp.update(int(sec_since_boot() * 1e9), False)
self.CS.update(self.cp)
# create message
ret = car.CarState.new_message()
# speeds
ret.vEgo = self.CS.v_ego
ret.aEgo = self.CS.a_ego
ret.vEgoRaw = self.CS.v_ego_raw
ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
ret.standstill = self.CS.standstill
ret.wheelSpeeds.fl = self.CS.v_wheel_fl
ret.wheelSpeeds.fr = self.CS.v_wheel_fr
ret.wheelSpeeds.rl = self.CS.v_wheel_rl
ret.wheelSpeeds.rr = self.CS.v_wheel_rr
# gas pedal
ret.gas = self.CS.car_gas / 256.0
if not self.CP.enableGasInterceptor:
ret.gasPressed = self.CS.pedal_gas > 0
else:
ret.gasPressed = self.CS.user_gas_pressed
# brake pedal
ret.brake = self.CS.user_brake
ret.brakePressed = self.CS.brake_pressed != 0
# FIXME: read sendcan for brakelights
brakelights_threshold = 0.02 if self.CS.CP.carFingerprint == CAR.CIVIC else 0.1
ret.brakeLights = bool(self.CS.brake_switch or
c.actuators.brake > brakelights_threshold)
# steering wheel
ret.steeringAngle = self.CS.angle_steers
ret.steeringRate = self.CS.angle_steers_rate
# gear shifter lever
ret.gearShifter = self.CS.gear_shifter
ret.steeringTorque = self.CS.steer_torque_driver
ret.steeringPressed = self.CS.steer_override
# cruise state
ret.cruiseState.enabled = self.CS.pcm_acc_status != 0
ret.cruiseState.speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS
ret.cruiseState.available = bool(self.CS.main_on)
ret.cruiseState.speedOffset = self.CS.cruise_speed_offset
ret.cruiseState.standstill = False
# TODO: button presses
buttonEvents = []
ret.leftBlinker = bool(self.CS.left_blinker_on)
ret.rightBlinker = bool(self.CS.right_blinker_on)
ret.doorOpen = not self.CS.door_all_closed
ret.seatbeltUnlatched = not self.CS.seatbelt
if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'leftBlinker'
be.pressed = self.CS.left_blinker_on != 0
buttonEvents.append(be)
if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'rightBlinker'
be.pressed = self.CS.right_blinker_on != 0
buttonEvents.append(be)
if self.CS.cruise_buttons != self.CS.prev_cruise_buttons:
be = car.CarState.ButtonEvent.new_message()
be.type = 'unknown'
if self.CS.cruise_buttons != 0:
be.pressed = True
but = self.CS.cruise_buttons
else:
be.pressed = False
but = self.CS.prev_cruise_buttons
if but == CruiseButtons.RES_ACCEL:
be.type = 'accelCruise'
elif but == CruiseButtons.DECEL_SET:
be.type = 'decelCruise'
elif but == CruiseButtons.CANCEL:
be.type = 'cancel'
elif but == CruiseButtons.MAIN:
be.type = 'altButton3'
buttonEvents.append(be)
if self.CS.cruise_setting != self.CS.prev_cruise_setting:
be = car.CarState.ButtonEvent.new_message()
be.type = 'unknown'
if self.CS.cruise_setting != 0:
be.pressed = True
but = self.CS.cruise_setting
else:
be.pressed = False
but = self.CS.prev_cruise_setting
if but == 1:
be.type = 'altButton1'
# TODO: more buttons?
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
# events
# TODO: I don't like the way capnp does enums
# These strings aren't checked at compile time
events = []
if not self.CS.can_valid:
self.can_invalid_count += 1
if self.can_invalid_count >= 5:
events.append(create_event('commIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
else:
self.can_invalid_count = 0
if self.CS.steer_error:
events.append(create_event('steerUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
elif self.CS.steer_warning:
events.append(create_event('steerTempUnavailable', [ET.WARNING]))
if self.CS.brake_error:
events.append(create_event('brakeUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
if not ret.gearShifter == 'drive':
events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.doorOpen:
events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.seatbeltUnlatched:
events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if self.CS.esp_disabled:
events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not self.CS.main_on:
events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gearShifter == 'reverse':
events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if self.CS.brake_hold:
events.append(create_event('brakeHold', [ET.NO_ENTRY, ET.USER_DISABLE]))
if self.CS.park_brake:
events.append(create_event('parkBrake', [ET.NO_ENTRY, ET.USER_DISABLE]))
if self.CP.enableCruise and ret.vEgo < self.CP.minEnableSpeed:
events.append(create_event('speedTooLow', [ET.NO_ENTRY]))
    # disable on the gas pedal's rising edge, or when the brake is pressed and speed isn't zero
if (ret.gasPressed and not self.gas_pressed_prev) or \
(ret.brakePressed and (not self.brake_pressed_prev or ret.vEgo > 0.001)):
events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gasPressed:
events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))
    # it can happen that the car's cruise control disables while the comma system is
    # enabled: we need to keep braking if needed, or if the speed is very low
if self.CP.enableCruise and not ret.cruiseState.enabled and c.actuators.brake <= 0.:
      # non-loud alert if cruise disables below 25 mph as expected (+ a little margin)
if ret.vEgo < self.CP.minEnableSpeed + 2.:
events.append(create_event('speedTooLow', [ET.IMMEDIATE_DISABLE]))
else:
events.append(create_event("cruiseDisabled", [ET.IMMEDIATE_DISABLE]))
if self.CS.CP.minEnableSpeed > 0 and ret.vEgo < 0.001:
events.append(create_event('manualRestart', [ET.WARNING]))
cur_time = sec_since_boot()
enable_pressed = False
# handle button presses
for b in ret.buttonEvents:
# do enable on both accel and decel buttons
if b.type in ["accelCruise", "decelCruise"] and not b.pressed:
self.last_enable_pressed = cur_time
enable_pressed = True
# do disable on button down
if b.type == "cancel" and b.pressed:
events.append(create_event('buttonCancel', [ET.USER_DISABLE]))
if self.CP.enableCruise:
      # KEEP THIS EVENT LAST! send enable event if button is pressed and there are
      # NO_ENTRY events, so controlsd will display alerts. Also don't send enable events
      # too close in time, so a no_entry will not be followed by another one.
      # TODO: button press should be the only thing that triggers enable
if ((cur_time - self.last_enable_pressed) < 0.2 and
(cur_time - self.last_enable_sent) > 0.2 and
ret.cruiseState.enabled) or \
(enable_pressed and get_events(events, [ET.NO_ENTRY])):
events.append(create_event('buttonEnable', [ET.ENABLE]))
self.last_enable_sent = cur_time
elif enable_pressed:
events.append(create_event('buttonEnable', [ET.ENABLE]))
ret.events = events
ret.canMonoTimes = canMonoTimes
# update previous brake/gas pressed
self.gas_pressed_prev = ret.gasPressed
self.brake_pressed_prev = ret.brakePressed
# cast to reader so it can't be modified
return ret.as_reader()
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
if c.hudControl.speedVisible:
hud_v_cruise = c.hudControl.setSpeed * CV.MS_TO_KPH
else:
hud_v_cruise = 255
hud_alert = {
"none": AH.NONE,
"fcw": AH.FCW,
"steerRequired": AH.STEER,
"brakePressed": AH.BRAKE_PRESSED,
"wrongGear": AH.GEAR_NOT_D,
"seatbeltUnbuckled": AH.SEATBELT,
"speedTooHigh": AH.SPEED_TOO_HIGH}[str(c.hudControl.visualAlert)]
snd_beep, snd_chime = {
"none": (BP.MUTE, CM.MUTE),
"beepSingle": (BP.SINGLE, CM.MUTE),
"beepTriple": (BP.TRIPLE, CM.MUTE),
"beepRepeated": (BP.REPEATED, CM.MUTE),
"chimeSingle": (BP.MUTE, CM.SINGLE),
"chimeDouble": (BP.MUTE, CM.DOUBLE),
"chimeRepeated": (BP.MUTE, CM.REPEATED),
"chimeContinuous": (BP.MUTE, CM.CONTINUOUS)}[str(c.hudControl.audibleAlert)]
    pcm_accel = int(clip(c.cruiseControl.accelOverride, 0, 1) * 0xc6)
    self.CC.update(self.sendcan, c.enabled, self.CS, self.frame,
                   c.actuators,
                   c.cruiseControl.speedOverride,
                   c.cruiseControl.override,
                   c.cruiseControl.cancel,
                   pcm_accel,
                   hud_v_cruise, c.hudControl.lanesVisible,
                   hud_show_car=c.hudControl.leadVisible,
                   hud_alert=hud_alert,
                   snd_beep=snd_beep,
                   snd_chime=snd_chime)
self.frame += 1
|
TheMutley/openpilot
|
selfdrive/car/honda/interface.py
|
Python
|
mit
| 23,054
|
#!/usr/bin/env python
import os
import sys
import argparse
import pat3dem.star as p3s
def main():
progname = os.path.basename(sys.argv[0])
usage = progname + """ [options] <a star file>
Write two star files after screening by an item and a cutoff in the star file.
Write one star file after screening by a file containing blacklist/whitelist (either keyword or item).
"""
args_def = {'screen':'0', 'cutoff':'00', 'sfile':'0', 'white':0}
parser = argparse.ArgumentParser()
parser.add_argument("star", nargs='*', help="specify a star file to be screened")
parser.add_argument("-s", "--screen", type=str, help="specify the item, by which the star file will be screened, by default {} (no screening). e.g., 'OriginX'".format(args_def['screen']))
parser.add_argument("-c", "--cutoff", type=str, help="specify the cutoff, by default '{}' (-s and -sf will be combined)".format(args_def['cutoff']))
parser.add_argument("-sf", "--sfile", type=str, help="specify a file containing a keyword each line, by default '{}' (no screening). e.g., 'f.txt'".format(args_def['sfile']))
parser.add_argument("-w", "--white", type=int, help="specify as 1 if you provide a whitelist in -sf".format(args_def['white']))
args = parser.parse_args()
	if len(sys.argv) == 1:
		print("usage: " + usage)
		print("Please run '" + progname + " -h' for detailed options.")
		sys.exit(1)
# get default values
for i in args_def:
if args.__dict__[i] == None:
args.__dict__[i] = args_def[i]
# preprocess -sf
if args.sfile != '0':
lines_sf = open(args.sfile).readlines()
lines_sfile = []
for line in lines_sf:
line = line.strip()
if line != '':
lines_sfile += [line]
# get the star file
star = args.star[0]
basename = os.path.basename(os.path.splitext(star)[0])
star_dict = p3s.star_parse(star, 'data_')
header = star_dict['data_'] + star_dict['loop_']
header_len = len(header)
with open(star) as read_star:
lines = read_star.readlines()[header_len:-1]
if args.screen != '0':
		# get the column number of the screening item
scn = star_dict['_rln'+args.screen]
if args.cutoff != '00':
# Name the output files
screened1 = '{}_screened_{}-gt-{}.star'.format(basename, args.screen, args.cutoff)
screened2 = '{}_screened_{}-le-{}.star'.format(basename, args.screen, args.cutoff)
write_screen1 = open(screened1, 'w')
write_screen1.write(''.join(header))
write_screen2 = open(screened2, 'w')
write_screen2.write(''.join(header))
for line in lines:
if float(line.split()[scn]) > float(args.cutoff):
write_screen1.write(line)
else:
write_screen2.write(line)
write_screen1.write(' \n')
write_screen1.close()
write_screen2.write(' \n')
write_screen2.close()
			print('The screened star files have been written in {} and {}!'.format(screened1, screened2))
elif args.sfile != '0':
with open('{}_screened.star'.format(basename), 'w') as write_screen:
write_screen.write(''.join(header))
if args.white == 0:
for line in lines:
key = line.split()[scn]
if key not in lines_sfile:
							print('Include {}.'.format(key))
write_screen.write(line)
else:
for line in lines:
key = line.split()[scn]
if key in lines_sfile:
							print('Include {}.'.format(key))
write_screen.write(line)
write_screen.write(' \n')
elif args.sfile != '0':
with open('{}_screened.star'.format(basename), 'w') as write_screen:
write_screen.write(''.join(header))
if args.white == 0:
for line in lines:
skip = 0
for key in lines_sfile:
if key in line:
skip = 1
							print('Skip {}.'.format(key))
break
if skip == 0:
write_screen.write(line)
else:
for line in lines:
for key in lines_sfile:
if key in line:
							print('Include {}.'.format(key))
write_screen.write(line)
break
write_screen.write(' \n')
if __name__ == '__main__':
main()
|
emkailu/PAT3DEM
|
bin/p3starscreen.py
|
Python
|
mit
| 3,906
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def sumEvenGrandparent(self, root: TreeNode) -> int:
total = 0
def dfs(root, p, gp):
if root is None:
return
nonlocal total
if gp and (gp.val & 1) == 0:
total += root.val
dfs(root.left, root, p)
dfs(root.right, root, p)
dfs(root, None, None)
return total
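# Editor's usage sketch (not part of the original solution): the root (6) is an
# even-valued grandparent of both leaves, so exactly those two values are summed.
if __name__ == '__main__':
    root = TreeNode(6)
    root.left = TreeNode(7)
    root.left.left = TreeNode(2)
    root.left.right = TreeNode(4)
    print(Solution().sumEvenGrandparent(root))  # expected: 2 + 4 = 6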
|
jiadaizhao/LeetCode
|
1301-1400/1315-Sum of Nodes with Even-Valued Grandparent/1315-Sum of Nodes with Even-Valued Grandparent.py
|
Python
|
mit
| 565
|
__author__ = 'xubinggui'
class Student(object):
def __init__(self, name, score):
self.name = name
self.score = score
def print_score(self):
print(self.score)
bart = Student('Bart Simpson', 59)
bart.print_score()
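# Editor's note (illustrative addition): each instance keeps its own state.
lisa = Student('Lisa Simpson', 87)
lisa.print_score()  # prints 87; bart.score is unaffected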
|
xu6148152/Binea_Python_Project
|
oop/student/student.py
|
Python
|
mit
| 246
|
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="uirevision", parent_name="heatmap", **kwargs):
super(UirevisionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
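# Editor's note (hedged): AnyValidator places no constraint on the value; for
# heatmap.uirevision, keeping the value unchanged across figure updates is what
# signals Plotly to preserve user-driven UI state such as zoom and pan.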
|
plotly/python-api
|
packages/python/plotly/plotly/validators/heatmap/_uirevision.py
|
Python
|
mit
| 447
|
import time
class RecordAccumulator(object):
def __init__(self, buffer_class, config):
self.config = config
self.buffer_time_limit = config['buffer_time_limit']
self._buffer_class = buffer_class
self._reset_buffer()
def _reset_buffer(self):
self._buffer = self._buffer_class(config=self.config)
self._buffer_started_at = None
def try_append(self, record):
"""Attempt to accumulate a record. Return False if buffer is full."""
success = self._buffer.try_append(record)
if success:
self._buffer_started_at = time.time()
return success
def is_ready(self):
"""Check whether the buffer is ready."""
if self._buffer_started_at is None:
return False
if self._buffer.is_ready():
return True
elapsed = time.time() - self._buffer_started_at
return elapsed >= self.buffer_time_limit
def has_records(self):
"""Check whether the buffer has records."""
return self._buffer_started_at is not None
def flush(self):
"""Close the buffer and return it."""
if self._buffer_started_at is None:
return
buf = self._buffer.flush()
self._reset_buffer()
return buf
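# Editor's usage sketch (hypothetical driver loop; KinesisBuffer and send_batch are
# illustrative stand-ins, not names from this module):
#
#   acc = RecordAccumulator(KinesisBuffer, {'buffer_time_limit': 0.5})
#   for record in source:
#       if not acc.try_append(record):    # buffer full: flush, then retry the record
#           send_batch(acc.flush())
#           acc.try_append(record)
#       if acc.is_ready():                # size- or time-triggered flush
#           send_batch(acc.flush())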
|
ludia/kinesis_producer
|
kinesis_producer/accumulator.py
|
Python
|
mit
| 1,297
|