code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
import datetime
import logging
import os
import signal
import threading
import time
from multiprocessing import Event as ProcessEvent
from multiprocessing import Process
try:
import gevent
from gevent import Greenlet
from gevent.event import Event as GreenEvent
except ImportError:
Greenlet = GreenEvent = None
from huey.exceptions import DataStoreGetException
from huey.exceptions import QueueException
from huey.exceptions import QueueReadException
from huey.exceptions import DataStorePutException
from huey.exceptions import QueueWriteException
from huey.exceptions import ScheduleAddException
from huey.exceptions import ScheduleReadException
from huey.registry import registry
class BaseProcess(object):
    """Base class shared by the consumer's Worker and Scheduler processes.

    Holds a reference to the Huey instance and whether timestamps should be
    computed in UTC or local time.
    """
    def __init__(self, huey, utc):
        self.huey = huey
        self.utc = utc

    def get_now(self):
        """Return the current (naive) datetime, UTC or local per ``self.utc``."""
        if self.utc:
            return datetime.datetime.utcnow()
        return datetime.datetime.now()

    def sleep_for_interval(self, start_ts, nseconds):
        """Sleep for the portion of ``nseconds`` not already elapsed since
        ``start_ts``.

        Bug fix: the remaining time is computed exactly once.  The original
        checked ``delta < nseconds`` and then recomputed the remainder for
        ``time.sleep()``; time elapsing between the two calls could produce
        a negative sleep argument and raise ``ValueError``.
        """
        remaining = nseconds - (time.time() - start_ts)
        if remaining > 0:
            time.sleep(remaining)

    def enqueue(self, task):
        """Enqueue ``task``, logging (rather than raising) on write errors.

        Note: ``self._logger`` is supplied by subclasses.
        """
        try:
            self.huey.enqueue(task)
        except QueueWriteException:
            self._logger.error('Error enqueueing task: %s' % task)
        else:
            self.huey.emit_task('enqueued', task)

    def loop(self, now=None):
        """Run one iteration of this process; implemented by subclasses."""
        raise NotImplementedError
class Worker(BaseProcess):
    """Dequeues tasks and executes them, sleeping with exponential backoff
    (from ``default_delay`` up to ``max_delay``) while the queue is idle or
    erroring."""
    def __init__(self, huey, default_delay, max_delay, backoff, utc):
        # ``delay`` is the current sleep interval; it grows by ``backoff``
        # each idle loop and resets to ``default_delay`` on activity.
        self.delay = self.default_delay = default_delay
        self.max_delay = max_delay
        self.backoff = backoff
        self._logger = logging.getLogger('huey.consumer.Worker')
        super(Worker, self).__init__(huey, utc)
    def loop(self, now=None):
        """Run one dequeue/handle cycle.  No queue error (other than
        KeyboardInterrupt) is allowed to escape and kill the worker."""
        self._logger.debug('Checking for message')
        task = None
        exc_raised = True
        try:
            task = self.huey.dequeue()
        except QueueReadException as exc:
            self._logger.exception('Error reading from queue')
        except QueueException:
            self._logger.exception('Queue exception')
        except KeyboardInterrupt:
            raise
        except:
            # NOTE(review): deliberate catch-all so the worker never dies;
            # the error is still logged with a traceback.
            self._logger.exception('Unknown exception')
        else:
            exc_raised = False
        if task:
            self.delay = self.default_delay
            self.handle_task(task, now or self.get_now())
        elif exc_raised or not self.huey.blocking:
            # Back off when idle or erroring.  A blocking huey already waits
            # inside dequeue(), so an empty non-error read needs no sleep.
            self.sleep()
    def sleep(self):
        """Sleep for the current backoff delay, then increase it."""
        if self.delay > self.max_delay:
            self.delay = self.max_delay
        self._logger.debug('No messages, sleeping for: %s' % self.delay)
        time.sleep(self.delay)
        self.delay *= self.backoff
    def handle_task(self, task, ts):
        """Dispatch ``task``: defer it to the schedule if its ETA has not
        arrived, run it now, or drop it if revoked."""
        if not self.huey.ready_to_run(task, ts):
            self._logger.info('Adding %s to schedule' % task)
            self.add_schedule(task)
        elif not self.is_revoked(task, ts):
            self.process_task(task, ts)
        else:
            self._logger.debug('Task %s was revoked, not running' % task)
    def process_task(self, task, ts):
        """Execute ``task``, emitting lifecycle events; on failure with
        retries remaining, re-enqueue it (honoring ``retry_delay``)."""
        self._logger.info('Executing %s' % task)
        self.huey.emit_task('started', task)
        try:
            self.huey.execute(task)
        except DataStorePutException:
            # The task itself ran; only storing its result failed.
            self._logger.exception('Error storing result')
        except:
            self._logger.exception('Unhandled exception in worker thread')
            self.huey.emit_task('error', task, error=True)
            if task.retries:
                self.huey.emit_task('retrying', task)
                self.requeue_task(task, self.get_now())
        else:
            self.huey.emit_task('finished', task)
    def requeue_task(self, task, ts):
        """Decrement the retry counter, then either re-enqueue immediately
        or schedule the retry ``retry_delay`` seconds after ``ts``."""
        task.retries -= 1
        self._logger.info('Re-enqueueing task %s, %s tries left' %
                          (task.task_id, task.retries))
        if task.retry_delay:
            delay = datetime.timedelta(seconds=task.retry_delay)
            task.execute_time = ts + delay
            self._logger.debug('Execute %s at: %s' % (task, task.execute_time))
            self.add_schedule(task)
        else:
            self.enqueue(task)
    def add_schedule(self, task):
        """Add ``task`` to the schedule, logging (not raising) on failure."""
        try:
            self.huey.add_schedule(task)
        except ScheduleAddException:
            self._logger.error('Error adding task to schedule: %s' % task)
        else:
            self.huey.emit_task('scheduled', task)
    def is_revoked(self, task, ts):
        """Return True if ``task`` is revoked.  On data-store errors, err on
        the side of NOT running the task (report it revoked)."""
        try:
            if self.huey.is_revoked(task, ts, peek=False):
                self.huey.emit_task('revoked', task)
                return True
            return False
        except DataStoreGetException:
            self._logger.error('Error checking if task is revoked: %s' % task)
            return True
class Scheduler(BaseProcess):
    """Moves tasks whose ETA has arrived from the schedule onto the queue,
    and enqueues periodic (cron-like) tasks approximately once a minute."""
    def __init__(self, huey, interval, utc, periodic):
        super(Scheduler, self).__init__(huey, utc)
        # The schedule is checked every ``interval`` seconds, capped at 60.
        self.interval = min(interval, 60)
        self.periodic = periodic
        if periodic:
            # Determine the periodic task interval.
            # _q full intervals plus a partial sleep of _r seconds add up to
            # one minute; when 60 is an exact multiple, one fewer full
            # interval is needed.
            self._q, self._r = divmod(60, self.interval)
            if not self._r:
                self._q -= 1
            self._counter = 0
        self._logger = logging.getLogger('huey.consumer.Scheduler')
    def loop(self, now=None):
        """Run one scheduler tick: enqueue due tasks, and on the tick that
        completes a minute, enqueue due periodic tasks as well."""
        now = now or self.get_now()
        start = time.time()
        for task in self.huey.read_schedule(now):
            self._logger.info('Scheduling %s for execution' % task)
            self.enqueue(task)
        should_sleep = True
        if self.periodic:
            if self._counter == self._q:
                if self._r:
                    # Sleep off the leftover fraction so periodic checks
                    # land on (approximately) minute boundaries.
                    self.sleep_for_interval(start, self._r)
                self._logger.debug('Checking periodic tasks')
                self._counter = 0
                for task in self.huey.read_periodic(now):
                    self._logger.info('Scheduling periodic task %s.' % task)
                    self.enqueue(task)
                self.sleep_for_interval(start, self.interval - self._r)
                should_sleep = False
            else:
                self._counter += 1
        if should_sleep:
            self.sleep_for_interval(start, self.interval)
class Environment(object):
    """Abstract factory for the concurrency primitives (stop flag and
    process-like runner) the consumer uses; see the subclasses below."""
    def get_stop_flag(self):
        # Return an Event-like object used to signal shutdown.
        raise NotImplementedError
    def create_process(self, runnable, name):
        # Wrap ``runnable`` in a named, unstarted thread/greenlet/process.
        raise NotImplementedError
class ThreadEnvironment(Environment):
    """Environment whose runnables execute on daemon threads."""

    def get_stop_flag(self):
        """Return a ``threading.Event`` used to signal shutdown."""
        return threading.Event()

    def create_process(self, runnable, name):
        """Return an unstarted daemon thread that will run ``runnable``."""
        thread = threading.Thread(target=runnable, name=name)
        # Daemonize so a hung worker cannot block interpreter exit.
        thread.daemon = True
        return thread
class GreenletEnvironment(Environment):
    """Environment backed by gevent greenlets (requires gevent)."""
    def get_stop_flag(self):
        return GreenEvent()
    def create_process(self, runnable, name):
        def run_wrapper():
            # Yield control before and after the runnable so sibling
            # greenlets get a chance to be scheduled.
            gevent.sleep()
            runnable()
            gevent.sleep()
        return Greenlet(run=run_wrapper)
class ProcessEnvironment(Environment):
    """Environment whose runnables execute in child processes."""

    def get_stop_flag(self):
        """Return a ``multiprocessing.Event`` shared across processes."""
        return ProcessEvent()

    def create_process(self, runnable, name):
        """Return an unstarted daemonic child process for ``runnable``."""
        child = Process(target=runnable, name=name)
        # Daemonize so stray children die with the consumer.
        child.daemon = True
        return child
# Maps the Consumer's ``worker_type`` option to the Environment class that
# provides the matching concurrency primitives.
worker_to_environment = {
    'thread': ThreadEnvironment,
    'greenlet': GreenletEnvironment,
    'gevent': GreenletEnvironment,  # Same as greenlet.
    'process': ProcessEnvironment,
}
class Consumer(object):
    """Top-level coordinator: owns one Scheduler and ``workers`` Worker
    instances, running them on threads, greenlets or processes according to
    ``worker_type``."""
    def __init__(self, huey, workers=1, periodic=True, initial_delay=0.1,
                 backoff=1.15, max_delay=10.0, utc=True, scheduler_interval=1,
                 worker_type='thread'):
        self._logger = logging.getLogger('huey.consumer')
        self.huey = huey
        self.workers = workers
        self.periodic = periodic
        self.default_delay = initial_delay
        self.backoff = backoff
        self.max_delay = max_delay
        self.utc = utc
        # Clamp to [1, 60]; the Scheduler's minute-based periodic-task
        # bookkeeping relies on this range.
        self.scheduler_interval = max(min(scheduler_interval, 60), 1)
        self.worker_type = worker_type
        if worker_type not in worker_to_environment:
            raise ValueError('worker_type must be one of %s.' %
                             ', '.join(worker_to_environment))
        else:
            self.environment = worker_to_environment[worker_type]()
        self._received_signal = False
        self.stop_flag = self.environment.get_stop_flag()
        scheduler = self._create_runnable(self._create_scheduler())
        self.scheduler = self.environment.create_process(
            scheduler,
            'Scheduler')
        self.worker_threads = []
        for i in range(workers):
            worker = self._create_runnable(self._create_worker())
            self.worker_threads.append(self.environment.create_process(
                worker,
                'Worker-%d' % (i + 1)))
    def _create_worker(self):
        """Build one Worker sharing this consumer's backoff settings."""
        return Worker(
            huey=self.huey,
            default_delay=self.default_delay,
            max_delay=self.max_delay,
            backoff=self.backoff,
            utc=self.utc)
    def _create_scheduler(self):
        """Build the single Scheduler instance."""
        return Scheduler(
            huey=self.huey,
            interval=self.scheduler_interval,
            utc=self.utc,
            periodic=self.periodic)
    def _create_runnable(self, consumer_process):
        """Wrap a process object's ``loop()`` in a run-until-stopped callable
        suitable as a thread/greenlet/process target."""
        def _run():
            try:
                while not self.stop_flag.is_set():
                    consumer_process.loop()
            except KeyboardInterrupt:
                pass
        return _run
    def start(self):
        """Install the SIGTERM handler and start the scheduler and workers."""
        self._logger.info('Huey consumer started with %s %s, PID %s' % (
            self.workers,
            self.worker_type,
            os.getpid()))
        self._logger.info('Scheduler runs every %s seconds.' % (
            self.scheduler_interval))
        self._logger.info('Periodic tasks are %s.' % (
            'enabled' if self.periodic else 'disabled'))
        self._set_signal_handler()
        msg = ['The following commands are available:']
        for command in registry._registry:
            msg.append('+ %s' % command.replace('queuecmd_', ''))
        self._logger.info('\n'.join(msg))
        self.scheduler.start()
        for worker in self.worker_threads:
            worker.start()
    def stop(self):
        """Ask every runnable to exit after its current loop iteration."""
        self.stop_flag.set()
        self._logger.info('Shutting down')
    def run(self):
        """Blocking entry point: start everything, then poll until shutdown
        is requested via the stop flag, SIGTERM or Ctrl-C."""
        self.start()
        while True:
            try:
                # NOTE(review): ``is_set`` is unused; the wait() serves only
                # as a short poll before the explicit sleep below.
                is_set = self.stop_flag.wait(timeout=0.1)
                time.sleep(0.1)
            except KeyboardInterrupt:
                self.stop()
            except:
                self._logger.exception('Error in consumer.')
                self.stop()
            else:
                if self._received_signal:
                    self.stop()
            if self.stop_flag.is_set():
                break
        self._logger.info('Consumer exiting.')
    def _set_signal_handler(self):
        """Route SIGTERM to a flag polled by run(); SIGINT arrives as
        KeyboardInterrupt and is handled there."""
        signal.signal(signal.SIGTERM, self._handle_signal)
    def _handle_signal(self, sig_num, frame):
        self._logger.info('Received SIGTERM')
        self._received_signal = True
| deathowl/huey | huey/consumer.py | Python | mit | 11,130 |
from setuptools import setup, find_packages
import sys, os, glob
version = '0.7.1'
setup(name='seqtools',
version=version,
description="",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Sean Davis',
author_email='seandavi@gmail.com',
url='https://github.com/seandavi/seqtools',
license='MIT',
scripts=glob.glob('scripts/*'),
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'PyVCF>=0.6.5',
'pylev'
#'python-Levenshtein'
],
entry_points="""
# -*- Entry points: -*-
""",
)
| lowks/SDST | setup.py | Python | mit | 788 |
#!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: magic
"""
from django.contrib import admin
from blog.models import User
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext, ugettext_lazy as _
class BlogUserAdmin(UserAdmin):
    """Admin configuration for the blog's custom User model.

    Bug fix: Django's UserAdmin reads the attribute ``fieldsets``; the
    original spelled it ``filesets``, which Django silently ignored, so the
    customized change-form layout never took effect.
    """
    fieldsets = (
        (None, {'fields': ('username', 'email', 'password')}),
        (_('Personal info'), {'fields': ('email', 'qq', 'phone')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        # Use a tuple, not a set: field order must be deterministic.
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
    # Layout of the "add user" form.
    add_fieldsets = (
        (None, {
            'classes': ('wide', ),
            'fields': ('username', 'email', 'password1', 'password2'),
        }),
    )
admin.site.register(User, BlogUserAdmin)
#!/usr/bin/env python
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
# Script to make a new python-sensor release on Github
# Requires the Github CLI to be installed and configured: https://github.com/cli/cli
import os
import shutil
import sys
from subprocess import check_output

if len(sys.argv) != 2:
    raise ValueError('Please specify the version to release. e.g. "1.27.1"')

if sys.argv[1] in ['-h', '--help']:
    filename = os.path.basename(__file__)
    print("Usage: %s <version number>" % filename)
    print("Example: %s 1.27.1" % filename)
    print("")
    print("This will create a release on Github such as:")
    print("https://github.com/instana/python-sensor/releases/tag/v1.27.1")
    # Bug fix: the original fell through here and tried to release "-h".
    sys.exit(0)

# Check requirements first.  shutil.which replaces the deprecated
# distutils.spawn.find_executable (distutils is removed in Python 3.12).
for cmd in ["gh"]:
    if shutil.which(cmd) is None:
        print("Can't find required tool: %s" % cmd)
        sys.exit(1)

version = sys.argv[1]
semantic_version = 'v' + version
body = """
This release includes the following fixes & improvements:
*
Available on PyPI:
https://pypi.python.org/pypi/instana/%s
""" % version

response = check_output(["gh", "release", "create", semantic_version,
                         "-d",  # draft
                         "-R", "instana/python-sensor",
                         "-t", semantic_version,
                         "-n", body])
print("If there weren't any failures, the draft release is available at:")
print(response.strip().decode())
| instana/python-sensor | bin/create_general_release.py | Python | mit | 1,486 |
#!/usr/bin/env python
# encoding: utf-8
# Here's a simple script (feels like a program, though) that prints out
# the first n narcissistic numbers, where n is provided on the command line. import sys
import sys
def numDigits(num):
    """Return the number of decimal digits in ``num`` (0 counts as 1 digit).

    Bug fix: uses floor division.  The original ``num /= 10`` is true
    division under Python 3, turning ``num`` into a float so the loop runs
    hundreds of extra iterations and returns a wildly wrong count.
    """
    if num == 0:
        return 1
    digitCount = 0
    while num > 0:
        digitCount += 1
        num //= 10
    return digitCount


def isNarcissistic(num):
    """Return True iff ``num`` equals the sum of its digits, each raised to
    the power of the total digit count (a narcissistic number)."""
    originalNum = num
    total = 0
    exp = numDigits(num)
    while num > 0:
        digit = num % 10
        total += digit ** exp
        num //= 10  # bug fix: floor division (see numDigits)
    return total == originalNum


def listNarcissisticNumbers(numNeeded):
    """Print the first ``numNeeded`` narcissistic numbers; also return them
    as a list (new, backward-compatible: callers previously got None).

    Bug fix: the original tested ``isNarcissistic(numFound)`` -- the count
    of numbers found so far -- instead of the candidate ``numToConsider``,
    so it looped forever once more than ten numbers were requested.
    """
    found = []
    numToConsider = 0
    print("here are the first ", numNeeded, " narcissistic numbers.")
    while len(found) < numNeeded:
        if isNarcissistic(numToConsider):
            found.append(numToConsider)
            print("Find a Narcissistic number: ", numToConsider)
        numToConsider += 1
    print("Done!")
    return found


def getNumberNeeded():
    """Return the requested count from argv[1], defaulting to 9."""
    numNeeded = 9  # default when no (or a non-integer) argument is given
    if len(sys.argv) > 1:
        try:
            numNeeded = int(sys.argv[1])
        except ValueError:
            print("Non-integral argument encountered... using default.")
    return numNeeded


listNarcissisticNumbers(getNumberNeeded())
| nakayamaqs/PythonModule | Learning/narcissistic.py | Python | mit | 1,383 |
# -*- coding: utf-8 -*-
#
from . import ek90, porcelain
| nschloe/maelstrom | examples/problems/my_materials/__init__.py | Python | mit | 57 |
import unittest
from y2017.day5 import *
class TestDay5(unittest.TestCase):
    """Unit tests for the Advent of Code 2017 day 5 jump-maze solutions."""
    def test_part_A(self):
        # Puzzle example: the maze '0 3 0 1 -3' is exited after 5 jumps.
        self.assertEqual(jump_increment('0\n3\n0\n1\n-3\n'), 5)
    def test_part_B(self):
        # Part B variant of the same maze takes 10 jumps.
        self.assertEqual(jump_conditional_increment('0\n3\n0\n1\n-3\n'), 10)
| martakus/advent-of-code | y2017/tests/test5.py | Python | mit | 274 |
# -*- coding: utf-8 -*-
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Div, Field, Layout, Submit
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import User, UserProfile
class UserForm(forms.ModelForm):
    """
    Edit username
    """
    def clean_username(self):
        """Validate the new username: it must differ from the current one
        and must not collide with an existing user."""
        if self.initial['username'] == self.cleaned_data['username']:
            raise forms.ValidationError(
                _("Please choose a different username."))
        try:
            # EAFP: only User.DoesNotExist is caught below (name available);
            # the ValidationError raised here propagates to the form.
            User.objects.get(username=self.cleaned_data['username'])
            raise ValidationError(
                _('A user with that username already exists.'))
        except User.DoesNotExist:
            pass
        return self.cleaned_data['username']

    class Meta:
        model = User
        fields = ['username']
class UserProfileForm(forms.ModelForm):
    """
    Edit user profile
    """
    # avatar = forms.ImageField(required=False) # TODO
    name = forms.CharField(label=_('Name'), required=False, max_length=255)
    location = forms.CharField(label=_('Location'),
                               required=False, max_length=255)
    website = forms.CharField(label=_('Website'),
                              required=False, max_length=255)
    bio = forms.CharField(label=_('About me'),
                          required=False,
                          widget=forms.Textarea())
    # crispy-forms rendering configuration (class-level, shared by all
    # instances of this form).
    helper = FormHelper()
    helper.form_class = 'users-update'
    helper.form_action = 'users:update'
    helper.layout = Layout(
        # 'avatar', # TODO
        'name',
        'location',
        'website',
        # Hand-built label markup for the occupation checkbox group below.
        HTML('<label for="id_status" class="control-label ">'),
        HTML(_('Occupation')),
        HTML('</label>'),
        Div('student', 'assistant', 'professional', 'professor',
            css_class='users-update-status'),
        Field('bio', rows="3", css_class='input-xlarge'),
        FormActions(
            Submit('submit', _('Update Profile'), css_class="btn-primary"),
        ),
    )

    class Meta:
        model = UserProfile
        exclude = ['user', 'avatar']
# Phrase the user must type verbatim to confirm account deletion.
DELETE_CONFIRMATION_PHRASE = _('delete my account')


class UserDeleteForm(forms.ModelForm):
    """Account-deletion form: the user must re-enter their login, type the
    confirmation phrase exactly, and confirm their password."""
    confirmation_phrase_en = _('To verify, type "<span class='
                               '"confirmation-phrase do-not-copy-me">'
                               'delete my account</span>" below:')
    form_labels = {
        'sudo_login': _('Your username or email:'),
        'confirmation_phrase': confirmation_phrase_en,
        'sudo_password': _('Confirm your password:'),
    }
    sudo_login = forms.CharField(
        label=form_labels['sudo_login'],
        required=True,
        max_length=255
    )
    confirmation_phrase = forms.CharField(
        label=form_labels['confirmation_phrase'],
        required=True,
        max_length=255
    )
    sudo_password = forms.CharField(
        label=form_labels['sudo_password'],
        required=True,
        max_length=128,
        widget=forms.PasswordInput
    )
    # crispy-forms rendering configuration (class-level, shared).
    helper = FormHelper()
    helper.form_class = 'users-delete'
    helper.form_action = 'users:account'
    helper.layout = Layout(
        Field('sudo_login', css_class='form-control'),
        Field('confirmation_phrase', css_class='form-control'),
        Field('sudo_password', css_class='form-control'),
        FormActions(
            Submit('submit_delete', _('Delete your account'),
                   css_class="btn btn-danger"),
        ),
    )

    def clean_sudo_login(self):
        """Accept either the username or the email of the account owner."""
        login = self.cleaned_data.get("sudo_login")
        if login != self.user.username and login != self.user.email:
            raise forms.ValidationError(_("The login and/or password you "
                                          "specified are not correct."))
        return self.cleaned_data["sudo_login"]

    def clean_confirmation_phrase(self):
        """Require the confirmation phrase to match exactly."""
        confirmation_phrase = self.cleaned_data.get("confirmation_phrase")
        # str() forces evaluation of the lazy translation proxy before the
        # comparison with the submitted text.
        if str(DELETE_CONFIRMATION_PHRASE) != confirmation_phrase:
            raise forms.ValidationError(
                _("Confirmation phrase is not correct."))
        return self.cleaned_data["confirmation_phrase"]

    def clean_sudo_password(self):
        """Check the password against the account being deleted."""
        password = self.cleaned_data.get("sudo_password")
        if not self.user.check_password(password):
            raise forms.ValidationError(_("The login and/or password you "
                                          "specified are not correct."))
        return self.cleaned_data["sudo_password"]

    def is_valid(self):
        # The clean_* methods above need the target user; stash the model
        # instance before validation runs.
        self.user = self.instance
        return super(UserDeleteForm, self).is_valid()

    class Meta:
        model = User
        fields = []
| maru/fiubar | fiubar/users/forms.py | Python | mit | 4,831 |
import dj_database_url
import pkg_resources

# ``os`` (used below) comes from the base settings star import.
from yawn.settings.base import *

# The DATABASE_URL env variable configures the default connection; keep
# idle connections open for up to 10 minutes.
DATABASES['default'] = dj_database_url.config(conn_max_age=600)
SECRET_KEY = os.environ.get('SECRET_KEY')
ALLOWED_HOSTS = [os.environ.get('ALLOWED_HOSTS')]

# Allow anonymous read
REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = [
    'rest_framework.permissions.IsAuthenticatedOrReadOnly',
]

INSTALLED_APPS += ['raven.contrib.django']

# Best effort: report the installed package version to Sentry.
try:
    yawn_version = pkg_resources.require("yawns")[0].version
except Exception:
    # Bug fix: this was a bare ``except:``, which also swallowed
    # SystemExit/KeyboardInterrupt.  The version is optional metadata, so
    # any ordinary failure simply leaves it unset.
    yawn_version = None

RAVEN_CONFIG = {
    'dsn': os.environ.get('SENTRY_DSN'),
    'release': yawn_version,
    'name': os.environ.get('KUBERNETES_POD_NAME'),
    'include_paths': ['yawn'],
}
| aclowes/yawn-gke | yawn_settings.py | Python | mit | 727 |
"""
Lua pattern matcher based on a NFA
inspired by
http://swtch.com/~rsc/regexp/regexp1.html
"""
from rpyre.interface.lua import compile_re
from rpyre.matching import find
def main(args):
    """CLI entry point (RPython / Python 2): compile the Lua pattern given
    as args[1], read all of stdin, and print the first match result."""
    n = 20
    s = args[1]
    #s = "(a|b)*a%sa(a|b)*$" % ("(a|b)" * n, )
    print s
    evilregex = compile_re(s)
    import os
    chunks = []
    # use os.read to be RPython compatible
    while True:
        s = os.read(0, 4096)
        if not s:
            break
        chunks.append(s)
    s = "".join(chunks)
    print len(s)
    print find(evilregex, s, 0)
    # The string below is dead code kept for reference (find2 variant).
    """
    for x in find2(evilregex, s, 0):
        print x
    """
    return 0
# needed for the PyPy translation toolchain
def target(*args):
    """Translation entry point: return (entry_function, annotation_hint)."""
    return main, None

def jitpolicy(*args):
    """Tell the RPython translator to apply the default JIT policy."""
    from rpython.jit.codewriter.policy import JitPolicy
    return JitPolicy()

if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
| fhahn/rpyre | tools/cli.py | Python | mit | 904 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#==============================================================================
# main file for creating apsimRegion experiments
#==============================================================================
import os
from apsimRegions.preprocess.configMaker import create_many_config_files
from apsimRegions.preprocess.apsimPreprocess import preprocess_many
from apsimRegions.preprocess.batch import create_run_all_batchfile
def main():
    """Build an apsimRegions factorial experiment (Python 2 script): write
    config files, .apsim/.bat files, a run-all batchfile and a readme into
    the experiment's output directory."""
    experimentName = 'test'
    outputDir = 'C:/ExampleProject/output/experiments/maize/{0}'.format(experimentName)
    # validArgs are 'resolution','crop','model','crit_fr_asw', 'sowStart', or 'soilName'
    #factorials = {'soilName':['auto','HCGEN0001','HCGEN0003','HCGEN0007','HCGEN0010','HCGEN0011','HCGEN0013','HCGEN0014','HCGEN0015','HCGEN0016','HCGEN0017','HCGEN0025']}
    #factorials = {'sowStart':['auto','01-apr','08-apr','15-apr','22-apr','01-may','08-may','15-may','22-may','01-jun','08-jun','15-jun','22-jun','01-jul']}
    factorials = {'crit_fr_asw':['0.0','0.05','0.15','0.25','0.50','0.75','0.95','1.0']}
    otherArgs = {'metFileDir':'C:/ExampleProject/metfiles/working/%(met)s',\
        'gridLutPath':'C:/ExampleProject/lookupTables/exampleLookupTable.csv',\
        'clockStart':'1/1/2001', 'clockEnd':'31/12/2010'}
    # create directory if it doesn't exist
    if not os.path.isdir(outputDir):
        os.mkdir(outputDir)
    # create config files
    print 'Creating configuration files...'
    runs = create_many_config_files(outputDir, factorials, otherArgs)
    # create apsim files
    print 'Saving .apsim and .bat files...'
    # NOTE(review): runs.keys()[0]/[-1] index a list (Python 2 only) and
    # presumably give the first and last run numbers -- confirm ordering.
    preprocess_many(outputDir, runs.keys()[0], runs.keys()[-1])
    # create run all batchfile
    create_run_all_batchfile(outputDir, runs, experimentName)
    # feedback
    print 'All files saved to:\r', outputDir
    print '\nFolder', ': Variable'
    for key in runs.keys():
        print '{0:6} : {1}'.format(key, runs[key])
    # save text file of run data (append if a readme already exists)
    if not os.path.isfile(os.path.join(outputDir,'readme.txt')):
        mode = 'w'
    else:
        mode = 'a'
    with open(os.path.join(outputDir,'readme.txt'),mode=mode) as f:
        f.write('Folder : Variable')
        for key in runs.keys():
            f.write('\n{0:6} : {1}'.format(key, runs[key]))
        f.write('\n')
    print '\n***** Done! *****'
# Run main() if module is run as a program
if __name__ == '__main__':
main() | dhstack/apsimRegions | scripts/preprocess.py | Python | mit | 2,525 |
from django.contrib import admin
from .models import CustomUser, Equipment, Search, TagManagement
from .models import Reserved, Request, Vote, Log, Tag
# Register every model with the default ModelAdmin so they are all
# manageable from the Django admin site.
admin.site.register(CustomUser)
admin.site.register(Equipment)
admin.site.register(Search)
admin.site.register(Reserved)
admin.site.register(Request)
admin.site.register(Vote)
admin.site.register(Log)
admin.site.register(TagManagement)
admin.site.register(Tag)
| XMLPro/ManagementSystem | system/admin.py | Python | mit | 414 |
import uuid
import base64
import re
def generate_key():
    """Return a 12-character random key.

    A UUID4 is base32-encoded (so the alphabet, once lower-cased, is
    a-z2-7), the '=' padding is stripped, and the first 12 characters are
    kept.

    Bug fix: ``b32encode`` returns ``bytes``; under Python 3 calling
    ``.strip('=')`` on it raised TypeError.  Decoding to ASCII first works
    on both Python 2 and 3.
    """
    encoded = base64.b32encode(uuid.uuid4().bytes).decode('ascii')
    return encoded.strip('=').lower()[0:12]
def thousand_separator(x=0, sep='.', dot=','):
    """Format ``x`` with ``sep`` between each group of three digits.

    ``dot`` is the decimal marker: anything after the first occurrence is
    treated as the fractional part and left untouched.
    """
    whole, _, fraction = str(x).partition(dot)
    # Work on the reversed digits so groups of three are counted from the
    # right-hand end, then flip the result back.
    grouped = re.sub(r'(\d{3})(?=\d)', r'\1' + sep, whole[::-1])
    result = grouped[::-1]
    if fraction:
        result = result + dot + fraction
    return result
def new_parser(passed_object, request_data):
    """
    Maps passed request object from client into expected object.
    Use this for creation of new object by passing an instantiated
    empty object into the passed_object variable
    """
    # Python 2: dict.iteritems().
    for item, value in request_data.values.iteritems():
        if hasattr(passed_object, item) and value is not None:
            try:
                setattr(passed_object, item, value)
            except:
                # NOTE(review): bare except -- assumes any assignment
                # failure means the value was a date string; other failure
                # modes are funnelled through convert_to_date too. Confirm.
                setattr(passed_object, item, convert_to_date(value))
    # Fresh objects always get a newly generated primary key.
    passed_object.id = generate_key()
    return passed_object
def edit_parser(passed_object, request_data):
    """
    Copy non-null values from ``request_data.values`` onto an existing
    object fetched from the database; the ``id`` attribute is never
    overwritten.  Returns the (mutated) object.
    """
    for item in request_data.values:
        # Look the value up once instead of twice per key.
        value = request_data.values.get(item)
        # ``is not None`` instead of ``!= None``: identity is the correct
        # (PEP 8) way to test for None.
        if item != "id" and hasattr(passed_object, item) and value is not None:
            setattr(passed_object, item, value)
    return passed_object
def convert_to_date(date_string):
    """Parse a ``YYYY-MM-DD`` string into a ``datetime.date``."""
    from datetime import date
    parts = date_string.split("-")
    return date(int(parts[0]), int(parts[1]), int(parts[2]))
def multikeysort(items, columns):
    """Sort ``items`` (a sequence of mappings) by several columns at once.

    A column name prefixed with '-' sorts that column descending.

    Bug fix: the original ``comparer`` had ``else: return 0`` inside the
    loop, so it returned "equal" as soon as the FIRST column tied and the
    remaining sort columns were never consulted.

    Also replaced the Python-2-only ``cmp()`` / ``sorted(cmp=...)`` with
    ``(a > b) - (a < b)`` and ``functools.cmp_to_key`` (available since
    Python 2.7), so the function works on both Python 2 and 3.
    """
    from operator import itemgetter
    from functools import cmp_to_key
    comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-')
                  else (itemgetter(col.strip()), 1)) for col in columns]
    def comparer(left, right):
        for fn, mult in comparers:
            a, b = fn(left), fn(right)
            result = (a > b) - (a < b)  # portable spelling of cmp(a, b)
            if result:
                return mult * result
        # Only equal once every column has tied.
        return 0
    return sorted(items, key=cmp_to_key(comparer))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class LoCatrConfig(AppConfig):
    """Django application configuration for the LoCatr app."""
    name = 'LoCatr'
| scifiswapnil/Project-LoCatr | LoCatr/LoCatr/apps.py | Python | mit | 152 |
from collections import deque
from sys import stdout
import re
# Number of unchanged context lines kept before and after each diff chunk.
contextLines = 3
class DiffLines:
    """One contiguous span of lines from a diff chunk -- either the
    original or the changed side.

    ``start`` is the first line number.  ``end`` is exclusive:
    ``end == start + len(lines)``, so an empty span has ``end == start``.
    """

    def __init__(self, start, lines):
        self.start = start
        self.lines = lines
        # Exclusive end, precomputed from the initial line count.
        self.end = start + len(lines)

    def take(self, n):
        """Split off the first ``n`` lines as a new DiffLines and advance
        this span past them.  Raises ValueError when fewer remain."""
        if n > len(self.lines):
            raise ValueError('not enough lines remaining')
        taken = DiffLines(self.start, self.lines[:n])
        self.lines = self.lines[n:]
        self.start += n
        return taken

    def bump(self, n):
        """Shift the whole span down by ``n`` lines."""
        self.start += n
        self.end += n

    def count(self):
        """Number of lines currently covered by the span."""
        return self.end - self.start

    def write(self, output, prefix=''):
        """Write every line to ``output``, each preceded by ``prefix``."""
        for text in self.lines:
            output.write(prefix)
            output.write(text)

    def isEmpty(self):
        """True when the span covers no lines."""
        return self.count() == 0

    def setStart(self, start):
        """Relocate the span to begin at ``start``, preserving its count."""
        # Recompute end while count() still reflects the old start.
        self.end = start + self.count()
        self.start = start
class DiffChunk:
    """A single piece of diff, original and changed line spans both"""
    def __init__(self, original, changed, preContext=None, postContext=None):
        # ``original``/``changed`` are DiffLines; context lists are trimmed
        # to at most ``contextLines`` entries on either side.
        self.original = original
        self.changed = changed
        if preContext is None:
            self.preContext = []
        else:
            self.preContext = preContext[-contextLines:]
        if postContext is None:
            self.postContext = []
        else:
            self.postContext = postContext[:contextLines]
    def take(self, n, m=None):
        """Split off a new chunk holding the first ``n`` original lines and
        ``m`` changed lines (``m`` defaults to ``n``); this chunk's
        pre-context absorbs the lines handed over."""
        if m is None:
            m = n
        retOrig = self.original.take(n)
        # Post-context of the split-off piece: the original lines that
        # remain here, padded with our own post-context.
        retPost = self.original.lines + self.postContext
        retPost = retPost[:contextLines]
        ret = DiffChunk(retOrig, self.changed.take(m),
                        self.preContext, retPost)
        self.preContext += ret.original.lines
        self.preContext = self.preContext[-contextLines:]
        return ret
    def delta(self):
        """Determine how many lines this change adds"""
        return self.changed.count() - self.original.count()
    def update(self, other):
        """Takes the other patch chunk and assumes that it's been applied.
        Returns True if changes were made"""
        if other.original.start <= self.original.start:
            # overlap on the preContext part
            #self.original.bump(other.delta())
            overlap = other.original.end - (self.original.start - len(self.preContext))
            if overlap > 0:
                overlapstart = max(0, overlap - other.original.count())
                # Splice the other chunk's changed lines over the shared
                # slice of our pre-context, then re-trim.
                self.preContext[overlapstart:overlap] = other.changed.lines
                self.preContext = self.preContext[-contextLines:]
                return True
        if other.original.end >= self.original.end:
            # overlap on the postContext part
            overlap = self.original.end + len(self.postContext) - other.original.start
            if overlap > 0:
                oend = len(self.postContext) - overlap + other.original.count()
                self.postContext[-overlap:oend] = other.changed.lines
                self.postContext = self.postContext[:contextLines]
                return True
        return False
    def resetChangedLineStart(self):
        """When taken on its own, both the original and changed lines start
        at the same line number. This makes it so."""
        self.changed.setStart(self.original.start)
    def bumpOriginal(self, other):
        # Shift our original span when ``other`` (already applied) lands
        # at or before our start in the changed file.
        if other.changed.start <= self.original.start:
            self.original.bump(other.delta())
    def bumpChanged(self, other):
        """Takes the other patch and assumes that it's in the same patch set.
        When patches are grouped together, the line counts on the changed end
        need to be incremented based on what has come before.
        """
        if other.original.end < self.original.start:
            self.changed.bump(other.delta())
    def contextOverlap(self, other):
        """If other follows this, return the amount of overlap in the context parts.
        If this is positive, the chunks will have to be merged for output.
        """
        endOfSelf = self.original.end + len(self.postContext)
        startOfOther = other.original.start - len(other.preContext)
        return endOfSelf - startOfOther
def saveContext(line, context, pendingChunks):
    """save a line of context. sometimes this gives a pending chunk
    enough trailing context to be complete, so return true when that happens
    so that the chunk can be emitted"""
    if context is not None:
        context.append(line)
        # NOTE(review): this rebinds the local name only -- the caller's
        # list is appended to but never trimmed here.  Harmless in practice
        # because DiffChunk.__init__ slices preContext itself, but confirm
        # before relying on ``context`` staying bounded.
        context = context[-contextLines:]
    for chunk in pendingChunks:
        if len(chunk.postContext) < contextLines:
            chunk.postContext.append(line)
    # only the first chunk will be finished, return true iff it is
    return len(pendingChunks) > 0 and len(pendingChunks[0].postContext) >= contextLines
def parseDiff(input):
    """Generator: parse a unified diff from the file-like ``input`` and
    yield DiffChunk objects, one per contiguous run of -/+ lines.

    Chunks are held in ``pendingChunks`` until they have accumulated a full
    ``contextLines`` of trailing context (or the section/file ends).
    """
    # Skip the preamble up to and including the '---'/'+++' header lines.
    line = input.readline()
    while line != '' and line[:3] != '---':
        line = input.readline()
    line = input.readline()
    if line[:3] == '+++':
        line = input.readline()
    headerRegex = re.compile(r'^@@ -(\d+),\d+ \+(\d+),\d+ @@')
    pendingChunks = deque()
    while line != '':
        # First character is the diff operation; the rest is the content.
        operation, remainder = line[0], line[1:]
        if operation == '@':
            # New section: flush everything pending and reset state.
            # NOTE(review): a diff whose first body line is not '@' would
            # hit ``original``/``context`` before assignment -- assumes
            # well-formed unified-diff input.
            for chunk in pendingChunks:
                yield chunk
            pendingChunks.clear()
            context = []
            original = []
            changed = []
            m = headerRegex.match(line)
            if m is None:
                raise RuntimeError('can\'t parse @@ line')
            originalLine, changedLine = map(int, (m.group(1), m.group(2)))
        elif operation == '-':
            original.append(remainder)
            # don't add to context, so that we don't get original
            # lines mixed up in there, we'll need to add these lines back later
            # though in case there a multiple chunks in the one section
            if saveContext(remainder, None, pendingChunks):
                yield pendingChunks.popleft()
        elif operation == '+':
            changed.append(remainder)
        elif operation == ' ':
            # A context line terminates any in-progress -/+ run: package it
            # up as a pending chunk and advance the line counters.
            if len(original) > 0 or len(changed) > 0:
                pendingChunks.append(
                    DiffChunk(DiffLines(originalLine, original),
                              DiffLines(changedLine, changed),
                              context))
                context += original
                originalLine += len(original)
                changedLine += len(changed)
                original = []
                changed = []
            originalLine += 1
            changedLine += 1
            if saveContext(remainder, context, pendingChunks):
                yield pendingChunks.popleft()
        else:
            raise RuntimeError('unknown diff character %s' % operation)
        line = input.readline()
    # End of input: emit whatever is still pending.
    for chunk in pendingChunks:
        yield chunk
def writeMergedChunks(chunks, output):
    """Emit a list of overlapping chunks as one unified-diff "@@" section.

    First pass computes the combined line counts for the header (counting
    context shared between consecutive chunks only once); second pass
    writes the interleaved context/'-'/'+' lines.
    """
    prev = None
    totalOriginal = 0
    totalChanged = 0
    for c in chunks:
        contextSize = len(c.preContext) + len(c.postContext)
        if prev is not None:
            # Context shared with the previous chunk is only emitted once.
            contextSize -= prev.contextOverlap(c)
        totalOriginal += c.original.count() + contextSize
        totalChanged += c.changed.count() + contextSize
        prev = c
    output.write("@@ -%d,%d +%d,%d @@\n" % (chunks[0].original.start - len(chunks[0].preContext),
                                            totalOriginal,
                                            chunks[0].changed.start - len(chunks[0].preContext),
                                            totalChanged))
    prev = None
    for c in chunks:
        overlap = 0
        if prev is not None:
            overlap = prev.contextOverlap(c)
            removed = min(len(prev.postContext), overlap)
            overlap -= removed
            # NOTE(review): when removed == 0, ``[:-0]`` slices to the
            # empty list, dropping prev.postContext entirely -- presumably
            # merged chunks always overlap, but confirm.
            context = prev.postContext[:-removed]
        else:
            context = []
        context += c.preContext[overlap:]
        for cline in context:
            output.write(' ')
            output.write(cline)
        c.original.write(output, '-')
        c.changed.write(output, '+')
        prev = c
    # Trailing context of the final chunk closes out the section.
    for cline in prev.postContext:
        output.write(' ')
        output.write(cline)
| martinthomson/blame-bridge | blame_bridge/diffu.py | Python | mit | 8,505 |
#!/usr/bin/env python
# encoding: utf-8
from cadnano.cnproxy import ProxyObject, ProxySignal
import cadnano.util as util
from cadnano.enum import StrandType
from cadnano.cnproxy import UndoStack, UndoCommand
from cadnano.strandset import StrandSet
from .removevhelixcmd import RemoveVirtualHelixCommand
class VirtualHelix(ProxyObject):
    """
    VirtualHelix is a container class for two StrandSet objects (one scaffold
    and one staple). The Strands all share the same helix axis. It is called
    "virtual" because many different Strands (i.e. sub-oligos) combine to
    form the "helix", just as many fibers may be braided together to
    form a length of rope.
    """
    def __init__(self, part, row, col, idnum=0):
        self._doc = part.document()
        super(VirtualHelix, self).__init__(part)
        self._coord = (row, col)  # (row, col) lattice coordinate
        self._part = part
        # One StrandSet per strand type, both sharing this helix axis.
        self._scaf_strandset = StrandSet(StrandType.SCAFFOLD, self)
        self._stap_strandset = StrandSet(StrandType.STAPLE, self)
        # If self._part exists, it owns self._number
        # in that only it may modify it through the
        # private interface. The public interface for
        # setNumber just routes the call to the parent
        # dnapart if one is present. If self._part == None
        # the virtualhelix owns self._number and may modify it.
        self._number = None
        self.setNumber(idnum)
    # end def
def __repr__(self):
return "<%s(%d)>" % (self.__class__.__name__, self._number)
### SIGNALS ###
virtualHelixRemovedSignal = ProxySignal(ProxyObject, name='virtualHelixRemovedSignal') #pyqtSignal(QObject) # self
virtualHelixNumberChangedSignal = ProxySignal(ProxyObject, int, name='virtualHelixNumberChangedSignal') #pyqtSignal(QObject, int) # self, num
### SLOTS ###
### ACCESSORS ###
def scaf(self, idx):
""" Returns the strand at idx in self's scaffold, if any """
return self._scaf_strandset.getStrand(idx)
def stap(self, idx):
""" Returns the strand at idx in self's scaffold, if any """
return self._stap_strandset.getStrand(idx)
def coord(self):
return self._coord
# end def
def number(self):
return self._number
# end def
def part(self):
return self._part
# end def
def document(self):
return self._doc
# end def
def setNumber(self, number):
if self._number != number:
num_to_vh_dict = self._part._number_to_virtual_helix
# if self._number is not None:
num_to_vh_dict[self._number] = None
self._number = number
self.virtualHelixNumberChangedSignal.emit(self, number)
num_to_vh_dict[number] = self
# end def
def setPart(self, new_part):
self._part = new_part
self.setParent(new_part)
# end def
def scaffoldStrandSet(self):
return self._scaf_strandset
# end def
def stapleStrandSet(self):
return self._stap_strandset
# end def
def undoStack(self):
return self._part.undoStack()
# end def
### METHODS FOR QUERYING THE MODEL ###
def scaffoldIsOnTop(self):
return self.isEvenParity()
def getStrandSetByIdx(self, idx):
"""
This is a path-view-specific accessor
idx == 0 means top strand
idx == 1 means bottom strand
"""
if idx == 0:
if self.isEvenParity():
return self._scaf_strandset
else:
return self._stap_strandset
else:
if self.isEvenParity():
return self._stap_strandset
else:
return self._scaf_strandset
# end def
def getStrandSetByType(self, strand_type):
if strand_type == StrandType.SCAFFOLD:
return self._scaf_strandset
else:
return self._stap_strandset
# end def
def getStrandSets(self):
"""Return a tuple of the scaffold and staple StrandSets."""
return self._scaf_strandset, self._stap_strandset
# end def
def hasStrandAtIdx(self, idx):
"""Return a tuple for (Scaffold, Staple). True if
a strand is present at idx, False otherwise."""
return (self._scaf_strandset.hasStrandAt(idx, idx),\
self._stap_strandset.hasStrandAt(idx, idx))
# end def
def indexOfRightmostNonemptyBase(self):
"""Returns the rightmost nonempty base in either scaf of stap."""
return max(self._scaf_strandset.indexOfRightmostNonemptyBase(),\
self._stap_strandset.indexOfRightmostNonemptyBase())
# end def
def isDrawn5to3(self, strandset):
is_scaf = strandset == self._scaf_strandset
is_even = self.isEvenParity()
return is_even == is_scaf
# end def
def isEvenParity(self):
return self._part.isEvenParity(*self._coord)
# end def
def strandSetBounds(self, idx_helix, idx_type):
"""
forwards the query to the strandset
"""
return self.strandSet(idx_helix, idx_type).bounds()
# end def
### METHODS FOR EDITING THE MODEL ###
def destroy(self):
# QObject also emits a destroyed() Signal
self.setParent(None)
self.deleteLater()
# end def
def remove(self, use_undostack=True):
"""
Removes a VirtualHelix from the model. Accepts a reference to the
VirtualHelix, or a (row,col) lattice coordinate to perform a lookup.
"""
if use_undostack:
self.undoStack().beginMacro("Delete VirtualHelix")
self._scaf_strandset.remove(use_undostack)
self._stap_strandset.remove(use_undostack)
c = RemoveVirtualHelixCommand(self.part(), self)
if use_undostack:
self.undoStack().push(c)
self.undoStack().endMacro()
else:
c.redo()
# end def
### PUBLIC SUPPORT METHODS ###
def deepCopy(self, part):
"""
This only copies as deep as the VirtualHelix
strands get copied at the oligo and added to the Virtual Helix
"""
vh = VirtualHelix(part, self._number)
vh._coords = (self._coord[0], self._coord[1])
# If self._part exists, it owns self._number
# in that only it may modify it through the
# private interface. The public interface for
# setNumber just routes the call to the parent
# dnapart if one is present. If self._part == None
# the virtualhelix owns self._number and may modify it.
self._number = idnum
# end def
def getLegacyStrandSetArray(self, strand_type):
"""Called by legacyencoder."""
if strand_type == StrandType.SCAFFOLD:
return self._scaf_strandset.getLegacyArray()
else:
return self._stap_strandset.getLegacyArray()
def shallowCopy(self):
pass
# end def
# def translateCoords(self, deltaCoords):
# """
# for expanding a helix
# """
# deltaRow, deltaCol = deltaCoords
# row, col = self._coord
# self._coord = row + deltaRow, col + deltaCol
# # end def
# end class | JMMolenaar/cadnano2.5 | cadnano/virtualhelix/virtualhelix.py | Python | mit | 7,266 |
from implementation import *
from implementations import *
| drvinceknight/sklDj | sklDj/implementations/__init__.py | Python | mit | 59 |
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``namelengthsrc`` property of ``bar.hoverlabel``."""

    def __init__(
        self, plotly_name="namelengthsrc", parent_name="bar.hoverlabel", **kwargs
    ):
        # Default the edit type unless the caller overrides it explicitly.
        kwargs.setdefault("edit_type", "none")
        super(NamelengthsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/bar/hoverlabel/_namelengthsrc.py | Python | mit | 432 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'newmember.ui'
#
# Created: Sat Mar 29 19:36:48 2014
# by: PyQt5 UI code generator 5.2.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_NewMember(object):
    """Auto-generated (pyuic5) UI scaffolding for the 'New Member' dialog.

    Builds a form of label/input rows (name, gender, birth date, address,
    contact info, department, username) plus an OK/Cancel button box.
    Do not hand-edit widget logic here; regenerate from newmember.ui instead.
    """
    def setupUi(self, NewMember):
        """Create all widgets, lay them out, wire signals and set tab order."""
        NewMember.setObjectName("NewMember")
        NewMember.resize(370, 540)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(NewMember)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        # Form rows: each field is a QLabel in the label role plus an input
        # widget in the field role of the same QFormLayout row.
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setObjectName("formLayout")
        self.efternamnLabel = QtWidgets.QLabel(NewMember)
        self.efternamnLabel.setObjectName("efternamnLabel")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.efternamnLabel)
        self.surName_fld = QtWidgets.QLineEdit(NewMember)
        self.surName_fld.setObjectName("surName_fld")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.surName_fld)
        self.allaFRnamnLabel = QtWidgets.QLabel(NewMember)
        self.allaFRnamnLabel.setObjectName("allaFRnamnLabel")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.allaFRnamnLabel)
        self.givenNames_fld = QtWidgets.QLineEdit(NewMember)
        self.givenNames_fld.setObjectName("givenNames_fld")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.givenNames_fld)
        self.tilltalsnamnLabel = QtWidgets.QLabel(NewMember)
        self.tilltalsnamnLabel.setObjectName("tilltalsnamnLabel")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.tilltalsnamnLabel)
        self.preferredName_fld = QtWidgets.QLineEdit(NewMember)
        self.preferredName_fld.setObjectName("preferredName_fld")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.preferredName_fld)
        self.kNLabel = QtWidgets.QLabel(NewMember)
        self.kNLabel.setObjectName("kNLabel")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.kNLabel)
        self.gender_fld = QtWidgets.QComboBox(NewMember)
        self.gender_fld.setObjectName("gender_fld")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.gender_fld)
        self.birthDateLabel = QtWidgets.QLabel(NewMember)
        self.birthDateLabel.setObjectName("birthDateLabel")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.birthDateLabel)
        self.birthDate_fld = QtWidgets.QDateEdit(NewMember)
        self.birthDate_fld.setObjectName("birthDate_fld")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.birthDate_fld)
        self.adressLabel = QtWidgets.QLabel(NewMember)
        self.adressLabel.setObjectName("adressLabel")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.adressLabel)
        self.streetAddress_fld = QtWidgets.QLineEdit(NewMember)
        self.streetAddress_fld.setObjectName("streetAddress_fld")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.streetAddress_fld)
        self.postnummerLabel = QtWidgets.QLabel(NewMember)
        self.postnummerLabel.setObjectName("postnummerLabel")
        self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.postnummerLabel)
        self.postalCode_fld = QtWidgets.QLineEdit(NewMember)
        self.postalCode_fld.setObjectName("postalCode_fld")
        self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.postalCode_fld)
        self.postanstaltLabel = QtWidgets.QLabel(NewMember)
        self.postanstaltLabel.setObjectName("postanstaltLabel")
        self.formLayout.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.postanstaltLabel)
        self.city_fld = QtWidgets.QLineEdit(NewMember)
        self.city_fld.setObjectName("city_fld")
        self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.city_fld)
        self.telefonLabel = QtWidgets.QLabel(NewMember)
        self.telefonLabel.setObjectName("telefonLabel")
        self.formLayout.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.telefonLabel)
        self.phone_fld = QtWidgets.QLineEdit(NewMember)
        self.phone_fld.setObjectName("phone_fld")
        self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.phone_fld)
        self.emailLabel = QtWidgets.QLabel(NewMember)
        self.emailLabel.setObjectName("emailLabel")
        self.formLayout.setWidget(9, QtWidgets.QFormLayout.LabelRole, self.emailLabel)
        self.email_fld = QtWidgets.QLineEdit(NewMember)
        self.email_fld.setObjectName("email_fld")
        self.formLayout.setWidget(9, QtWidgets.QFormLayout.FieldRole, self.email_fld)
        self.avdelningLabel = QtWidgets.QLabel(NewMember)
        self.avdelningLabel.setObjectName("avdelningLabel")
        self.formLayout.setWidget(10, QtWidgets.QFormLayout.LabelRole, self.avdelningLabel)
        self.department_comboBox = QtWidgets.QComboBox(NewMember)
        self.department_comboBox.setEditable(True)
        self.department_comboBox.setObjectName("department_comboBox")
        self.formLayout.setWidget(10, QtWidgets.QFormLayout.FieldRole, self.department_comboBox)
        self.anvNdarnamnLabel = QtWidgets.QLabel(NewMember)
        self.anvNdarnamnLabel.setObjectName("anvNdarnamnLabel")
        self.formLayout.setWidget(12, QtWidgets.QFormLayout.LabelRole, self.anvNdarnamnLabel)
        self.username_fld = QtWidgets.QLineEdit(NewMember)
        self.username_fld.setObjectName("username_fld")
        self.formLayout.setWidget(12, QtWidgets.QFormLayout.FieldRole, self.username_fld)
        self.gRTillPhuxLabel = QtWidgets.QLabel(NewMember)
        self.gRTillPhuxLabel.setObjectName("gRTillPhuxLabel")
        self.formLayout.setWidget(13, QtWidgets.QFormLayout.LabelRole, self.gRTillPhuxLabel)
        self.makePhux_CheckBox = QtWidgets.QCheckBox(NewMember)
        self.makePhux_CheckBox.setObjectName("makePhux_CheckBox")
        self.formLayout.setWidget(13, QtWidgets.QFormLayout.FieldRole, self.makePhux_CheckBox)
        self.studentId_label = QtWidgets.QLabel(NewMember)
        self.studentId_label.setObjectName("studentId_label")
        self.formLayout.setWidget(11, QtWidgets.QFormLayout.LabelRole, self.studentId_label)
        self.studentId_fld = QtWidgets.QLineEdit(NewMember)
        self.studentId_fld.setObjectName("studentId_fld")
        self.formLayout.setWidget(11, QtWidgets.QFormLayout.FieldRole, self.studentId_fld)
        self.verticalLayout.addLayout(self.formLayout)
        # OK/Cancel buttons below the form.
        self.buttonBox = QtWidgets.QDialogButtonBox(NewMember)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        self.retranslateUi(NewMember)
        self.buttonBox.accepted.connect(NewMember.accept)
        self.buttonBox.rejected.connect(NewMember.reject)
        QtCore.QMetaObject.connectSlotsByName(NewMember)
        # Explicit keyboard tab order, top-to-bottom through the form.
        NewMember.setTabOrder(self.surName_fld, self.givenNames_fld)
        NewMember.setTabOrder(self.givenNames_fld, self.preferredName_fld)
        NewMember.setTabOrder(self.preferredName_fld, self.gender_fld)
        NewMember.setTabOrder(self.gender_fld, self.birthDate_fld)
        NewMember.setTabOrder(self.birthDate_fld, self.streetAddress_fld)
        NewMember.setTabOrder(self.streetAddress_fld, self.postalCode_fld)
        NewMember.setTabOrder(self.postalCode_fld, self.city_fld)
        NewMember.setTabOrder(self.city_fld, self.phone_fld)
        NewMember.setTabOrder(self.phone_fld, self.email_fld)
        NewMember.setTabOrder(self.email_fld, self.department_comboBox)
        NewMember.setTabOrder(self.department_comboBox, self.studentId_fld)
        NewMember.setTabOrder(self.studentId_fld, self.username_fld)
        NewMember.setTabOrder(self.username_fld, self.makePhux_CheckBox)
        NewMember.setTabOrder(self.makePhux_CheckBox, self.buttonBox)
    def retranslateUi(self, NewMember):
        """Set all user-visible strings (Swedish labels) via the translator."""
        _translate = QtCore.QCoreApplication.translate
        NewMember.setWindowTitle(_translate("NewMember", "Dialog"))
        self.efternamnLabel.setText(_translate("NewMember", "Efternamn"))
        self.allaFRnamnLabel.setText(_translate("NewMember", "Alla förnamn"))
        self.tilltalsnamnLabel.setText(_translate("NewMember", "Tilltalsnamn"))
        self.kNLabel.setText(_translate("NewMember", "Kön"))
        self.birthDateLabel.setText(_translate("NewMember", "Födelsedatum"))
        self.adressLabel.setText(_translate("NewMember", "Gatuadress"))
        self.postnummerLabel.setText(_translate("NewMember", "Postnummer"))
        self.postanstaltLabel.setText(_translate("NewMember", "Postanstalt"))
        self.telefonLabel.setText(_translate("NewMember", "Telefon"))
        self.emailLabel.setText(_translate("NewMember", "Email"))
        self.avdelningLabel.setText(_translate("NewMember", "Avdelning"))
        self.anvNdarnamnLabel.setText(_translate("NewMember", "Användarnamn"))
        self.gRTillPhuxLabel.setText(_translate("NewMember", "Gör till Phux"))
        self.studentId_label.setText(_translate("NewMember", "Studienummer"))
| Teknologforeningen/svaksvat | ui/newmember.py | Python | mit | 9,586 |
# Copyright (c) 2018, Ansible Project
from ansiblelint.rules import AnsibleLintRule
class MetaChangeFromDefaultRule(AnsibleLintRule):
    """Flag meta/main.yml galaxy_info fields still set to boilerplate defaults."""

    id = '703'
    shortdesc = 'meta/main.yml default values should be changed'
    field_defaults = [
        ('author', 'your name'),
        ('description', 'your description'),
        ('company', 'your company (optional)'),
        ('license', 'license (GPLv2, CC-BY, etc)'),
        ('license', 'license (GPL-2.0-or-later, MIT, etc)'),
    ]
    description = (
        'meta/main.yml default values should be changed for: ``{}``'.format(
            ', '.join(f[0] for f in field_defaults)
        )
    )
    severity = 'HIGH'
    tags = ['metadata']
    version_added = 'v4.0.0'

    def matchplay(self, file, data):
        """Return one match per galaxy_info field left at its default value."""
        if file['type'] != 'meta':
            return False
        galaxy_info = data.get('galaxy_info', None)
        if not galaxy_info:
            return False
        # Every default above is a non-empty string, so equality alone
        # implies the value is also truthy.
        return [
            ({'meta/main.yml': data},
             'Should change default metadata: %s' % field)
            for field, default in self.field_defaults
            if galaxy_info.get(field, None) == default
        ]
| willthames/ansible-lint | lib/ansiblelint/rules/MetaChangeFromDefaultRule.py | Python | mit | 1,254 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Error(Model):
    """Error body deserialized from API error responses.

    :param code: Error code.
    :type code: str
    :param message: Error message.
    :type message: str
    :param target: Target of the error.
    :type target: str
    :param details: Additional error details.
    :type details: list[~azure.mgmt.network.v2016_09_01.models.ErrorDetails]
    :param inner_error: Inner error content.
    :type inner_error: str
    """

    # Maps Python attribute names to wire-format keys and types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetails]'},
        'inner_error': {'key': 'innerError', 'type': 'str'},
    }

    def __init__(self, *, code: str=None, message: str=None, target: str=None, details=None, inner_error: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Plain data holders; (de)serialization is driven by _attribute_map.
        self.inner_error = inner_error
        self.details = details
        self.target = target
        self.message = message
        self.code = code
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/error_py3.py | Python | mit | 1,474 |
from distutils.core import setup, Command
from pyvx import __version__
import sys
class PyTestCommand(Command):
    """``python setup.py test`` command that simply delegates to pytest."""

    user_options = []

    def initialize_options(self):
        """Nothing to initialize."""

    def finalize_options(self):
        """Nothing to finalize."""

    def run(self):
        """Run the pytest suite and exit with its return code."""
        import pytest
        sys.exit(pytest.main())
# Package metadata and build configuration for PyVX.
setup(
    name='PyVX',
    description='OpenVX python support',
    long_description='''
PyVX is a set of python bindings for `OpenVX`_. `OpenVX`_ is a standard for
expressing computer vision processing algorithms as a graph of function nodes.
This graph is verified once and can then be processed (executed) multiple
times. PyVX allows these graphs to be constructed and interacted with from
python. It also supports the use of multiple `OpenVX`_ backends, both C and
python backends. It also used to contain a code generating `OpenVX`_ backend
written it python, but it will be moved to a package of it's own (curently
it lives on the try1 branch of pyvx).
Further details are provided in the `Documentation`_
.. _`OpenVX`: https://www.khronos.org/openvx
.. _`Documentation`: https://pyvx.readthedocs.org
''',
    version=__version__,
    packages=['pyvx', 'pyvx.backend'],
    # Ship the C definition headers used by the cffi-based bindings.
    package_data={'pyvx': ['cdefs/vx_api.h',
                           'cdefs/vx.h',
                           'cdefs/vx_kernels.h',
                           'cdefs/vx_nodes.h',
                           'cdefs/vx_types.h',
                           'cdefs/vx_vendors.h',
                           ]},
    zip_safe=False,
    url='http://pyvx.readthedocs.org',
    author='Hakan Ardo',
    author_email='pyvx@googlegroups.com',
    license='MIT',
    install_requires=['cffi'],
    cmdclass={'test': PyTestCommand},  # `python setup.py test` runs pytest
    tests_require=['pytest'],
)
| hakanardo/pyvx | setup.py | Python | mit | 1,768 |
from __future__ import annotations
class Node:
    """A singly linked list node holding arbitrary data."""

    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        parts = []
        node = self
        while node is not None:
            parts.append(str(node.data))
            node = node.next
        return "->".join(parts)
def make_linked_list(elements_list: list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List.
    >>> make_linked_list([])
    Traceback (most recent call last):
        ...
    Exception: The Elements List is empty
    >>> make_linked_list([7])
    7
    >>> make_linked_list(['abc'])
    abc
    >>> make_linked_list([7, 25])
    7->25
    """
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = Node(elements_list[0])
    tail = head
    for element in elements_list[1:]:
        tail.next = Node(element)
        tail = tail.next
    return head
def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order

    Iterative implementation: the recursive original raised RecursionError
    once the list exceeded Python's recursion limit (~1000 nodes); collecting
    values and printing them reversed produces identical output at any length.

    >>> print_reverse([])
    >>> linked_list = make_linked_list([69, 88, 73])
    >>> print_reverse(linked_list)
    73
    88
    69
    """
    values = []
    node = head_node
    # Same guard as the recursive version: stop at None or any non-Node.
    while node is not None and isinstance(node, Node):
        values.append(node.data)
        node = node.next
    for value in reversed(values):
        print(value)
def main():
    """Run the doctests, then demo building and reverse-printing a list."""
    from doctest import testmod

    testmod()
    demo_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(demo_list)
    print("Elements in Reverse:")
    print_reverse(demo_list)
# Run the doctests and demo only when executed as a script.
if __name__ == "__main__":
    main()
| TheAlgorithms/Python | data_structures/linked_list/print_reverse.py | Python | mit | 1,768 |
"""
The algorithm finds the pattern in given text using following rule.
The bad-character rule considers the mismatched character in Text.
The next occurrence of that character to the left in Pattern is found,
If the mismatched character occurs to the left in Pattern,
a shift is proposed that aligns text block and pattern.
If the mismatched character does not occur to the left in Pattern,
a shift is proposed that moves the entirety of Pattern past
the point of mismatch in the text.
If there is no mismatch, the pattern matches the text block.
Time Complexity : O(n/m)
n=length of main string
m=length of pattern string
"""
from __future__ import annotations
class BoyerMooreSearch:
    """Find every start index where *pattern* occurs in *text*.

    Implements the comparison helpers of the Boyer-Moore bad-character rule;
    every alignment of the pattern is checked in turn.
    """

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """finds the index of char in pattern in reverse order

        Parameters :
            char (chr): character to be searched

        Returns :
            i (int): index of char from last in pattern
            -1 (int): if char is not found in pattern
        """
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, currentPos: int) -> int:
        """
        find the index of mis-matched character in text when compared with pattern
        from last

        Parameters :
            currentPos (int): current index position of text

        Returns :
            i (int): index of mismatched char from last in text
            -1 (int): if there is no mismatch between pattern and text block
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[currentPos + i]:
                return currentPos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return the list of positions where the pattern matches the text.

        Bug fix: the original computed a bad-character shift and assigned it
        to the loop variable ``i``, but rebinding a ``for`` target does not
        skip iterations in Python, so the shift never took effect.  The dead,
        misleading assignment is removed; the reported positions are
        unchanged.
        """
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            if self.mismatch_in_text(i) == -1:
                positions.append(i)
        return positions
# Demo: report every position where `pattern` occurs inside `text`.
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
| TheAlgorithms/Python | strings/boyer_moore_search.py | Python | mit | 2,738 |
__all__ = [
'json_schema',
]
import lollipop.types as lt
import lollipop.validators as lv
from lollipop.utils import identity
from collections import OrderedDict
from .compat import iteritems
def find_validators(schema, validator_type):
return [validator
for validator in schema.validators
if isinstance(validator, validator_type)]
def json_schema(schema):
    """Convert Lollipop schema to JSON schema.

    Recursively walks *schema* and returns the equivalent JSON Schema
    fragment as an ``OrderedDict``.  ``AnyOf`` validators short-circuit to
    an ``enum`` of the intersection of their choices; ``NoneOf`` validators
    become a ``not``/``enum`` of the union of their values.
    """
    js = OrderedDict()
    if schema.name:
        js['title'] = schema.name
    if schema.description:
        js['description'] = schema.description

    any_of_validators = find_validators(schema, lv.AnyOf)
    if any_of_validators:
        # Allowed values are the intersection of all AnyOf constraints.
        choices = set(any_of_validators[0].choices)
        for validator in any_of_validators[1:]:
            choices = choices.intersection(set(validator.choices))
        if not choices:
            raise ValueError('AnyOf constraints choices does not allow any values')
        js['enum'] = [schema.dump(choice) for choice in choices]
        return js

    none_of_validators = find_validators(schema, lv.NoneOf)
    if none_of_validators:
        # Forbidden values are the union of all NoneOf constraints.
        choices = set(none_of_validators[0].values)
        for validator in none_of_validators[1:]:
            choices = choices.union(set(validator.values))
        if choices:
            js['not'] = {'enum': [schema.dump(choice) for choice in choices]}

    if isinstance(schema, lt.Any):
        pass
    elif isinstance(schema, lt.String):
        js['type'] = 'string'
        length_validators = find_validators(schema, lv.Length)
        if length_validators:
            # To satisfy every validator, the lower bound is the largest
            # minimum and the upper bound is the smallest maximum.
            if any(v.min for v in length_validators) or \
                    any(v.exact for v in length_validators):
                js['minLength'] = max(v.exact or v.min for v in length_validators)
            if any(v.max for v in length_validators) or \
                    any(v.exact for v in length_validators):
                js['maxLength'] = min(v.exact or v.max for v in length_validators)
        regexp_validators = find_validators(schema, lv.Regexp)
        if regexp_validators:
            js['pattern'] = regexp_validators[0].regexp.pattern
    elif isinstance(schema, lt.Number):
        if isinstance(schema, lt.Integer):
            js['type'] = 'integer'
        else:
            js['type'] = 'number'
        range_validators = find_validators(schema, lv.Range)
        if range_validators:
            if any(v.min for v in range_validators):
                js['minimum'] = max(v.min for v in range_validators if v.min)
            if any(v.max for v in range_validators):
                js['maximum'] = min(v.max for v in range_validators if v.max)
    elif isinstance(schema, lt.Boolean):
        js['type'] = 'boolean'
    elif isinstance(schema, lt.List):
        js['type'] = 'array'
        js['items'] = json_schema(schema.item_type)
        length_validators = find_validators(schema, lv.Length)
        if length_validators:
            # Bug fix: minItems previously used min(); mirroring minLength
            # above, the binding lower bound across validators is the max.
            if any(v.min for v in length_validators) or \
                    any(v.exact for v in length_validators):
                js['minItems'] = max(v.exact or v.min for v in length_validators)
            if any(v.max for v in length_validators) or \
                    any(v.exact for v in length_validators):
                js['maxItems'] = min(v.exact or v.max for v in length_validators)
        unique_validators = find_validators(schema, lv.Unique)
        if unique_validators and any(v.key is identity for v in unique_validators):
            js['uniqueItems'] = True
    elif isinstance(schema, lt.Tuple):
        js['type'] = 'array'
        js['items'] = [json_schema(item_type) for item_type in schema.item_types]
    elif isinstance(schema, lt.Object):
        js['type'] = 'object'
        js['properties'] = OrderedDict(
            (k, json_schema(v.field_type))
            for k, v in iteritems(schema.fields)
        )
        required = [
            k
            for k, v in iteritems(schema.fields)
            if not isinstance(v.field_type, lt.Optional)
        ]
        if required:
            js['required'] = required
        if schema.allow_extra_fields in [True, False]:
            js['additionalProperties'] = schema.allow_extra_fields
        elif isinstance(schema.allow_extra_fields, lt.Field):
            field_type = schema.allow_extra_fields.field_type
            if isinstance(field_type, lt.Any):
                js['additionalProperties'] = True
            else:
                js['additionalProperties'] = json_schema(field_type)
    elif isinstance(schema, lt.Dict):
        js['type'] = 'object'
        fixed_properties = schema.value_types \
            if hasattr(schema.value_types, 'keys') else {}
        properties = OrderedDict(
            (k, json_schema(v))
            for k, v in iteritems(fixed_properties)
        )
        if properties:
            js['properties'] = properties
        required = [
            k
            for k, v in iteritems(fixed_properties)
            if not isinstance(v, lt.Optional)
        ]
        if required:
            js['required'] = required
        if hasattr(schema.value_types, 'default'):
            js['additionalProperties'] = json_schema(schema.value_types.default)
    elif isinstance(schema, lt.Constant):
        js['const'] = schema.value
    elif isinstance(schema, lt.Optional):
        js.update(json_schema(schema.inner_type))
        default = schema.load_default()
        if default:
            js['default'] = schema.inner_type.dump(default)
    elif hasattr(schema, 'inner_type'):
        js.update(json_schema(schema.inner_type))

    return js
| akscram/lollipop-jsonschema | lollipop_jsonschema/jsonschema.py | Python | mit | 5,661 |
# Complement lookup for IUPAC nucleotide codes: maps each (possibly
# ambiguous) base symbol to its Watson-Crick complement, e.g. A<->T,
# R (A/G) <-> Y (C/T).  X and N (any base) are self-complementary.
compl_iupacdict = {'A':'T',
                   'C':'G',
                   'G':'C',
                   'T':'A',
                   'M':'K',
                   'R':'Y',
                   'W':'W',
                   'S':'S',
                   'Y':'R',
                   'K':'M',
                   'V':'B',
                   'H':'D',
                   'D':'H',
                   'B':'V',
                   'X':'X',
                   'N':'N'}
def compliment(seq, compl_iupacdict):
    """Return the complement of *seq*, mapping each symbol through
    *compl_iupacdict*.

    Raises KeyError for symbols missing from the mapping (same as before).
    Uses str.join instead of repeated string concatenation, which was
    quadratic in the length of *seq*.
    """
    return "".join(compl_iupacdict[letter] for letter in seq)
def reverse(text):
    """Return *text* in reversed order."""
    reversed_text = text[::-1]
    return reversed_text
def revcomp(seq):
    """Return the reverse complement of *seq* using the module-level
    IUPAC complement table."""
    return reverse(compliment(seq, compl_iupacdict))
#=========================================================================
def iupacList_2_regExList(motifList):
    """Replace, in place, each IUPAC motif string in *motifList* with a
    two-element list ``[original_motif, regex_translation]``.

    Uses enumerate instead of the original manual while/index loop.
    """
    for i, motif in enumerate(motifList):
        motifList[i] = [motif, iupac2regex(motif)]
def iupac2regex(motif):
    """Translate an IUPAC motif into an equivalent regular-expression
    string, expanding each ambiguity code to a character class.

    Raises KeyError for symbols not in the IUPAC alphabet (same as before).
    Uses str.join instead of quadratic string concatenation.
    """
    iupacdict = {'A':'A',
                 'C':'C',
                 'G':'G',
                 'T':'T',
                 'M':'[AC]',
                 'R':'[AG]',
                 'W':'[AT]',
                 'S':'[CG]',
                 'Y':'[CT]',
                 'K':'[GT]',
                 'V':'[ACG]',
                 'H':'[ACT]',
                 'D':'[AGT]',
                 'B':'[CGT]',
                 'X':'[ACGT]',
                 'N':'[ACGT]'}
    return "".join(iupacdict[letter] for letter in motif)
import lief
import sys
import os
import traceback
import configparser
import struct
from collections import OrderedDict
# Opcodes (x86/x86-64 machine-code byte sequences emitted by dololi)
X86_PUSH_BYTE = 0x6a          # push imm8
X86_32_PUSH_DWORD = 0x68      # push imm32
x86_32_CALL = [0xff, 0x15]    # call [disp32] (indirect through memory)
X86_64_CALL = [0xff, 0xd0]    # call rax
X86_64_MOV_R9 = [0x49, 0xb9]  # movabs r9, imm64
X86_64_MOV_R8 = [0x49, 0xb8]  # movabs r8, imm64
X86_64_MOV_RDX = [0x48, 0xba] # movabs rdx, imm64
X86_64_MOV_RCX = [0x48, 0xb9] # movabs rcx, imm64
X86_64_MOV_RAX = [0x48, 0xc7, 0xc0]  # mov rax, imm32 (sign-extended)
def get_config(conf_file="dololi.conf"):
    """Read *conf_file* and return the parsed ConfigParser.

    Raises FileNotFoundError when the file does not exist.  The previous
    version used ``assert`` for this check, which is silently stripped
    when Python runs with -O.
    """
    if not os.path.isfile(conf_file):
        raise FileNotFoundError("Config file not found: %s" % conf_file)
    conf = configparser.ConfigParser()
    conf.read(conf_file)
    return conf
def is_dll(pe_file):
    """Return True if the PE header carries the DLL characteristic flag."""
    dll_flag = lief.PE.HEADER_CHARACTERISTICS.DLL
    return pe_file.header.has_characteristic(dll_flag)
def get_pe_type(arch):
    """Map the architecture string "32"/"64" to the lief PE_TYPE constant."""
    assert arch == "32" or arch == "64"
    return lief.PE.PE_TYPE.PE32 if arch == "32" else lief.PE.PE_TYPE.PE32_PLUS
def is_64_bits(pe_type):
    """Return True for PE32+ (64-bit) binaries."""
    pe32_plus = lief.PE.PE_TYPE.PE32_PLUS
    return pe_type == pe32_plus
def get_reg_by_argn(argn):
    """Return the x86-64 register name used for argument slot *argn*
    ("1".."5"); raises KeyError for any other value."""
    registers = {
        "1": "r9",
        "2": "r8",
        "3": "rdx",
        "4": "rcx",
        "5": "rax",
    }
    return registers[argn]
def get_opcodes_by_reg(reg):
    """Return the movabs/mov opcode prefix bytes for loading *reg*;
    raises KeyError for an unknown register name."""
    opcode_table = {
        "r9": X86_64_MOV_R9,
        "r8": X86_64_MOV_R8,
        "rdx": X86_64_MOV_RDX,
        "rcx": X86_64_MOV_RCX,
        "rax": X86_64_MOV_RAX,
    }
    return opcode_table[reg]
def dololi(arch, conf, out_file_name):
    """Build a PE loader executable from the CODE/DATA sections of *conf*.

    *arch* is "32" or "64"; the generated binary pushes/loads the configured
    arguments, embeds string data in .data, and calls the configured DLL
    exports, then is written to *out_file_name*.
    """
    code_rva = int(conf["DEFAULT"].get("CODE_RVA"))
    data_rva = int(conf["DEFAULT"].get("DATA_RVA"))
    pe_type = get_pe_type(arch)
    is_64bits = is_64_bits(pe_type)
    pe_loader = lief.PE.Binary("dololi", pe_type)
    # code_cnt: opcode byte list; reg_size/pack_fmt: pointer width and
    # struct format for the target architecture.
    code_cnt,\
    reg_size,\
    pack_fmt = ([], 8, "<Q") if is_64bits else ([], 4, "<I")
    data_cnt = ""
    data_off = 0
    # NOTE(review): reg_cnt cycles 1..4 (r9, r8, rdx, rcx via
    # get_reg_by_argn) -- confirm this matches the intended x64 calling
    # convention argument order.
    reg_cnt = 1
    func_num = 0
    funcs = OrderedDict()
    # Parse CODE and DATA contents from config file
    for k, v in conf["CODE"].items():
        if k.endswith("_byte"):
            value = int(v)
            value = struct.pack("<B", value)
            code_cnt.extend([X86_PUSH_BYTE, value[0]])
        elif k.endswith("_word"):
            value = int(v)
            value = struct.pack("<H", value)
            code_cnt.extend([X86_32_PUSH_DWORD, value[0], value[1], 0x0, 0x0])
        elif k.endswith("_dword") or k.endswith("_qword"):
            reg_size, pack_fmt = {"dword":(4, "<I"), "qword":(8, "<Q")}[k.split('_')[-1]]
            if v.lower().endswith("_data"):
                # Value references an entry in the [DATA] section: append the
                # string to .data and emit its absolute address instead.
                # NOTE(review): rstrip strips a character *set*, not the
                # "_data" suffix -- keys ending in d/a/t/_ get over-stripped;
                # consider str.removesuffix / slicing.
                data_key = v.lower().rstrip("_data")
                assert "str" in data_key.lower(), "Data should contain arrays or strings"
                data_value = conf["DATA"][data_key] + '\0'
                data_cnt += data_value
                addr = struct.pack(pack_fmt, pe_loader.optional_header.imagebase + data_rva + data_off)
                if is_64bits:
                    code_cnt.extend(get_opcodes_by_reg(get_reg_by_argn(str(reg_cnt))))
                    reg_cnt = (reg_cnt % 4) + 1
                    if reg_size < 8:
                        addr += bytes("\x00" * (8 - reg_size), 'ascii')
                    code_cnt.extend(list(addr))
                else:
                    code_cnt.extend([X86_32_PUSH_DWORD])
                    code_cnt.extend(list(addr))
                data_off += len(data_value)
            else:
                # Immediate integer argument.
                value = int(v)
                value = struct.pack(pack_fmt, value)
                if is_64bits:
                    code_cnt.extend(get_opcodes_by_reg(get_reg_by_argn(str(reg_cnt))))
                    reg_cnt = (reg_cnt % 4) + 1
                    if reg_size < 8:
                        value += [0x0] * (8 - reg_size)
                    code_cnt.extend(list(value))
                else:
                    code_cnt.extend([X86_32_PUSH_DWORD])
                    code_cnt.extend(list(value))
        elif k.endswith("_func"):
            # Import a DLL export and emit a call; the 4 placeholder entries
            # are patched with the real address after import prediction.
            assert len(v.split(';')) == 2, "DLL name;Export function name"
            dll_name, export_name = v.strip("\r\n").split(';')
            dll = pe_loader.add_library(dll_name)
            dll.add_entry(export_name)
            func_num_str = "".join(["FUNC_", str(func_num)])
            if is_64bits:
                code_cnt.extend(get_opcodes_by_reg(get_reg_by_argn("5")))
                reg_cnt = (reg_cnt % 4) + 1
            else:
                code_cnt.extend(x86_32_CALL)
            for i in range(4):
                code_cnt.append(func_num_str)
            if is_64bits:
                code_cnt.extend(X86_64_CALL)
            if dll_name not in funcs:
                funcs[dll_name] = set()
            funcs[dll_name].add((export_name, func_num_str))
            func_num += 1
        else:
            # code_rva and data_rva from DEFAULT section
            pass
    # Add function addresses
    for k, v in funcs.items():
        for f in v:
            func_addr = pe_loader.predict_function_rva(k, f[0])
            offset = code_rva if func_num == 1 else 0 # dirty hack to adjust function address
            addr = struct.pack(pack_fmt, pe_loader.optional_header.imagebase + data_rva - offset + func_addr)
            # TO DO, number of bytes should be adjusted automatically
            for i in range(4):
                code_cnt[code_cnt.index(f[1])] = addr[i]
    # set .text section fields
    text_sect = lief.PE.Section(".text")
    text_sect.virtual_address = code_rva
    text_sect.content = code_cnt
    text_sect = pe_loader.add_section(text_sect, lief.PE.SECTION_TYPES.TEXT)
    # set .data section fields
    data_sect = lief.PE.Section(".data")
    data_sect.virtual_address = data_rva
    data_sect.content = list(map(ord, data_cnt))
    data_sect = pe_loader.add_section(data_sect, lief.PE.SECTION_TYPES.DATA)
    pe_loader.optional_header.addressof_entrypoint = text_sect.virtual_address
    builder = lief.PE.Builder(pe_loader)
    builder.build_imports(True)
    builder.build()
    builder.write(out_file_name)
    print("{0} was successfully created!".format(out_file_name))
if __name__ == "__main__":
    # CLI: dololi.py <32|64> [output file name]; defaults to dololi.exe.
    assert len(sys.argv) > 1, "Usage: {0} <32|64> [Output file name]".format(sys.argv[0])
    if sys.argv[1] not in ("32", "64"):
        print("Use 32 to build x86_32 bit or 64 for x86_64 bit loader")
        sys.exit(1)
    dololi(sys.argv[1], get_config(), "dololi.exe" if len(sys.argv) < 3 else sys.argv[2])
| ner0x652/RElief | dololi/dololi.py | Python | mit | 6,472 |
import argparse
import sys
import pytest
from pyscaffold import extensions
from pyscaffold.exceptions import ErrorLoadingExtension
from .extensions import __name__ as test_extensions_pkg
from .extensions.helpers import make_extension
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import EntryPoint # pragma: no cover
else:
from importlib_metadata import EntryPoint # pragma: no cover
def test_extension():
    """The --my-extension flag should register the extension instance in opts."""
    cli = argparse.ArgumentParser()
    ext = make_extension("MyExtension")
    ext.augment_cli(cli)
    parsed = vars(cli.parse_args(["--my-extension"]))
    assert parsed["extensions"] == [ext]
def test_extension_append():
    """A second extension must append to, not replace, the defaults list."""
    cli = argparse.ArgumentParser()
    first = make_extension("MyExtension1")
    second = make_extension("MyExtension2")
    cli.set_defaults(extensions=[first])
    second.augment_cli(cli)
    parsed = vars(cli.parse_args(["--my-extension2"]))
    assert parsed["extensions"] == [first, second]
def test_include():
    """extensions.include should register every given extension when the flag is used."""
    cli = argparse.ArgumentParser()
    exts = [make_extension(f"MyExtension{i}") for i in range(7)]
    cli.add_argument("--opt", nargs=0, action=extensions.include(*exts))
    parsed = vars(cli.parse_args(["--opt"]))
    assert parsed["extensions"] == exts
def test_store_with():
    """store_with should record the option value and register the extensions."""
    cli = argparse.ArgumentParser()
    exts = [make_extension(f"MyExtension{i}") for i in range(7)]
    cli.add_argument("--opt", action=extensions.store_with(*exts))
    parsed = vars(cli.parse_args(["--opt", "42"]))
    assert parsed["extensions"] == exts
    assert parsed["opt"] == "42"
def test_store_with_type():
    """store_with should respect the argparse type= converter for the value."""
    cli = argparse.ArgumentParser()
    exts = [make_extension(f"MyExtension{i}") for i in range(7)]
    cli.add_argument("--opt", type=int, action=extensions.store_with(*exts))
    parsed = vars(cli.parse_args(["--opt", "42"]))
    assert parsed["extensions"] == exts
    assert parsed["opt"] == 42
def test_load_from_entry_point__error():
    """Loading an entry point whose target module does not exist should raise."""
    # EntryPoint(name, value, group) -- the target module is intentionally bogus.
    missing = EntryPoint("fake", "pyscaffoldext.SOOOOO___fake___:Fake", "pyscaffold.cli")
    with pytest.raises(ErrorLoadingExtension):
        extensions.load_from_entry_point(missing)
def test_load_from_entry_point__old_api():
    """An extension written against the old v3 API should fail with a meaningful error."""
    target = f"{test_extensions_pkg}.incompatible_v3_api_fake_extension:FakeExtension"
    legacy = EntryPoint("fake", target, "pyscaffold.cli")
    with pytest.raises(ErrorLoadingExtension):
        extensions.load_from_entry_point(legacy)
def test_iterate_entry_points():
    """iterate_entry_points should yield (at least) the built-in extensions."""
    entry_iter = extensions.iterate_entry_points()
    assert hasattr(entry_iter, "__iter__")
    names = [entry.name for entry in list(entry_iter)]
    for builtin in ("cirrus", "pre_commit", "no_skeleton", "namespace", "venv"):
        assert builtin in names
def test_list_from_entry_points():
    """list_from_entry_points returns Extension objects and honors `filtering`."""
    all_exts = extensions.list_from_entry_points()
    total = len(all_exts)
    assert all(isinstance(ext, extensions.Extension) for ext in all_exts)
    names = [ext.name for ext in all_exts]
    for builtin in ("cirrus", "pre_commit", "no_skeleton", "namespace", "venv"):
        assert builtin in names
    # A filtering predicate avoids loading extensions that are not needed.
    filtered = extensions.list_from_entry_points(filtering=lambda e: e.name != "cirrus")
    filtered_names = [ext.name for ext in filtered]
    assert len(filtered) == total - 1
    assert "cirrus" not in filtered_names
| blue-yonder/pyscaffold | tests/test_extensions.py | Python | mit | 3,932 |
# Production settings: inherits everything from the standard local settings
# module and overrides the deployment-specific pieces (Heroku + S3 + SMTP).
from bukkakegram.settings import *
import dj_database_url

# Database connection is taken from the environment (Heroku-style DATABASE_URL).
DATABASES['default'] = dj_database_url.config()
# Trust the proxy's forwarded-proto header so request.is_secure() works on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# NOTE(review): '*' makes the '.herokuapp.com' entry redundant and accepts any
# Host header -- confirm this is intended for production.
ALLOWED_HOSTS = ['.herokuapp.com', '*',]
# `config` is expected to come from the wildcard import above -- TODO confirm
# (looks like python-decouple's config()).
DEBUG = config('DEBUG', cast=bool)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Static files deploy in S3 AWS
AWS_ACCESS_KEY_ID = config('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = config('AWS_STORAGE_BUCKET_NAME')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
# Cache S3 objects for one day.
AWS_S3_OBJECT_PARAMETERS = {
    'CacheControl': 'max-age=86400',
}
# AWS_LOCATION = 'static'
# STATIC_URL = 'https://%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)
# STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# media upload files on S3 AWS
DEFAULT_FILE_STORAGE = 'bukkakegram.storage_backends.MediaStorage'
# force overwrite speed up AWS
THUMBNAIL_FORCE_OVERWRITE = True
# RUN MAILER
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
EMAIL_PORT = config('EMAIL_PORTS')  # NOTE(review): env var name is plural ('EMAIL_PORTS') -- confirm it is not a typo
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL')
| delitamakanda/BukkakeGram | bukkakegram/settings_production.py | Python | mit | 1,341 |
#! /usr/bin/python3
# -*- coding:utf-8 -*-
# lagos
# by antoine@2ohm.fr
"""
Play a little with imap.
"""
import imaplib
import smtplib
from lib.remail import remail
from lib.redoc import redoc
import yaml
import sys
import os
import logging
# Config the log machine: greyed-out "[LEVEL] message" lines, DEBUG and above.
logging.basicConfig(
    format='\033[90m[%(levelname)s] %(message)s\033[0m',
    level=logging.DEBUG)

# Load the configuration
try:
    with open("lagos.conf", 'r') as f:
        # safe_load avoids arbitrary Python object construction from the YAML
        # file (calling yaml.load without an explicit Loader is unsafe and
        # deprecated since PyYAML 5.1).
        config = yaml.safe_load(f)
except IOError:
    logging.error( "Please use lagos.conf.default to set a " \
            "valid lagos.conf file.")
    sys.exit()
def sendMail(fromaddr, toaddr, mss, config):
    """Send one mail message *mss* through the SMTP server named in *config*."""
    smtp = smtplib.SMTP(config.get('smtp_server', ''))
    smtp.login(config.get('smtp_user', ''),
               config.get('smtp_password', ''))
    smtp.sendmail(fromaddr, toaddr, mss)
    smtp.quit()
# Define aliases
pool = config.get('pool', '/tmp/pool/')

# Init the connexion with the IMAP server
logging.info('Connect to the IMAP server')
M = imaplib.IMAP4_SSL(config.get('imap_server', ''))
M.login(config.get('imap_user', ''),
        config.get('imap_password', ''))
M.select()

# Fetch all mails
typ, data = M.search(None, 'ALL')
for num in data[0].split():
    # Flag the message as read
    M.store(num, '+FLAGS', '\\Seen')
    typ, data = M.fetch(num, '(RFC822)')
    # Decode the message and save its attachments into the pool directory.
    m = remail(data)
    filenames = m.save_attachment(pool)
    sender = m.ffrom
    subject = m.subject
    content = m.content
    print('from: %s' % sender)
    print('about: %s' % subject)
    print('%s' % content)
    print(filenames)
    for filename in filenames:
        # Create a redoc for each saved attachment.
        path = os.path.join(pool, filename)
        d = redoc(path)
        # Look for additional metadata in the (YAML-formatted) mail body.
        # safe_load: the mail body is untrusted input -- never feed it to
        # yaml.load without a safe Loader.
        allMeta = yaml.safe_load(content)
        if isinstance(allMeta, dict):
            # Per-file metadata wins; otherwise apply the whole mapping.
            newMeta = allMeta.get(filename, allMeta)
            d.meta_add(newMeta)
            d.meta_write()
    #logging.info('Send a message back')
    #sendMail(config.get('imap_user', ''), sender, 'Hello back.', config)
M.close()
M.logout()
| a2ohm/lagos | lagos.py | Python | mit | 2,116 |
#!/usr/bin/env python
from exceptions import KeyError
import os
import requests
class GoogleDoc(object):
    """
    A class for accessing a Google document as an object.
    Includes the bits necessary for accessing the document and auth and such.
    For example:

        doc = {
            "key": "123456abcdef",
            "file_name": "my_google_doc",
            "gid": "2"
        }
        g = GoogleDoc(**doc)
        g.get_auth()
        g.get_document()

    Will download your google doc to data/file_name.format.
    """
    # You can update these values with kwargs.
    # In fact, you better pass a key or else it won't work!
    key = None
    file_format = 'xlsx'
    file_name = 'copy'
    gid = '0'

    # You can change these with kwargs but it's not recommended.
    spreadsheet_url = 'https://spreadsheets.google.com/feeds/download/spreadsheets/Export?key=%(key)s&exportFormat=%(format)s&gid=%(gid)s'
    new_spreadsheet_url = 'https://docs.google.com/spreadsheets/d/%(key)s/export?format=%(format)s&id=%(key)s&gid=%(gid)s'
    auth = None
    # Credentials come from the environment, never hard-coded.
    email = os.environ.get('APPS_GOOGLE_EMAIL', None)
    password = os.environ.get('APPS_GOOGLE_PASS', None)
    scope = "https://spreadsheets.google.com/feeds/"
    service = "wise"
    session = "1"

    def __init__(self, **kwargs):
        """
        Because sometimes, just sometimes, you need to update the class when you instantiate it.
        In this case, we need, minimally, a document key.
        """
        # Any keyword argument overrides the class-level default above.
        if kwargs:
            if kwargs.items():
                for key, value in kwargs.items():
                    setattr(self, key, value)

    def get_auth(self):
        """
        Gets an authorization token and adds it to the class.
        """
        # NOTE(review): this uses Google's long-deprecated ClientLogin API --
        # confirm it still works; raising KeyError for missing credentials is
        # also unconventional (ValueError/RuntimeError would fit better).
        data = {}
        if not self.email or not self.password:
            raise KeyError("Error! You're missing some variables. You need to export APPS_GOOGLE_EMAIL and APPS_GOOGLE_PASS.")
        else:
            data['Email'] = self.email
            data['Passwd'] = self.password
            data['scope'] = self.scope
            data['service'] = self.service
            data['session'] = self.session
            r = requests.post("https://www.google.com/accounts/ClientLogin", data=data)
            # The token is on the third response line, after "Auth=".
            self.auth = r.content.split('\n')[2].split('Auth=')[1]

    def get_document(self):
        """
        Uses the authentication token to fetch a google doc.
        """
        # Handle basically all the things that can go wrong.
        if not self.auth:
            raise KeyError("Error! You didn't get an auth token. Something very bad happened. File a bug?")
        elif not self.key:
            raise KeyError("Error! You forgot to pass a key to the class.")
        else:
            headers = {}
            headers['Authorization'] = "GoogleLogin auth=%s" % self.auth
            url_params = { 'key': self.key, 'format': self.file_format, 'gid': self.gid }
            url = self.spreadsheet_url % url_params
            r = requests.get(url, headers=headers)
            # Fall back to the new-style export URL before giving up.
            if r.status_code != 200:
                url = self.new_spreadsheet_url % url_params
                r = requests.get(url, headers=headers)
            if r.status_code != 200:
                raise KeyError("Error! Your Google Doc does not exist.")
            with open('data/%s.%s' % (self.file_name, self.file_format), 'wb') as writefile:
                writefile.write(r.content)
| nprapps/visits | etc/gdocs.py | Python | mit | 3,436 |
#!/usr/bin/env python
# coding: utf-8
"""阻塞和非阻塞的 HTTP 客户端接口.
这个模块定义了一个被两种实现方式 ``simple_httpclient`` 和
``curl_httpclient`` 共享的通用接口 . 应用程序可以选择直接实例化相对应的实现类,
或使用本模块提供的 `AsyncHTTPClient` 类, 通过复写
`AsyncHTTPClient.configure` 方法来选择一种实现 .
默认的实现是 ``simple_httpclient``, 这可以能满足大多数用户的需要 . 然而, 一
些应用程序可能会因为以下原因想切换到 ``curl_httpclient`` :
* ``curl_httpclient`` 有一些 ``simple_httpclient`` 不具有的功能特性,
包括对 HTTP 代理和使用指定网络接口能力的支持.
* ``curl_httpclient`` 更有可能与不完全符合 HTTP 规范的网站兼容, 或者与
使用很少使用 HTTP 特性的网站兼容.
* ``curl_httpclient`` 更快.
* ``curl_httpclient`` 是 Tornado 2.0 之前的默认值.
注意, 如果你正在使用 ``curl_httpclient``, 强力建议你使用最新版本的
``libcurl`` 和 ``pycurl``. 当前 libcurl 能被支持的最小版本是
7.21.1, pycurl 能被支持的最小版本是 7.18.2. 强烈建议你所安装的 ``libcurl``
是和异步 DNS 解析器 (threaded 或 c-ares) 一起构建的,
否则你可能会遇到各种请求超时的问题 (更多信息请查看
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
和 curl_httpclient.py 里面的注释).
为了选择 ``curl_httpclient``, 只需要在启动的时候调用
`AsyncHTTPClient.configure` ::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import time
import weakref
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str
from tornado import httputil, stack_context
from tornado.ioloop import IOLoop
from tornado.util import Configurable
class HTTPClient(object):
    """A blocking HTTP client.

    Provided for convenience and testing; most applications that run an
    IOLoop should use `AsyncHTTPClient` instead.

    Typical usage::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print response.body
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()
    """
    def __init__(self, async_client_class=None, **kwargs):
        # Run a private IOLoop and drive an async client synchronously on it.
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        self._async_client = async_client_class(self._io_loop, **kwargs)
        self._closed = False

    def __del__(self):
        # Best-effort cleanup; close() is idempotent.
        self.close()

    def close(self):
        """Closes this HTTPClient, releasing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        ``request`` may be a URL string or an `HTTPRequest` object. If it
        is a string, an `HTTPRequest` is built from the keyword
        arguments: ``HTTPRequest(request, **kwargs)``.

        If an error occurs during the fetch, an `HTTPError` is raised
        unless the ``raise_error`` keyword argument is set to False.
        """
        response = self._io_loop.run_sync(functools.partial(
            self._async_client.fetch, request, **kwargs))
        return response
class AsyncHTTPClient(Configurable):
    """A non-blocking HTTP client.

    Example usage::

        def handle_request(response):
            if response.error:
                print "Error:", response.error
            else:
                print response.body

        http_client = AsyncHTTPClient()
        http_client.fetch("http://www.google.com/", handle_request)

    The constructor is slightly magic: it actually creates an instance
    of an implementation-specific subclass, and instances are reused as
    a kind of pseudo-singleton (one per `.IOLoop`). Pass
    ``force_instance=True`` to suppress this sharing; keyword arguments
    other than ``io_loop`` may only be passed together with
    ``force_instance=True``.

    The implementation subclass (and the arguments to its constructor)
    can be set with the static method `configure()`. All implementations
    accept a ``defaults`` keyword argument that seeds default
    `HTTPRequest` attribute values, e.g.::

        AsyncHTTPClient.configure(
            None, defaults=dict(user_agent="MyUserAgent"))
        # or with force_instance:
        client = AsyncHTTPClient(force_instance=True,
                                 defaults=dict(user_agent="MyUserAgent"))

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    @classmethod
    def configurable_base(cls):
        return AsyncHTTPClient

    @classmethod
    def configurable_default(cls):
        from tornado.simple_httpclient import SimpleAsyncHTTPClient
        return SimpleAsyncHTTPClient

    @classmethod
    def _async_clients(cls):
        # Per-subclass WeakKeyDictionary mapping IOLoop -> shared instance.
        attr_name = '_async_client_dict_' + cls.__name__
        if not hasattr(cls, attr_name):
            setattr(cls, attr_name, weakref.WeakKeyDictionary())
        return getattr(cls, attr_name)

    def __new__(cls, io_loop=None, force_instance=False, **kwargs):
        io_loop = io_loop or IOLoop.current()
        if force_instance:
            instance_cache = None
        else:
            instance_cache = cls._async_clients()
        if instance_cache is not None and io_loop in instance_cache:
            return instance_cache[io_loop]
        instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
                                                       **kwargs)
        # Make sure the instance knows which cache to remove itself from.
        # It can't simply call _async_clients() because we may be in
        # __new__(AsyncHTTPClient) but instance.__class__ may be
        # SimpleAsyncHTTPClient.
        instance._instance_cache = instance_cache
        if instance_cache is not None:
            instance_cache[instance.io_loop] = instance
        return instance

    def initialize(self, io_loop, defaults=None):
        # Start from HTTPRequest's defaults and layer user overrides on top.
        self.io_loop = io_loop
        self.defaults = dict(HTTPRequest._DEFAULTS)
        if defaults is not None:
            self.defaults.update(defaults)
        self._closed = False

    def close(self):
        """Destroys this HTTP client, freeing any file descriptors used.

        **Not needed in normal use** thanks to the transparent instance
        reuse: call it only when the `.IOLoop` is also being closed, or
        when the client was created with ``force_instance=True``.

        No other methods may be called on the `AsyncHTTPClient` after
        ``close()``.
        """
        if self._closed:
            return
        self._closed = True
        if self._instance_cache is not None:
            if self._instance_cache.get(self.io_loop) is not self:
                raise RuntimeError("inconsistent AsyncHTTPClient cache")
            del self._instance_cache[self.io_loop]

    def fetch(self, request, callback=None, raise_error=True, **kwargs):
        """Executes a request, asynchronously returning an `HTTPResponse`.

        ``request`` may be a URL string or an `HTTPRequest` object; a
        string is converted via ``HTTPRequest(request, **kwargs)``.

        This method returns a `.Future` whose result is an
        `HTTPResponse`. By default the ``Future`` raises an `HTTPError`
        for non-200 response codes; with ``raise_error=False`` the
        response is returned regardless of the code.

        If a ``callback`` is given, it is invoked with the
        `HTTPResponse`. In the callback interface `HTTPError` is not
        raised automatically -- inspect the response's ``error``
        attribute or call its `~HTTPResponse.rethrow` method instead.
        """
        if self._closed:
            raise RuntimeError("fetch() called on closed AsyncHTTPClient")
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)
        # We may modify this (to add Host, Accept-Encoding, etc),
        # so make sure we don't modify the caller's object. This is also
        # where normal dicts get converted to HTTPHeaders objects.
        request.headers = httputil.HTTPHeaders(request.headers)
        request = _RequestProxy(request, self.defaults)
        future = TracebackFuture()
        if callback is not None:
            callback = stack_context.wrap(callback)

            def handle_future(future):
                # Deliver the response -- or a synthesized 599 error
                # response -- to the callback on the IOLoop.
                exc = future.exception()
                if isinstance(exc, HTTPError) and exc.response is not None:
                    response = exc.response
                elif exc is not None:
                    response = HTTPResponse(
                        request, 599, error=exc,
                        request_time=time.time() - request.start_time)
                else:
                    response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)

        def handle_response(response):
            if raise_error and response.error:
                future.set_exception(response.error)
            else:
                future.set_result(response)
        self.fetch_impl(request, handle_response)
        return future

    def fetch_impl(self, request, callback):
        # Subclasses implement the actual transfer.
        raise NotImplementedError()

    @classmethod
    def configure(cls, impl, **kwargs):
        """Configures the `AsyncHTTPClient` subclass to use.

        ``AsyncHTTPClient()`` actually creates an instance of a subclass.
        This method may be called with either a class object or the
        fully-qualified name of such a class (or ``None`` to use the
        default, ``SimpleAsyncHTTPClient``).

        Additional keyword arguments are passed to the constructor of
        each subclass instance created. The keyword ``max_clients``
        determines the maximum number of simultaneous
        `~AsyncHTTPClient.fetch()` operations per `.IOLoop`; other
        arguments may be supported depending on the implementation.
        Example::

            AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
        """
        super(AsyncHTTPClient, cls).configure(impl, **kwargs)
class HTTPRequest(object):
    """HTTP client request object."""
    # Default values for HTTPRequest parameters.
    # Merged with the values on the request object by AsyncHTTPClient
    # implementations.
    _DEFAULTS = dict(
        connect_timeout=20.0,
        request_timeout=20.0,
        follow_redirects=True,
        max_redirects=5,
        decompress_response=True,
        proxy_password='',
        allow_nonstandard_methods=False,
        validate_cert=True)

    def __init__(self, url, method="GET", headers=None, body=None,
                 auth_username=None, auth_password=None, auth_mode=None,
                 connect_timeout=None, request_timeout=None,
                 if_modified_since=None, follow_redirects=None,
                 max_redirects=None, user_agent=None, use_gzip=None,
                 network_interface=None, streaming_callback=None,
                 header_callback=None, prepare_curl_callback=None,
                 proxy_host=None, proxy_port=None, proxy_username=None,
                 proxy_password=None, allow_nonstandard_methods=None,
                 validate_cert=None, ca_certs=None,
                 allow_ipv6=None,
                 client_key=None, client_cert=None, body_producer=None,
                 expect_100_continue=False, decompress_response=None,
                 ssl_options=None):
        r"""All parameters except ``url`` are optional.

        :arg string url: URL to fetch
        :arg string method: HTTP method, e.g. "GET" or "POST"
        :arg headers: additional HTTP headers
            (`~tornado.httputil.HTTPHeaders` or `dict`)
        :arg body: HTTP request body as a string (byte or unicode;
            unicode is utf-8 encoded)
        :arg body_producer: callable for lazy/asynchronous request
            bodies. It is called with one argument, a ``write``
            function, and should return a `.Future`; call the write
            function with new data as it becomes available (it returns
            a `.Future` for flow control). Only one of ``body`` and
            ``body_producer`` may be specified; ``body_producer`` is
            not supported by ``curl_httpclient``, and a
            ``Content-Length`` header is recommended since chunked
            request encoding is poorly supported by servers.
            New in Tornado 4.0.
        :arg string auth_username: username for HTTP authentication
        :arg string auth_password: password for HTTP authentication
        :arg string auth_mode: authentication mode; default "basic".
            Allowed values are implementation-defined:
            ``curl_httpclient`` supports "basic" and "digest",
            ``simple_httpclient`` only "basic"
        :arg float connect_timeout: timeout for the initial connection
        :arg float request_timeout: timeout for the entire request
        :arg if_modified_since: timestamp (`datetime` or `float`) for
            the ``If-Modified-Since`` header
        :arg bool follow_redirects: follow redirects automatically
            instead of returning the 3xx response?
        :arg int max_redirects: limit for ``follow_redirects``
        :arg string user_agent: ``User-Agent`` header value
        :arg bool decompress_response: request a compressed response
            from the server and decompress it after download.
            Default True. New in Tornado 4.0.
        :arg bool use_gzip: deprecated alias for ``decompress_response``
            since Tornado 4.0
        :arg string network_interface: network interface to use for the
            request (``curl_httpclient`` only; see note below)
        :arg callable streaming_callback: if set, called with each chunk
            of data as it is received, and ``HTTPResponse.body`` /
            ``HTTPResponse.buffer`` will be empty in the final response
        :arg callable header_callback: if set, called with each header
            line as received (including the first line, e.g.
            ``HTTP/1.0 200 OK\r\n``, and a final line containing only
            ``\r\n``; all lines keep their trailing newline).
            ``HTTPResponse.headers`` will be empty in the final
            response; most useful combined with ``streaming_callback``
            as the only way to see headers while the request is in
            progress.
        :arg callable prepare_curl_callback: if set, called with the
            ``pycurl.Curl`` object so the application can make
            additional ``setopt`` calls
        :arg string proxy_host: HTTP proxy hostname. To use a proxy,
            ``proxy_host`` and ``proxy_port`` must be set;
            ``proxy_username`` and ``proxy_pass`` are optional. Proxies
            are currently only supported by ``curl_httpclient``.
        :arg int proxy_port: HTTP proxy port
        :arg string proxy_username: HTTP proxy username
        :arg string proxy_password: HTTP proxy password
        :arg bool allow_nonstandard_methods: allow unknown values for
            the ``method`` argument?
        :arg bool validate_cert: for HTTPS requests, validate the
            server's certificate?
        :arg string ca_certs: filename of CA certificates in PEM
            format, or None for defaults. See note below for use with
            ``curl_httpclient``.
        :arg string client_key: filename of client SSL key, if any.
            See note below for use with ``curl_httpclient``.
        :arg string client_cert: filename of client SSL certificate, if
            any. See note below for use with ``curl_httpclient``.
        :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object used
            by ``simple_httpclient`` (unsupported by
            ``curl_httpclient``); overrides ``validate_cert``,
            ``ca_certs``, ``client_key`` and ``client_cert``
        :arg bool allow_ipv6: use IPv6 when available? Default is True.
        :arg bool expect_100_continue: if True, send the
            ``Expect: 100-continue`` header and wait for a continue
            response before sending the request body. Only supported by
            simple_httpclient.

        .. note::

            With ``curl_httpclient``, some options may be inherited by
            subsequent fetches because ``pycurl`` does not allow them
            to be cleanly reset. This applies to the ``ca_certs``,
            ``client_key``, ``client_cert`` and ``network_interface``
            arguments: if you use them, pass them on every request (the
            values need not be identical, but requests that specify
            them must not be mixed with requests that rely on the
            defaults).

        .. versionadded:: 3.1
           The ``auth_mode`` argument.

        .. versionadded:: 4.0
           The ``body_producer`` and ``expect_100_continue`` arguments.

        .. versionadded:: 4.2
           The ``ssl_options`` argument.
        """
        # Note that some of these attributes go through property setters
        # defined below.
        self.headers = headers
        if if_modified_since:
            self.headers["If-Modified-Since"] = httputil.format_timestamp(
                if_modified_since)
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_username = proxy_username
        self.proxy_password = proxy_password
        self.url = url
        self.method = method
        self.body = body
        self.body_producer = body_producer
        self.auth_username = auth_username
        self.auth_password = auth_password
        self.auth_mode = auth_mode
        self.connect_timeout = connect_timeout
        self.request_timeout = request_timeout
        self.follow_redirects = follow_redirects
        self.max_redirects = max_redirects
        self.user_agent = user_agent
        # ``use_gzip`` is the deprecated spelling of ``decompress_response``;
        # an explicit ``decompress_response`` always wins.
        if decompress_response is not None:
            self.decompress_response = decompress_response
        else:
            self.decompress_response = use_gzip
        self.network_interface = network_interface
        self.streaming_callback = streaming_callback
        self.header_callback = header_callback
        self.prepare_curl_callback = prepare_curl_callback
        self.allow_nonstandard_methods = allow_nonstandard_methods
        self.validate_cert = validate_cert
        self.ca_certs = ca_certs
        self.allow_ipv6 = allow_ipv6
        self.client_key = client_key
        self.client_cert = client_cert
        self.ssl_options = ssl_options
        self.expect_100_continue = expect_100_continue
        self.start_time = time.time()

    @property
    def headers(self):
        return self._headers

    @headers.setter
    def headers(self, value):
        # Normalize None to an empty HTTPHeaders object.
        if value is None:
            self._headers = httputil.HTTPHeaders()
        else:
            self._headers = value

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, value):
        # Bodies are always stored as utf-8 bytes.
        self._body = utf8(value)

    @property
    def body_producer(self):
        return self._body_producer

    @body_producer.setter
    def body_producer(self, value):
        # Wrap callbacks so they run in the caller's stack context.
        self._body_producer = stack_context.wrap(value)

    @property
    def streaming_callback(self):
        return self._streaming_callback

    @streaming_callback.setter
    def streaming_callback(self, value):
        self._streaming_callback = stack_context.wrap(value)

    @property
    def header_callback(self):
        return self._header_callback

    @header_callback.setter
    def header_callback(self, value):
        self._header_callback = stack_context.wrap(value)

    @property
    def prepare_curl_callback(self):
        return self._prepare_curl_callback

    @prepare_curl_callback.setter
    def prepare_curl_callback(self, value):
        self._prepare_curl_callback = stack_context.wrap(value)
class HTTPResponse(object):
    """HTTP response object.

    Attributes:

    * request: HTTPRequest object
    * code: numeric HTTP status code, e.g. 200 or 404
    * reason: short human-readable description of the status code
    * headers: `tornado.httputil.HTTPHeaders` object
    * effective_url: final location of the resource after following any
      redirects
    * buffer: ``cStringIO`` object containing the response body
    * body: response body as a string (created on demand from
      ``self.buffer``)
    * error: any Exception object, or None
    * request_time: seconds from start to finish of the request
    * time_info: dictionary of diagnostic timing information from the
      request. Available data are subject to change, but currently the
      timings from
      http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html are used,
      plus ``queue``, the delay (if any) introduced by waiting for a
      slot under the `AsyncHTTPClient` ``max_clients`` setting.
    """
    def __init__(self, request, code, headers=None, buffer=None,
                 effective_url=None, error=None, request_time=None,
                 time_info=None, reason=None):
        # Unwrap _RequestProxy so self.request is the caller's object.
        if isinstance(request, _RequestProxy):
            self.request = request.request
        else:
            self.request = request
        self.code = code
        self.reason = reason or httputil.responses.get(code, "Unknown")
        if headers is not None:
            self.headers = headers
        else:
            self.headers = httputil.HTTPHeaders()
        self.buffer = buffer
        self._body = None
        if effective_url is None:
            self.effective_url = request.url
        else:
            self.effective_url = effective_url
        # Synthesize an HTTPError for non-2xx codes when none was supplied.
        if error is None:
            if self.code < 200 or self.code >= 300:
                self.error = HTTPError(self.code, message=self.reason,
                                       response=self)
            else:
                self.error = None
        else:
            self.error = error
        self.request_time = request_time
        self.time_info = time_info or {}

    def _get_body(self):
        # Lazily materialize and cache the body from the buffer.
        if self.buffer is None:
            return None
        elif self._body is None:
            self._body = self.buffer.getvalue()
        return self._body
    body = property(_get_body)

    def rethrow(self):
        """If there was an error on the request, raise an `HTTPError`."""
        if self.error:
            raise self.error

    def __repr__(self):
        args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
        return "%s(%s)" % (self.__class__.__name__, args)
class HTTPError(Exception):
    """Exception raised when an HTTP request fails.

    Attributes:

    * ``code`` - integer HTTP error code, e.g. 404. Error code 599 is
      used when no HTTP response was received, e.g. on a timeout.
    * ``response`` - the full `HTTPResponse` object, if any.

    Note that if ``follow_redirects`` is False, redirects become
    HTTPErrors, and the destination of the redirect can be read from
    ``error.response.headers['Location']``.
    """
    def __init__(self, code, message=None, response=None):
        self.code = code
        # Fall back lazily to the standard reason phrase for the code.
        if message:
            self.message = message
        else:
            self.message = httputil.responses.get(code, "Unknown")
        self.response = response
        super(HTTPError, self).__init__(code, message, response)

    def __str__(self):
        return "HTTP %d: %s" % (self.code, self.message)
class _RequestProxy(object):
"""将对象和默认字典相结合.
本质上是被 AsyncHTTPClient 的实现使用.
"""
def __init__(self, request, defaults):
self.request = request
self.defaults = defaults
def __getattr__(self, name):
request_attr = getattr(self.request, name)
if request_attr is not None:
return request_attr
elif self.defaults is not None:
return self.defaults.get(name, None)
else:
return None
def main():
    # Simple command-line fetcher:
    #   python -m tornado.httpclient [--print_headers] URL [URL ...]
    from tornado.options import define, options, parse_command_line
    define("print_headers", type=bool, default=False)
    define("print_body", type=bool, default=True)
    define("follow_redirects", type=bool, default=True)
    define("validate_cert", type=bool, default=True)
    args = parse_command_line()
    client = HTTPClient()
    for arg in args:
        try:
            response = client.fetch(arg,
                                    follow_redirects=options.follow_redirects,
                                    validate_cert=options.validate_cert,
                                    )
        except HTTPError as e:
            # Still print the response for HTTP-level errors, if we got one.
            if e.response is not None:
                response = e.response
            else:
                raise
        if options.print_headers:
            print(response.headers)
        if options.print_body:
            print(native_str(response.body))
    client.close()


if __name__ == "__main__":
    main()
| tao12345666333/tornado-zh | tornado/httpclient.py | Python | mit | 25,743 |
class Solution(object):
    def plusOne(self, digits):
        """Interpret ``digits`` as a decimal number, add one, and return
        the digits of the result.

        :type digits: List[int]
        :rtype: List[int]
        """
        number = int("".join(str(d) for d in digits)) + 1
        return [int(c) for c in str(number)]
Solution().plusOne([0]) | xingjian-f/Leetcode-solution | 66. Plus One.py | Python | mit | 232 |
#!/usr/bin/env python3
# coding: utf-8
#
import re
import os
import time
import argparse
import yaml
import bunch
import uiautomator2 as u2
from logzero import logger
# Canonical operation names produced by split_step and consumed by run_step.
CLICK = "click"
# swipe
SWIPE_UP = "swipe_up"
SWIPE_RIGHT = "swipe_right"
SWIPE_LEFT = "swipe_left"
SWIPE_DOWN = "swipe_down"
SCREENSHOT = "screenshot"
EXIST = "assert_exist"
WAIT = "wait"


def split_step(text: str):
    """Split a Chinese step description into (operation, argument).

    The step text must start with a known keyword, e.g. ``点击`` (click)
    or ``等待`` (wait); whatever follows the keyword is returned,
    stripped, as the argument. Raises RuntimeError when no keyword
    matches.
    """
    keyword_map = {
        "点击": CLICK,
        "上滑": SWIPE_UP,
        "右滑": SWIPE_RIGHT,
        "左滑": SWIPE_LEFT,
        "下滑": SWIPE_DOWN,
        "截图": SCREENSHOT,
        "存在": EXIST,
        "等待": WAIT,
    }
    for keyword, operation in keyword_map.items():
        if text.startswith(keyword):
            return operation, text[len(keyword):].strip()
    raise RuntimeError("Step unable to parse", text)
def read_file_content(path: str, mode: str = "r") -> str:
    """Return the entire content of *path*, opened with *mode*."""
    with open(path, mode) as fp:
        return fp.read()
def run_step(cf: bunch.Bunch, app: u2.Session, step: str):
    """Parse one step line and perform it against the app session.

    cf: parsed config (needs at least ``output_directory`` for screenshots)
    app: uiautomator2 session driving the device
    step: raw step text, e.g. "点击 <xpath>"
    """
    logger.info("Step: %s", step)
    oper, body = split_step(step)
    logger.debug("parse as: %s %s", oper, body)
    if oper == CLICK:
        app.xpath(body).click()
    elif oper == SWIPE_RIGHT:
        app.xpath(body).swipe("right")
    elif oper == SWIPE_UP:
        app.xpath(body).swipe("up")
    elif oper == SWIPE_LEFT:
        app.xpath(body).swipe("left")
    elif oper == SWIPE_DOWN:
        app.xpath(body).swipe("down")
    elif oper == SCREENSHOT:
        # Default name is millisecond-timestamped; an explicit name from the
        # step body wins, and a sane image extension is enforced.
        output_dir = "./output"  # NOTE(review): unused local -- cf.output_directory is what is used below
        filename = "screen-%d.jpg" % int(time.time()*1000)
        if body:
            filename = body
        name_noext, ext = os.path.splitext(filename)
        if ext.lower() not in ['.jpg', '.jpeg', '.png']:
            ext = ".jpg"
        os.makedirs(cf.output_directory, exist_ok=True)
        filename = os.path.join(cf.output_directory, name_noext + ext)
        logger.debug("Save screenshot: %s", filename)
        app.screenshot().save(filename)
    elif oper == EXIST:
        # Waits for the element; fails the run when it never appears.
        assert app.xpath(body).wait(), body
    elif oper == WAIT:
        #if re.match("^[\d\.]+$")
        # Numeric body means "sleep N seconds"; otherwise wait for the xpath.
        if body.isdigit():
            seconds = int(body)
            logger.info("Sleep %d seconds", seconds)
            time.sleep(seconds)
        else:
            app.xpath(body).wait()
    else:
        raise RuntimeError("Unhandled operation", oper)
def run_conf(d, conf_filename: str):
    """Run every step of a YAML test config against device *d*."""
    d.healthcheck()
    # Auto-dismiss permission dialogs ("允许" = "Allow") while the test runs.
    d.xpath.when("允许").click()
    d.xpath.watch_background(2.0)
    cf = yaml.load(read_file_content(conf_filename), Loader=yaml.SafeLoader)
    # Fill in optional settings with conservative defaults.
    default = {
        "output_directory": "output",
        "action_before_delay": 0,
        "action_after_delay": 0,
        "skip_cleanup": False,
    }
    for k, v in default.items():
        cf.setdefault(k, v)
    cf = bunch.Bunch(cf)
    print("Author:", cf.author)
    print("Description:", cf.description)
    print("Package:", cf.package)
    logger.debug("action_delay: %.1f / %.1f", cf.action_before_delay, cf.action_after_delay)
    app = d.session(cf.package)
    for step in cf.steps:
        # Optional settle time around every step.
        time.sleep(cf.action_before_delay)
        run_step(cf, app, step)
        time.sleep(cf.action_after_delay)
    if not cf.skip_cleanup:
        app.close()
# Module-level holders; presently unused placeholders -- TODO confirm intent.
device = None
conf_filename = None


def test_entry():
    # Intentionally empty; keeps test collectors happy without side effects.
    pass


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--command", help="run single step command")
    # Fixed copy-pasted help text: this option selects the device, it does
    # not run a command.
    parser.add_argument("-s", "--serial", help="device serial number")
    parser.add_argument("conf_filename", default="test.yml", nargs="?", help="config filename")
    args = parser.parse_args()
    d = u2.connect(args.serial)
    if args.command:
        # Single-step mode: run exactly one command against a fresh session.
        cf = bunch.Bunch({"output_directory": "output"})
        app = d.session()
        run_step(cf, app, args.command)
    else:
        run_conf(d, args.conf_filename)
| openatx/uiautomator2 | examples/runyaml/run.py | Python | mit | 3,918 |
from __future__ import unicode_literals
# Package metadata: contributor roll, release date and version string.
__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
                        "William Davidson Richard", "Stephen Dacek",
                        "Sai Jayaraman", "Michael Kocher", "Dan Gunter",
                        "Shreyas Cholia", "Vincent L Chevrier",
                        "Rickard Armiento"])
__date__ = "Oct 29 2014"
__version__ = "3.0.7"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
from monty.json import MontyEncoder, MontyDecoder, MSONable
| yanikou19/pymatgen | pymatgen/__init__.py | Python | mit | 775 |
#===========================================================================================================================
# aims : define both 3 hidden layers model and 4 hidden layers model
#
# input : x : placeholder variable which has as column number the number of features of the training matrix considered
#
# training_data_len: Number of features of the training matrix considered
# nclasses: node number of the output layer
# train_vars_name: set as initnet_vars_name in main_train_net.py or as ftnet_vars_name in 3_main_fine_tuning.py
#
# return : x : first layer
# out_layer : output layer
#===========================================================================================================================
import tensorflow as tf
from . import freeze
from . import utilities
n_nodes_hl1=500
n_nodes_hl2=500
n_nodes_hl3=500
n_nodes_hl4=500
#======================================================================================================
# I CREATE A 3HL FF MODEL
#======================================================================================================
# NETWORK: 3 hidden layers and 500 neurons per layer
def new_ff_model3(x, ninputdata_len, nclasses, train_vars_name):
    """Build a feed-forward net with 3 hidden ReLU layers (500 units each).

    Args:
        x: input tensor/placeholder of shape [None, ninputdata_len].
        ninputdata_len: number of input features.
        nclasses: number of output-layer nodes.
        train_vars_name: name scope under which the variables are created.

    Returns:
        dict with 'input' (x) and 'output' (linear logits, graph node 'O').
        A softmax node named 'SMO' is also added to the graph for export.
    """
    with tf.name_scope(train_vars_name):
        W1 = weights([ninputdata_len, n_nodes_hl1], 'W1')
        b1 = biases([n_nodes_hl1], 'b1')
        W2 = weights([n_nodes_hl1, n_nodes_hl2], 'W2')
        b2 = biases([n_nodes_hl2], 'b2')
        W3 = weights([n_nodes_hl2, n_nodes_hl3], 'W3')
        b3 = biases([n_nodes_hl3], 'b3')
        WOUT = weights_out([n_nodes_hl3, nclasses], 'WOUT')
        bOUT = biases_out([nclasses], 'bOUT')
    # Chain the hidden layers: relu(h @ W + b), each feeding the next.
    hidden = x
    for W, b in ((W1, b1), (W2, b2), (W3, b3)):
        hidden = tf.nn.relu(tf.add(tf.matmul(hidden, W), b))
    # Output layer keeps a linear activation; softmax exposed as 'SMO'.
    out_layer = tf.add(tf.matmul(hidden, WOUT), bOUT, name='O')
    tf.nn.softmax(out_layer, name='SMO')
    return {'input': x, 'output': out_layer}
#======================================================================================================
# I CREATE A 4HL FF MODEL
#======================================================================================================
# NETWORK: 4 hidden layers and 500 neurons per layer
def new_ff_model4(x, ninputdata_len, nclasses, train_vars_name):
    """Build a feed-forward net with 4 hidden ReLU layers (500 units each),
    using uniform [-0.1, 0.1] weight initialisation.

    Args:
        x: input tensor/placeholder of shape [None, ninputdata_len].
        ninputdata_len: number of input features.
        nclasses: number of output-layer nodes.
        train_vars_name: name scope under which the variables are created.

    Returns:
        dict with 'input' (x) and 'output' (linear logits, graph node 'O').
        A softmax node named 'SMO' is also added to the graph for export.
    """
    with tf.name_scope(train_vars_name):
        W1 = weightsUniform([ninputdata_len, n_nodes_hl1], 'W1')
        b1 = biases([n_nodes_hl1], 'b1')
        W2 = weightsUniform([n_nodes_hl1, n_nodes_hl2], 'W2')
        b2 = biases([n_nodes_hl2], 'b2')
        W3 = weightsUniform([n_nodes_hl2, n_nodes_hl3], 'W3')
        b3 = biases([n_nodes_hl3], 'b3')
        W4 = weightsUniform([n_nodes_hl3, n_nodes_hl4], 'W4')
        b4 = biases([n_nodes_hl4], 'b4')
        WOUT = weightsUniform_out([n_nodes_hl4, nclasses], 'WOUT')
        bOUT = biases_out([nclasses], 'bOUT')
    # Chain the hidden layers: relu(h @ W + b), each feeding the next.
    hidden = x
    for W, b in ((W1, b1), (W2, b2), (W3, b3), (W4, b4)):
        hidden = tf.nn.relu(tf.add(tf.matmul(hidden, W), b))
    # Output layer keeps a linear activation; softmax exposed as 'SMO'.
    out_layer = tf.add(tf.matmul(hidden, WOUT), bOUT, name='O')
    tf.nn.softmax(out_layer, name='SMO')
    return {'input': x, 'output': out_layer}
# ======================================================================================================
# I ADAPT AN EXISTING 3HL FF NET => 4HL NET
# ======================================================================================================
# I inherit from a graph the 2nd & 3rd hidden layers' weights
# I create a 4th hidden layer
# I will train the latter + the 1st and the OutLayer
def adapt_ff_model3(x, ninputdata_len, noutputclasses, train_vars_name, graph, prefix=None):
    """Adapt a trained 3-hidden-layer net into a 4-hidden-layer net.

    Reuses W2/b2 and W3/b3 fetched from `graph` (under the 'model' scope);
    creates a fresh first layer (W11p/b11p), a new fourth layer (W4/b4) and
    a fresh output layer (WOUT1p/bOUT1p) under `train_vars_name` -- those
    are the variables to be trained.

    Returns a dict with 'input' (x) and 'output' (linear logits, node 'O');
    a softmax node 'SMO' is also added to the graph.
    """
    # Optional name prefix of the imported graph (e.g. after import_graph_def).
    if prefix is not None:
        prefix = prefix + "/"
    else:
        prefix = ""
    # All node names in the source graph, used for substring matching below.
    nodes = [n.name for n in graph.as_graph_def().node]
    # Inherited (pre-trained) middle-layer parameters.
    W2 = utilities.getNodeBySubstring(graph, prefix + 'model/W2', nodes)
    W3 = utilities.getNodeBySubstring(graph, prefix + 'model/W3', nodes)
    b2 = utilities.getNodeBySubstring(graph, prefix + 'model/b2', nodes)
    b3 = utilities.getNodeBySubstring(graph, prefix + 'model/b3', nodes)
    with tf.variable_scope(train_vars_name):
        # Newly created, trainable parameters.
        W11p = weights([ninputdata_len, n_nodes_hl1], 'W11p')
        b11p = biases([n_nodes_hl1], 'b11p')
        W4 = weights([n_nodes_hl3, n_nodes_hl4], 'W4')
        b4 = biases([n_nodes_hl4], 'b4')
        WOUT1p = weights_out([n_nodes_hl3, noutputclasses], 'WOUT1p')
        bOUT1p = biases_out([noutputclasses], 'bOUT1p')
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, W11p), b11p)
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, W2), b2)
    layer_2 = tf.nn.relu(layer_2)
    # Hidden layer with RELU activation
    layer_3 = tf.add(tf.matmul(layer_2, W3), b3)
    layer_3 = tf.nn.relu(layer_3)
    # Hidden layer with RELU activation
    layer_4 = tf.add(tf.matmul(layer_3, W4), b4)
    layer_4 = tf.nn.relu(layer_4)
    # Output layer with linear activation
    out_layer = tf.add(tf.matmul(layer_4, WOUT1p), bOUT1p, name='O')
    soft_layer = tf.nn.softmax(out_layer, name='SMO')
    return {'input': x, 'output': out_layer}
# ======================================================================================================
# I RE-ADAPT AN EXISTING 4HL FF NET => 4HL NET
# ======================================================================================================
# A)
# I inherit from a graph the 2nd & 3rd & 4th hidden layers' weights
# I will train first and last layer
def readapt_ff_adaptedmodel(x, ninputdata_len, noutputclasses, train_vars_name, graph, prefix=None):
    """Re-adapt an already-adapted 4-hidden-layer net to a new task.

    Inherits the 2nd/3rd layers from the 'model' scope and the 4th layer
    from the `train_vars_name` scope of `graph`; only a fresh first layer
    (W11p/b11p) and output layer (WOUT1p/bOUT1p) are created and trained.

    Returns a dict with 'input' (x) and 'output' (linear logits, node 'O');
    a softmax node 'SMO' is also added to the graph.
    """
    # Optional name prefix of the imported graph.
    if prefix is not None:
        prefix = prefix + "/"
    else:
        prefix = ""
    # All node names in the source graph, used for substring matching below.
    nodes = [n.name for n in graph.as_graph_def().node]
    # Inherited parameters: middle layers from 'model', 4th layer from the
    # fine-tuning scope (it was created there by adapt_ff_model3).
    W2 = utilities.getNodeBySubstring(graph, prefix + 'model/W2', nodes)
    W3 = utilities.getNodeBySubstring(graph, prefix + 'model/W3', nodes)
    W4 = utilities.getNodeBySubstring(graph, prefix + train_vars_name + '/W4', nodes)
    b2 = utilities.getNodeBySubstring(graph, prefix + 'model/b2', nodes)
    b3 = utilities.getNodeBySubstring(graph, prefix + 'model/b3', nodes)
    b4 = utilities.getNodeBySubstring(graph, prefix + train_vars_name + '/b4', nodes)
    with tf.variable_scope(train_vars_name):
        # Newly created, trainable parameters.
        W11p = weights([ninputdata_len, n_nodes_hl1], 'W11p')
        b11p = biases([n_nodes_hl1], 'b11p')
        WOUT1p = weights_out([n_nodes_hl3, noutputclasses], 'WOUT1p')
        bOUT1p = biases_out([noutputclasses], 'bOUT1p')
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, W11p), b11p)
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, W2), b2)
    layer_2 = tf.nn.relu(layer_2)
    # Hidden layer with RELU activation
    layer_3 = tf.add(tf.matmul(layer_2, W3), b3)
    layer_3 = tf.nn.relu(layer_3)
    # Hidden layer with RELU activation
    layer_4 = tf.add(tf.matmul(layer_3, W4), b4)
    layer_4 = tf.nn.relu(layer_4)
    # Output layer with linear activation
    out_layer = tf.add(tf.matmul(layer_4, WOUT1p), bOUT1p, name='O')
    soft_layer = tf.nn.softmax(out_layer, name='SMO')
    return {'input': x, 'output': out_layer}
# B)
# I inherit all the existing weights (nodes names obtained according to : adapt_ff_model3
# I train everything
def readapt_ff_adaptedmodel_2(x, ninputdata_len, noutputclasses, train_vars_name, graph, prefix=None):
    """Rebuild the adapted 4-hidden-layer net reusing ALL inherited weights.

    Unlike readapt_ff_adaptedmodel, nothing new is created: every weight and
    bias is fetched from `graph` so the whole network can be retrained.

    BUG FIX: the original fetched only the bias tensors, leaving W11p, W2,
    W3, W4 and WOUT1p undefined (guaranteed NameError at graph-build time).
    It also computed `prefix` without ever using it; the prefix is now
    applied consistently with the sibling readapt_* functions.

    NOTE(review): tensor names assume all variables live under 'model/...'
    in the frozen graph (as the original bias lookups did) -- confirm
    against the actual checkpoint naming before relying on this.
    `ninputdata_len`/`noutputclasses` are kept for signature compatibility.
    """
    if prefix is not None:
        prefix = prefix + "/"
    else:
        prefix = ""
    # Inherited weights (previously missing -- see BUG FIX above).
    W11p = graph.get_tensor_by_name(prefix + 'model/W11p:0')
    W2 = graph.get_tensor_by_name(prefix + 'model/W2:0')
    W3 = graph.get_tensor_by_name(prefix + 'model/W3:0')
    W4 = graph.get_tensor_by_name(prefix + 'model/W4:0')
    WOUT1p = graph.get_tensor_by_name(prefix + 'model/WOUT1p:0')
    # Inherited biases.
    b11p = graph.get_tensor_by_name(prefix + 'model/b11p:0')
    b2 = graph.get_tensor_by_name(prefix + 'model/b2:0')
    b3 = graph.get_tensor_by_name(prefix + 'model/b3:0')
    b4 = graph.get_tensor_by_name(prefix + 'model/b4:0')
    bOUT1p = graph.get_tensor_by_name(prefix + 'model/bOUT1p:0')
    # Hidden layers with RELU activation.
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, W11p), b11p))
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, W2), b2))
    layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2, W3), b3))
    layer_4 = tf.nn.relu(tf.add(tf.matmul(layer_3, W4), b4))
    # Output layer with linear activation; softmax node exported as 'SMO'.
    out_layer = tf.add(tf.matmul(layer_4, WOUT1p), bOUT1p, name='O')
    tf.nn.softmax(out_layer, name='SMO')
    return {'input': x, 'output': out_layer}
# ======================================================================================================
# ======================================================================================================
def weights(shape, name):
    """Hidden-layer weight variable, truncated-normal init (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def weights_out(shape, name):
    """Output-layer weight variable, truncated-normal init (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def weightsUniform(shape, name):
    """Hidden-layer weight variable, uniform init in [-0.1, 0.1]."""
    return tf.Variable(tf.random_uniform(shape, minval=-0.1, maxval=0.1), name=name)
def weightsUniform_out(shape, name):
    """Output-layer weight variable, uniform init in [-0.1, 0.1]."""
    return tf.Variable(tf.random_uniform(shape, minval=-0.1, maxval=0.1), name=name)
def biases(shape, name):
    """Hidden-layer bias variable, constant init 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)
def biases_out(shape, name):
    """Output-layer bias variable, constant init 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)
| allspeak/api.allspeak.eu | web/project/training_api/libs/models.py | Python | mit | 10,685 |
# -*- coding: utf-8 -*-
"""
pytest-pylint
=============
Plugin for py.test for doing pylint tests
"""
from setuptools import setup
# Package metadata and entry points for the pytest-pylint plugin.
setup(
    name='pytest-pylint',
    description='pytest plugin to check source code with pylint',
    long_description=open("README.rst").read(),
    license="MIT",
    version='0.3.0',
    author='Carson Gee',
    author_email='x@carsongee.com',
    url='https://github.com/carsongee/pytest-pylint',
    py_modules=['pytest_pylint'],
    # Registers the module as a pytest plugin so it is auto-loaded.
    entry_points={'pytest11': ['pylint = pytest_pylint']},
    install_requires=['pytest>=2.4', 'pylint', 'six'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
| rutsky/pytest-pylint | setup.py | Python | mit | 906 |
import logging
from pdfminer.psparser import KWD, LIT, PSBaseParser, PSStackParser, PSEOF
logger = logging.getLogger(__name__)
class TestPSBaseParser:
    """Simplistic Test cases"""
    # Sample PostScript-flavoured input. The integer offsets in TOKENS/OBJS
    # below are byte positions into this exact literal -- do not reformat it.
    TESTDATA = rb"""%!PS
begin end
 " @ #
/a/BCD /Some_Name /foo#5f#xbaa
0 +1 -2 .5 1.234
(abc) () (abc ( def ) ghi)
(def\040\0\0404ghi) (bach\\slask) (foo\nbaa)
(this % is not a comment.)
(foo
baa)
(foo\
baa)
<> <20> < 40 4020 >
<abcd00
12345>
func/a/b{(c)do*}def
[ 1 (z) ! ]
<< /foo (bar) >>
"""
    # Expected (byte offset, token) pairs produced by PSBaseParser.nexttoken().
    TOKENS = [
        (5, KWD(b"begin")),
        (11, KWD(b"end")),
        (16, KWD(b'"')),
        (19, KWD(b"@")),
        (21, KWD(b"#")),
        (23, LIT("a")),
        (25, LIT("BCD")),
        (30, LIT("Some_Name")),
        (41, LIT("foo_xbaa")),
        (54, 0),
        (56, 1),
        (59, -2),
        (62, 0.5),
        (65, 1.234),
        (71, b"abc"),
        (77, b""),
        (80, b"abc ( def ) ghi"),
        (98, b"def \x00 4ghi"),
        (118, b"bach\\slask"),
        (132, b"foo\nbaa"),
        (143, b"this % is not a comment."),
        (170, b"foo\nbaa"),
        (180, b"foobaa"),
        (191, b""),
        (194, b" "),
        (199, b"@@ "),
        (211, b"\xab\xcd\x00\x124\x05"),
        (226, KWD(b"func")),
        (230, LIT("a")),
        (232, LIT("b")),
        (234, KWD(b"{")),
        (235, b"c"),
        (238, KWD(b"do*")),
        (241, KWD(b"}")),
        (242, KWD(b"def")),
        (246, KWD(b"[")),
        (248, 1),
        (250, b"z"),
        (254, KWD(b"!")),
        (256, KWD(b"]")),
        (258, KWD(b"<<")),
        (261, LIT("foo")),
        (266, b"bar"),
        (272, KWD(b">>")),
    ]
    # Expected (byte offset, object) pairs produced by PSStackParser.nextobject():
    # keywords are consumed as structure, so only literals/containers remain.
    OBJS = [
        (23, LIT("a")),
        (25, LIT("BCD")),
        (30, LIT("Some_Name")),
        (41, LIT("foo_xbaa")),
        (54, 0),
        (56, 1),
        (59, -2),
        (62, 0.5),
        (65, 1.234),
        (71, b"abc"),
        (77, b""),
        (80, b"abc ( def ) ghi"),
        (98, b"def \x00 4ghi"),
        (118, b"bach\\slask"),
        (132, b"foo\nbaa"),
        (143, b"this % is not a comment."),
        (170, b"foo\nbaa"),
        (180, b"foobaa"),
        (191, b""),
        (194, b" "),
        (199, b"@@ "),
        (211, b"\xab\xcd\x00\x124\x05"),
        (230, LIT("a")),
        (232, LIT("b")),
        (234, [b"c"]),
        (246, [1, b"z"]),
        (258, {"foo": b"bar"}),
    ]
    def get_tokens(self, s):
        """Tokenize byte string `s`, collecting (pos, token) pairs until EOF."""
        from io import BytesIO
        # Minimal concrete subclass: flush() just surfaces buffered results.
        class MyParser(PSBaseParser):
            def flush(self):
                self.add_results(*self.popall())
        parser = MyParser(BytesIO(s))
        r = []
        try:
            while True:
                r.append(parser.nexttoken())
        except PSEOF:
            # Expected terminator: parser raises PSEOF when input is exhausted.
            pass
        return r
    def get_objects(self, s):
        """Parse byte string `s` into (pos, object) pairs until EOF."""
        from io import BytesIO
        # Minimal concrete subclass: flush() just surfaces buffered results.
        class MyParser(PSStackParser):
            def flush(self):
                self.add_results(*self.popall())
        parser = MyParser(BytesIO(s))
        r = []
        try:
            while True:
                r.append(parser.nextobject())
        except PSEOF:
            # Expected terminator: parser raises PSEOF when input is exhausted.
            pass
        return r
    def test_1(self):
        # Token stream must match TOKENS exactly (offsets included).
        tokens = self.get_tokens(self.TESTDATA)
        logger.info(tokens)
        assert tokens == self.TOKENS
        return
    def test_2(self):
        # Object stream must match OBJS exactly (offsets included).
        objs = self.get_objects(self.TESTDATA)
        logger.info(objs)
        assert objs == self.OBJS
        return
| pdfminer/pdfminer.six | tests/test_pdfminer_psparser.py | Python | mit | 3,405 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-08 17:49
from __future__ import unicode_literals
from django.db import migrations
from django.core import management
from django.contrib.sessions.models import Session
from django.utils.timezone import now
def expire_all_sessions(apps, schema_editor):
    """Force-expire every Django session (used to log out all users).

    First purges already-expired sessions, then marks every remaining
    session as expired (expire_date = now) and purges again.
    """
    # Clear any expired sessions.
    management.call_command("clearsessions")
    # Set any remaining sessions to expire.
    sessions = Session.objects.all()
    for session in sessions:
        session.expire_date = now()
        session.save()
    # Clear those sessions too.
    management.call_command("clearsessions")
class Migration(migrations.Migration):
    # Runs after the partner column removal in 0056.
    dependencies = [("users", "0056_remove_authorization_partner")]
    # Forward-only data migration: no reverse function is supplied, so this
    # cannot be unapplied (session expiry is not reversible anyway).
    operations = [migrations.RunPython(expire_all_sessions)]
| WikipediaLibrary/TWLight | TWLight/users/migrations/0057_expire_all_sessions.py | Python | mit | 818 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the test app's settings, then hand off to the
    # standard management command dispatcher (runserver, test, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tdjango.tests.testapp.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| calston/tdjango | manage_test.py | Python | mit | 264 |
#!/usr/bin/env python
PACKAGE_NAME = 'shiba_teleop'
import roslib
roslib.load_manifest(PACKAGE_NAME)
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
import rospkg
# Drive-state machine states.
FORWARD = 1
BACKWARDS = 2
SPINNING = 3
STOPPED = 4
# Linear velocity limits and step size (m/s).
linear_increment = 0.3
max_linear_vel = 1.0
min_linear_vel = -1.0
default_linear_vel = 0.1
# Angular velocity limits and step size (rad/s).
angular_increment = 0.1
max_angular_vel = 0.6
spin_speed = 1.0
# Mutable teleop state, updated by the joystick callback.
last_joy_message = None
linear_vel = 0.0
angular_vel = 0.0
last_angular_acceleration = 0
rotating = False
state = STOPPED
| Laika-ETS/shiba | shiba_ws/src/shiba_teleop/script/teleop.py | Python | mit | 533 |
# CodeIgniter
# http://codeigniter.com
#
# An open source application development framework for PHP
#
# This content is released under the MIT License (MIT)
#
# Copyright (c) 2014 - 2015, British Columbia Institute of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Copyright (c) 2008 - 2014, EllisLab, Inc. (http://ellislab.com/)
# Copyright (c) 2014 - 2015, British Columbia Institute of Technology (http://bcit.ca/)
#
# http://opensource.org/licenses/MIT MIT License
import re
import copy
from pygments.lexer import DelegatingLexer
from pygments.lexers.web import PhpLexer, HtmlLexer
__all__ = [ 'CodeIgniterLexer' ]
class CodeIgniterLexer(DelegatingLexer):
    """
    Handles HTML, PHP, JavaScript, and CSS is highlighted
    PHP is highlighted with the "startline" option
    """
    name = 'CodeIgniter'
    aliases = [ 'ci', 'codeigniter' ]
    filenames = [ '*.html', '*.css', '*.php', '*.xml', '*.static' ]
    mimetypes = [ 'text/html', 'application/xhtml+xml' ]
    def __init__(self, **options):
        # BUG FIX: user-supplied lexer options were silently discarded;
        # forward them to DelegatingLexer alongside startinline=True.
        super(CodeIgniterLexer, self).__init__(HtmlLexer,
                                               PhpLexer,
                                               startinline=True,
                                               **options)
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings, then hand off to the standard
    # management command dispatcher (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "juisapp.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| snirp/juis | manage.py | Python | mit | 250 |
#!/bin/python
'''
Library for formulating and solving game trees as linear programs.
'''
class Node(object):
    '''Abstract base for nodes in the game tree.'''
    def solve(self):
        '''
        Populate self.solutions, mapping each reachable TerminalNode to
        the requirements (CDF-form inequality clauses, treated as
        booleans) needed to arrive at it. Subclasses must override.
        '''
        message = "Solve is not defined for Node. \
            Instantiate a subclass instead"
        raise NotImplementedError(message)
class TerminalNode(Node):
    '''A leaf of the game tree; its only solution is itself.'''
    def __init__(self, name, utilities):
        '''
        @name - unique string identifier for this leaf
        @utilities - opaque payload; must mesh with the utility
            functions handed to FolderNode
        '''
        self.name = name
        self.utilities = utilities
        # Reaching this leaf from itself requires nothing: a single
        # trivially-true clause.
        self.solutions = {self: singleton(True)}
    def solve(self):
        # Pre-solved at construction time; this stops the recursion.
        return
    def __str__(self):
        return self.name
    # repr should read the same as str for these simple leaves.
    __repr__ = __str__
    def __hash__(self):
        return hash(self.name)
class FolderNode(Node):
    '''An internal decision node: chooses among its children's leaves.'''
    def __init__(self, children, util_func):
        '''
        @children - list of child Node objects
        @util_func - maps a TerminalNode to the utility this decision
            maker assigns to it
        '''
        self.children = children
        self.solutions = {}
        self.util = util_func
    def solve(self):
        '''Calls solve on entire subtree too'''
        # if empty folder, no children
        if len(self.children) == 0:
            return
        # at least 1 child, so solve all of them; seed every reachable
        # leaf with a placeholder (false) requirement set
        for child in self.children:
            child.solve()
            for leaf in child.solutions:
                self.solutions[leaf] = singleton(False)
        # if only one child, there is no choice, so use decision making
        # criteria of that child
        if len(self.children) == 1:
            # BUG FIX: this assigned to `self.solution` (typo), so the
            # placeholders above were never replaced by the child's data.
            self.solutions = self.children[0].solutions
            return
        # if more than 1 child, will need to find all ways possible to arrive
        # at each leaf
        for leftLeaf, rightLeaf, leftChild, rightChild in self._gen_children():
            # requirements to come down to decision between these two leaves
            req = And(leftChild.solutions[leftLeaf], rightChild.solutions[rightLeaf])
            # get utilities of each leaf
            leftUtil = self.util(leftLeaf)
            rightUtil = self.util(rightLeaf)
            # to choose left leaf, need req and that left is better than right
            reqLeftWins = And(req, singleton(leftUtil >= rightUtil))
            reqRightWins = And(req, singleton(rightUtil >= leftUtil))
            # add this to possible solutions
            self.solutions[leftLeaf] = Or(self.solutions[leftLeaf], reqLeftWins)
            self.solutions[rightLeaf] = Or(self.solutions[rightLeaf], reqRightWins)
    def _gen_children(self):
        '''
        Generator for all pairs of leaves l1, l2 st.
        l1 and l2 are in different immediate subtrees.
        Also includes these immediate subtrees roots i.e. this nodes
        direct children.
        So yields: (leftLeaf, rightLeaf, leftChild, rightChild, )
        '''
        # range (not the Python-2-only xrange) keeps this 2/3 compatible.
        for i in range(len(self.children) - 1):
            for j in range(i + 1, len(self.children)):
                leftChild = self.children[i]
                rightChild = self.children[j]
                leftLeaves = leftChild.solutions.keys()
                rightLeaves = rightChild.solutions.keys()
                for leftLeaf in leftLeaves:
                    for rightLeaf in rightLeaves:
                        yield (leftLeaf, rightLeaf, leftChild, rightChild, )
# functions which maintain CDF form of inequalities
def And(a, b):
    '''CDF conjunction: cross-concatenate every clause of a with every
    clause of b, keeping the result in CDF (tuple-of-clauses) form.'''
    # must be tuple to be able to add together
    return tuple(x + y for x in a for y in b)
def Or(a, b):
    '''CDF disjunction: simply the union (concatenation) of clause lists.'''
    return a + b
def singleton(x):
    '''Wrap a single inequality/boolean as a one-clause CDF expression.'''
    # must be a list of lists (here: tuple of tuples)
    clause = (x,)
    return (clause,)
| jdhenke/ally | core.py | Python | mit | 4,087 |
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
import threading
import os
import time
spoolgore_local = threading.local()
class EmailBackend(BaseEmailBackend):
    """Django email backend that spools messages to disk for Spoolgore.

    Each message is written atomically: first into a tmp/ subdirectory,
    then hard-linked into the spool directory and fsynced.
    """
    # Staging area inside the spool directory (evaluated at import time).
    __tmp__ = "%s/tmp" % settings.SPOOLGORE_DIRECTORY
    def send_messages(self, email_messages):
        """Spool each message; return the number spooled (None if no input)."""
        pid = os.getpid()
        tid = threading.current_thread().ident
        num_sent = 0
        if not email_messages:
            return
        for email_message in email_messages:
            if self._send(email_message.message().as_string(), pid, tid):
                num_sent += 1
        return num_sent
    def fsyncspool(self):
        """
        Call fsync() on the spool directory
        """
        fd = -1
        try:
            fd = os.open(settings.SPOOLGORE_DIRECTORY, os.O_RDONLY)
            os.fsync(fd)
        finally:
            # Only close if the open succeeded.
            if fd > -1: os.close(fd)
    def _send(self, data, pid, tid):
        """Write `data` to a unique spool file; returns True on success.

        The filename embeds timestamp, pid, tid and a per-thread counter so
        concurrent writers never collide.
        """
        # Per-thread monotonically increasing counter (thread-local storage).
        if not hasattr(spoolgore_local, 'counter'):
            spoolgore_local.counter = 0
        spoolgore_local.counter += 1
        filename = "%f_%s_%d_%d_%d" % (time.time(), time.strftime("%Y.%m.%d.%H.%M.%S"), pid, tid, spoolgore_local.counter)
        tmp = "%s/%s" % (self.__tmp__, filename)
        if not os.path.exists(self.__tmp__):
            os.makedirs(self.__tmp__)
        spool = "%s/%s" % (settings.SPOOLGORE_DIRECTORY, filename)
        with open(tmp, 'w') as f:
            f.write(data)
        try:
            # Hard-link into the spool dir (atomic publish), then fsync the
            # directory so the entry survives a crash.
            os.link(tmp, spool)
            self.fsyncspool()
        finally:
            # The tmp name is always removed; the spool link (if created)
            # remains as the durable copy.
            os.unlink(tmp)
        return True
| 20tab/django-spoolgore | spoolgore/backend.py | Python | mit | 1,588 |
# hackerrank - Algorithms: Time Conversion
# Written by James Andreou, University of Waterloo
# Python 2 script: reads "hh:mm:ssAM"/"hh:mm:ssPM" and prints 24-hour time.
S = raw_input()
# TYPE is 'A' or 'P' (the letter just before the trailing 'M').
TYPE = S[len(S)-2]
if S[:2] == "12":
    if TYPE == "A":
        # 12:xx:xx AM -> 00:xx:xx
        print "00" + S[2:-2]
    else:
        # 12:xx:xx PM stays as-is (strip the AM/PM suffix only)
        print S[:-2]
elif TYPE == "P":
    # 01-11 PM -> add 12 hours
    HOUR = int(S[:2]) + 12
    print str(HOUR) + S[2:-2]
else:
    # 01-11 AM unchanged (strip the suffix)
    print S[:-2]
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Packaging metadata for the `draw` IPython drawing tool.
# BUG FIX: `classifiers` and `install_requires` were parenthesized strings,
# not sequences (a single-element "tuple" missing its trailing comma), so
# packaging tools could mis-handle them; both are now proper lists.
kw = {
    'name': 'draw',
    'version': '0.0.1',
    'description': 'A drawing tool for IPython',
    'long_description': "",
    'author': 'Naoki Nishida',
    'author_email': 'domitry@gmail.com',
    'license': 'MIT License',
    'url': 'https://github.com/domitry/draw',
    'keywords': 'data visualization',
    'classifiers': [
        'License :: OSI Approved :: MIT License',
    ],
    'packages': ['draw'],
    'install_requires': [
        'ipython',
    ],
    'zip_safe': True,
}
setup(**kw)
| domitry/draw | setup.py | Python | mit | 601 |
import os
import runpy
from codecs import open
from setuptools import setup, find_packages
# Based on https://github.com/pypa/sampleproject/blob/master/setup.py
# and https://python-packaging-user-guide.readthedocs.org/
here = os.path.abspath(os.path.dirname(__file__))

# Long description comes straight from the README shipped next to setup.py.
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Read VERSION from mitmproxy/version.py without importing the package
# (importing would require the dependencies to be installed already).
VERSION = runpy.run_path(os.path.join(here, "mitmproxy", "version.py"))["VERSION"]

setup(
    name="mitmproxy",
    version=VERSION,
    description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
    long_description=long_description,
    url="http://mitmproxy.org",
    author="Aldo Cortesi",
    author_email="aldo@corte.si",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: Console :: Curses",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Security",
        "Topic :: Internet",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: Proxy Servers",
        "Topic :: Software Development :: Testing"
    ],
    # Ships both the mitmproxy and pathod package trees.
    packages=find_packages(include=[
        "mitmproxy", "mitmproxy.*",
        "pathod", "pathod.*",
    ]),
    include_package_data=True,
    # Console scripts for the proxy UIs and the pathod test tools.
    entry_points={
        'console_scripts': [
            "mitmproxy = mitmproxy.tools.main:mitmproxy",
            "mitmdump = mitmproxy.tools.main:mitmdump",
            "mitmweb = mitmproxy.tools.main:mitmweb",
            "pathod = pathod.pathod_cmdline:go_pathod",
            "pathoc = pathod.pathoc_cmdline:go_pathoc"
        ]
    },
    # https://packaging.python.org/en/latest/requirements/#install-requires
    # It is not considered best practice to use install_requires to pin dependencies to specific versions.
    install_requires=[
        "blinker>=1.4, <1.5",
        "click>=6.2, <7",
        "certifi>=2015.11.20.1",  # no semver here - this should always be on the last release!
        "construct>=2.8, <2.9",
        "cryptography>=1.3, <1.9",
        "cssutils>=1.0.1, <1.1",
        "h2>=3.0, <4",
        "html2text>=2016.1.8, <=2016.9.19",
        "hyperframe>=5.0, <6",
        "jsbeautifier>=1.6.3, <1.7",
        "kaitaistruct>=0.6, <0.7",
        "passlib>=1.6.5, <1.8",
        "pyasn1>=0.1.9, <0.3",
        "pyOpenSSL>=16.0, <17.0",
        "pyparsing>=2.1.3, <2.3",
        "pyperclip>=1.5.22, <1.6",
        "requests>=2.9.1, <3",
        "ruamel.yaml>=0.13.2, <0.15",
        "tornado>=4.3, <4.5",
        "urwid>=1.3.1, <1.4",
        "watchdog>=0.8.3, <0.9",
        "brotlipy>=0.5.1, <0.7",
        "sortedcontainers>=1.5.4, <1.6",
        # transitive from cryptography, we just blacklist here.
        # https://github.com/pypa/setuptools/issues/861
        "setuptools>=11.3, !=29.0.0",
    ],
    # Platform-specific and optional dependency groups.
    extras_require={
        ':sys_platform == "win32"': [
            "pydivert>=2.0.3, <2.1",
        ],
        ':sys_platform != "win32"': [
        ],
        'dev': [
            "Flask>=0.10.1, <0.13",
            "flake8>=3.2.1, <3.4",
            "mypy>=0.501, <0.502",
            "rstcheck>=2.2, <4.0",
            "tox>=2.3, <3",
            "pytest>=3, <3.1",
            "pytest-cov>=2.2.1, <3",
            "pytest-timeout>=1.0.0, <2",
            "pytest-xdist>=1.14, <2",
            "pytest-faulthandler>=1.3.0, <2",
            "sphinx>=1.3.5, <1.6",
            "sphinx-autobuild>=0.5.2, <0.7",
            "sphinxcontrib-documentedlist>=0.5.0, <0.7",
            "sphinx_rtd_theme>=0.1.9, <0.3",
        ],
        'contentviews': [
        ],
        'examples': [
            "beautifulsoup4>=4.4.1, <4.6",
            "Pillow>=3.2, <4.1",
        ]
    }
)
| xaxa89/mitmproxy | setup.py | Python | mit | 4,202 |
# Helper functions for the Maine Legislature project
import app_config
import collections
import copytext
import re
import json
import numbers
from unicodedata import normalize
from operator import itemgetter
# Module-level cache so the copytext spreadsheet is parsed only once.
CACHE = {}
def get_copy():
    """
    Thank you Ryan for this neat trick to avoid thrashing the disk
    https://github.com/INN/maine-legislature/blob/master/helpers.py#L361-L364
    """
    if not CACHE.get('copy', None):
        CACHE['copy'] = copytext.Copy(app_config.COPY_PATH)
    return CACHE['copy']
# NOTE(review): this is an exact duplicate of the CACHE/get_copy definition
# directly above (it redefines both, harmlessly shadowing the first copy).
# One of the two should be deleted.
CACHE = {}
def get_copy():
    """
    Thank you Ryan for this neat trick to avoid thrashing the disk
    https://github.com/INN/maine-legislature/blob/master/helpers.py#L361-L364
    """
    if not CACHE.get('copy', None):
        CACHE['copy'] = copytext.Copy(app_config.COPY_PATH)
    return CACHE['copy']
# Please test the first two lines against "01234-4567": it should not return "001234-4567"
# Please test the first two lines against "61234-4567": it should not return "061234-4567"
def format_zip(zip):
    """Normalize a ZIP value from the copy spreadsheet to a string.

    Strings (including ZIP+4 like "01234-4567") pass through untouched.
    Numeric cells arrive as floats (e.g. 4040.0), so only a *trailing*
    ".0" is stripped.

    BUG FIX: the old `.replace('.0', '')` removed ".0" anywhere in the
    string, mangling values such as 12.05 -> "125".
    """
    if type(zip) == str:
        return zip
    try:
        zip = str(zip)
        if zip.endswith('.0'):
            zip = zip[:-2]
        return zip
    except ValueError:
        # Defensive: fall back to the raw value if coercion fails.
        return zip
def get_locations():
    """Return the 'locations' sheet rows from the copy doc.

    BUG FIX: removed a dead loop that computed `location['id'].split('.')`
    into an unused local (`better_id`) and discarded it.
    """
    copy = get_copy()
    locations = copy['locations']
    return locations
def get_location_ids():
    """Return the list of every location slug, in sheet order."""
    return [location['id'] for location in get_locations()]
def get_location_by_slug(slug):
    """Return the location row whose id equals `slug`, or None if absent."""
    for candidate in get_locations():
        if candidate['id'] == slug:
            return candidate
    return None
def get_locations_statuses():
    """Return status rows, coercing numeric ids to plain ints in place."""
    statuses = get_copy()['locations_statuses']
    for row in statuses:
        if isinstance(row['id'], numbers.Number):
            # Spreadsheet numbers arrive as floats (e.g. 12.0) -> 12.
            row['id'] = int(float(row['id']))
    return statuses
def get_location_history_by_slug(slug):
    """
    Return all status rows for one location, newest first
    (sorted by date, then time).
    """
    matching = [row for row in get_locations_statuses() if row['id'] == slug]
    if len(matching) > 1:
        matching.sort(key=itemgetter('date', 'time'), reverse=True)
    return matching
def get_location_status_by_slug(slug):
    """Return the most recent status row for a location, or {} if none."""
    history = get_location_history_by_slug(slug)
    return history[0] if history else {}
def get_location_status_color_by_slug(slug):
    """Return the location's status color, or u'unknown' when missing/invalid."""
    status = get_location_status_by_slug(slug)
    recognized = {'red', 'yellow', 'green', 'evacuated'}
    try:
        color = status['color']
    except KeyError:
        return u'unknown'
    return color if color in recognized else u'unknown'
def get_location_status_updated_by_slug(slug):
    """Return 'date time' of the latest status, or u'' when unavailable."""
    status = get_location_status_by_slug(slug)
    try:
        date_part, time_part = status['date'], status['time']
    except KeyError:
        return u''
    return date_part + ' ' + time_part
# -*- coding: utf-8 -*-
from exam.exceptions import ModelDoesNotExist, InvalidParameter
def create_specific_exam(name_class):
    """Instantiate the model class `name_class` from the app module named
    after it (lowercased), e.g. 'Foo' -> foo.models.Foo()."""
    try:
        module = __import__(name_class.lower() + '.models',
                            fromlist=[name_class])
        return getattr(module, name_class)()
    except ImportError:
        raise ModelDoesNotExist('Model does not exist')
    except ValueError:
        raise InvalidParameter('Invalid parameter')
def import_class(exam_type):
    """Return (without instantiating) the model class referenced by
    `exam_type.name_class`, imported from the matching app's models."""
    try:
        target = exam_type.name_class
        module = __import__(target.lower() + '.models', fromlist=[target])
        return getattr(module, target)
    except ImportError:
        raise ModelDoesNotExist('Model does not exist')
    except ValueError:
        raise InvalidParameter('Invalid parameter')
| msfernandes/anato-hub | core/dynamic_import.py | Python | mit | 920 |
import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_bond_type import AbstractBondType
class CubicBondType(AbstractBondType):
    """Parameter set for a cubic bond potential.

    Parameters carry simtk units (enforced by @accepts_compatible_units):
    equilibrium `length` in nm, stiffness coefficients `C2` (kJ/mol/nm^2)
    and `C3` (kJ/mol/nm^3), plus bond `order` and constraint flag `c`
    forwarded to AbstractBondType.
    """
    __slots__ = ['length', 'C2', 'C3', 'order', 'c']

    @accepts_compatible_units(None, None,
                              length=units.nanometers,
                              C2=units.kilojoules_per_mole * units.nanometers ** (-2),
                              C3=units.kilojoules_per_mole * units.nanometers ** (-3),
                              order=None,
                              c=None)
    def __init__(self, bondingtype1, bondingtype2,
                 length=0.0 * units.nanometers,
                 C2=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
                 C3=0.0 * units.kilojoules_per_mole * units.nanometers ** (-3),
                 order=1, c=False):
        AbstractBondType.__init__(self, bondingtype1, bondingtype2, order, c)
        self.length = length
        self.C2 = C2
        self.C3 = C3
class CubicBond(CubicBondType):
    """
    A cubic bond between two concrete atoms.

    Binds `atom1`/`atom2` to a CubicBondType parameter set (see that class
    for the unit conventions on length, C2, C3, order and c).
    """
    def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
                 length=0.0 * units.nanometers,
                 C2=0.0 * units.kilojoules_per_mole * units.nanometers ** (-2),
                 C3=0.0 * units.kilojoules_per_mole * units.nanometers ** (-3),
                 order=1, c=False):
        self.atom1 = atom1
        self.atom2 = atom2
        CubicBondType.__init__(self, bondingtype1, bondingtype2,
                               length=length,
                               C2=C2,
                               C3=C3,
                               order=order, c=c)
# download.py
import urllib.request


def download(url, dest=None):
    """Download *url* to the local file *dest*.

    :param str url: The URL to fetch.
    :param dest: Target file name; defaults to the last path component of
        the URL (so the original hard-coded duplicate name is derived
        instead of repeated).
    :returns: The name of the file written.
    """
    if dest is None:
        dest = url.rsplit('/', 1)[-1]
    print('File Downloading')
    # urlretrieve blocks until the whole file is on disk.
    urllib.request.urlretrieve(url, dest)
    return dest


if __name__ == '__main__':
    # Original script behavior: fetch the Python 3.4.1 Windows installer.
    # Wrapped in a main guard so importing this module has no side effects.
    download('https://www.python.org/ftp/python/3.4.1/python-3.4.1.msi')
| darkless456/Python | download.py | Python | mit | 208 |
# This file is part of ConfigFile - Parse and edit configuration files.
# Copyright (C) 2011-present Dario Giovannetti <dev@dariogiovannetti.net>
# Licensed under MIT
# https://github.com/kynikos/lib.py.configfile/blob/master/LICENSE
"""
This library provides the :py:class:`ConfigFile` class, whose goal is to
provide an interface for parsing, modifying and writing configuration files.
Main features:
* Support for subsections. Support for sectionless options (root options).
* Read from multiple sources (files, file-like objects, dictionaries or special
compatible objects) and compose them in a single :py:class:`ConfigFile`
object.
* When importing and exporting it is possible to choose what to do with
options only existing in the source, only existing in the destination, or
existing in both with different values.
* Import a configuration source into a particular subsection of an existing
object. Export only a particular subsection of an existing object.
* Preserve the order of sections and options when exporting. Try the best to
preserve any comments too.
* Access sections and options with the
``root('Section', 'Subsection')['option']`` syntax or the
``root('Section')('Subsection')['option']`` syntax.
* Save references to subsections with e.g.
``subsection = section('Section', 'Subsection')``.
* Interpolation of option values between sections when importing.
Author: Dario Giovannetti <dev@dariogiovannetti.net>
License: MIT
GitHub: https://www.github.com/kynikos/lib.py.configfile
Issue tracker: https://www.github.com/kynikos/lib.py.configfile/issues
**Note:** as it is clear by reading this page, the documentation is still in a
poor state. If you manage to understand how this library works and want to help
documenting it, you are welcome to fork the GitHub repository and request to
pull your improvements. Everything is written in docstrings in the only
python module of the package.
Also, if you have any questions, do not hesitate to ask in the issue tracker,
or write the author an email!
Examples
========
Basic usage
-----------
Suppose you have these two files:
``/path/to/file``:
.. code-block:: cfg
root_option = demo
[Section1]
test = ok
retest = no
test3 = yes
[Section2.Section2A]
foo = fooo
[Section3]
bar = yay
``/path/to/other_file``:
.. code-block:: cfg
[Section2C]
an_option = 2
Now run this script:
::
from configfile import ConfigFile
conf = ConfigFile("/path/to/file")
    conf("Section2").upgrade("/path/to/other_file")
option = conf("Section2", "Section2C")["an_option"]
print(option, type(option)) # 2 <class 'str'>
option = conf("Section2")("Section2C").get_int("an_option")
print(option, type(option)) # 2 <class 'int'>
conf.export_add("/path/to/file")
conf["root_option"] = "value"
conf("Section3").export_reset("/path/to/another_file")
You will end up with these files (``/path/to/other_file`` is left
untouched):
``/path/to/file``:
.. code-block:: cfg
root_option = demo
[Section1]
test = ok
retest = no
test3 = yes
[Section2.Section2A]
foo = fooo
[Section2.Section2C]
an_option = 2
[Section3]
bar = yay
``/path/to/another_file``:
.. code-block:: cfg
bar = yay
Interpolation
-------------
Suppose you have this file:
``/path/to/file``:
.. code-block:: cfg
[Section1]
option = foo ${$:Section2$:optionA$}
[Section1.Section2]
optionA = some value
optionB = ${optionA$} test
optionC = test ${$:optionA$}
[Section3]
option = ${Section1$:Section2$:optionA$} bar
Now run this script:
::
from configfile import ConfigFile
conf = ConfigFile("/path/to/file", interpolation=True)
print(conf('Section1')['option']) # foo some value
print(conf('Section1', 'Section2')['optionA']) # some value
print(conf('Section1', 'Section2')['optionB']) # some value test
print(conf('Section1', 'Section2')['optionC']) # test some value
print(conf('Section3')['option']) # some value bar
Module contents
===============
"""
import errno
import re as re_
import collections
import io
class Section(object):
    """
    The class for a section in the configuration file, including the root
    section. You should never need to instantiate this class directly, use
    :py:class:`ConfigFile` instead.
    """
    # TODO: Compile only once (bug #20)
    # Regular expressions used to classify each line of a parsed file.
    _PARSE_SECTION = r'^\s*\[(.+)\]\s*$'
    _PARSE_OPTION = r'^\s*([^\=]+?)\s*\=\s*(.*?)\s*$'
    _PARSE_COMMENT = r'^\s*[#;]{1}\s*(.*?)\s*$'
    _PARSE_IGNORE = r'^\s*$'
    # Validation patterns for section names (dotted when subsections are
    # enabled), option names and option values.
    _SECTION_SUB = r'^[a-zA-Z_]+(?:\.?[a-zA-Z0-9_]+)*$'
    _SECTION_PLAIN = r'^[a-zA-Z_]+[a-zA-Z0-9_]*$'
    _OPTION = r'^[a-zA-Z_]+[a-zA-Z0-9_]*$'
    _VALUE = r'^.*$'
    # Separators and markers used when exporting back to text.
    _SECTION_SEP = r'.'
    _OPTION_SEP = r' = '
    # "{}" will be replaced with the section name by str.format
    _SECTION_MARKERS = r'[{}]'
    _COMMENT_MARKER = r'# '
    # Interpolation syntax: ${section$:subsection$:option$}; "$$" escapes "$".
    _INTERPOLATION_SPECIAL = '$'
    _INTERPOLATION_SPECIAL_ESC = _INTERPOLATION_SPECIAL * 2
    _INTERPOLATION_START = _INTERPOLATION_SPECIAL + '{'
    _INTERPOLATION_SEP = _INTERPOLATION_SPECIAL + ':'
    _INTERPOLATION_END = _INTERPOLATION_SPECIAL + '}'
    # Split pattern that keeps the interpolation markers as separate tokens.
    _INTERPOLATION_SPLIT = (r'(' + r'|'.join(re_.escape(mark) for mark in (
        _INTERPOLATION_SPECIAL_ESC, _INTERPOLATION_START,
        _INTERPOLATION_SEP, _INTERPOLATION_END)) + r')')
    # Strings recognized as booleans by get_bool (compared in lowercase).
    _GET_BOOLEAN_TRUE = ('true', '1', 'yes', 'on', 'enabled')
    _GET_BOOLEAN_FALSE = ('false', '0', 'no', 'off', 'disabled')
    _GET_BOOLEAN_DEFAULT = None
    # Ordered, so exports preserve the original section/option order.
    _DICT_CLASS = collections.OrderedDict
    # Use lambda to create a new object every time
    # (an (options_dict, subsections_dict) pair representing an empty section)
    _EMPTY_SECTION = lambda self: (self._DICT_CLASS(), self._DICT_CLASS())
def __init__(self, name=None, parent=None, safe_calls=False,
inherit_options=False, subsections=True, ignore_case=True):
"""
Constructor.
:param str name: The name of the section.
:param Section parent: A reference to the parent section object.
:param bool safe_calls: If True, when calling a non-existent
subsection, its closest existing ancestor is returned.
:param bool inherit_options: Whether the section will inherit the
options from its ancestors.
:param bool subsections: If True, subsections are enabled; otherwise
they are disabled.
:param bool ignore_case: If True, section and option names will be
compared ignoring case differences; regular expressions will use
``re.I`` flag.
"""
self._NAME = name
self._PARENT = parent
# TODO: Move constant settings to a Settings class (bug #19)
self._SAFE_CALLS = safe_calls
self._INHERIT_OPTIONS = inherit_options
self._ENABLE_SUBSECTIONS = subsections
self._IGNORE_CASE = ignore_case
self._RE_I = re_.I if self._IGNORE_CASE else 0
self._SECTION = self._SECTION_SUB if self._ENABLE_SUBSECTIONS else \
self._SECTION_PLAIN
self._options = self._DICT_CLASS()
self._subsections = self._DICT_CLASS()
### DATA MODEL ###
    def __call__(self, *path, **kwargs):
        """
        Enables calling directly the object with a string or sequence of
        strings, returning the corresponding subsection object, if existent.

        :param path: A sequence of strings, representing a relative path of
            section names to the target descendant subsection, whose name is
            the last item.
        :type path: str
        :param bool safe: If True, when calling a non-existent subsection, its
            closest existing ancestor is returned.
        """
        # The Python 3 definition was:
        #def __call__(self, *path, safe=None):
        # But to keep compatibility with Python 2 it has been changed to the
        # current
        safe = kwargs.get('safe')
        section = self
        # Descend one path component at a time; on a miss, _finalize_call
        # either raises KeyError or returns silently, in which case the
        # closest existing ancestor (the current `section`) is returned.
        for sname in path:
            try:
                lsname = sname.lower()
            except AttributeError:
                raise TypeError('Section name must be a string: {}'.format(
                    sname))
            if self._IGNORE_CASE:
                # Linear scan so a differently-cased stored name still
                # matches.
                for subname in section._subsections:
                    if lsname == subname.lower():
                        section = section._subsections[subname]
                        break
                else:
                    self._finalize_call(safe, sname)
                    break
            else:
                try:
                    section = section._subsections[sname]
                except KeyError:
                    self._finalize_call(safe, sname)
                    break
        return section
def _finalize_call(self, safe, sname):
"""
Auxiliary method for :py:meth:`__call__`.
Process a not-found section name.
"""
if safe not in (True, False):
if self._SAFE_CALLS:
return
elif safe:
return
raise KeyError('Section not found: {}'.format(sname))
def __getitem__(self, opt):
"""
Returns the value for the option specified.
:param str opt: The name of the option whose value must be returned.
"""
item = self.get(opt, fallback=None,
inherit_options=self._INHERIT_OPTIONS)
# self.get returns None as a fallback value if opt is not found:
# however, for compatibility with usual dictionary operations,
# __getitem__ should better raise KeyError in this case
if item is None:
raise KeyError('Option not found: {}'.format(opt))
else:
return item
def __setitem__(self, opt, val):
"""
Stores the provided value in the specified option.
:param str opt: The name of the option.
:param str val: The new value for the option.
"""
if isinstance(opt, str):
if isinstance(val, str):
if self._IGNORE_CASE:
for o in self._options:
if opt.lower() == o.lower():
self._options[o] = val
break
else:
self._options[opt] = val
else:
self._options[opt] = val
else:
raise TypeError('Value must be a string: {}'.format(val))
else:
raise TypeError('Option name must be a string: {}'.format(opt))
def __delitem__(self, opt):
"""
Deletes the specified option.
:param str opt: The name of the option that must be deleted.
"""
try:
lopt = opt.lower()
except AttributeError:
raise TypeError('Option name must be a string: {}'.format(opt))
else:
if self._IGNORE_CASE:
for o in self._options:
if opt.lower() == o.lower():
del self._options[o]
break
else:
raise KeyError('Option not found: {}'.format(opt))
else:
try:
del self._options[opt]
except KeyError:
raise KeyError('Option not found: {}'.format(opt))
def __iter__(self):
"""
Lets iterate over the options of the section (for example with a for
loop).
"""
return iter(self._options)
def __contains__(self, item):
"""
If item is a :py:class:`Section` object, this method returns True if
item (the object, not its name) is a subsection of self; otherwise this
returns True if item is the name of an option in self.
:param item: A :py:class:`Section` object or the name of an option.
:type item: Section or str
"""
if isinstance(item, Section):
return item in self._subsections.values()
elif self._IGNORE_CASE:
for o in self._options:
if item.lower() == o.lower():
return True
else:
return False
else:
return item in self._options
### IMPORTING DATA ###
def set(self, opt, val):
"""
This is an alias for :py:meth:`__setitem__`.
"""
self[opt] = val
    def make_subsection(self, name):
        """
        Create an empty subsection under the current section if it does not
        exist.

        :param str name: The name of the new subsection.
        """
        # TODO: Use this method, where possible, when creating new sections in
        # the other methods
        # Build a compatible object holding just the new (empty) subsection
        # and merge it with overwrite disabled, so an existing subsection
        # with the same name is left untouched.
        sub = self._EMPTY_SECTION()
        sub[1][name] = self._EMPTY_SECTION()
        self._import_object(sub, overwrite=False)
def delete(self):
"""
Delete the current section.
"""
del self._PARENT._subsections[self._NAME]
def upgrade(self, *sources, **kwargs):
"""
Import sections and options from a file, file-like object, dictionary
or special object with upgrade mode.
If an option already exists, change its value; if it does not exist,
create it and store its value. For example:
*{A:a,B:b,C:c} upgrade {A:d,D:e} => {A:d,B:b,C:c,D:e}*
See :py:meth:`_import_object` for object compatibility.
:param sources: A sequence of files, file-like objects, dictionaries
and/or special objects.
:param bool interpolation: Enable/disable value interpolation.
"""
# Necessary for Python 2 compatibility
# The Python 3 definition was:
#def upgrade(self, *sources, interpolation=False):
interpolation = kwargs.get('interpolation', False)
self._import(sources, interpolation=interpolation)
def update(self, *sources, **kwargs):
"""
Import sections and options from a file, file-like object, dictionary
or special object with update mode.
If an option already exists, change its value; if it does not exist,
do not do anything. For example:
*{A:a,B:b,C:c} update {A:d,D:e} => {A:d,B:b,C:c}*
See :py:meth:`_import_object` for object compatibility.
:param sources: A sequence of files, file-like objects, dictionaries
and/or special objects.
:param bool interpolation: Enable/disable value interpolation.
"""
# Necessary for Python 2 compatibility
# The Python 3 definition was:
#def upgrade(self, *sources, interpolation=False):
interpolation = kwargs.get('interpolation', False)
self._import(sources, add=False, interpolation=interpolation)
def reset(self, *sources, **kwargs):
"""
Import sections and options from a file, file-like object, dictionary
or special object with reset mode.
Delete all options and subsections and recreate everything from the
importing object. For example:
*{A:a,B:b,C:c} reset {A:d,D:e} => {A:d,D:e}*
See :py:meth:`_import_object` for object compatibility.
:param sources: A sequence of files, file-like objects, dictionaries
and/or special objects.
:param bool interpolation: Enable/disable value interpolation.
"""
# Necessary for Python 2 compatibility
# The Python 3 definition was:
#def upgrade(self, *sources, interpolation=False):
interpolation = kwargs.get('interpolation', False)
self._import(sources, reset=True, interpolation=interpolation)
def add(self, *sources, **kwargs):
"""
Import sections and options from a file, file-like object, dictionary
or special object with add mode.
If an option already exists, do not do anything; if it does not exist,
create it and store its value. For example:
*{A:a,B:b,C:c} add {A:d,D:e} => {A:a,B:b,C:c,D:e}*
See :py:meth:`_import_object` for object compatibility.
:param sources: A sequence of files, file-like objects, dictionaries
and/or special objects.
:param bool interpolation: Enable/disable value interpolation.
"""
# Necessary for Python 2 compatibility
# The Python 3 definition was:
#def upgrade(self, *sources, interpolation=False):
interpolation = kwargs.get('interpolation', False)
self._import(sources, overwrite=False, interpolation=interpolation)
    def _import(self, sources, overwrite=True, add=True, reset=False,
                interpolation=False):
        """
        Parse some files, file-like objects, dictionaries or special objects
        and add their configuration to the existing one.

        Distinction between the various source types is done automatically.

        :param sources: A sequence of all the file names, file-like objects,
            dictionaries or special objects to be parsed; a value of None will
            be ignored (useful for creating empty objects that will be
            populated programmatically).
        :param bool overwrite: This sets whether the next source in the chain
            overwrites already imported sections and options; see
            :py:meth:`_import_object` for more details.
        :param bool add: This sets whether the next source in the chain adds
            non-pre-existing sections and options; see _import_object for more
            details.
        :param bool reset: This sets whether the next source in the chain
            removes all the data added by the previous sources.
        :param bool interpolation: If True, option values will be interpolated
            using values from other options through the special syntax
            ``${section$:section$:option$}``. Options will be interpolated only
            once at importing: all links among options will be lost after
            importing.
        """
        for source in sources:
            if source is None:
                continue
            elif isinstance(source, str):
                # A string is a file name: open and parse it.
                obj = self._parse_file(self._open_file(source))
            elif isinstance(source, io.IOBase):
                # An already-open file-like object.
                obj = self._parse_file(source)
            elif isinstance(source, dict):
                # A bare dict supplies options only (no subsections).
                obj = (source, {})
            else:
                # Anything else is assumed to already be a compatible
                # (options, subsections) object.
                obj = source
            self._import_object(obj, overwrite=overwrite, add=add, reset=reset)
        if interpolation:
            self._interpolate()
    def _open_file(self, cfile):
        """
        Open config file for reading.

        :param str cfile: The name of the file to be parsed.
        :returns: The open file object.
        :raises NonExistentFileError: If the file does not exist.
        :raises InvalidFileError: For any other OS-level failure.
        """
        try:
            return open(cfile, 'r')
        except EnvironmentError as e:
            # Distinguish a missing file from other I/O problems so callers
            # can treat the former as non-fatal if they wish.
            if e.errno == errno.ENOENT:
                raise NonExistentFileError('Cannot find {} ({})'.format(
                    e.filename, e.strerror))
            else:
                raise InvalidFileError('Cannot import configuration from {} '
                                       '({})'.format(e.filename, e.strerror))
def _parse_file(self, stream):
"""
Parse a text file and translate it into a compatible object, thus
making it possible to import it.
:param stream: a file-like object to be read from.
"""
with stream:
cdict = self._EMPTY_SECTION()
lastsect = cdict
for lno, line in enumerate(stream):
# Note that the order the various types are evaluated
# matters!
# TODO: Really? What about sorting the tests according
# to their likelihood to pass?
if re_.match(self._PARSE_IGNORE, line, self._RE_I):
continue
if re_.match(self._PARSE_COMMENT, line, self._RE_I):
continue
re_option = re_.match(self._PARSE_OPTION, line, self._RE_I)
if re_option:
lastsect[0][re_option.group(1)] = re_option.group(2)
continue
re_section = re_.match(self._PARSE_SECTION, line,
self._RE_I)
if re_section:
subs = self._parse_subsections(re_section)
d = cdict
for s in subs:
if s not in d[1]:
d[1][s] = self._EMPTY_SECTION()
d = d[1][s]
lastsect = d
continue
raise ParsingError('Invalid line in {}: {} (line {})'
''.format(cfile, line, lno + 1))
return cdict
def _parse_subsections(self, re):
"""
Parse the sections hierarchy in a section line of a text file and
return them in a list.
:param re: regular expression object.
"""
if self._ENABLE_SUBSECTIONS:
return re.group(1).split(self._SECTION_SEP)
else:
return (re.group(1), )
    def _import_object(self, cobj, overwrite=True, add=True, reset=False):
        """
        Import sections and options from a compatible object.

        :param cobj: A special object composed of dictionaries (or compatible
            mapping object) and tuples to be imported; a section is represented
            by a 2-tuple: its first value is a mapping object that associates
            the names of options to their values; its second value is a mapping
            object that associates the names of subsections to their 2-tuples.
            For example::

                cobj = (
                    {
                        'option1': 'value',
                        'option2': 'value'
                    },
                    {
                        'sectionA': (
                            {
                                'optionA1': 'value',
                                'optionA2': 'value',
                            },
                            {
                                'sectionC': (
                                    {
                                        'optionC1': 'value',
                                        'optionC2': 'value',
                                    },
                                    {},
                                ),
                            },
                        ),
                        'sectionB': (
                            {
                                'optionB1': 'value',
                                'optionB2': 'value'
                            },
                            {},
                        ),
                    },
                )

        :param bool overwrite: Whether imported data will overwrite
            pre-existing data.
        :param bool add: Whether non-pre-existing data will be imported.
        :param bool reset: Whether pre-existing data will be cleared.
        :raises InvalidObjectError: If an option name/value or a section
            name fails validation against the class patterns.
        """
        # TODO: Change "reset" mode to "remove" (complementing "overwrite" and
        # "add") (bug #25)
        if reset:
            # Drop everything first; the per-item helpers then store
            # unconditionally.
            self._options = self._DICT_CLASS()
            self._subsections = self._DICT_CLASS()
        # Validate and import every option of this level.
        for o in cobj[0]:
            if isinstance(o, str) and isinstance(cobj[0][o], str) and \
                    re_.match(self._OPTION, o, self._RE_I) and \
                    re_.match(self._VALUE, cobj[0][o], self._RE_I):
                self._import_object_option(overwrite, add, reset, o,
                                           cobj[0][o])
            else:
                raise InvalidObjectError('Invalid option or value: {}: {}'
                                         ''.format(o, cobj[0][o]))
        # Validate and recurse into every subsection of this level.
        for s in cobj[1]:
            if isinstance(s, str) and re_.match(self._SECTION, s, self._RE_I):
                self._import_object_subsection(overwrite, add, reset, s,
                                               cobj[1][s])
            else:
                raise InvalidObjectError('Invalid section name: {}'.format(s))
    def _import_object_option(self, overwrite, add, reset, opt, val):
        """
        Auxiliary method for :py:meth:`_import_object`.

        Import the currently-examined option.

        :returns: True if the option was stored, False otherwise.
        """
        if reset:
            # The containers were already cleared: store unconditionally.
            self._options[opt] = val
            return True
        if self._IGNORE_CASE:
            for o in self._options:
                if opt.lower() == o.lower():
                    # Don't even think of merging these two tests
                    if overwrite:
                        # Keep the stored spelling `o`, only replace the value.
                        self._options[o] = val
                        return True
                    break
            else:
                # Going through the loop above makes sure the option is not yet
                # in the section
                if add:
                    self._options[opt] = val
                    return True
        elif opt in self._options:
            # Don't even think of merging these two tests
            if overwrite:
                self._options[opt] = val
                return True
        elif add:
            self._options[opt] = val
            return True
        return False
    def _import_object_subsection(self, overwrite, add, reset, sec, secd):
        """
        Auxiliary method for :py:meth:`_import_object`.

        Import the currently-examined subsection.

        :returns: True if the subsection was created or merged into,
            False otherwise.
        """
        if reset:
            # The containers were already cleared: recreate unconditionally.
            self._import_object_subsection_create(overwrite, add, sec, secd)
            return True
        if self._IGNORE_CASE:
            for ss in self._subsections:
                if sec.lower() == ss.lower():
                    # Don't test overwrite here
                    # (merging recurses with the same modes instead).
                    self._subsections[ss]._import_object(secd,
                                                overwrite=overwrite, add=add)
                    return True
            else:
                # Going through the loop above makes sure the section is not
                # yet a subsection of the visited section
                if add:
                    self._import_object_subsection_create(overwrite, add, sec,
                                                          secd)
                    return True
        elif sec in self._subsections:
            # Don't test overwrite here
            self._subsections[sec]._import_object(secd, overwrite=overwrite,
                                                  add=add)
            return True
        elif add:
            self._import_object_subsection_create(overwrite, add, sec, secd)
            return True
        return False
def _import_object_subsection_create(self, overwrite, add, sec, secd):
"""
Auxiliary method for :py:meth:`_import_object_subsection`.
Import the currently-examined subsection.
"""
subsection = Section(name=sec, parent=self,
safe_calls=self._SAFE_CALLS,
inherit_options=self._INHERIT_OPTIONS,
subsections=self._ENABLE_SUBSECTIONS,
ignore_case=self._IGNORE_CASE)
subsection._import_object(secd, overwrite=overwrite, add=add)
self._subsections[sec] = subsection
    def _interpolate(self):
        """
        Interpolate values among different options.

        The ``$`` sign is a special character: a ``$`` not followed by ``$``,
        ``{``, ``:`` or ``}`` will be left ``$``; ``$$`` will be translated as
        ``$`` both inside or outside an interpolation path; ``${`` will be
        considered as the beginning of an interpolation path, unless it is
        found inside another interpolation path, and in the latter case it will
        be left ``${``; ``$:`` will be considered as a separator between
        sections of an interpolation path, unless it is found outside of an
        interpolation path, and in the latter case it will be left
        ``$:``; ``$}`` will be considered as the end of an interpolation path,
        unless it is found outside of an interpolation path, and in the latter
        case it will be left ``$}``.

        Normally all paths will be resolved based on the root section of the
        file; anyway, if the interpolation path has only one item, it will be
        resolved as an option relative to the current section; otherwise, if
        the path starts with ``$:``, the first item will be considered as a
        section (or an option, if last in the list) relative to the current
        section.
        """
        try:
            root = self._get_ancestors()[-1]
        except IndexError:
            # No ancestors: this section is itself the root.
            root = self
        for optname in self._options:
            # Split the value keeping the interpolation markers as tokens.
            split = re_.split(self._INTERPOLATION_SPLIT,
                              self._options[optname])
            value = ''
            # `resolve` is None while outside an interpolation path; inside
            # one it accumulates the path components seen so far.
            resolve = None
            for chunk in split:
                if resolve is None:
                    if chunk == self._INTERPOLATION_SPECIAL_ESC:
                        value += self._INTERPOLATION_SPECIAL
                    elif chunk == self._INTERPOLATION_START:
                        resolve = ['']
                    else:
                        value += chunk
                else:
                    if chunk == self._INTERPOLATION_SPECIAL_ESC:
                        resolve[-1] += self._INTERPOLATION_SPECIAL
                    elif chunk == self._INTERPOLATION_SEP:
                        resolve.append('')
                    elif chunk == self._INTERPOLATION_END:
                        # Path complete: last component is the option name,
                        # the rest locates the section holding it.
                        intoptname = resolve.pop()
                        if len(resolve) == 0:
                            # TODO: It's currently not possible to write a
                            # reference to a root option?!?
                            intsection = self
                        else:
                            if resolve[0] == '':
                                # Leading separator: resolve relative to the
                                # current section.
                                intsection = self
                                resolve.pop(0)
                            else:
                                intsection = root
                            for s in resolve:
                                intsection = intsection._subsections[s]
                        # Use get(intoptname) instead of _options[intoptname]
                        # so that options are properly inherited if the object
                        # is configured to do so
                        value += intsection.get(intoptname)
                        resolve = None
                    else:
                        resolve[-1] += chunk
            if resolve is not None:
                # The last interpolation wasn't closed, so interpret it as a
                # normal string
                value += self._INTERPOLATION_START + \
                    self._INTERPOLATION_SEP.join(resolve)
            self._options[optname] = value
        # Recurse into every subsection.
        for secname in self._subsections:
            self._subsections[secname]._interpolate()
### EXPORTING DATA ###
def get(self, opt, fallback=None, inherit_options=None):
"""
Returns the value for the option specified.
:param str opt: The name of the option whose value must be returned.
:param fallback: If set to a string, and the option is not found, this
method returns that string; if set to None (default) it returns
KeyError.
:type fallback: str or None
:param bool inherit_options: If True, if the option is not found in the
current section, it is searched in the parent sections; note that
this can be set as a default for the object, but this setting
overwrites it only for this call.
"""
if inherit_options not in (True, False):
inherit_options = self._INHERIT_OPTIONS
if isinstance(opt, str):
slist = [self, ]
if inherit_options:
slist.extend(self._get_ancestors())
for s in slist:
for o in s._options:
if (self._IGNORE_CASE and opt.lower() == o.lower()) or \
(not self._IGNORE_CASE and opt == o):
return s._options[o]
else:
# Note that if fallback is not specified, this returns None
# which is not a string as expected
return fallback
else:
raise TypeError('Option name must be a string: {}'.format(opt))
def get_str(self, opt, fallback=None, inherit_options=None):
"""
This is an alias for :py:meth:`get`.
This will always return a string.
:param str opt: The name of the option whose value must be returned.
:param fallback: If set to a string, and the option is not found, this
method returns that string; if set to None (default) it returns
KeyError.
:type fallback: str or None
:param bool inherit_options: If True, if the option is not found in the
current section, it is searched in the parent sections; note that
this can be set as a default for the object, but this setting
overwrites it only for this call.
"""
if inherit_options not in (True, False):
inherit_options = self._INHERIT_OPTIONS
return self.get(opt, fallback=fallback,
inherit_options=inherit_options)
def get_int(self, opt, fallback=None, inherit_options=None):
"""
This method tries to return an integer from the value of an option.
:param str opt: The name of the option whose value must be returned.
:param fallback: If set to a string, and the option is not found, this
method returns that string; if set to None (default) it returns
KeyError.
:type fallback: str or None
:param bool inherit_options: If True, if the option is not found in the
current section, it is searched in the parent sections; note that
this can be set as a default for the object, but this setting
overwrites it only for this call.
"""
if inherit_options not in (True, False):
inherit_options = self._INHERIT_OPTIONS
return int(self.get(opt, fallback=fallback,
inherit_options=inherit_options))
def get_float(self, opt, fallback=None, inherit_options=None):
"""
This method tries to return a float from the value of an option.
:param str opt: The name of the option whose value must be returned.
:param fallback: If set to a string, and the option is not found, this
method returns that string; if set to None (default) it returns
KeyError.
:type fallback: str or None
:param bool inherit_options: If True, if the option is not found in the
current section, it is searched in the parent sections; note that
this can be set as a default for the object, but this setting
overwrites it only for this call.
"""
if inherit_options not in (True, False):
inherit_options = self._INHERIT_OPTIONS
return float(self.get(opt, fallback=fallback,
inherit_options=inherit_options))
    def get_bool(self, opt, true=(), false=(), default=None, fallback=None,
                 inherit_options=None):
        """
        This method tries to return a boolean status (True or False) from the
        value of an option.

        :param str opt: The name of the option whose value must be returned.
        :param tuple true: A tuple with the strings to be recognized as True.
        :param tuple false: A tuple with the strings to be recognized as False.
        :param default: If the value is neither in true nor in false tuples,
            return this boolean status; if set to None, it raises a ValueError
            exception.
        :param fallback: Value used when the option is not found; it is
            stringified and evaluated against the true/false tuples like any
            stored value (the default, None, becomes the string ``'none'``,
            which normally falls through to *default*).
        :param bool inherit_options: If True, if the option is not found in the
            current section, it is searched in the parent sections; note that
            this can be set as a default for the object, but this setting
            overwrites it only for this call.

        Note that the characters in the strings are compared in lowercase, so
        there is no need to specify all casing variations of a string.
        """
        # TODO: Use default values in definition with Settings class (bug #19)
        if true == ():
            true = self._GET_BOOLEAN_TRUE
        if false == ():
            false = self._GET_BOOLEAN_FALSE
        if default not in (True, False):
            default = self._GET_BOOLEAN_DEFAULT
        if inherit_options not in (True, False):
            inherit_options = self._INHERIT_OPTIONS
        # Stringify so a non-string fallback can still be matched.
        v = str(self.get(opt, fallback=fallback,
                         inherit_options=inherit_options)).lower()
        if v in true:
            return True
        elif v in false:
            return False
        elif default in (True, False):
            return default
        else:
            raise ValueError('Unrecognized boolean status: {}'.format(
                self[opt]))
def _get_ancestors(self):
"""
Return a list with the ancestors of the current section, but not the
current section itself.
"""
slist = []
p = self._PARENT
while p:
slist.append(p)
p = p._PARENT
return slist
def _get_descendants(self):
"""
Return a list with the descendants of the current section, but not the
current section itself.
"""
# Don't do `slist = self._subsections.values()` because the descendants
# for each subsection must be appended after the proper subsection,
# not at the end of the list
slist = []
for section in self._subsections.values():
slist.append(section)
slist.extend(section._get_descendants())
return slist
def get_options(self, ordered=True, inherit_options=None):
"""
Return a dictionary with a copy of option names as keys and their
values as values.
:param bool ordered: If True, return an ordered dictionary; otherwise
return a normal dictionary.
:param bool inherit_options: If True, options are searched also in the
parent sections; note that this can be set as a default for the
object, but this setting overwrites it only for this call.
"""
if inherit_options not in (True, False):
inherit_options = self._INHERIT_OPTIONS
if ordered:
d = self._DICT_CLASS()
else:
d = {}
slist = [self, ]
if inherit_options:
slist.extend(self._get_ancestors())
for s in slist:
for o in s._options:
d.setdefault(o, s._options[o][:])
# There should be no need to check _IGNORE_CASE, in fact it has
# already been done at importing time
return d
    def get_sections(self):
        """
        Return a view of the names of the child sections.

        Note this is a live dictionary view, not a copy: it reflects later
        additions and removals of subsections.
        """
        return self._subsections.keys()
    def get_tree(self, ordered=True, path=False):
        """
        Return a compatible object with options and subsections.

        :param bool ordered: If True, the object uses ordered dictionaries;
            otherwise it uses normal dictionaries.
        :param bool path: If True, return the current section as a subsection
            of (otherwise empty) parent sections, mirroring its position in
            the tree.
        """
        d = self._recurse_tree(ordered=ordered)
        if path:
            # Walk up the ancestry, wrapping the tree built so far inside an
            # otherwise-empty section named after each level.
            p = self._PARENT
            n = self._NAME
            while p:
                if ordered:
                    e = self._EMPTY_SECTION()
                else:
                    e = ({}, {})
                e[1][n] = d
                d = e
                n = p._NAME
                p = p._PARENT
        return d
def _recurse_tree(self, ordered=True):
"""
Auxiliary recursor for :py:meth:`get_tree`.
"""
options = self.get_options(ordered=ordered, inherit_options=False)
if ordered:
d = (options, self._DICT_CLASS())
else:
d = (options, {})
for s in self._subsections:
d[1][s] = self._subsections[s]._recurse_tree(ordered=ordered)
return d
    def _export(self, targets, overwrite=True, add=True, reset=False,
                path=True):
        """
        Export the configuration to one or more files.

        :param targets: A sequence with the target file names.
        :param bool overwrite: This sets whether sections and options in the
            file are overwritten; see _import_object for more details.
        :param bool add: This sets whether non-pre-existing sections and option
            are added; see _import_object for more details.
        :param bool reset: This sets whether pre-existing file content is
            cleared before writing.
        :param bool path: If True, section names are exported with their full
            path.
        """
        # TODO: Change "reset" mode to "remove" (complementing "overwrite" and
        # "add") (bug #25)
        # Each target file is written independently with the same modes.
        for f in targets:
            self._export_file(f, overwrite=overwrite, add=add, reset=reset,
                              path=path)
def export_upgrade(self, *targets, **kwargs):
    """
    Export sections and options to one or more files with upgrade mode.

    If an option already exists, change its value; if it does not exist,
    create it and store its value. For example:
    *{A:d,D:e} upgrade {A:a,B:b,C:c} => {A:d,B:b,C:c,D:e}*

    See :py:meth:`_export_file` for object compatibility.

    :param targets: A sequence with the target file names.
    :param bool path: If True, section names are exported with their full
        path.
    """
    # Python 2 cannot express keyword-only parameters; the Python 3
    # signature would be ``def export_upgrade(self, *targets, path=True)``.
    self._export(targets, path=kwargs.get('path', True))
def export_update(self, *targets, **kwargs):
    """
    Export sections and options to one or more files with update mode.

    If an option already exists, change its value; if it does not exist,
    do not do anything. For example:
    *{A:d,D:e} update {A:a,B:b,C:c} => {A:d,B:b,C:c}*

    See :py:meth:`_export_file` for object compatibility.

    :param targets: A sequence with the target file names.
    :param bool path: If True, section names are exported with their full
        path.
    """
    # Python 2 cannot express keyword-only parameters; the Python 3
    # signature would be ``def export_update(self, *targets, path=True)``.
    self._export(targets, add=False, path=kwargs.get('path', True))
def export_reset(self, *targets, **kwargs):
    """
    Export sections and options to one or more files with reset mode.

    Delete all options and subsections and recreate everything from the
    importing object. For example:
    *{A:d,D:e} reset {A:a,B:b,C:c} => {A:d,D:e}*

    See :py:meth:`_export_file` for object compatibility.

    :param targets: A sequence with the target file names.
    :param bool path: If True, section names are exported with their full
        path.
    """
    # Python 2 cannot express keyword-only parameters; the Python 3
    # signature would be ``def export_reset(self, *targets, path=True)``.
    self._export(targets, reset=True, path=kwargs.get('path', True))
def export_add(self, *targets, **kwargs):
    """
    Export sections and options to one or more files with add mode.

    If an option already exists, do not do anything; if it does not exist,
    create it and store its value. For example:
    *{A:d,D:e} add {A:a,B:b,C:c} => {A:a,B:b,C:c,D:e}*

    See :py:meth:`_export_file` for object compatibility.

    :param targets: A sequence with the target file names.
    :param bool path: If True, section names are exported with their full
        path.
    """
    # Python 2 cannot express keyword-only parameters; the Python 3
    # signature would be ``def export_add(self, *targets, path=True)``.
    self._export(targets, overwrite=False, path=kwargs.get('path', True))
def _export_file(self, cfile, overwrite=True, add=True, reset=False,
                 path=True):
    """
    Export the sections tree to a file, merging with its current content.

    :param str cfile: The target file name.
    :param bool overwrite: Whether sections and options already existing in
        the file are overwritten.
    :param bool add: Whether non-pre-existing data will be exported.
    :param bool reset: If True, lines in the file that are not part of this
        object are dropped instead of preserved.
    :param bool path: If True, section names are exported with their full
        path.
    """
    # Read the current content of the target, if any, so it can be merged.
    try:
        with open(cfile, 'r') as stream:
            lines = stream.readlines()
    except IOError:
        # Non-existent target: behave as if the file were empty.
        lines = []
    else:
        # Exclude leading blank lines
        for lineN, line in enumerate(lines):
            if not re_.match(self._PARSE_IGNORE, line, self._RE_I):
                lines = lines[lineN:]
                break
        else:
            # Only ignorable lines were found: treat the file as empty.
            lines = []
    # Rewrite the file from scratch, replaying the old lines and merging
    # this object's data into them.
    with open(cfile, 'w') as stream:
        BASE_SECTION = self
        try:
            ROOT_SECTION = self._get_ancestors()[-1]
        except IndexError:
            # self is the root section itself.
            ROOT_SECTION = self
            readonly_section = False
            remaining_descendants = []
        else:
            if path:
                # With full paths, top-of-file options belong to no known
                # section and must be copied through untouched.
                readonly_section = True
                remaining_descendants = [BASE_SECTION, ]
            else:
                # The options without a section (i.e. at the top of the
                # file) must be considered part of the current section if
                # path is False
                readonly_section = False
                remaining_descendants = []
        # Options/sections still to be written; entries are removed as
        # their pre-existing counterparts are found in the file.
        remaining_options = BASE_SECTION.get_options(inherit_options=False)
        remaining_descendants.extend(BASE_SECTION._get_descendants())
        # Buffer for comments and unrecognized lines awaiting a decision.
        other_lines = []
        for line in lines:
            re_option = re_.match(self._PARSE_OPTION, line, self._RE_I)
            if re_option:
                # This also changes other_lines in place
                self._export_other_lines(stream, other_lines,
                                         readonly_section, reset)
                self._export_file_existing_option(stream, line, re_option,
                                                  readonly_section, remaining_options,
                                                  overwrite, reset)
                continue
            re_section = re_.match(self._PARSE_SECTION, line, self._RE_I)
            if re_section:
                # Before entering the next section, flush any options of
                # the current one that the file did not already contain.
                if add:
                    self._export_file_remaining_options(stream,
                                                        readonly_section, remaining_options)
                # This also changes other_lines in place
                self._export_other_lines_before_existing_section(stream,
                                                                 other_lines, readonly_section, reset)
                # This also changes remaining_descendants in place
                (readonly_section, remaining_options) = \
                    self._export_file_existing_section(
                        stream, line, re_section,
                        ROOT_SECTION, BASE_SECTION,
                        remaining_descendants, path)
                continue
            # Comments, ignored/invalid lines
            other_lines.append(line)
        # End of the pre-existing file: flush what is left.
        if add:
            self._export_file_remaining_options(stream, readonly_section,
                                                remaining_options)
        # Don't use _export_other_lines_before_existing_section here
        # because any pre-existing unrecognized lines must be restored in
        # any case, and since they're at the end of the original file,
        # they weren't meant to separate any further sections, so let
        # _export_file_remaining_sections handle the addition of a blank
        # line
        # This also changes other_lines in place
        self._export_other_lines(stream, other_lines, readonly_section,
                                 reset)
        if add:
            self._export_file_remaining_sections(stream, BASE_SECTION,
                                                 remaining_descendants, path)
def _export_file_existing_option(self, stream, line, re_option,
                                 readonly_section, remaining_options, overwrite, reset):
    """
    Auxiliary method for :py:meth:`_export_file`.

    Write the option currently examined from the destination file.

    :returns: True if the line was written (possibly with an updated
        value), False if it was dropped by a reset.
    """
    if readonly_section:
        # Sections not owned by this object are copied through verbatim.
        stream.write(line)
        return True
    if self._IGNORE_CASE:
        for option in remaining_options:
            fkey = re_option.group(1)
            fvalue = re_option.group(2)
            if fkey.lower() == option.lower():
                if overwrite and fvalue != remaining_options[option]:
                    # Keep the file's key spelling, replace only the value.
                    stream.write(''.join((fkey, self._OPTION_SEP,
                                          remaining_options[option], '\n')))
                else:
                    stream.write(line)
                del remaining_options[option]
                # There shouldn't be more occurrences of this option (even
                # with different casing)
                return True
    else:
        fkey = re_option.group(1)
        fvalue = re_option.group(2)
        if fkey in remaining_options:
            if overwrite and remaining_options[fkey] != fvalue:
                stream.write(''.join((fkey, self._OPTION_SEP,
                                      remaining_options[fkey], '\n')))
            else:
                stream.write(line)
            del remaining_options[fkey]
            return True
    # Option unknown to this object: preserve it unless a reset is
    # wiping the section.
    if not reset:
        stream.write(line)
        return True
    return False
def _export_file_remaining_options(self, stream, readonly_section,
remaining_options):
"""
Auxiliary method for :py:meth:`_export_file`.
Write the options from the origin object that were not found in the
destination file.
"""
if not readonly_section:
for option in remaining_options:
stream.write(''.join((option, self._OPTION_SEP,
remaining_options[option], '\n')))
def _export_file_existing_section(self, stream, line, re_section,
                                  ROOT_SECTION, BASE_SECTION, remaining_descendants, path):
    """
    Auxiliary method for :py:meth:`_export_file`.

    Write the section currently examined from the destination file.

    :returns: a ``(readonly_section, remaining_options)`` pair describing
        how the following option lines must be treated.
    """
    # Split a full path into its component names when subsections are on.
    if self._ENABLE_SUBSECTIONS:
        names = re_section.group(1).split(self._SECTION_SEP)
    else:
        names = (re_section.group(1), )
    # Resolve the parsed name relative to the root (full paths) or to the
    # exported section (relative names).
    current_section = ROOT_SECTION if path else BASE_SECTION
    for name in names:
        try:
            current_section = current_section(name)
        except KeyError:
            # The currently parsed section is not in the configuration
            # object
            readonly_section = True
            remaining_options = self._DICT_CLASS()
            break
    else:
        # Only sections under BASE_SECTION (or BASE_SECTION itself) are
        # editable; anything else is copied through untouched.
        alist = [current_section, ]
        alist.extend(current_section._get_ancestors())
        if BASE_SECTION in alist:
            readonly_section = False
            remaining_options = current_section.get_options(
                inherit_options=False)
            remaining_descendants.remove(current_section)
        else:
            readonly_section = True
            remaining_options = self._DICT_CLASS()
    # TODO: If reset (which for all the other modes by default is "deep",
    #       i.e. it must affect the subsections too) this section and all
    #       the other "old" subsections must be removed from the file
    #       (bug #22)
    stream.write(line)
    return (readonly_section, remaining_options)
def _export_file_remaining_sections(self, stream, BASE_SECTION,
                                    remaining_descendants, path):
    """
    Auxiliary method for :py:meth:`_export_file`.

    Write the sections and their options from the origin object that
    were not found in the destination file.
    """
    # Do not add an empty line if at the start of the file
    BR = "\n" if stream.tell() > 0 else ""
    for section in remaining_descendants:
        # Sections without options are not written at all.
        if len(section._options) > 0:
            # Build the section's (possibly partial) path, innermost first.
            ancestors = [section._NAME, ]
            for ancestor in section._get_ancestors()[:-1]:
                # With relative names, stop the path at the exported
                # section instead of going all the way to the root.
                if not path and ancestor is BASE_SECTION:
                    break
                ancestors.append(ancestor._NAME)
            ancestors.reverse()
            stream.write("".join((BR, self._SECTION_MARKERS, "\n")
                                 ).format(self._SECTION_SEP.join(ancestors)))
            for option in section._options:
                stream.write("".join((option, self._OPTION_SEP,
                                      section[option], "\n")))
            # All the subsequent sections will need a blank line in any
            # case (do not add a double line break after the last option
            # because the last option of the last section must have only
            # one break)
            BR = "\n"
def _export_other_lines(self, stream, other_lines, readonly_section,
reset):
"""
Auxiliary method for :py:meth:`_export_file`.
"""
if readonly_section or not reset:
stream.writelines(other_lines)
other_lines[:] = []
def _export_other_lines_before_existing_section(self, stream, other_lines,
readonly_section, reset):
"""
Auxiliary method for :py:meth:`_export_file`.
"""
if readonly_section or not reset:
stream.writelines(other_lines)
elif stream.tell() > 0:
stream.write("\n")
other_lines[:] = []
class ConfigFile(Section):
    """
    The main configuration object: the root :py:class:`Section`, built by
    importing one or more sources at construction time.
    """
    def __init__(self, *sources, **kwargs):
        """
        Constructor.

        :param sources: A sequence of all the files, file-like objects,
            dictionaries and special objects to be parsed.
        :type sources: str, dict or special object (see
            :py:meth:`Section._import_object`)
        :param str mode: This sets if and how the next source in the chain
            overwrites already imported sections and options; available choices
            are ``'upgrade'``, ``'update'``, ``'reset'`` and ``'add'`` (see the
            respective methods for more details).
        :param bool safe_calls: If True, when calling a non-existent
            subsection, its closest existing ancestor is returned.
        :param bool inherit_options: If True, if an option is not found in a
            section, it is searched in the parent sections.
        :param bool ignore_case: If True, section and option names will be
            compared ignoring case differences; regular expressions will use
            ``re.I`` flag.
        :param bool subsections: If True (default) subsections are allowed.
        :param bool interpolation: If True, option values will be interpolated
            using values from other options through the special syntax
            ``${section$:section$:option$}``. Options will be interpolated only
            once at importing: all links among options will be lost after
            importing.
        """
        # The Python 3 definition was:
        #def __init__(self,
        #             *sources,
        #             mode='upgrade',
        #             safe_calls=False,
        #             inherit_options=False,
        #             subsections=True,
        #             ignore_case=True,
        #             interpolation=False):
        # But to keep compatibility with Python 2 it has been changed to the
        # current
        mode = kwargs.get('mode', 'upgrade')
        safe_calls = kwargs.get('safe_calls', False)
        inherit_options = kwargs.get('inherit_options', False)
        subsections = kwargs.get('subsections', True)
        ignore_case = kwargs.get('ignore_case', True)
        interpolation = kwargs.get('interpolation', False)

        # Root section
        Section.__init__(self, name=None, parent=None,
                         safe_calls=safe_calls,
                         inherit_options=inherit_options,
                         subsections=subsections,
                         ignore_case=ignore_case)

        # Translate the mode name into the (overwrite, add, reset) flag
        # triple understood by the import machinery.
        try:
            overwrite, add, reset = {
                "upgrade": (True, True, False),
                "update": (True, False, False),
                "reset": (True, True, True),
                "add": (False, True, False),
            }[mode]
        except KeyError:
            raise ValueError('Unrecognized importing mode: {}'.format(mode))

        self._import(sources, overwrite=overwrite, add=add, reset=reset,
                     interpolation=interpolation)
### EXCEPTIONS ###
class ConfigFileError(Exception):
    """
    The root exception of this module, useful for catching any error it
    raises with a single ``except`` clause.
    """
    pass
class ParsingError(ConfigFileError):
    """
    An error, overcome at parse time, due to badly formatted file content.
    """
    pass
class NonExistentFileError(ConfigFileError):
    """
    Raised when a configuration file to be imported does not exist.
    """
    pass
class InvalidFileError(ConfigFileError):
    """
    Raised when a configuration file cannot be parsed as such.
    """
    pass
class InvalidObjectError(ConfigFileError):
    """
    Raised when an invalid key is found in an importing object.
    """
    pass
| kynikos/lib.py.configfile | configfile/__init__.py | Python | mit | 60,029 |
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import sys, os, platform
from glob import glob
import cv2
import numpy as np
from file_methods import Persistent_Dict
from pyglui import ui
from player_methods import transparent_image_overlay
from plugin import Plugin
from copy import copy
# helpers/utils
from version_utils import VersionFormat
#capture
from video_capture import EndofVideoFileError,FileSeekError,FileCaptureError,File_Source
#mouse
from glfw import glfwGetCursorPos,glfwGetWindowSize,glfwGetCurrentContext
from methods import normalize,denormalize
#logging
import logging
logger = logging.getLogger(__name__)
def get_past_timestamp(idx, timestamps):
    """
    Recursively walk backwards from ``idx`` to the closest non-empty bin
    and return its last (most recent) timestamp.

    When the search hits index 0 without success it hands over to
    :func:`get_future_timestamp`, so a value is found as long as at least
    one bin is non-empty.
    """
    if idx == 0:
        # Cannot go further back in time; search forward instead.
        return get_future_timestamp(idx, timestamps)
    if timestamps[idx]:
        return timestamps[idx][-1]
    return get_past_timestamp(idx - 1, timestamps)
def get_future_timestamp(idx, timestamps):
    """
    Recursively walk forwards from ``idx`` to the closest non-empty bin
    and return its first (earliest) timestamp.

    When the search hits the last index it hands over to
    :func:`get_past_timestamp`, mirroring that function's fallback.
    """
    last = len(timestamps) - 1
    if idx == last:
        # Cannot go further into the future; search backwards instead.
        return get_past_timestamp(idx, timestamps)
    if timestamps[idx]:
        return timestamps[idx][0]
    # The min() keeps the original's defensive clamp; with idx < last it
    # always evaluates to idx + 1.
    return get_future_timestamp(min(len(timestamps), idx + 1), timestamps)
def get_nearest_timestamp(past_timestamp, future_timestamp, world_timestamp):
    """Return whichever candidate timestamp is temporally closer to
    ``world_timestamp``; ties go to ``future_timestamp`` (strict ``<``)."""
    dt_past = abs(past_timestamp - world_timestamp)
    # abs prob not necessary here, but just for sanity
    dt_future = abs(future_timestamp - world_timestamp)
    return past_timestamp if dt_past < dt_future else future_timestamp
def correlate_eye_world(eye_timestamps, world_timestamps):
    """
    Correlate one eye frame per world frame.

    Takes a list of eye timestamps and a sequence of world timestamps and
    returns a list mapping each world frame index to a single eye frame
    index; up- and downsampling is used to achieve the mapping.

    .. note:: ``eye_timestamps`` is consumed in place (popped) while
       binning, matching the historical behaviour of this function.
       If it is empty, the list of empty per-world-frame bins is returned
       instead of a frame map (historical quirk, preserved).
    """
    e_ts = eye_timestamps
    w_ts = list(world_timestamps)
    # Map each eye timestamp to its frame index before e_ts is consumed.
    eye_frames_by_timestamp = dict(zip(e_ts, range(len(e_ts))))

    # Bin the eye timestamps by the world frame they fall into.
    eye_timestamps_by_world_index = [[] for _ in world_timestamps]
    frame_idx = 0
    try:
        current_e_ts = e_ts.pop(0)
    except IndexError:  # was a bare except; only an empty list can raise here
        logger.warning("No eye timestamps found.")
        return eye_timestamps_by_world_index
    while e_ts:
        # An eye timestamp belongs to this world frame if it precedes the
        # midpoint between this frame's timestamp and the next one's.
        try:
            t_between_frames = (w_ts[frame_idx] + w_ts[frame_idx + 1]) / 2.
        except IndexError:
            break
        if current_e_ts <= t_between_frames:
            eye_timestamps_by_world_index[frame_idx].append(current_e_ts)
            current_e_ts = e_ts.pop(0)
        else:
            frame_idx += 1

    # Some bins may be empty -- no correlated eye timestamp -- so borrow the
    # temporally closest timestamp from a neighbouring bin.
    eye_world_frame_map = []
    for idx, (candidate, world_ts) in enumerate(
            zip(eye_timestamps_by_world_index, w_ts)):
        if not candidate:
            # get most recent timestamp, either in the past or future
            e_past_ts = get_past_timestamp(idx, eye_timestamps_by_world_index)
            e_future_ts = get_future_timestamp(idx, eye_timestamps_by_world_index)
            best_ts = get_nearest_timestamp(e_past_ts, e_future_ts, world_ts)
            eye_world_frame_map.append(eye_frames_by_timestamp[best_ts])
        else:
            # TODO - if there is a list of len > 1 - then we should check
            # which is the temporally closest timestamp
            eye_world_frame_map.append(eye_frames_by_timestamp[candidate[-1]])
    return eye_world_frame_map
class Vis_Eye_Video_Overlay(Plugin):
    """
    Overlay the eye camera recording(s) on top of the world video.

    Features: flip video across horiz/vert axes, click and drag around
    interface, scale video size from 20% to 100%, show only 1 or 2 or both
    eyes.

    features updated by Andrew June 2015
    """
    def __init__(self,g_pool,alpha=0.6,eye_scale_factor=.5,move_around=0,mirror={'0':False,'1':False}, flip={'0':False,'1':False},pos=[(640,10),(10,10)]):
        # NOTE(review): mirror/flip/pos are mutable defaults shared across
        # instances; safe only while they are never mutated in place or a
        # fresh dict/list is always passed in -- verify against callers.
        super().__init__(g_pool)
        self.order = .6
        self.menu = None
        # user controls
        self.alpha = alpha #opacity level of eyes
        self.eye_scale_factor = eye_scale_factor #scale
        self.showeyes = 0,1 #modes: any text containg both means both eye is present, on 'only eye1' if only one eye recording
        self.move_around = move_around #boolean whether allow to move clip around screen or not
        self.video_size = [0,0] #video_size of recording (bc scaling)
        #variables specific to each eye
        self.eye_frames = []
        self.eye_world_frame_map = []
        self.eye_cap = []
        self.mirror = mirror #do we horiz flip first eye
        self.flip = flip #do we vert flip first eye
        self.pos = [list(pos[0]),list(pos[1])] #positions of 2 eyes
        self.drag_offset = [None,None]
        # load eye videos and eye timestamps; recordings older than v0.4
        # used a single monocular 'eye.avi'.
        if VersionFormat(self.g_pool.meta_info['Capture Software Version'][1:]) < VersionFormat('0.4'):
            eye_video_path = os.path.join(g_pool.rec_dir,'eye.avi'),'None'
            eye_timestamps_path = os.path.join(g_pool.rec_dir,'eye_timestamps.npy'),'None'
        else:
            eye_video_path = os.path.join(g_pool.rec_dir,'eye0.*'),os.path.join(g_pool.rec_dir,'eye1.*')
            eye_timestamps_path = os.path.join(g_pool.rec_dir,'eye0_timestamps.npy'),os.path.join(g_pool.rec_dir,'eye1_timestamps.npy')
        #try to load eye video and ts for each eye.
        for video,ts in zip(eye_video_path,eye_timestamps_path):
            try:
                self.eye_cap.append(File_Source(self.g_pool,source_path=glob(video)[0],timestamps=np.load(ts)))
            except(IndexError,FileCaptureError):
                # glob found no file, or the capture could not be opened.
                pass
            else:
                self.eye_frames.append(self.eye_cap[-1].get_frame())
                try:
                    eye_timestamps = list(np.load(ts))
                except:
                    # NOTE(review): bare except; presumably guards a missing
                    # or unreadable timestamp file -- consider narrowing.
                    pass
                else:
                    self.eye_world_frame_map.append(correlate_eye_world(eye_timestamps,g_pool.timestamps))
        if len(self.eye_cap) == 2:
            logger.debug("Loaded binocular eye video data.")
        elif len(self.eye_cap) == 1:
            logger.debug("Loaded monocular eye video data")
            self.showeyes = (0,)
        else:
            # No eye video could be loaded: deactivate the plugin.
            logger.error("Could not load eye video.")
            self.alive = False
            return

    def unset_alive(self):
        """Mark the plugin for removal by the plugin manager."""
        self.alive = False

    def init_gui(self):
        """Create the plugin menu and attach it to the main GUI."""
        # initialize the menu
        self.menu = ui.Scrolling_Menu('Eye Video Overlay')
        self.update_gui()
        self.g_pool.gui.append(self.menu)

    def update_gui(self):
        """(Re)build the menu contents to match the current eye selection."""
        self.menu.elements[:] = []
        self.menu.append(ui.Button('Close',self.unset_alive))
        self.menu.append(ui.Info_Text('Show the eye video overlaid on top of the world video. Eye1 is usually the right eye'))
        self.menu.append(ui.Slider('alpha',self,min=0.0,step=0.05,max=1.0,label='Opacity'))
        self.menu.append(ui.Slider('eye_scale_factor',self,min=0.2,step=0.1,max=1.0,label='Video Scale'))
        self.menu.append(ui.Switch('move_around',self,label="Move Overlay"))
        # The eye selector only makes sense for binocular recordings.
        if len(self.eye_cap) == 2:
            self.menu.append(ui.Selector('showeyes',self,label='Show',selection=[(0,),(1,),(0,1)],labels= ['eye 1','eye 2','both'],setter=self.set_showeyes))
        if 0 in self.showeyes:
            self.menu.append(ui.Switch('0',self.mirror,label="Eye 1: Horiz. Flip"))
            self.menu.append(ui.Switch('0',self.flip,label="Eye 1: Vert. Flip"))
        if 1 in self.showeyes:
            self.menu.append(ui.Switch('1',self.mirror,label="Eye 2: Horiz Flip"))
            self.menu.append(ui.Switch('1',self.flip,label="Eye 2: Vert Flip"))

    def set_showeyes(self,new_mode):
        #everytime we choose eye setting (either use eye 1, 2, or both, updates the gui menu to remove certain options from list)
        self.showeyes = new_mode
        self.update_gui()

    def deinit_gui(self):
        """Detach and drop the plugin menu, if present."""
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu = None

    def update(self,frame,events):
        """Blend the correlated eye frame(s) into the current world frame."""
        for eye_index in self.showeyes:
            requested_eye_frame_idx = self.eye_world_frame_map[eye_index][frame.index]
            #1. do we need a new frame?
            if requested_eye_frame_idx != self.eye_frames[eye_index].index:
                # do we need to seek?
                if requested_eye_frame_idx == self.eye_cap[eye_index].get_frame_index()+1:
                    # if we just need to seek by one frame, its faster to just read one and and throw it away.
                    _ = self.eye_cap[eye_index].get_frame()
                if requested_eye_frame_idx != self.eye_cap[eye_index].get_frame_index():
                    # only now do I need to seek
                    self.eye_cap[eye_index].seek_to_frame(requested_eye_frame_idx)
                # reading the new eye frame frame
                try:
                    self.eye_frames[eye_index] = self.eye_cap[eye_index].get_frame()
                except EndofVideoFileError:
                    logger.warning("Reached the end of the eye video for eye video {}.".format(eye_index))
            else:
                #our old frame is still valid because we are doing upsampling
                pass
            #2. dragging image
            if self.drag_offset[eye_index] is not None:
                # Follow the mouse, keeping the grab point fixed.
                pos = glfwGetCursorPos(glfwGetCurrentContext())
                pos = normalize(pos,glfwGetWindowSize(glfwGetCurrentContext()))
                pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
                self.pos[eye_index][0] = pos[0]+self.drag_offset[eye_index][0]
                self.pos[eye_index][1] = pos[1]+self.drag_offset[eye_index][1]
            else:
                self.video_size = [round(self.eye_frames[eye_index].width*self.eye_scale_factor), round(self.eye_frames[eye_index].height*self.eye_scale_factor)]
            #3. keep in image bounds, do this even when not dragging because the image video_sizes could change.
            self.pos[eye_index][1] = min(frame.img.shape[0]-self.video_size[1],max(self.pos[eye_index][1],0)) #frame.img.shape[0] is height, frame.img.shape[1] is width of screen
            self.pos[eye_index][0] = min(frame.img.shape[1]-self.video_size[0],max(self.pos[eye_index][0],0))
            #4. flipping images, converting to greyscale
            eye_gray = cv2.cvtColor(self.eye_frames[eye_index].img,cv2.COLOR_BGR2GRAY) #auto gray scaling
            eyeimage = cv2.resize(eye_gray,(0,0),fx=self.eye_scale_factor, fy=self.eye_scale_factor)
            if self.mirror[str(eye_index)]:
                eyeimage = np.fliplr(eyeimage)
            if self.flip[str(eye_index)]:
                eyeimage = np.flipud(eyeimage)
            #5. finally overlay the image
            x,y = int(self.pos[eye_index][0]),int(self.pos[eye_index][1])
            transparent_image_overlay((x,y),cv2.cvtColor(eyeimage,cv2.COLOR_GRAY2BGR),frame.img,self.alpha)

    def on_click(self,pos,button,action):
        """Start dragging the overlay under the cursor; any other click
        (or a click outside every overlay) clears both drag offsets."""
        if self.move_around == 1 and action == 1:
            for eye_index in self.showeyes:
                # Hit-test the click against this eye's overlay rectangle.
                if self.pos[eye_index][0] < pos[0] < self.pos[eye_index][0]+self.video_size[0] and self.pos[eye_index][1] < pos[1] < self.pos[eye_index][1] + self.video_size[1]:
                    self.drag_offset[eye_index] = self.pos[eye_index][0]-pos[0],self.pos[eye_index][1]-pos[1]
                    return
        else:
            self.drag_offset = [None,None]

    def get_init_dict(self):
        """Return the constructor kwargs needed to restore this plugin."""
        # NOTE(review): 'move_around' appears twice in this literal; the
        # duplicate is harmless (same value) but could be removed.
        return {'alpha':self.alpha,'eye_scale_factor':self.eye_scale_factor,'move_around':self.move_around,'mirror':self.mirror,'flip':self.flip,'pos':self.pos,'move_around':self.move_around}

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """
        self.deinit_gui()
| fsxfreak/esys-pbi | src/pupil/pupil_src/player/vis_eye_video_overlay.py | Python | mit | 12,941 |
#!/usr/bin/env python
import os,sys
import string
import re
import pickle
stopword_file = './stopwords'
class CleanTweet(object):
    """
    Normalize a raw tweet: case-folded, with URLs, @replies, the leading
    'RT' marker and non-ASCII runs removed, punctuation stripped, and
    stopwords filtered out.  (Python 2 code: uses str.translate(None, ...)
    and the print statement.)
    """
    # Regexes for content that must be stripped from the raw text.
    _url = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    _tweeterid = r'@\w{1,15}'  # @mention / reply handles
    _retweet = r'^RT'  # leading retweet marker
    _nonascii = r'[^\x00-\x7F]+'  # runs of non-ASCII bytes
    filter_re = [_url, _tweeterid, _retweet, _nonascii]

    def __init__(self, rawtweet, stopwords=[]):
        # NOTE(review): mutable default argument; harmless here because
        # stopwords is only read, never mutated.
        self._tweet = rawtweet
        cleantweet = rawtweet
        for ptn in self.filter_re:
            cleantweet = re.sub(ptn, '', cleantweet)
        punct = string.punctuation#.replace("'","")
        # Python 2 str.translate: delete every punctuation character.
        cleantweet = cleantweet.translate(None, punct)
        self._toks = cleantweet.lower().replace('\xe2','').split()
        # Stopword filtering happens before strip(); tokens come from
        # split() so they normally carry no surrounding whitespace anyway.
        self._toks = [item.strip() for item in self._toks if item not in stopwords]
        # Debug aid: report tokens still containing the 0xE2 lead byte.
        for w in self._toks:
            if '\xe2' in w:
                print w
        self._cleantweet = ' '.join(self._toks)

    def rawtweet(self):
        """Return the original, unmodified tweet text."""
        return self._tweet

    def cleantweet(self):
        """Return the cleaned tweet as a single space-joined string."""
        return self._cleantweet

    def toks(self):
        """Return the cleaned tweet as a list of tokens."""
        return self._toks

    def __str__(self):
        return self._cleantweet
# Script entry: extract bracket-delimited tweets from the input dump,
# clean them, pickle the resulting list, then reload it as a sanity check.
# (Python 2 script: uses print statements.)
infilename = sys.argv[1]
outfilename = sys.argv[2]
tweets = []
stopwords = []
# Load the stopword list, one word per line.
with open(stopword_file, 'rb') as fs:
    for word in fs:
        stopwords.append(word.strip())
    fs.close()  # NOTE(review): redundant -- the with block already closes fs
with open(infilename, 'rb') as fi, open(outfilename, 'wb') as fo:
    infile = fi.read()
    start = '['
    stop = ']'
    buf = ''
    flag = False
    # Scan character by character, accumulating text between '[' and ']'.
    for c in infile:
        if c == start:
            flag = True
            continue
        elif c == stop:
            tweetobj = CleanTweet(buf, stopwords).cleantweet()
            if tweetobj != '':
                tweets.append(tweetobj)
            buf = ''
            flag = False
        if flag:
            buf += c
        # Cap the number of collected tweets to bound memory usage.
        if len(tweets) >= 1000000:
            break
    pickle.dump(tweets, fo)
    fi.close()  # NOTE(review): redundant inside the with block
    fo.close()  # NOTE(review): redundant inside the with block
# Round-trip check: reload the pickle and print every cleaned tweet.
with open(outfilename, 'rb') as fo:
    newlist = pickle.load(fo)
    for t in newlist:
        print t
| luozhaoyu/big-data-system | assignment3/partB/parsetweet.py | Python | mit | 2,168 |
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``sizesrc`` property of
    ``surface.hoverlabel.font``."""

    def __init__(
        self, plotly_name="sizesrc", parent_name="surface.hoverlabel.font", **kwargs
    ):
        # Source properties are not editable in-place; default accordingly.
        edit_type = kwargs.pop("edit_type", "none")
        super(SizesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/surface/hoverlabel/font/_sizesrc.py | Python | mit | 423 |
Flask==0.10.1
Jinja2==2.7.2
MarkupSafe==0.18
Werkzeug==0.9.4
distribute==0.6.31
itsdangerous==0.23
lxml==3.3.1
pygal==1.3.1
wsgiref==0.1.2
| birknilson/oyster | examples/piechart/requirements.py | Python | mit | 139 |
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from apps.employees import ajax
from . import views
# ``urlpatterns`` must be a plain list of url() instances: the tuple form
# was deprecated in Django 1.9 and removed in Django 1.10.
urlpatterns = [
    url(r'^home/$', login_required(views.home), name="employee_home_redirect"),
    url(r'^(?P<pk>[\d]+)/$', login_required(views.EmployeeDetail.as_view()), name="employee_detail"),
    url(r'^schedule/add$', login_required(views.ScheduleAdd.as_view()), name="employee_schedule_add"),
    url(r'^schedule/$', login_required(views.schedule), name="employee_schedule"),
    url(r'^admin/$', login_required(views.EmployeeAdminPanel.as_view()), name="employee_admin"),
    url(r'^sub-board/$', login_required(views.SubBoard.as_view()), name="sub_board"),
    url(r'^ajax/take_slip/$', login_required(ajax.SubSlipAjax.as_view()), name="take_slip"),
]
| lmann4/cis526-final-project | resource_management/apps/employees/urls.py | Python | mit | 807 |
from math import *
from Vertex import *
# Length of the three subparts of the robot leg
# (units presumably millimetres -- TODO confirm against the hardware spec)
L1 = 51.0
L2 = 63.7
L3 = 93.0
# Mechanical offsets applied before the degrees-to-radians conversion in
# leg_dk(), so both are expressed in degrees.
Alpha = 20.69 #Mecanic constraint on Theta 2
Beta = 5.06 #Mecanic constraint on Theta 3
# Check if the given float match with radian (between 2PI and -2PI)
def radValidation(radian):
    """Return True when *radian* is a valid radian value, i.e. lies in
    the closed interval [-2*pi, 2*pi]."""
    return -2 * pi <= radian <= 2 * pi
# Direct kinamatics for our considered robot (specific of our leg setting)
def leg_dk(theta1, theta2, theta3, l1=L1, l2=L2, l3=L3, alpha=Alpha, beta=Beta):
    """
    Direct kinematics for our considered robot (specific of our leg
    setting).

    :param theta1, theta2, theta3: joint angles in degrees.
    :param l1, l2, l3: lengths of the three leg segments.
    :param alpha, beta: mechanical offsets (degrees) of joints 2 and 3.
    :returns: a Vertex with the end-effector position.
    """
    # Apply the mechanical constraints to the two distal joints.
    # (The original also built an unused Vertex from the raw angles;
    # that dead allocation has been removed.)
    theta2 += alpha
    theta3 = 90 - (alpha + beta + theta3)
    # Convert to radians; joints 2 and 3 rotate in the opposite sense.
    theta1 = radians(theta1)
    theta2 = -radians(theta2)
    theta3 = -radians(theta3)
    # Cache the trigonometric terms so each is computed only once.
    c_1 = cos(theta1)
    c_2 = cos(theta2)
    c_2_3 = cos(theta2 + theta3)
    s_1 = sin(theta1)
    s_2 = sin(theta2)
    s_2_3 = sin(theta2 + theta3)
    # Projection of the leg onto the horizontal plane.
    projection = l1 + (l2 * c_2) + (l3 * c_2_3)
    # Final end-effector position.
    return Vertex((projection * c_1), (projection * s_1),
                  ((l2 * s_2) + (l3 * s_2_3)))
# Smoke-test calls: the returned Vertex objects are discarded, so these
# only verify that leg_dk() runs without raising for sample inputs.
leg_dk(0, 0, 0)
leg_dk(90, 0, 0)
leg_dk(180, -30.501, -67.819)
leg_dk(0, -30.645, 38.501)
| Foxnox/robotique-delpeyroux-monseigne | src/direct_kinematics.py | Python | mit | 1,409 |
class cube():
    """Minimal LED-cube state holder exposing a status() snapshot."""

    # Class-level state shared by all instances.
    CUBE_STATUS = "Off"
    PATTERN = "Hello"

    def __init__(self):
        pass

    @staticmethod
    def status():
        """
        Return dictionary of details about the cube
        :return: Dictionary
        """
        return dict(pattern=cube.PATTERN, status=cube.CUBE_STATUS)
| kamodev/8x8x8_RPi_Zero_LED_Cube | rpi_zero_ledcube/cube.py | Python | mit | 343 |
import serial
import struct
import time
# j = 2 means open, j = 1 means close shutter
def command_shutter(port, j):
# first, start the serial port to communicate with the arduino
if port.isOpen():
print "port open"
port.write(struct.pack('>B', j))
return 1
else:
return 0
#while(1 == 1):
#cover_or_not = int(input('Enter a number. 1 will cover the Lenses of the NDI, while 2 will open the blinds.'))
#data.write(struct.pack('>B',cover_or_not)) | srkiyengar/NewGripper | src/shutter.py | Python | mit | 509 |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy import DDL, text
# SQL for a natural-sort collation key: the function splits its input into
# runs of digits and non-digits and encodes each run so that plain bytewise
# comparison of the aggregated bytea orders strings "naturally"
# (e.g. 'x2' before 'x10').  Installed into the `indico` schema.
SQL_FUNCTION_NATSORT = '''
CREATE FUNCTION indico.natsort(value TEXT)
RETURNS bytea
AS $$
SELECT string_agg(
convert_to(coalesce(r[2], length(length(r[1])::text) || length(r[1])::text || r[1]), 'SQL_ASCII'),
' '
)
FROM regexp_matches(value, '0*([0-9]+)|([^0-9]+)', 'g') r;
$$
LANGUAGE SQL IMMUTABLE STRICT;
'''
def _should_create_function(ddl, target, connection, **kw):
    """``execute_if`` predicate: True iff ``indico.natsort`` does not
    exist yet in the target database."""
    query = """
SELECT COUNT(*)
FROM information_schema.routines
WHERE routine_schema = 'indico' AND routine_name = 'natsort'
"""
    existing = connection.execute(text(query)).scalar()
    return existing == 0
def create_natsort_function(conn):
    """Install the ``indico.natsort`` SQL function on *conn*, skipping
    the DDL when the function already exists."""
    ddl = DDL(SQL_FUNCTION_NATSORT)
    ddl.execute_if(callable_=_should_create_function).execute(conn)
| mic4ael/indico | indico/core/db/sqlalchemy/custom/natsort.py | Python | mit | 1,070 |
import os
import cPickle
import gzip
from collections import namedtuple
import numpy as np
import matplotlib
matplotlib.rcParams.update({'axes.labelsize': 9,
'xtick.labelsize' : 9,
'ytick.labelsize' : 9,
'axes.titlesize' : 11})
import matplotlib.pyplot as plt
import brewer2mpl
allcolors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
from hips.plotting.layout import create_axis_at_location, create_figure
from experiment_helper import load_hipp_data
# Record persisted by the experiment runs: per-sample log likelihoods,
# predictive log likelihoods, number of used states, hyperparameter and
# rate traces, plus the raw samples and their wall-clock timestamps.
Results = namedtuple(
    "Results", ["name", "loglikes", "predictive_lls",
                "N_used", "alphas", "gammas",
                "rates", "obs_hypers",
                "samples", "timestamps"])
def plot_results(alpha_a_0s, Ks_alpha_a_0,
                 gamma_a_0s, Ks_gamma_a_0,
                 figdir="."):
    """
    Box-plot the number of inferred states as a function of the
    concentration hyperparameters and save the figure as
    ``figure7.pdf``/``figure7.png`` in *figdir*.

    :param alpha_a_0s: tested values of the alpha_0 prior parameter
        (x-axis labels of panel A).
    :param Ks_alpha_a_0: one sequence of state counts per alpha value.
    :param gamma_a_0s: tested values of the gamma prior parameter
        (x-axis labels of panel B).
    :param Ks_gamma_a_0: one sequence of state counts per gamma value.
    :param figdir: output directory for the saved figure.
    """
    # Plot the number of inferred states as a function of params
    fig = create_figure((5,1.5))

    # Panel A: state counts vs. a_{alpha_0}.
    ax = create_axis_at_location(fig, 0.6, 0.5, 1.7, .8, transparent=True)
    plt.figtext(0.05/5, 1.25/1.5, "A")
    ax.boxplot(Ks_alpha_a_0, positions=np.arange(1,1+len(alpha_a_0s)),
               boxprops=dict(color=allcolors[1]),
               whiskerprops=dict(color=allcolors[0]),
               flierprops=dict(color=allcolors[1]))
    ax.set_xticklabels(alpha_a_0s)
    plt.xlim(0.5,4.5)
    plt.ylim(40,90)
    # plt.yticks(np.arange(0,101,20))
    ax.set_xlabel("$a_{\\alpha_0}$")
    ax.set_ylabel("Number of States")

    # Panel B: state counts vs. a_{gamma}.
    ax = create_axis_at_location(fig, 3.1, 0.5, 1.7, .8, transparent=True)
    plt.figtext(2.55/5, 1.25/1.5, "B")
    ax.boxplot(Ks_gamma_a_0, positions=np.arange(1,1+len(gamma_a_0s)),
               boxprops=dict(color=allcolors[1]),
               whiskerprops=dict(color=allcolors[0]),
               flierprops=dict(color=allcolors[1]))
    ax.set_xticklabels(gamma_a_0s)
    plt.xlim(0.5,4.5)
    plt.ylim(40,90)
    # plt.yticks(np.arange(0,101,20))
    ax.set_xlabel("$a_{\\gamma}$")
    ax.set_ylabel("Number of States")

    plt.savefig(os.path.join(figdir, "figure7.pdf"))
    plt.savefig(os.path.join(figdir, "figure7.png"))
# Figure 7: Hippocampal inference trajectories
# (Python 2 script: uses print statements.)
dataset = "hipp_2dtrack_a"
N, S_train, pos_train, S_test, pos_test, center, radius = \
    load_hipp_data(dataname=dataset)

# Load results
runnum = 1
results_dir = os.path.join("results", dataset, "run%03d" % runnum)

# Load alpha_a_0 results; keep the last 2000 state counts of each run
# (presumably the post burn-in samples -- TODO confirm).
alpha_a_0s = [1.0, 5.0, 10.0, 100.0]
alpha_a_0_results = []
for alpha_a_0 in alpha_a_0s:
    results_type = "hdphmm_scale_alpha_a_0%.1f" % alpha_a_0
    print "Loading ", results_type
    results_file = os.path.join(results_dir, results_type + ".pkl.gz")
    with gzip.open(results_file, "r") as f:
        results = cPickle.load(f)
    alpha_a_0_results.append(results.N_used[-2000:])

# Load gamma_a_0 results in the same way.
gamma_a_0s = [1.0, 5.0, 10.0, 100.0]
gamma_a_0_results = []
for gamma_a_0 in gamma_a_0s:
    results_type = "hdphmm_scale_gamma_a_0%.1f" % gamma_a_0
    print "Loading ", results_type
    results_file = os.path.join(results_dir, results_type + ".pkl.gz")
    with gzip.open(results_file, "r") as f:
        results = cPickle.load(f)
    gamma_a_0_results.append(results.N_used[-2000:])

# alpha_obss = [0.1, 0.5, 1.0, 2.0, 2.5, 5.0, 10.0]
# alpha_obs_results = []
# for alpha_obs in alpha_obss:
#     results_type = "hdphmm_scale_alpha_obs%.1f" % alpha_obs
#     print "Loading ", results_type
#     results_file = os.path.join(results_dir, results_type + ".pkl.gz")
#     with gzip.open(results_file, "r") as f:
#         results = cPickle.load(f)
#
#     alpha_obs_results.append(results.N_used[-2000:])

plot_results(alpha_a_0s, alpha_a_0_results,
             gamma_a_0s, gamma_a_0_results,
             figdir=results_dir)
| slinderman/pyhsmm_spiketrains | experiments/make_figure7.py | Python | mit | 3,751 |
from dolfin import *
import ipdb
# Print log messages only from the root process in parallel
parameters["std_out_all_processes"] = False;
# Load mesh from file
mesh = Mesh()
# Vertices of a plus/cross-shaped channel domain: two crossing channels of
# half-width 1 extending to +/-6 in x and y.
domain_vertices = [Point(1.0, -1.0),
                   Point(6.0, -1.0),
                   Point(6.0, 1.0),
                   Point(1.0, 1.0),
                   Point(1.0, 6.0),
                   Point(-1.0, 6.0),
                   Point(-1.0, 1.0),
                   Point(-6.0, 1.0),
                   Point(-6.0, -1.0),
                   Point(-1.0, -1.0),
                   Point(-1.0, -6.0),
                   Point(1.0, -6.0),
                   Point(1.0,-1.0),
                   Point(1.0, -1.0)]
# Generate mesh and plot
PolygonalMeshGenerator.generate(mesh, domain_vertices, 0.75);
# First refinement pass: refine every cell within distance 2 of the origin.
cell_markers = CellFunction("bool", mesh)
cell_markers.set_all(False)
origin = Point(0.0, 0.0)
for cell in cells(mesh):
    p = cell.midpoint()
    # print p
    if p.distance(origin) < 2:
        cell_markers[cell] = True
mesh = refine(mesh, cell_markers)
# Second refinement pass: refine again within distance 1 of the origin.
cell_markers = CellFunction("bool", mesh)
cell_markers.set_all(False)
origin = Point(0.0, 0.0)
for cell in cells(mesh):
    p = cell.midpoint()
    # print p
    if p.distance(origin) < 1:
        cell_markers[cell] = True
mesh = refine(mesh, cell_markers)
# No-slip wall segments of the cross-shaped channel.  Each SubDomain picks a
# straight wall by fixing one coordinate with near() and bounding the other
# with between().  NOTE(review): on_boundary is not used, so facets on the
# same line in the interior would also match.
class noflow1(SubDomain):
    def inside(self, x, on_boundary):
        # Right wall of the bottom arm: x = 1, y in [-6, -1].
        return between(x[1], (-6.0,-1.0)) and near(x[0],1.0)
class noflow4(SubDomain):
    def inside(self, x, on_boundary):
        # Right wall of the top arm: x = 1, y in [1, 6].
        return between(x[1], (1.0, 6.0)) and near(x[0],1.0)
class noflow5(SubDomain):
    def inside(self, x, on_boundary):
        # Left wall of the bottom arm: x = -1, y in [-6, -1].
        return between(x[1], (-6.0,-1.0)) and near(x[0],-1.0)
class noflow8(SubDomain):
    def inside(self, x, on_boundary):
        # Left wall of the top arm: x = -1, y in [1, 6].
        return between(x[1], (1.0, 6.0)) and near(x[0],-1.0)
class noflow2(SubDomain):
    def inside(self, x, on_boundary):
        # Bottom wall of the right arm: y = -1, x in [1, 6].
        return between(x[0], (1.0,6.0)) and near(x[1],-1.0)
class noflow3(SubDomain):
    # Top wall of the right arm: y = 1, x in [1, 6].
    # Bug fix: the original tested between(x[1], (1.0, 6.0)) and near(x[1], 1.0),
    # putting both conditions on the y coordinate, which (with inclusive
    # between) matched the entire y = 1 line instead of only the wall segment.
    # The interval test must constrain x, mirroring noflow2 (the opposite wall).
    def inside(self, x, on_boundary):
        return between(x[0], (1.0, 6.0)) and near(x[1], 1.0)
class noflow6(SubDomain):
    # Bottom wall of the left arm: y = -1, x in [-6, -1].
    # Bug fix: the original used x[1] in the interval test (y in (-6, -1) and
    # y == -1), which matched the whole y = -1 line; the interval must
    # constrain x, mirroring noflow2.
    def inside(self, x, on_boundary):
        return between(x[0], (-6.0, -1.0)) and near(x[1], -1.0)
class noflow7(SubDomain):
    # Top wall of the left arm: y = 1, x in [-6, -1].
    # Bug fix: the original condition between(x[1], (-6.0, -1.0)) and
    # near(x[1], 1.0) requires y to be simultaneously in [-6, -1] and equal
    # to 1, so it could never be true and this wall was never marked no-slip.
    def inside(self, x, on_boundary):
        return between(x[0], (-6.0, -1.0)) and near(x[1], 1.0)
# Instantiate the wall subdomains (used below when marking facets).
Noflow1 = noflow1()
Noflow2 = noflow2()
Noflow3 = noflow3()
Noflow4 = noflow4()
Noflow5 = noflow5()
Noflow6 = noflow6()
Noflow7 = noflow7()
Noflow8 = noflow8()
class inflow1(SubDomain):
    def inside(self, x, on_boundary):
        # Inlet at the bottom of the vertical channel: y = -6, x in [-1, 1].
        return between(x[0], (-1.0,1.0)) and near(x[1],-6.0)
class inflow2(SubDomain):
    def inside(self, x, on_boundary):
        # Inlet at the top of the vertical channel: y = 6, x in [-1, 1].
        return between(x[0], (-1.0,1.0)) and near(x[1],6.0)
class outflow1(SubDomain):
    def inside(self, x, on_boundary):
        # Outlet at the right end of the horizontal channel: x = 6, y in [-1, 1].
        return between(x[1], (-1.0,1.0)) and near(x[0],6.0)
class outflow2(SubDomain):
    def inside(self, x, on_boundary):
        # Outlet at the left end of the horizontal channel: x = -6, y in [-1, 1].
        return between(x[1], (-1.0,1.0)) and near(x[0],-6.0)
# Mark boundary facets: 0 = unset, 1 = no-slip walls, 2 = inlets, 3 = outlets.
boundaries = FacetFunction("size_t", mesh)
boundaries.set_all(0)
Noflow1.mark(boundaries, 1)
Noflow2.mark(boundaries, 1)
Noflow3.mark(boundaries, 1)
Noflow4.mark(boundaries, 1)
Noflow5.mark(boundaries, 1)
Noflow6.mark(boundaries, 1)
Noflow7.mark(boundaries, 1)
Noflow8.mark(boundaries, 1)
Inflow1 = inflow1()
Inflow2 = inflow2()
Outlow1 = outflow1()
Outlow2 = outflow2()
Inflow1.mark(boundaries, 2)
Inflow2.mark(boundaries, 2)
Outlow1.mark(boundaries, 3)
Outlow2.mark(boundaries, 3)
# Define function spaces (P2-P1, i.e. the Taylor-Hood pair)
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
# Define trial and test functions
u = TrialFunction(V)
p = TrialFunction(Q)
v = TestFunction(V)
q = TestFunction(Q)
# Set parameter values (time step, end time, kinematic viscosity)
dt = 0.01
T = 3
nu = 0.01
# Define time-dependent pressure boundary conditions (opposite phases)
p_in = Expression("-sin(3.0*t)", t=0.0)
p_in2 = Expression("sin(3.0*t)", t=0.0)
# Define boundary conditions: no-slip velocity on walls, pressure at in/outlets
noslip = DirichletBC(V, (0, 0),boundaries,1)
inflow = DirichletBC(Q, p_in, boundaries,2)
outflow = DirichletBC(Q, p_in2, boundaries,3)
bcu = [noslip]
bcp = [inflow, outflow]
# Create functions (previous velocity, tentative/corrected velocity, pressure)
u0 = Function(V)
u1 = Function(V)
p1 = Function(Q)
# Define coefficients
k = Constant(dt)
f = Constant((0, 0))
# Tentative velocity step (Chorin projection, step 1)
F1 = (1/k)*inner(u - u0, v)*dx + inner(grad(u0)*u0, v)*dx + \
     nu*inner(grad(u), grad(v))*dx - inner(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)
# Pressure update (step 2: Poisson equation for the pressure)
a2 = inner(grad(p), grad(q))*dx
L2 = -(1/k)*div(u1)*q*dx
# Velocity update (step 3: project onto divergence-free field)
a3 = inner(u, v)*dx
L3 = inner(u1, v)*dx - k*inner(grad(p1), v)*dx
# Assemble matrices (bilinear forms are time-independent, assemble once)
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Use amg preconditioner if available
prec = "amg" if has_krylov_solver_preconditioner("amg") else "default"
# Create files for storing solution
ufile = File("results/velocity.pvd")
pfile = File("results/pressure.pvd")
t = dt
while t < T + DOLFIN_EPS:
# Update pressure boundary condition
p_in.t = t
# Compute tentative velocity step
begin("Computing tentative velocity")
b1 = assemble(L1)
[bc.apply(A1, b1) for bc in bcu]
solve(A1, u1.vector(), b1, "gmres", "default")
end()
# Pressure correction
begin("Computing pressure correction")
b2 = assemble(L2)
[bc.apply(A2, b2) for bc in bcp]
solve(A2, p1.vector(), b2, "gmres", prec)
end()
# Velocity correction
begin("Computing velocity correction")
b3 = assemble(L3)
[bc.apply(A3, b3) for bc in bcu]
solve(A3, u1.vector(), b3, "gmres", "default")
end()
# Plot solution
plot(p1, title="Pressure", rescale=True)
plot(u1, title="Velocity", scalarbar=True)
# Save to file
ufile << u1
pfile << p1
# Move to next time step
u0.assign(u1)
t += dt
print "t =", t
# Hold plot
interactive()
| wathen/PhD | MHD/FEniCS/mesh/test.py | Python | mit | 5,783 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import sys
import cPickle
import traceback
import types
import os
import logging
from storage import Storage
from http import HTTP
from html import BEAUTIFY
logger = logging.getLogger("web2py")
__all__ = ['RestrictedError', 'restricted', 'TicketStorage', 'compile2']
class TicketStorage(Storage):
    """
    defines the ticket object and the default values of its members (None)

    Tickets are stored either on disk (default) or in a database table,
    depending on whether ``db`` is given.
    """
    def __init__(
        self,
        db=None,
        tablename='web2py_ticket'
        ):
        # db: optional DAL connection; when None, tickets go to disk files
        self.db = db
        self.tablename = tablename
    def store(self, request, ticket_id, ticket_data):
        """
        stores the ticket. It will figure out if this must be on disk or in db
        """
        if self.db:
            self._store_in_db(request, ticket_id, ticket_data)
        else:
            self._store_on_disk(request, ticket_id, ticket_data)
    def _store_in_db(self, request, ticket_id, ticket_data):
        # Insert the pickled ticket into the per-application table and log it.
        table = self._get_table(self.db, self.tablename, request.application)
        table.insert(ticket_id=ticket_id,
                     ticket_data=cPickle.dumps(ticket_data),
                     created_datetime=request.now)
        logger.error('In FILE: %(layer)s\n\n%(traceback)s\n' % ticket_data)
    def _store_on_disk(self, request, ticket_id, ticket_data):
        # Pickle the ticket into <app>/errors/<ticket_id>.
        ef = self._error_file(request, ticket_id, 'wb')
        try:
            cPickle.dump(ticket_data, ef)
        finally:
            ef.close()
    def _error_file(self, request, ticket_id, mode, app=None):
        # Open the ticket file for ``app`` (defaults to the current app's
        # request.folder) in the given mode.
        root = request.folder
        if app:
            root = os.path.join(os.path.join(root, '..'), app)
        errors_folder = os.path.abspath(os.path.join(root, 'errors'))#.replace('\\', '/')
        return open(os.path.join(errors_folder, ticket_id), mode)
    def _get_table(self, db, tablename, app):
        # Return (defining it on first use) the per-application ticket table.
        tablename = tablename + '_' + app
        table = db.get(tablename, None)
        if table is None:
            db.rollback() # not necessary but one day
                          # any app may store tickets on DB
            table = db.define_table(
                tablename,
                db.Field('ticket_id', length=100),
                db.Field('ticket_data', 'text'),
                db.Field('created_datetime', 'datetime'),
                )
        return table
    def load(
        self,
        request,
        app,
        ticket_id,
        ):
        # Load a ticket from disk or db and return the unpickled data
        # (None when stored in db and not found).
        if not self.db:
            ef = self._error_file(request, ticket_id, 'rb', app)
            try:
                return cPickle.load(ef)
            finally:
                ef.close()
        table = self._get_table(self.db, self.tablename, app)
        rows = self.db(table.ticket_id == ticket_id).select()
        if rows:
            return cPickle.loads(rows[0].ticket_data)
        return None
class RestrictedError(Exception):
    """
    class used to wrap an exception that occurs in the restricted environment
    below. the traceback is used to log the exception and generate a ticket.
    """
    def __init__(
        self,
        layer='',
        code='',
        output='',
        environment=None,
        ):
        """
        layer here is some description of where in the system the exception
        occurred.
        """
        if environment is None: environment = {}
        self.layer = layer
        self.code = code
        self.output = output
        self.environment = environment
        if layer:
            try:
                self.traceback = traceback.format_exc()
            except:
                self.traceback = 'no traceback because template parting error'
            try:
                # capture a cgitb-style snapshot of the failing frames
                self.snapshot = snapshot(context=10,code=code,
                    environment=self.environment)
            except:
                self.snapshot = {}
        else:
            self.traceback = '(no error)'
            self.snapshot = {}
    def log(self, request):
        """
        logs the exception.

        Stores a ticket (disk or db, via TicketStorage) keyed by the part of
        request.uuid after the first '/', and returns request.uuid on
        success or None if storing failed.
        """
        try:
            d = {
                'layer': str(self.layer),
                'code': str(self.code),
                'output': str(self.output),
                'traceback': str(self.traceback),
                'snapshot': self.snapshot,
                }
            ticket_storage = TicketStorage(db=request.tickets_db)
            ticket_storage.store(request, request.uuid.split('/',1)[1], d)
            return request.uuid
        except:
            logger.error(self.traceback)
            return None
    def load(self, request, app, ticket_id):
        """
        loads a logged exception.
        """
        ticket_storage = TicketStorage(db=request.tickets_db)
        d = ticket_storage.load(request, app, ticket_id)
        self.layer = d['layer']
        self.code = d['code']
        self.output = d['output']
        self.traceback = d['traceback']
        self.snapshot = d.get('snapshot')
    def __str__(self):
        # safely show an useful message to the user
        try:
            output = self.output
            # py2: encode unicode to utf8 bytes; anything else through str()
            if isinstance(output, unicode):
                output = output.encode("utf8")
            elif not isinstance(output, str):
                output = str(output)
        except:
            output = ""
        return output
def compile2(code, layer):
    """
    Compile source text *code* into a code object whose filename is *layer*.

    Line endings are normalised to ``\\n`` and a trailing newline is
    appended, because ``compile`` fails when the source ends in a comment
    without one.
    """
    normalized = code.rstrip().replace('\r\n', '\n')
    return compile(normalized + '\n', layer, 'exec')
def restricted(code, environment=None, layer='Unknown'):
    """
    runs code in environment and returns the output. if an exception occurs
    in code it raises a RestrictedError containing the traceback. layer is
    passed to RestrictedError to identify where the error occurred.
    """
    if environment is None: environment = {}
    environment['__file__'] = layer
    environment['__name__'] = '__restricted__'
    try:
        # accept either an already-compiled code object or source text
        if type(code) == types.CodeType:
            ccode = code
        else:
            ccode = compile2(code,layer)
        # Python 2 exec statement: run the code with `environment` as globals
        exec ccode in environment
    except HTTP:
        # HTTP responses are control flow, not errors: let them through
        raise
    except RestrictedError:
        # do not encapsulate (obfuscate) the original RestrictedError
        raise
    except Exception, error:
        # extract the exception type and value (used as output message)
        etype, evalue, tb = sys.exc_info()
        # XXX Show exception in Wing IDE if running in debugger
        if __debug__ and 'WINGDB_ACTIVE' in os.environ:
            sys.excepthook(etype, evalue, tb)
        output = "%s %s" % (etype, evalue)
        raise RestrictedError(layer, code, output, environment)
def snapshot(info=None, context=5, code=None, environment=None):
    """Return a dict describing a given traceback (based on cgitb.text).

    The dict contains interpreter info, one entry per frame (source lines,
    call signature, referenced locals) and the exception type/value plus
    selected web2py environment objects.
    """
    import os, types, time, linecache, inspect, pydoc, cgitb
    # if no exception info given, get current:
    etype, evalue, etb = info or sys.exc_info()
    if type(etype) is types.ClassType:
        # py2 old-style exception classes: keep just the class name
        etype = etype.__name__
    # create a snapshot dict with some basic information
    s = {}
    s['pyver'] = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
    s['date'] = time.ctime(time.time())
    # start to process frames
    records = inspect.getinnerframes(etb, context)
    s['frames'] = []
    for frame, file, lnum, func, lines, index in records:
        file = file and os.path.abspath(file) or '?'
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        call = ''
        if func != '?':
            call = inspect.formatargvalues(args, varargs, varkw, locals,
                formatvalue=lambda value: '=' + pydoc.text.repr(value))
        # basic frame information
        f = {'file': file, 'func': func, 'call': call, 'lines': {}, 'lnum': lnum}
        highlight = {}
        def reader(lnum=[lnum]):
            # stateful line reader used by cgitb.scanvars; records which
            # lines were consumed in `highlight` (mutable-default counter)
            highlight[lnum[0]] = 1
            try: return linecache.getline(file, lnum[0])
            finally: lnum[0] += 1
        vars = cgitb.scanvars(reader, frame, locals)
        # if it is a view, replace with generated code
        if file.endswith('html'):
            lmin = lnum>context and (lnum-context) or 0
            lmax = lnum+context
            lines = code.split("\n")[lmin:lmax]
            index = min(context, lnum) - 1
        if index is not None:
            i = lnum - index
            for line in lines:
                f['lines'][i] = line.rstrip()
                i += 1
        # dump local variables (referenced in current line only)
        f['dump'] = {}
        for name, where, value in vars:
            if name in f['dump']: continue
            if value is not cgitb.__UNDEF__:
                if where == 'global': name = 'global ' + name
                elif where != 'local': name = where + name.split('.')[-1]
                f['dump'][name] = pydoc.text.repr(value)
            else:
                f['dump'][name] = 'undefined'
        s['frames'].append(f)
    # add exception type, value and attributes
    s['etype'] = str(etype)
    s['evalue'] = str(evalue)
    s['exception'] = {}
    if isinstance(evalue, BaseException):
        for name in dir(evalue):
            # prevent py26 DeprecatedWarning:
            # NOTE(review): (2.6) is a parenthesised float, not the tuple
            # (2, 6); in py2 a tuple never compares less than a float, so
            # this version test is always False and 'message' is always
            # skipped -- confirm intent before "fixing".
            if name!='message' or sys.version_info<(2.6):
                value = pydoc.text.repr(getattr(evalue, name))
                s['exception'][name] = value
    # add all local values (of last frame) to the snapshot
    s['locals'] = {}
    for name, value in locals.items():
        s['locals'][name] = pydoc.text.repr(value)
    # add web2py environment variables
    for k,v in environment.items():
        if k in ('request', 'response', 'session'):
            s[k] = BEAUTIFY(v)
    return s
| SEA000/uw-empathica | empathica/gluon/restricted.py | Python | mit | 9,911 |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
# Read in an image (path is relative to the working directory)
image = mpimg.imread('signs_vehicles_xygrad.png')
# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi / 2)):
    """Return a binary mask selecting pixels whose gradient direction lies
    inside ``thresh`` (radians).

    The direction is arctan2(|sobel_y|, |sobel_x|) computed on the
    grayscale image, so values fall in [0, pi/2]; pixels inside the
    (inclusive) threshold interval are set to 1, all others to 0.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Horizontal and vertical derivatives with the requested kernel size.
    grad_x = cv2.Sobel(grayscale, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(grayscale, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # y before x, so near-vertical edges map close to pi/2.
    direction = np.arctan2(np.absolute(grad_y), np.absolute(grad_x))
    lo, hi = thresh
    mask = np.zeros_like(direction)
    mask[(direction >= lo) & (direction <= hi)] = 1
    return mask
# Run the function
dir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))
# Plot the result: original image next to the thresholded direction mask
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(dir_binary, cmap='gray')
ax2.set_title('Thresholded Grad. Dir.', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
| swirlingsand/self-driving-car-nanodegree-nd013 | play/advanced_lane_finding-direction_of_the_gradient_exercise.py | Python | mit | 1,656 |
import MySQLdb
import urllib2, urllib, re, sys
import xml
from datetime import datetime
## gets woeid from query -helper function
def getwoeid(searchstring):
    """Query Yahoo YQL geo.placefinder for *searchstring* and write the
    matching place names to cities.txt and their WOEIDs to woeids.txt
    (one per line, same order).  Errors are printed, not raised."""
    try:
        import xml.etree.ElementTree as ET
        namespaces = {'yweather': 'http://www.yahooapis.com/v1/base.rng'} # add more as needed
        #get records
        # NOTE(review): proxy address is hard-coded for a specific network;
        # adjust or remove for other environments.
        proxy = urllib2.ProxyHandler({'https': '10.10.78.62:3128','http': '10.10.78.62:3128'})
        opener = urllib2.build_opener(proxy)
        urllib2.install_opener(opener)
        url = "http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20geo.placefinder%20where%20text=%22"+ urllib.quote_plus(searchstring) + ",%22%20&format=xml"
        f = urllib2.urlopen(url)
        xmldata = f.read()
        root = ET.fromstring(xmldata)
        #root = ET.parse('data_city.xml').getroot()
        cities = root.find('results').findall('Result')
        fcities = open('cities.txt', 'w')
        fwoeid = open('woeids.txt', 'w')
        for thiscity in cities:
            fcities.write(thiscity.find('city').text + ", " + thiscity.find('state').text + ", " + thiscity.find('country').text + "\n")
            fwoeid.write(thiscity.find('woeid').text + "\n")
        fcities.close()
        fwoeid.close()
    except Exception as e:
        print '<>',e
## gets woeid from query -main program
#takes searchstring as arg
if __name__ == '__main__':
    getwoeid(sys.argv[1])
| rishirdua/eep702-software-laboratory | 03_ubuntu-weather-app/code/woeidfromquery.py | Python | mit | 1,278 |
import asyncio
import random
import names
from chilero.web.test import asynctest
from chilero.pg import Resource
from chilero.pg.test import TestCase, TEST_DB_SUFFIX
import json
class Friends(Resource):
    """REST resource over the ``friends`` table (name searchable/orderable)."""
    order_by = 'name ASC'
    search_fields = ['name']
    allowed_fields = ['name', 'meta']
    required_fields = ['name']
    allow_order_by = ['name']

    def serialize_object(self, row):
        """Full representation of one row: id, name, meta and object URL."""
        obj_id, obj_name, obj_meta = row[0], row[1], row[2]
        return dict(id=obj_id,
                    name=obj_name,
                    meta=obj_meta,
                    url=self.get_object_url(obj_id))

    def serialize_list_object(self, row):
        """Compact representation used in index listings (name + URL)."""
        return dict(name=row[1], url=self.get_object_url(row[0]))
class Friends2(Resource):
    """Same resource as Friends (shares table_name='friends') but its
    index applies a fixed filter condition."""
    order_by = 'name ASC'
    search_fields = ['name']
    allowed_fields = ['name', 'meta']
    required_fields = ['name']
    allow_order_by = ['name']
    table_name = 'friends'
    def serialize_object(self, row):
        # Full representation: id, name, meta and the canonical object URL.
        return dict(
            id=row[0],
            name=row[1],
            meta=row[2],
            url=self.get_object_url(row[0])
        )
    def serialize_list_object(self, row):
        # Compact representation used in index listings.
        return dict(
            name=row[1],
            url=self.get_object_url(row[0])
        )
    def index(self):
        # Generator-style coroutine (old asyncio): list only rows matching
        # the hard-coded condition.
        condition = dict(name='pedro', meta='{}')
        index = yield from self.do_index(condition)
        return self.response(index)
class BaseTestCase(TestCase):
    """Shared app/database configuration for the test cases below."""
    settings = dict(
        db_url='postgres://postgres@localhost:5432/chilero_pg_{}'.format(
            TEST_DB_SUFFIX
        )
    )
    routes = [
        ['/friends', Friends],
        ['/friends2', Friends2]
    ]
    @asyncio.coroutine
    def _create_friend(self, **kwargs):
        # Create a friend with random defaults (overridable via kwargs) and
        # return (response, created_object).
        defaults = dict(
            name=self._random_string(),
            meta=json.dumps(dict(name='name1', data2='data2'))
        )
        return(
            yield from self._create_and_get('/friends', kwargs, defaults)
        )
class TestAdvancedOptions(BaseTestCase):
    """Pagination, search and ordering behaviour of the index endpoint."""
    @asyncio.coroutine
    def _a_lot_of_friends(self):
        # create a lot of friends
        all_names = []
        for i in range(100):
            # suffix the index to keep names unique across iterations
            name = names.get_full_name()+str(i)
            all_names.append(name)
            _, f = yield from self._create_friend(name=name)
            t = yield from _.text()
            print(t)
            assert _.status==201
            _.close()
        return all_names
    @asynctest
    def test_pagination(self):
        yield from self._a_lot_of_friends()
        # list with default values
        # page 1
        r = yield from self._get_json(self.full_url('/friends'))
        assert r['data']['count'] >= 100
        assert r['data']['prev'] == None
        assert 'offset=20' in r['data']['next']
        assert 'limit=20' in r['data']['next']
        assert len(r['index']) == r['data']['length']
        # page 2
        r = yield from self._get_json(r['data']['next'])
        assert 'offset=0' in r['data']['prev']
        assert 'offset=40' in r['data']['next']
        assert len(r['index']) == r['data']['length']
        assert len(r['index'][0].keys()) == 2
    @asynctest
    def test_pagination_no_limit(self):
        yield from self._a_lot_of_friends()
        # list with no limit: one page containing everything
        r = yield from self._get_json(self.full_url('/friends?limit=0'))
        assert r['data']['count'] >= 100
        assert r['data']['prev'] == None
        assert r['data']['next'] == None
        assert r['data']['length'] == r['data']['count']
        assert len(r['index']) == r['data']['count']
    @asynctest
    def test_search_pagination(self):
        rnames = list((yield from self._a_lot_of_friends()))
        # pick a first name and create several friends sharing it
        rname = random.choice(rnames).split()[0]
        for i in range(5):
            name = '{} {}'.format(rname, names.get_last_name())
            _, friend = yield from self._create_friend(name=name)
            _.close()
        rname = rname.lower()
        r = yield from self._get_json(
            self.full_url('/friends?search={}&limit=1'.format(rname))
        )
        assert r['data']['count'] >= 1
        assert rname in r['data']['next']
        # walk all result pages; search term must persist in the links
        while r['data']['next']:
            r = yield from self._get_json(r['data']['next'])
            if r['data']['next'] is not None:
                assert rname in r['data']['next']
                assert rname in r['data']['prev']
            # NOTE(review): missing `assert` -- this expression is a no-op.
            rname.lower() in r['index'][0]['name'].lower()
    @asynctest
    def test_oreder_by_ASC(self):
        # NOTE(review): "oreder" is a typo for "order"; kept so test ids
        # remain stable.
        yield from self._a_lot_of_friends()
        name = 'Abel Barrera'
        _, friend = yield from self._create_friend(name=name)
        _.close()
        url = self.full_url('/friends?order_by={}'.format('name'))
        resp = yield from self._get_json(url)
        assert resp['index'][0]['name'].startswith('A')
    @asynctest
    def test_oreder_by_400(self):
        # ordering by a field not in allow_order_by must be rejected
        yield from self._a_lot_of_friends()
        url = self.full_url('/friends?order_by={}'.format('other'))
        resp = yield from self._get(url)
        assert resp.status == 400
    @asynctest
    def test_oreder_by_desc(self):
        yield from self._a_lot_of_friends()
        defaults = dict(
            name='Zarahi zuna'
        )
        resp = yield from self._create('/friends', defaults)
        assert resp.status == 201
        resp.close()
        # leading '-' requests descending order
        url = self.full_url('/friends?order_by={}'.format('-name'))
        resp = yield from self._get_json(url)
        assert resp['index'][0]['name'].startswith('Z')
class TestBasic(BaseTestCase):
    # Test common REST actions
    """CRUD, search and error-status behaviour of the resource endpoints."""
    @asynctest
    def test_index(self):
        resp = yield from self._get(self.full_url('/friends'))
        assert resp.status == 200
        resp.close()
    @asynctest
    def test_index_json(self):
        resp = yield from self._index('/friends')
        assert isinstance(resp, dict)
        assert 'index' in resp
    @asynctest
    def test_index_json_condition(self):
        # Friends2.index filters with a fixed condition but must still
        # return a normal index payload.
        resp = yield from self._index('/friends2')
        assert isinstance(resp, dict)
        assert 'index' in resp
    @asynctest
    def test_create(self):
        name = self._random_string()
        _, friend = yield from self._create_friend(name=name)
        assert _.status == 201
        _.close()
        assert friend['name'] == name
        assert len(friend.keys()) == 4
        efriend = yield from self._delete(friend['url'])
        assert efriend.status==200
    @asynctest
    def test_create_error(self):
        # unknown field -> 400
        _, friend = yield from self._create_friend(wrong_field=123)
        assert _.status == 400
        _.close()
    @asynctest
    def test_create_conflict(self):
        # duplicate name -> 409
        name = names.get_full_name()
        _, friend = yield from self._create_friend(name=name)
        _.close()
        _, friend = yield from self._create_friend(name=name)
        assert _.status == 409
        _.close()
    @asynctest
    def test_update(self):
        _, friend = yield from self._create_friend()
        _.close()
        new_name = self._random_string()
        presp = yield from self._patch(friend['url'], name=new_name)
        assert presp.status == 204
        presp.close()
        updated_friend = yield from self._get_json(friend['url'])
        assert updated_friend['body']['name'] == new_name
    @asynctest
    def test_search(self):
        name = 'some known name'
        _, friend = yield from self._create_friend(name=name)
        _.close()
        results = yield from self._search('/friends', terms='known name')
        assert len(results['index']) > 0
        assert results['index'][0]['name'] == name
    @asynctest
    def test_view_404(self):
        resp = yield from self._get(self.full_url('/friends/999999'))
        assert resp.status == 404
        resp.close()
    @asynctest
    def test_update_400(self):
        # patching with an unknown field name ('names') -> 400
        _, friend = yield from self._create_friend()
        _.close()
        new_name = self._random_string()
        presp = yield from self._patch(friend['url'], names=new_name)
        assert presp.status == 400
        presp.close()
    @asynctest
    def test_update_empty_required_400(self):
        # blank value for a required field -> 400
        _, friend = yield from self._create_friend()
        _.close()
        new_name = " "
        presp = yield from self._patch(friend['url'], name=new_name)
        assert presp.status == 400
        presp.close()
    @asynctest
    def test_update_None_required_400(self):
        # None value for a required field -> 400
        _, friend = yield from self._create_friend()
        _.close()
        new_name = None
        presp = yield from self._patch(friend['url'], name=new_name)
        assert presp.status == 400
        presp.close()
| dmonroy/chilero.pg | tests/test_sample_app.py | Python | mit | 8,628 |
# $description: Split into layer cells
# $autorun
# $show-in-menu
import pya
import sys
# Redirect stderr into stdout so errors show up in the macro console.
sys.stderr = sys.stdout
class MenuAction(pya.Action):
    """KLayout menu action that invokes a Python callable when triggered."""
    def __init__(self, title, shortcut, action):
        self.title = title        # menu entry text (pya.Action property)
        self.shortcut = shortcut  # keyboard shortcut (pya.Action property)
        self.action = action      # zero-argument callable to run on trigger
    def triggered(self):
        # Called by KLayout when the menu entry is activated.
        self.action()
def make_layer_cells():
    """Split the current cell into per-layer cells.

    For each of layers 1-3, creates a new top cell "<title>L<n>" that
    contains, for every instance in the current cell, a child cell holding
    only that layer's shapes (plus layer (1,3)), placed at the instance's
    bounding-box centre.
    """
    #Load View
    app = pya.Application.instance()
    mw = app.main_window()
    lv = mw.current_view()
    ly = lv.active_cellview().layout()
    dbu = ly.dbu
    # NOTE(review): this None check runs after lv has already been
    # dereferenced above, so it can never trigger.
    if lv==None:
        raise Exception("No view selected")
    cv = lv.cellview(lv.active_cellview_index())
    #Loop through layers
    for layer in [1,2,3]:
        new_cell = ly.create_cell(cv.cell.display_title() + "L" + str(layer))
        # Loop through instances
        for inst in cv.cell.each_inst():
            #Calculate location of instances
            # identity transform; bbox is taken in the current coordinates
            itrans = pya.ICplxTrans.from_trans(pya.CplxTrans())
            box = inst.bbox().transformed(itrans)
            x = box.center().x
            y = box.center().y
            #Create new cell to represent given layer
            new_subcell = ly.create_cell(inst.cell.display_title() + "L" + str(layer))
            #Map Bounding box and shape layers to new layer
            # (1,3) kept as-is; both (layer,0) and (layer,1) are merged
            # into (layer,0) in the new cell
            lm = pya.LayerMapping()
            lm.map(ly.layer(1,3), ly.layer(1,3))
            lm.map(ly.layer(layer, 0), ly.layer(layer, 0))
            lm.map(ly.layer(layer,1), ly.layer(layer, 0))
            #Create Instance Array to place into cell
            array = pya.CellInstArray()
            #Copy shapes, place, and insert
            array.cell_index=new_subcell.cell_index()
            new_subcell.copy_shapes(inst.cell, lm)
            array.trans = pya.Trans(pya.Point(x,y))
            new_cell.insert(array)
# Register the action at the end of the layout context menu.
x = MenuAction("Make Layer Cells", "", make_layer_cells)
app = pya.Application.instance()
mw = app.main_window()
menu = mw.menu()
menu.insert_separator("@hcp_context_menu.end", "sep_layer_cells")
menu.insert_item("@hcp_context_menu.end", "layer_cells", x) | calebjordan/klayout-macros | pymacros/Make Layer Cells.py | Python | mit | 2,057 |
import numpy as np
import pandas as pd
import tensorflow as tf
import scipy.misc
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, Sequential
from keras.layers import Input, Dropout, Activation, LSTM, Conv2D, Conv2DTranspose, Dense, TimeDistributed, Flatten, Reshape, Cropping2D, GaussianNoise, Concatenate, BatchNormalization, SeparableConv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.losses import mean_squared_error
from keras.optimizers import Adadelta, RMSprop
from keras import backend as K
from keras.layers.advanced_activations import LeakyReLU
from keras.models import load_model
#K.set_learning_phase(1) #set learning phase
# Inference configuration.  The stateful LSTM requires fixed batch and
# sub-sequence sizes, so sequences are fed in train_cnt chunks of train_seq
# frames each.
sequences_per_batch = 1
epochs = 100
image_size = 240
sequence_length = 155
sequence_start = 0
train_seq = 1
train_cnt = int(sequence_length / train_seq)
file_list = 'val.txt'
input_mode = 'test'
# channel layout: 4 data channels + 3 attention channels as network input,
# 3 ground-truth channels as targets
input_data = 4
input_attention = 3
input_dimension = input_data + input_attention
output_dimension = 3
base = 42
folder = 'data'
# load data list (one filename pattern per sequence)
files = np.genfromtxt(file_list, dtype='str')
def conv_block(m, dim, acti, bn, res, do=0.2):
    # Two TimeDistributed 6x6 conv layers with LeakyReLU, optional batch
    # norm and dropout; when `res` is true the block input is concatenated
    # to the output (skip connection).
    # NOTE(review): the `acti` argument is ignored -- LeakyReLU is
    # hard-wired.
    n = TimeDistributed(Conv2D(dim, 6, padding='same'))(m)
    n = TimeDistributed(LeakyReLU())(n)
    n = BatchNormalization()(n) if bn else n
    n = TimeDistributed(Dropout(do))(n) if do else n
    n = TimeDistributed(Conv2D(dim, 6, padding='same'))(n)
    n = TimeDistributed(LeakyReLU())(n)
    n = BatchNormalization()(n) if bn else n
    return Concatenate()([m, n]) if res else n
def level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
    # Recursive U-Net level: conv block, downsample, recurse one level
    # deeper with `inc`-scaled width, upsample, skip-concatenate, conv
    # block.  At the bottom (depth == 0) the features are flattened, run
    # through a stateful LSTM and reshaped back before a final conv block.
    if depth > 0:
        n = conv_block(m, dim, acti, bn, res)
        # max-pool or strided conv downsampling, selected by `mp`
        m = TimeDistributed(MaxPooling2D())(n) if mp else TimeDistributed(Conv2D(dim, 4, strides=2, padding='same'))(n)
        print(n.shape)
        print(m.shape)
        m = level_block(m, int(inc*dim), depth-1, inc, acti, do, bn, mp, up, res)
        if up:
            m = TimeDistributed(UpSampling2D())(m)
            m = TimeDistributed(Conv2D(dim, 4, padding='same'))(m)
            m = TimeDistributed(LeakyReLU())(m)
        else:
            m = TimeDistributed(Conv2DTranspose(dim, 4, strides=2, padding='same'))(m)
            m = TimeDistributed(LeakyReLU())(m)
        # skip connection from the matching encoder level
        n = Concatenate()([n, m])
        m = conv_block(n, dim, acti, bn, res)
    else:
        m = conv_block(m, dim, acti, bn, res, do)
        l = TimeDistributed(Flatten())(m)
        #l = LSTM(4 * 4 * 128, stateful=True, return_sequences=True)(l)
        l = LSTM(2048, stateful=True, return_sequences=True)(l)
        # NOTE(review): 2048/4 relies on Python 2 integer division; under
        # Python 3 this is a float and Reshape would fail.
        l = TimeDistributed(Reshape((2, 2, 2048/4)))(l)
        m = l
        #m = Concatenate()([l, m])
        m = conv_block(m, dim, acti, bn, res, do)
    return m
def UNet(input_shape, out_ch=1, start_ch=64, depth=7, inc_rate=1.5, activation='relu',
         dropout=0.4, batchnorm=True, maxpool=True, upconv=True, residual=False):
    # Build a TimeDistributed U-Net with an LSTM bottleneck.  The input is
    # zero-padded by 8 px before and cropped by 8 px after so the spatial
    # size survives the down/upsampling chain; output uses tanh (range
    # [-1, 1], matching the data scaling used below).
    i = Input(batch_shape=input_shape)
    o = TimeDistributed(ZeroPadding2D(padding=8))(i)
    o = TimeDistributed(SeparableConv2D(start_ch, 7, padding='same'))(o)
    o = level_block(o, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = TimeDistributed(Cropping2D(cropping=8))(o)
    o = TimeDistributed(Conv2D(out_ch, 1, activation='tanh'))(o)
    return Model(inputs=i, outputs=o)
# Build the model, load pretrained weights and compile for inference.
model = UNet((sequences_per_batch, train_seq, image_size, image_size, input_dimension), out_ch=6, start_ch=base)
model.load_weights('v2.h5')
model.compile(loss='mean_squared_error', optimizer=RMSprop())
for k in model.layers:
    print(k.output_shape)
plot_model(model, to_file='model.png')
def load_sequence(p, is_train=False):
    """Load one full image sequence described by filename pattern *p* (bytes).

    Each frame file is a horizontal strip of input_dimension +
    output_dimension (= 10) sub-images.  Frames are split, zero-padded to a
    square, resized to image_size (with random crop jitter when is_train)
    and rescaled to [-1, 1]; missing frames become all -1 placeholders.
    Returns an array of shape
    (1, sequence_length, image_size, image_size, 10).
    """
    pattern = p.decode("utf-8")
    val = []
    for s in xrange(sequence_length):
        name = pattern.format('test', sequence_start + s, folder)
        try:
            input_img = scipy.misc.imread(name, mode='L').astype(np.float)
        except:
            # frame missing on disk: zero placeholder (becomes -1 below)
            val.append(np.zeros((1, image_size, image_size, input_dimension + output_dimension)))
            continue
        # split the strip into its per-channel sub-images
        images = np.split(input_img, input_dimension + output_dimension, axis=1)
        half_offset = 4
        offset = half_offset * 2
        hypersize = image_size + offset
        fullsize = 256 + offset
        # random crop origin (used only when is_train)
        h1 = int(np.ceil(np.random.uniform(1e-2, offset)))
        w1 = int(np.ceil(np.random.uniform(1e-2, offset)))
        conv = []
        for image in images:
            # zero-pad to fullsize x fullsize, centred
            top = int((fullsize - image.shape[1]) / 2)
            bottom = fullsize - image.shape[1] - top
            image = np.append(np.zeros((image.shape[0], top)), image, axis=1)
            image = np.append(image, np.zeros((image.shape[0], bottom)), axis=1)
            left = int((fullsize - image.shape[0]) / 2)
            right = fullsize - image.shape[0] - left
            image = np.append(np.zeros((left, image.shape[1])), image, axis=0)
            image = np.append(image, np.zeros((right, image.shape[1])), axis=0)
            tmp = scipy.misc.imresize(image, [hypersize, hypersize], interp='nearest')
            if is_train:
                # jittered crop for augmentation
                image = tmp[h1:h1+image_size, w1:w1+image_size]
            else:
                # deterministic centre crop
                image = tmp[half_offset:half_offset+image_size, half_offset:half_offset+image_size]
            image = image/127.5
            conv.append(image)
        #print(np.stack(conv, axis=2).shape)
        val.append([np.stack(conv, axis=2)])
    st = np.stack(val, axis=1)
    #z = np.zeros((1, sequence_length - st.shape[1], image_size, image_size, input_dimension + output_dimension))
    #o = np.append(z, st, axis=1)
    o = st
    # shift [0, 2] down to [-1, 1]
    o = o - 1
    return o
def makeMask(gt, ct):
    """Build a two-channel difference mask from two frames in [-1, 1].

    Both inputs are first rescaled to [0, 1].  Channel 0 marks pixels where
    *gt* exceeds *ct*, channel 1 where *ct* exceeds *gt* (each clipped to
    [0, 1]); both channels are rescaled back to [-1, 1] and concatenated
    along axis 4.
    """
    gt01 = (gt + 1) / 2
    ct01 = (ct + 1) / 2
    pos = np.clip(gt01 - ct01, 0, 1) * 2 - 1
    neg = np.clip(ct01 - gt01, 0, 1) * 2 - 1
    return np.concatenate((pos, neg), axis=4)
def extractGT(seq):
    """Split a batch sequence into (network input, target masks, current frames).

    Bug fix: the original ignored its *seq* parameter and read the module
    global ``batch_sequence`` instead; it only worked because every caller
    happened to pass that same global.

    Channel layout on axis 4: the first ``output_dimension`` (3) channels
    are ground truth, followed by ``input_dimension`` (7) channels of which
    the last three are the current estimates.
    """
    gt, data = np.split(seq, [output_dimension], axis=4)
    gta, gtb, gtc = np.split(gt, 3, axis=4)
    z1, z2, z3, z4, cta, ctb, ctc = np.split(data, input_dimension, axis=4)
    # per-channel over/under difference masks (two channels each)
    m1 = makeMask(gta, cta)
    m2 = makeMask(gtb, ctb)
    m3 = makeMask(gtc, ctc)
    masks = np.concatenate((m1, m2, m3), axis=4)
    return data, masks, np.concatenate((cta, ctb, ctc), axis=4)
def combine(e, g, p1, q1):
    """Apply a two-channel correction *e* to image *g* and re-binarise.

    The first half of *e* along axis 4 adds pixels where it exceeds the
    threshold *p1*; the second half removes pixels where it exceeds *q1*.
    The result is squashed back to {-1, 0, 1} with np.sign.
    """
    add_ch, sub_ch = np.split(e, 2, axis=4)
    correction = np.sign(add_ch - p1) - np.sign(sub_ch - q1)
    return np.sign(g + correction)
def merge(yo, error, p, q):
    """Apply the three per-channel corrections in *error* (6 channels, two
    per image channel) to the three channels of *yo* and re-assemble the
    result along axis 4."""
    err_channels = np.split(error, 3, axis=4)
    img_channels = np.split(yo, 3, axis=4)
    corrected = [combine(e, g, p, q)
                 for e, g in zip(err_channels, img_channels)]
    return np.concatenate(corrected, axis=4)
def wrt(yo, error, name, p, q, c):
    # Merge the model output into yo with thresholds (p, q), append the 3
    # merged channels to the 10 input channels, tile all 13 channels side
    # by side into one grayscale strip and save it as an RGB image for
    # timestep c.
    # NOTE(review): reads module globals batch_sequence, train_seq,
    # image_size, files and sequence instead of taking them as parameters;
    # `all` shadows the builtin.
    out = merge(yo, error, p, q)
    all = np.append(batch_sequence, out, axis=4)
    all = all.reshape((train_seq, image_size, image_size, 13))
    sp = np.split(all, train_seq, axis=0)
    sp = [s.reshape((image_size, image_size, 13)) for s in sp]
    haa = np.concatenate(sp, axis=0)
    # lay the 13 channels out horizontally
    jaa = np.concatenate(np.split(haa, 13, axis=2), axis=1)
    # rescale [-1, 1] -> [0, 1] and replicate into RGB
    fa = (jaa+1.)/2.
    yo = np.concatenate((fa, fa, fa), axis=2)
    scipy.misc.imsave(files[sequence].format('out', c, name), yo)
# test
# Inference: for each sequence, reset the stateful LSTM, then feed the
# sequence chunk by chunk and write merged predictions at three different
# (p, q) threshold pairs.
number_of_sequences = files.size
for sequence in range(number_of_sequences):
    print('S: {} '.format(sequence))
    seq = load_sequence(files[sequence])
    batch_sequences = np.split(seq, train_cnt, axis=1)
    model.reset_states()
    c = 0
    for batch_sequence in batch_sequences:
        data, gt, yo = extractGT(batch_sequence)
        error = model.predict_on_batch(data)
        wrt(yo, error, 'o1', 0.5, 0.5, c)
        wrt(yo, error, 'o2', 0.3, 0.8, c)
        wrt(yo, error, 'o3', 0.8, 0.3, c)
        c = c + 1
| ultra-lstm/RNA-GAN | Refinement/predict.py | Python | mit | 7,897 |
import RPi.GPIO as GPIO
import time
import utils
import therm
# Use physical (board) pin numbering.
GPIO.setmode(GPIO.BOARD)
#pwr = utils.PSU(13, 15)
#pwr.on()
#pwr.off()
# Discover attached 1-wire thermometer addresses (result unused below; the
# sensor ids are hard-coded instead).
adresses = therm.get_adr()
samples = 5
therms = []
now = time.time()
# One sensor per measurement point -- presumably ambient, three cooler
# positions (bottom/middle/top) and heat sink; device ids are
# hardware-specific.  TODO confirm mapping.
t_amb = therm.Therm('28-000004e08693')
t_c_b = therm.Therm('28-000004e0f7cc')
t_c_m = therm.Therm('28-000004e0840a')
t_c_t = therm.Therm('28-000004e08e26')
t_hs = therm.Therm('28-000004e0804f')
# print elapsed setup time, then time the sampling loop
print time.time() - now
now = time.time()
for i in range(samples):
    temp_row = [t_amb.read_temp(), t_c_b.read_temp(), t_c_m.read_temp(), t_c_t.read_temp(), t_hs.read_temp()]
    print temp_row
    therms.append(temp_row)
print time.time() - now
now = time.time()
print therms
#GPIO.cleanup()
| Wollert/beer | test_therm.py | Python | mit | 709 |
#encoding:utf-8
from user import app

if __name__ == '__main__':
    # Listen on all interfaces on port 9002; debug mode is development-only.
    app.run(host='0.0.0.0',port=9002,debug=True)
"""Configure heppyplotlib or the underlying matplotlib."""
import matplotlib.pyplot as plt
def use_tex(use_serif=True, overwrite=True, preamble=None):
    """Configure pyplot to use LaTeX for text rendering.

    An existing usetex setting is respected unless ``overwrite`` is true.
    A caller-supplied ``preamble`` (list of LaTeX lines) replaces the
    built-in serif/sans-serif preamble entirely.
    """
    if plt.rcParams['text.usetex'] and not overwrite:
        print("Will not override tex settings ...")
        return
    print("Will use tex for rendering ...")
    if preamble is None:
        if use_serif:
            plt.rc('font', family='serif')
            preamble = [r'\usepackage{amsmath}',
                        r'\usepackage{siunitx}',
                        r'\usepackage{hepnames}']
        else:
            # note that we do note even have a capital delta character (\Delta) apparently ...
            # TODO: use a more complete sans serif font
            preamble = [r'\usepackage{amsmath}',
                        r'\renewcommand*\familydefault{\sfdefault}',
                        r'\usepackage{siunitx}',
                        r'\usepackage{hepnames}',
                        r'\sisetup{number-mode=text}', # force siunitx to actually use your fonts
                        r'\usepackage{sansmath}', # load up the sansmath for sans-serif math
                        r'\sansmath'] # enable sansmath
    plt.rcParams['text.latex.preamble'] = preamble
    plt.rc('text', usetex=True)
def set_font_sizes(normal=9, small=8):
    r"""Configure pyplot to use these two font sizes (in points).

    Match LaTeX paper font sizes using:

    .. code-block:: latex

        \makeatletter
        \newcommand\thefontsize[1]{{#1 The current font size is: \f@size pt\par}}
        \makeatother

    e.g. extract the font sizes for captions and subcaptions.
    """
    # Titles/labels use the "normal" size (LaTeX \small, like captions);
    # legend and tick labels use "small" (\footnotesize, like subcaptions).
    normal_keys = ('font.size', 'figure.titlesize', 'axes.titlesize',
                   'axes.labelsize')
    small_keys = ('legend.fontsize', 'xtick.labelsize', 'ytick.labelsize')
    params = {key: normal for key in normal_keys}
    params.update({key: small for key in small_keys})
    plt.rcParams.update(params)
def set_figure_size(latex_width, aspect_ratio=0.6875):
    r"""Set the default figure size from a width given in LaTeX points.

    Match the LaTeX paper text width using ``\the\textwidth``.
    """
    inches_per_tex_point = 1.0 / 72.27  # TeX defines 72.27 pt per inch
    width_inches = latex_width * inches_per_tex_point
    plt.rc('figure', figsize=[width_inches, width_inches * aspect_ratio])
| ebothmann/heppyplotlib | heppyplotlib/configuration.py | Python | mit | 2,563 |
import keras
import keras.layers
import keras.models
def concatenate(x):
    """Concatenate a list of Keras tensors, bridging the Keras 1/2 APIs."""
    if hasattr(keras.layers, 'Concatenate'):
        # Keras >= 2 exposes a layer class.
        layer = keras.layers.Concatenate()
        return layer(x)
    # Keras 1 fallback: functional merge helper.
    return keras.layers.merge(x, mode='concat')
def add(x):
    """Element-wise sum of a list of Keras tensors, bridging Keras 1/2 APIs."""
    if hasattr(keras.layers, 'Add'):
        # Keras >= 2 exposes a layer class.
        layer = keras.layers.Add()
        return layer(x)
    # Keras 1 fallback: functional merge helper.
    return keras.layers.merge(x, mode='sum')
def Model(input, output, **kwargs):
    """Build a keras Model across the 1.x/2.x keyword rename (input(s)/output(s)).

    Note: the parameter name `input` shadows the builtin but is kept for
    keyword-argument compatibility with existing callers.
    """
    major_version = int(keras.__version__.split('.')[0])
    if major_version >= 2:
        return keras.models.Model(inputs=input, outputs=output, **kwargs)
    return keras.models.Model(input=input, output=output, **kwargs)
| sahabi/opt | rl/keras_future.py | Python | mit | 618 |
class ShirtsioError(Exception):
    """Base class for Shirts.io API errors.

    Carries the raw HTTP body (decoded to text when given as bytes), the
    HTTP status code, and the parsed JSON payload, when available.
    """

    def __init__(self, message=None, http_body=None, http_status=None, json_body=None):
        super(ShirtsioError, self).__init__(message)
        # Decode only truthy byte bodies; None / empty stay as given.
        if http_body:
            http_body = http_body.decode('utf-8')
        self.http_body = http_body
        self.http_status = http_status
        self.json_body = json_body
class APIError(ShirtsioError):
    """Generic server-side API failure."""
    pass
class APIConnectionError(ShirtsioError):
    """Raised when the API endpoint cannot be reached."""
    pass
class InvalidRequestError(ShirtsioError):
    """Raised when the request parameters are invalid.

    The original defined an ``__init__`` that only forwarded the exact
    base-class arguments to ``super().__init__`` — redundant, so removed
    (``message`` additionally gains the base class's default, which is
    backward-compatible).
    """
    pass
class AuthenticationError(ShirtsioError):
    """Raised when API authentication fails."""
    pass
import chaospy
import numpy
import pytest
@pytest.fixture
def samples(joint):
    # 10 quasi-random (Sobol) samples from the conftest-provided joint dist.
    return joint.sample(10, rule="sobol")
@pytest.fixture
def evaluations(model_solver, samples):
    # Evaluate the model once per sample (samples are column vectors).
    return numpy.array([model_solver(sample) for sample in samples.T])
@pytest.fixture
def expansion(samples):
    # Lagrange polynomial basis interpolating at the sample nodes.
    return chaospy.lagrange_polynomial(samples)
@pytest.fixture
def lagrange_approximation(evaluations, expansion):
    # Interpolating surrogate: evaluation-weighted sum of basis polynomials.
    return chaospy.sum(evaluations.T*expansion, axis=-1).T
def test_lagrange_mean(lagrange_approximation, joint, true_mean):
    # Surrogate mean should match the reference mean within 0.1%.
    assert numpy.allclose(chaospy.E(lagrange_approximation, joint), true_mean, rtol=1e-3)
def test_lagrange_variance(lagrange_approximation, joint, true_variance):
    # Variance converges more slowly than the mean; allow 1%.
    assert numpy.allclose(chaospy.Var(lagrange_approximation, joint), true_variance, rtol=1e-2)
| jonathf/chaospy | tests/test_lagrange_polynomials.py | Python | mit | 800 |
import nose
from os import path

# Absolute path of this runner script and of its sibling "tests" directory.
file_path = path.abspath(__file__)
tests_path = path.join(path.abspath(path.dirname(file_path)), "tests")
# Run the suite with coverage restricted to the `frapalyzer` package.
nose.main(argv=[path.abspath(__file__), "--with-coverage", "--cover-erase", "--cover-package=frapalyzer", tests_path])
| rbnvrw/FRAPalyzer | test.py | Python | mit | 258 |
from jira_extended import JIRA

# Example usage: replace the <url>/<user>/<password> placeholders before running.
jira = JIRA(
    '<url>',
    basic_auth=(
        '<user>',
        '<password>',
    ),
    options={
        'extended_url': '<url>',
    }
)
# Move the first issue matching the JQL query into PROJECT2.
jira.search_issues('project = "PROJECT1"')[0].move('PROJECT2')
| gillarkod/jira_extended | example.py | Python | mit | 240 |
from flask import Blueprint
import flask_restx
from flask_restx import Resource
from flask import request
# import subprocess
# from os import path
# from flask import redirect
from sanskrit_parser.base.sanskrit_base import SanskritObject, SLP1
from sanskrit_parser.parser.sandhi_analyzer import LexicalSandhiAnalyzer
from sanskrit_parser import __version__
from sanskrit_parser import Parser
URL_PREFIX = '/v1'

api_blueprint = Blueprint(
    'sanskrit_parser', __name__,
    template_folder='templates'
)

# REST API root; interactive swagger docs are served under /v1/docs.
# Fix: corrected the misspelling "avalilable" in the user-facing description.
api = flask_restx.Api(app=api_blueprint, version='1.0', title='sanskrit_parser API',
                      description='For detailed intro and to report issues: see <a href="https://github.com/kmadathil/sanskrit_parser">here</a>. '
                      'A list of REST and non-REST API routes available on this server: <a href="../sitemap">sitemap</a>.',
                      default_label=api_blueprint.name,
                      prefix=URL_PREFIX, doc='/docs')

# Shared sandhi analyzer instance used by the Tags and Splits endpoints.
analyzer = LexicalSandhiAnalyzer()
def jedge(pred, node, label):
    """Serialize a parse-graph edge as a 4-tuple of Devanagari strings:
    (node pada, node tags, edge label, predecessor pada)."""
    return (node.pada.devanagari(strict_io=False),
            jtag(node.getMorphologicalTags()),
            SanskritObject(label, encoding=SLP1).devanagari(strict_io=False),
            pred.pada.devanagari(strict_io=False))
def jnode(node):
    """ Helper to translate parse node into serializable format"""
    # Same shape as jedge(), with empty label/predecessor slots.
    return (node.pada.devanagari(strict_io=False),
            jtag(node.getMorphologicalTags()), "", "")
def jtag(tag):
    """Convert one (root, tagset) morphological tag to Devanagari strings."""
    root, tagset = tag
    return (root.devanagari(strict_io=False),
            [item.devanagari(strict_io=False) for item in tagset])
def jtags(tags):
    """Convert a sequence of morphological tags to serializable form."""
    return list(map(jtag, tags))
@api.route('/version/')
class Version(Resource):
    """Reports the installed sanskrit_parser library version."""

    def get(self):
        """Library Version"""
        return {"version": str(__version__)}
@api.route('/tags/<string:p>')
class Tags(Resource):
    """Morphological-tag lookup endpoint."""

    def get(self, p):
        """ Get lexical tags for p """
        word = SanskritObject(p, strict_io=False)
        tags = analyzer.getMorphologicalTags(word)
        # No tags found -> empty list rather than null in the JSON payload.
        ptags = jtags(tags) if tags is not None else []
        return {"input": p, "devanagari": word.devanagari(), "tags": ptags}
@api.route('/splits/<string:v>')
class Splits(Resource):
    def get(self, v):
        """ Get sandhi splits for v """
        # ?strict=false relaxes the input transliteration rules.
        strict_p = True
        if request.args.get("strict") == "false":
            strict_p = False
        vobj = SanskritObject(v, strict_io=strict_p, replace_ending_visarga=None)
        g = analyzer.getSandhiSplits(vobj)
        if g:
            # Up to 10 split paths through the sandhi graph.
            splits = g.find_all_paths(10)
            jsplits = [[ss.devanagari(strict_io=False) for ss in s] for s in splits]
        else:
            jsplits = []
        r = {"input": v, "devanagari": vobj.devanagari(), "splits": jsplits}
        return r
@api.route('/parse-presegmented/<string:v>')
class Parse_Presegmented(Resource):
    def get(self, v):
        """Parse a presegmented sentence; return serializable parses and DOT graphs.

        Fixes: a stray debug ``print(v)`` was removed, and ``sdot``/``pdots``
        are now initialised so an empty split generator no longer raises
        NameError when building the response.
        """
        # ?strict=false relaxes the input transliteration rules.
        strict_p = True
        if request.args.get("strict") == "false":
            strict_p = False
        vobj = SanskritObject(v, strict_io=strict_p, replace_ending_visarga=None)
        parser = Parser(input_encoding="SLP1",
                        output_encoding="Devanagari",
                        replace_ending_visarga='s')
        mres = []
        sdot = None
        pdots = []
        # NOTE: as in the original, only the last split's parses are kept.
        for split in parser.split(vobj.canonical(), limit=10, pre_segmented=True):
            parses = list(split.parse(limit=10))
            sdot = split.to_dot()
            mres = [x.serializable() for x in parses]
            pdots = [x.to_dot() for x in parses]
        r = {"input": v, "devanagari": vobj.devanagari(), "analysis": mres,
             "split_dot": sdot,
             "parse_dots": pdots}
        return r
@api.route('/presegmented/<string:v>')
class Presegmented(Resource):
    def get(self, v):
        """ Presegmented Split """
        vobj = SanskritObject(v, strict_io=True, replace_ending_visarga=None)
        parser = Parser(input_encoding="SLP1",
                        output_encoding="Devanagari",
                        replace_ending_visarga='s')
        # pre_segmented: words are already separated; only resolve sandhi.
        splits = parser.split(vobj.canonical(), limit=10, pre_segmented=True)
        r = {"input": v, "devanagari": vobj.devanagari(), "splits": [x.serializable()['split'] for x in splits]}
        return r
| kmadathil/sanskrit_parser | sanskrit_parser/rest_api/api_v1.py | Python | mit | 4,459 |
# -*- coding: utf-8 -*-
#
# pymfe documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 11 12:35:11 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# NOTE(review): shlex is imported but unused — sphinx-quickstart boilerplate.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# autodoc/autosummary pull API docs from the package, imgmath renders math
# as images, numpydoc parses NumPy-style docstrings.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.todo',
    'sphinx.ext.imgmath',
    'sphinx.ext.viewcode',
    'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymfe'
copyright = u'2015, Michael J. Ireland'
author = u'Michael J. Ireland'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymfedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pymfe.tex', u'pymfe Documentation',
u'Michael J. Ireland', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pymfe', u'pymfe Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pymfe', u'pymfe Documentation',
author, 'pymfe', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mikeireland/pymfe | doc/conf.py | Python | mit | 9,303 |
# -*- coding: utf-8 -*-
"""
flask_security.forms
~~~~~~~~~~~~~~~~~~~~
Flask-Security forms module
:copyright: (c) 2012 by Matt Wright.
:copyright: (c) 2017 by CERN.
:license: MIT, see LICENSE for more details.
"""
import inspect
from flask import Markup, current_app, flash, request
from flask_login import current_user
from flask_wtf import FlaskForm as BaseForm
from werkzeug.local import LocalProxy
from wtforms import BooleanField, Field, HiddenField, PasswordField, \
StringField, SubmitField, ValidationError, validators
from .babel import _get_i18n_domain
from .confirmable import requires_confirmation
from .utils import _, config_value, get_message, url_for_security, \
validate_redirect_url, verify_and_update_password
# Convenient reference to the active Flask-Security user/role datastore.
_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)

# Default (lazily translated) labels for the built-in form fields.
_default_field_labels = {
    'email': _('Email Address'),
    'password': _('Password'),
    'remember_me': _('Remember Me'),
    'login': _('Login'),
    'register': _('Register'),
    'send_confirmation': _('Resend Confirmation Instructions'),
    'recover_password': _('Recover Password'),
    'reset_password': _('Reset Password'),
    'retype_password': _('Retype Password'),
    'new_password': _('New Password'),
    'change_password': _('Change Password'),
    'send_login_link': _('Send Login Link')
}
class ValidatorMixin(object):
    """Resolves ALL-CAPS message keys through get_message at validation time."""
    def __call__(self, form, field):
        # An upper-case message is a config key; resolve it once and cache
        # the localized text on the (shared) validator instance.
        if self.message and self.message.isupper():
            self.message = get_message(self.message)[0]
        return super(ValidatorMixin, self).__call__(form, field)
class EqualTo(ValidatorMixin, validators.EqualTo):
    """EqualTo validator with message-key resolution."""
    pass
class Required(ValidatorMixin, validators.DataRequired):
    """DataRequired validator with message-key resolution."""
    pass
class Email(ValidatorMixin, validators.Email):
    """Email validator with message-key resolution."""
    pass
class Length(ValidatorMixin, validators.Length):
    """Length validator with message-key resolution."""
    pass
# Shared validator instances; the ALL-CAPS message keys are resolved to
# localized text lazily by ValidatorMixin.
email_required = Required(message='EMAIL_NOT_PROVIDED')
email_validator = Email(message='INVALID_EMAIL_ADDRESS')
password_required = Required(message='PASSWORD_NOT_PROVIDED')
password_length = Length(min=6, max=128, message='PASSWORD_INVALID_LENGTH')
def get_form_field_label(key):
    """Return the lazily-translated label for a built-in field key ('' if unknown)."""
    return _get_i18n_domain(current_app).lazy_gettext(_default_field_labels.get(key, ''))
def unique_user_email(form, field):
    """WTForms validator: reject an email already registered in the datastore."""
    if _datastore.get_user(field.data) is not None:
        raise ValidationError(
            get_message('EMAIL_ALREADY_ASSOCIATED', email=field.data)[0])
def valid_user_email(form, field):
    """WTForms validator: require an existing user; stores it on form.user."""
    form.user = _datastore.get_user(field.data)
    if form.user is None:
        raise ValidationError(get_message('USER_DOES_NOT_EXIST')[0])
class Form(BaseForm):
    """Base form: disables the CSRF token time limit when the app is testing."""
    def __init__(self, *args, **kwargs):
        if current_app.testing:
            # No CSRF token expiry in tests.
            self.TIME_LIMIT = None
        super(Form, self).__init__(*args, **kwargs)
class EmailFormMixin():
    """Mixin adding a required, format-validated `email` field."""
    email = StringField(
        get_form_field_label('email'),
        validators=[email_required, email_validator])
class UserEmailFormMixin():
    """Mixin: `email` must belong to an existing user (stored on form.user)."""
    # Populated by the valid_user_email validator during validation.
    user = None
    email = StringField(
        get_form_field_label('email'),
        validators=[email_required, email_validator, valid_user_email])
class UniqueEmailFormMixin():
    """Mixin: `email` must not already be registered."""
    email = StringField(
        get_form_field_label('email'),
        validators=[email_required, email_validator, unique_user_email])
class PasswordFormMixin():
    """Mixin adding a required `password` field (no length check)."""
    password = PasswordField(
        get_form_field_label('password'), validators=[password_required])
class NewPasswordFormMixin():
    """Mixin adding a required `password` field with length validation (6-128)."""
    password = PasswordField(
        get_form_field_label('password'),
        validators=[password_required, password_length])
class PasswordConfirmFormMixin():
    """Mixin: `password_confirm` must equal the form's `password` field."""
    password_confirm = PasswordField(
        get_form_field_label('retype_password'),
        validators=[EqualTo('password', message='RETYPE_PASSWORD_MISMATCH')])
class NextFormMixin():
    """Mixin adding a hidden `next` redirect target, validated as safe."""
    next = HiddenField()

    def validate_next(self, field):
        # Reject open-redirect targets: clear the value and surface a flash.
        if field.data and not validate_redirect_url(field.data):
            field.data = ''
            flash(*get_message('INVALID_REDIRECT'))
            raise ValidationError(get_message('INVALID_REDIRECT')[0])
class RegisterFormMixin():
    """Mixin adding the register submit button."""
    submit = SubmitField(get_form_field_label('register'))
def to_dict(form):
    """Return {name: data} for form fields that map to user-model attributes."""
    def _maps_to_user_attr(member):
        # Only real wtforms fields whose name exists on the user model.
        return (isinstance(member, Field) and
                hasattr(_datastore.user_model, member.name))

    return {name: field.data
            for name, field in inspect.getmembers(form, _maps_to_user_attr)}
class SendConfirmationForm(Form, UserEmailFormMixin):
    """The default resend-confirmation form (original docstring mislabeled
    it as the forgot password form)."""

    submit = SubmitField(get_form_field_label('send_confirmation'))

    def __init__(self, *args, **kwargs):
        super(SendConfirmationForm, self).__init__(*args, **kwargs)
        # Allow pre-filling the email from the query string on GET.
        if request.method == 'GET':
            self.email.data = request.args.get('email', None)

    def validate(self):
        if not super(SendConfirmationForm, self).validate():
            return False
        # Nothing to resend for an already-confirmed account.
        if self.user.confirmed_at is not None:
            self.email.errors.append(get_message('ALREADY_CONFIRMED')[0])
            return False
        return True
class ForgotPasswordForm(Form, UserEmailFormMixin):
    """The default forgot password form"""

    submit = SubmitField(get_form_field_label('recover_password'))

    def validate(self):
        if not super(ForgotPasswordForm, self).validate():
            return False
        # Unconfirmed accounts must confirm before recovering a password.
        if requires_confirmation(self.user):
            self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
            return False
        return True
class PasswordlessLoginForm(Form, UserEmailFormMixin):
    """The passwordless login form.

    The original defined an ``__init__`` that only forwarded its arguments
    to ``super().__init__`` — redundant, so removed.
    """

    submit = SubmitField(get_form_field_label('send_login_link'))

    def validate(self):
        """Standard email checks plus rejection of inactive accounts."""
        if not super(PasswordlessLoginForm, self).validate():
            return False
        if not self.user.is_active:
            self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
            return False
        return True
class LoginForm(Form, NextFormMixin):
    """The default login form"""

    # Fields deliberately carry no validators: all failures are reported
    # through validate() below so error ordering is controlled.
    email = StringField(get_form_field_label('email'))
    password = PasswordField(get_form_field_label('password'))
    remember = BooleanField(get_form_field_label('remember_me'))
    submit = SubmitField(get_form_field_label('login'))

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        if not self.next.data:
            self.next.data = request.args.get('next', '')
        self.remember.default = config_value('DEFAULT_REMEMBER_ME')
        # When password recovery is enabled, link to it under the field.
        if current_app.extensions['security'].recoverable and \
                not self.password.description:
            html = Markup('<a href="{url}">{message}</a>'.format(
                url=url_for_security("forgot_password"),
                message=get_message("FORGOT_PASSWORD")[0],
            ))
            self.password.description = html

    def validate(self):
        if not super(LoginForm, self).validate():
            return False
        if not self.email.data or self.email.data.strip() == '':
            self.email.errors.append(get_message('EMAIL_NOT_PROVIDED')[0])
            return False
        if not self.password.data or self.password.data.strip() == '':
            self.password.errors.append(
                get_message('PASSWORD_NOT_PROVIDED')[0])
            return False
        self.user = _datastore.get_user(self.email.data)
        if self.user is None:
            self.email.errors.append(get_message('USER_DOES_NOT_EXIST')[0])
            return False
        if not self.user.password:
            self.password.errors.append(get_message('PASSWORD_NOT_SET')[0])
            return False
        # Also migrates legacy password hashes on a successful check.
        if not verify_and_update_password(self.password.data, self.user):
            self.password.errors.append(get_message('INVALID_PASSWORD')[0])
            return False
        if requires_confirmation(self.user):
            self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
            return False
        if not self.user.is_active:
            self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
            return False
        return True
class ConfirmRegisterForm(Form, RegisterFormMixin,
                          UniqueEmailFormMixin, NewPasswordFormMixin):
    """Registration form without a password-confirmation field; all fields
    are supplied by the mixins."""
    pass
class RegisterForm(ConfirmRegisterForm, PasswordConfirmFormMixin,
                   NextFormMixin):
    """Registration form with password confirmation and next-redirect support."""
    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        if not self.next.data:
            self.next.data = request.args.get('next', '')
class ResetPasswordForm(Form, NewPasswordFormMixin, PasswordConfirmFormMixin):
    """The default reset password form"""
    # Password fields come from the mixins; only the submit button is declared.
    submit = SubmitField(get_form_field_label('reset_password'))
class ChangePasswordForm(Form, PasswordFormMixin):
    """The default change password form"""

    new_password = PasswordField(
        get_form_field_label('new_password'),
        validators=[password_required, password_length])

    new_password_confirm = PasswordField(
        get_form_field_label('retype_password'),
        validators=[EqualTo('new_password',
                            message='RETYPE_PASSWORD_MISMATCH')])

    submit = SubmitField(get_form_field_label('change_password'))

    def validate(self):
        if not super(ChangePasswordForm, self).validate():
            return False
        # The current password must be correct before accepting a new one.
        if not verify_and_update_password(self.password.data, current_user):
            self.password.errors.append(get_message('INVALID_PASSWORD')[0])
            return False
        # Disallow reusing the current password.
        if self.password.data.strip() == self.new_password.data.strip():
            self.password.errors.append(get_message('PASSWORD_IS_THE_SAME')[0])
            return False
        return True
| themylogin/flask-security | flask_security/forms.py | Python | mit | 9,925 |
#!/usr/bin/env python
# http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
import re
import unicodedata
def strip_accents(text):
    """Return *text* with all accents removed (ASCII-only result).

    Decomposes to NFD so accents become separate combining marks, then
    drops every non-ASCII code point.

    :param text: The input string.
    :returns: The processed string.
    """
    try:
        # Python 2: promote byte strings to unicode before normalizing.
        text = unicode(text, 'utf-8')
    except NameError:
        # Python 3: str is already unicode.
        pass
    decomposed = unicodedata.normalize('NFD', text)
    ascii_only = decomposed.encode('ascii', 'ignore').decode("utf-8")
    return str(ascii_only)
def text_to_id(text):
    """Slugify *text*: lowercase, strip accents, spaces to underscores,
    and drop anything outside [0-9a-zA-Z_-].

    :param text: The input string.
    :returns: The processed string.
    """
    slug = strip_accents(text.lower())
    slug = re.sub('[ ]+', '_', slug)
    return re.sub('[^0-9a-zA-Z_-]', '', slug)
| oh6hay/refworks-bibtex-postprocess | textutil.py | Python | mit | 943 |
import struct

# DNS response codes (RFC 1035 RCODE values 0-5).
OK, EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED = range(6)

# Query types 251-255: incremental/full zone transfers, mailbox queries,
# and the wildcard "all records" query.
IXFR, AXFR, MAILB, MAILA, ALL_RECORDS = range(251, 256)

# Query/record classes (RFC 1035): Internet, CSNET, CHAOS, Hesiod.
IN, CS, CH, HS = range(1, 5)

from io import BytesIO
class Message:
"""
L{Message} contains all the information represented by a single
DNS request or response.
@ivar id: See L{__init__}
@ivar answer: See L{__init__}
@ivar opCode: See L{__init__}
@ivar recDes: See L{__init__}
@ivar recAv: See L{__init__}
@ivar auth: See L{__init__}
@ivar rCode: See L{__init__}
@ivar trunc: See L{__init__}
@ivar maxSize: See L{__init__}
@ivar authenticData: See L{__init__}
@ivar checkingDisabled: See L{__init__}
@ivar queries: The queries which are being asked of or answered by
DNS server.
@type queries: L{list} of L{Query}
@ivar answers: Records containing the answers to C{queries} if
this is a response message.
@type answers: L{list} of L{RRHeader}
@ivar authority: Records containing information about the
authoritative DNS servers for the names in C{queries}.
@type authority: L{list} of L{RRHeader}
@ivar additional: Records containing IP addresses of host names
in C{answers} and C{authority}.
@type additional: L{list} of L{RRHeader}
"""
headerFmt = "!H2B4H"
headerSize = struct.calcsize(headerFmt)
# Question, answer, additional, and nameserver lists
queries = answers = add = ns = None
def __init__(self, id=0, answer=0, opCode=0, recDes=0, recAv=0,
auth=0, rCode=OK, trunc=0, maxSize=512,
authenticData=0, checkingDisabled=0):
"""
@param id: A 16 bit identifier assigned by the program that
generates any kind of query. This identifier is copied to
the corresponding reply and can be used by the requester
to match up replies to outstanding queries.
@type id: L{int}
@param answer: A one bit field that specifies whether this
message is a query (0), or a response (1).
@type answer: L{int}
@param opCode: A four bit field that specifies kind of query in
this message. This value is set by the originator of a query
and copied into the response.
@type opCode: L{int}
@param recDes: Recursion Desired - this bit may be set in a
query and is copied into the response. If RD is set, it
directs the name server to pursue the query recursively.
Recursive query support is optional.
@type recDes: L{int}
@param recAv: Recursion Available - this bit is set or cleared
in a response and denotes whether recursive query support
is available in the name server.
@type recAv: L{int}
@param auth: Authoritative Answer - this bit is valid in
responses and specifies that the responding name server
is an authority for the domain name in question section.
@type auth: L{int}
@ivar rCode: A response code, used to indicate success or failure in a
message which is a response from a server to a client request.
@type rCode: C{0 <= int < 16}
@param trunc: A flag indicating that this message was
truncated due to length greater than that permitted on the
transmission channel.
@type trunc: L{int}
@param maxSize: The requestor's UDP payload size is the number
of octets of the largest UDP payload that can be
reassembled and delivered in the requestor's network
stack.
@type maxSize: L{int}
@param authenticData: A flag indicating in a response that all
the data included in the answer and authority portion of
the response has been authenticated by the server
according to the policies of that server.
See U{RFC2535 section-6.1<https://tools.ietf.org/html/rfc2535#section-6.1>}.
@type authenticData: L{int}
@param checkingDisabled: A flag indicating in a query that
pending (non-authenticated) data is acceptable to the
resolver sending the query.
See U{RFC2535 section-6.1<https://tools.ietf.org/html/rfc2535#section-6.1>}.
@type authenticData: L{int}
"""
self.maxSize = maxSize
self.id = id
self.answer = answer
self.opCode = opCode
self.auth = auth
self.trunc = trunc
self.recDes = recDes
self.recAv = recAv
self.rCode = rCode
self.authenticData = authenticData
self.checkingDisabled = checkingDisabled
self.queries = []
self.answers = []
self.authority = []
self.additional = []
def addQuery(self, name, type=ALL_RECORDS, cls=IN):
"""
Add another query to this Message.
@type name: C{bytes}
@param name: The name to query.
@type type: C{int}
@param type: Query type
@type cls: C{int}
@param cls: Query class
"""
self.queries.append(Query(name, type, cls))
    def encode(self, strio):
        """
        Write this message to ``strio`` in RFC 1035 wire format: a fixed
        header followed by the encoded query, answer, authority and
        additional sections.

        All four sections are encoded into a temporary buffer first so the
        total size can be checked against ``self.maxSize`` before the header
        (which contains the truncation flag) is written.
        """
        compDict = {}  # shared name-compression dictionary for all sections
        body_tmp = BytesIO()
        for q in self.queries:
            q.encode(body_tmp, compDict)
        for q in self.answers:
            q.encode(body_tmp, compDict)
        for q in self.authority:
            q.encode(body_tmp, compDict)
        for q in self.additional:
            q.encode(body_tmp, compDict)
        body = body_tmp.getvalue()
        size = len(body) + self.headerSize
        if self.maxSize and size > self.maxSize:
            # Mark the message truncated and cut the body down to fit.
            # NOTE(review): the section counts written below still reflect
            # the full lists, and the cut can land mid-record -- receivers
            # are expected to retry over TCP on TC=1; confirm this is the
            # intended behaviour.
            self.trunc = 1
            body = body[:self.maxSize - self.headerSize]
        # Pack the two flag bytes (RFC 1035 section 4.1.1).
        byte3 = (( ( self.answer & 1 ) << 7 )
                 | ((self.opCode & 0xf ) << 3 )
                 | ((self.auth & 1 ) << 2 )
                 | ((self.trunc & 1 ) << 1 )
                 | ( self.recDes & 1 ) )
        byte4 = ( ( (self.recAv & 1 ) << 7 )
                  | ((self.authenticData & 1) << 5)
                  | ((self.checkingDisabled & 1) << 4)
                  | (self.rCode & 0xf ) )
        strio.write(struct.pack(self.headerFmt, self.id, byte3, byte4,
                                len(self.queries), len(self.answers),
                                len(self.authority), len(self.additional)))
        strio.write(body)
def decode(self, strio, length=None):
self.maxSize = 0
header = readPrecisely(strio, self.headerSize)
r = struct.unpack(self.headerFmt, header)
self.id, byte3, byte4, nqueries, nans, nns, nadd = r
self.answer = ( byte3 >> 7 ) & 1
self.opCode = ( byte3 >> 3 ) & 0xf
self.auth = ( byte3 >> 2 ) & 1
self.trunc = ( byte3 >> 1 ) & 1
self.recDes = byte3 & 1
self.recAv = ( byte4 >> 7 ) & 1
self.authenticData = ( byte4 >> 5 ) & 1
self.checkingDisabled = ( byte4 >> 4 ) & 1
self.rCode = byte4 & 0xf
self.queries = []
for i in range(nqueries):
self.name.decode(strio)
buff = readPrecisely(strio, 4)
self.type, self.cls = struct.unpack("!HH", buff)
q = Query()
try:
q.decode(strio)
except EOFError:
return
self.queries.append(q)
items = (
(self.answers, nans),
(self.authority, nns),
(self.additional, nadd))
for (l, n) in items:
self.parseRecords(l, n, strio)
    def parseRecords(self, list, num, strio):
        # Decode ``num`` resource records from ``strio``, appending the
        # resulting RRHeader objects (with their payloads attached) to
        # ``list``. Note: the parameter shadows the ``list`` builtin.
        # Decoding stops quietly when the stream runs short (EOFError).
        for i in range(num):
            header = RRHeader(auth=self.auth)
            try:
                header.decode(strio)
            except EOFError:
                return
            # Map the numeric record type to its Record_* class.
            t = self.lookupRecordType(header.type)
            if not t:
                continue
            header.payload = t(ttl=header.ttl)
            try:
                header.payload.decode(strio, header.rdlength)
            except EOFError:
                return
            list.append(header)
    # Create a mapping from record types to their corresponding Record_*
    # classes. This relies on the global state which has been created so
    # far in initializing this module (so don't define Record classes after
    # this). This runs once, at class-definition time.
    _recordTypes = {}
    for name in globals():
        if name.startswith('Record_'):
            _recordTypes[globals()[name].TYPE] = globals()[name]

    # Clear the iteration variable out of the class namespace so it
    # doesn't become an attribute.
    # NOTE(review): ``del name`` raises NameError if the loop body never
    # bound ``name`` -- assumed safe because the module defines Record_*
    # classes above; confirm.
    del name
    def lookupRecordType(self, type):
        """
        Retrieve the L{IRecord} implementation for the given record type.

        @param type: A record type, such as L{A} or L{NS}.
        @type type: C{int}

        @return: The C{Record_*} class registered for C{type}, or
            C{UnknownRecord} when no specific class is registered. (The
            previous docstring incorrectly claimed C{None} was returned.)
        @rtype: L{types.ClassType}
        """
        return self._recordTypes.get(type, UnknownRecord)
def toStr(self):
"""
Encode this L{Message} into a byte string in the format described by RFC
1035.
@rtype: C{bytes}
"""
strio = BytesIO()
self.encode(strio)
return strio.getvalue()
def fromStr(self, str):
"""
Decode a byte string in the format described by RFC 1035 into this
L{Message}.
@param str: L{bytes}
"""
strio = BytesIO(str)
self.decode(strio)
def readPrecisely(file, l):
    """Read exactly ``l`` bytes from ``file`` or raise EOFError.

    Unlike a bare ``file.read(l)``, a short read is treated as an error
    instead of silently returning fewer bytes.
    """
    data = file.read(l)
    if len(data) == l:
        return data
    raise EOFError
# DNS Protocol Version Query Request
# Hand-built wire-format query for the CHAOS-class TXT record
# "version.bind", then round-tripped through Message.fromStr as a sanity
# check of the decoder above.
# NOTE(review): these payload literals are Python 2 ``str``; under
# Python 3, BytesIO below would require ``b'...'`` literals -- confirm the
# target interpreter before reusing this script.
verPayload = '\x02\xec' # Transaction ID 748
verPayload += '\x01\x00' # Standard query flag (1, 0)
verPayload += '\x00\x01' # Questions 1
verPayload += '\x00\x00' # Number of Answers 0
verPayload += '\x00\x00' # Number of Authoritative Records 0
verPayload += '\x00\x00' # Number of Additional Records 0
verPayload += '\x07\x76\x65\x72\x73\x69\x6f\x6e\x04\x62\x69\x6e\x64\x00\x00\x10\x00\x03' # version.bind Request

# Manually unpack and print the 12-byte header, mirroring Message.decode.
headerFmt = "!H2B4H"
headerSize = struct.calcsize(headerFmt)
strio = BytesIO(verPayload)
print(strio)
header = readPrecisely(strio, headerSize)
print(header)
print(struct.unpack(headerFmt, header))

# Full decode through the Message class.
m = Message()
m.fromStr(verPayload)
| MathYourLife/TSatPy-thesis | sandbox/dns_string_io.py | Python | mit | 10,430 |
"""Functions for TML layout that are used in the grammar to construct DOM-like
node objects used in the 164 layout engine.
"""
def createNode(name, attributes=None, children=None):
    """Creates a DOM-like node object, using the 164 representation so that
    the node can be processed by the 164 layout engine.

    Args:
        name: Node/tag name, stored under the 'name' key.
        attributes: Optional mapping of attribute names to values; it is
            copied, so the caller's dict is never mutated.
        children: Optional sequence of child nodes.

    Returns:
        A dict with the node's attributes plus 'name' and 'children' keys.
    """
    # BUGFIX: ``dict(None)`` raises TypeError, so calling createNode with
    # the documented default (attributes=None) previously crashed. Treat a
    # missing/empty mapping as "no attributes".
    node = dict(attributes) if attributes else {}
    node['name'] = name
    # Represent the list of child nodes as a dict with numeric keys.
    node['children'] = dict(enumerate(children)) if children else {}
    return node
def createWordNodes(text):
    """Return a list of 'Word' nodes, one per whitespace-separated word in
    the given text; each word keeps a single trailing space for layout.
    """
    nodes = []
    for word in text.split():
        nodes.append(createNode('Word', {'word': word + ' '}))
    return nodes
| michelle/sink | 164/tml.py | Python | mit | 733 |
import urllib.parse
from upol_search_engine.upol_crawler.tools import blacklist, robots
def validate_regex(url, regex):
    """Return the match object if ``url`` matches ``regex``, else None."""
    match = regex.match(url)
    return match
def validate_anchor(url):
    """Return False if ``url`` contains a fragment (anchor), True otherwise."""
    # FIX: the old 5-way unpack bound four unused names, the first of which
    # was the typo ``cheme``; only the fragment component is needed.
    anchor = urllib.parse.urlsplit(url).fragment
    return not anchor
def validate_phpbb(url):
    """Return False when a phpBB URL's path/query/fragment contains a
    blacklisted keyword, True otherwise.
    """
    blocked_keywords = ('posting.php',
                        'ucp.php',
                        'view=print',
                        'memberlist.php',
                        'mark')
    parts = urllib.parse.urlsplit(url)
    target = parts.path + parts.query + parts.fragment
    return not any(keyword in target for keyword in blocked_keywords)
def validate_wiki(url):
    """Return False when a wiki URL's path/query/fragment contains '&'
    (multi-parameter action URLs), True otherwise.
    """
    parts = urllib.parse.urlsplit(url)
    target = parts.path + parts.query + parts.fragment
    return '&' not in target
def validate(url, regex, blacklist_list):
    """Run every URL check in order; return (True, None) when all pass,
    otherwise (False, reason) for the first failing check.
    """
    # Checks are evaluated lazily and in order, so the robots.txt lookup
    # (potentially a network round-trip) only runs when everything else
    # already passed.
    checks = (
        (lambda: validate_anchor(url), 'UrlHasAnchor'),
        (lambda: validate_regex(url, regex), 'UrlInvalidRegex'),
        (lambda: not blacklist.is_url_blocked(url, blacklist_list), 'UrlIsBlacklisted'),
        (lambda: robots.is_crawler_allowed(url), 'UrlRobotsBlocked'),
    )
    for passed, reason in checks:
        if not passed():
            return False, reason
    return True, None
| UPOLSearch/UPOL-Search-Engine | upol_search_engine/upol_crawler/core/validator.py | Python | mit | 1,618 |
from __future__ import with_statement
import unittest
import re
import os
import sys
import cStringIO as StringIO
from distutils.version import LooseVersion
import pep8
# Installed pep8 version plus the boundaries of the two supported ranges.
PEP8_VERSION = LooseVersion(pep8.__version__)
PEP8_MAX_OLD_VERSION = LooseVersion('1.0.1')
PEP8_MIN_NEW_VERSION = LooseVersion('1.3.3')

# Check for supported version of the pep8 library,
# which is anything <= 1.0.1, or >= 1.3.3. (yes, there is a gap)
# Versions inside the gap changed the API incompatibly, so refuse to run.
if (PEP8_VERSION > PEP8_MAX_OLD_VERSION and PEP8_VERSION < PEP8_MIN_NEW_VERSION):
    raise ImportError('Bad pep8 version, must be >= %s or <= %s.' % (PEP8_MIN_NEW_VERSION, PEP8_MAX_OLD_VERSION))

# Skip these pep8 errors/warnings
PEP8_IGNORE = (
    'E123', 'E126', 'E127', 'E128', 'E501',
)

# Any file or directory(including subdirectories) matching regex will be skipped.
# Patterns are anchored at the start of the name via the '^' prefix below.
NAMES_TO_SKIP = (
    '.svn',
    '.git',
    'docs',
    'dist',
    'build',
)
NAMES_TO_SKIP = [re.compile('^%s' % n) for n in NAMES_TO_SKIP]
class RedirectIO(object):
    '''Context manager that temporarily swaps sys.stdout/sys.stderr.

    Any target not supplied defaults to the current stream; the originals
    are restored (after flushing the substitutes) on exit.
    '''

    def __init__(self, stdout=None, stderr=None):
        self._stdout = stdout or sys.stdout
        self._stderr = stderr or sys.stderr

    def __enter__(self):
        # Flush the live streams before swapping so buffered output is not
        # attributed to the redirected region.
        sys.stdout.flush()
        sys.stderr.flush()
        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr
        sys.stdout = self._stdout
        sys.stderr = self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
class Pep8TestCase(unittest.TestCase):
    """Container for the per-file pep8 test methods generated below."""

    @classmethod
    def setUpClass(cls):
        """Set up shared pep8 options once for the whole class.

        With a "new" pep8 (>= 1.3.3) a StyleGuide options object is built
        and the shared ignore list appended; old versions configure options
        per file instead (see the module-level loop below), so ``options``
        stays None.
        """
        # FIX: the first parameter of a classmethod receives the class, so
        # it is named ``cls`` rather than the misleading ``self``.
        if PEP8_VERSION >= PEP8_MIN_NEW_VERSION:
            cls.options = pep8.StyleGuide().options
            cls.options.ignore = cls.options.ignore + PEP8_IGNORE
        else:
            cls.options = None
# Populate pep8 test methods, one per non-skipped .py file found.
# For each .py file under the working directory a closure is created that
# runs pep8 against that file (old- or new-API flavour) and it is attached
# to Pep8TestCase so each file shows up as its own test.
ROOT = os.getcwd()
for (dirpath, dirnames, filenames) in os.walk(ROOT, followlinks=True):
    for regex in NAMES_TO_SKIP:
        # Prune skipped directories in place so os.walk does not descend.
        paths = dirnames[:]  # lame list copy
        for path in paths:
            if regex.match(path):
                dirnames.remove(path)
        # NOTE(review): ``files`` is never used afterwards, and because the
        # file loop below sits inside the NAMES_TO_SKIP loop, each test
        # method is (re)attached once per skip pattern -- harmless since
        # setattr is idempotent, but worth confirming this nesting is
        # intentional.
        files = filenames[:]  # lame list copy
        for filename in [f for f in filenames if not regex.match(f)]:
            if not filename.endswith('.py'):
                continue
            fullpath = os.path.join(dirpath, filename)
            if PEP8_VERSION < PEP8_MIN_NEW_VERSION:
                # Old pep8 API: options are configured globally per file.
                def closure(self, fullpath=fullpath):
                    pep8.process_options([
                        '--first', fullpath,
                        '--ignore', ','.join(PEP8_IGNORE)],
                    )
                    pep8.input_file(fullpath)
                    if len(pep8.get_statistics()):
                        self.fail('PEP8 issue in "%s"' % fullpath)
            else:
                # New pep8 API: Checker prints findings to stdout, so the
                # output is captured and re-formatted into the failure
                # message.
                def closure(self, fullpath=fullpath):
                    checker = pep8.Checker(fullpath, options=self.options)
                    capture = StringIO.StringIO()
                    with RedirectIO(capture):
                        errors = checker.check_all()
                    if errors > 0:
                        capture.seek(0)
                        errors = list()
                        for error in capture.readlines():
                            errors.append('./%s' % error[len(ROOT) + 1:].strip())
                        self.fail('PEP8 issue in "%s"\n%s' % (fullpath, '\n'.join(errors)))
            relativepath = fullpath[len(ROOT) + 1:]
            func_name = 'test_pep8./%s' % relativepath  # Surprised invalid identifiers work.
            closure.__name__ = func_name
            setattr(Pep8TestCase, func_name, closure)
| njharman/cuprum | tests/test_pep8.py | Python | mit | 3,761 |
from django.conf.urls import include, url
from . import views
# URL routes for the blog app: the post list at the app root, a single post
# looked up by slug, and the list of posts in one category (also by slug).
urlpatterns = [
    url(r'^$', views.BlogListView.as_view(), name="blog_list"),
    url(r'^(?P<slug>[\w-]+)/$', views.BlogSingleView.as_view(), name="blog_single"),
    url(r'^category/(?P<slug>[\w-]+)/$', views.BlogCategoryView.as_view(), name="blog_category"),
]
| ethdeveloper/ethdeveloper | blog/urls.py | Python | mit | 329 |
# encoding: utf-8
# ## Imports
from threading import local as __local
# Expose these as importable from the top-level `web.core` namespace.
from .application import Application
from .util import lazy
# ## Module Globals
__all__ = ['local', 'Application', 'lazy']  # Symbols exported by this package.

# This is to support the web.ext.local extension, and allow for early importing of the variable.
# ``local`` is a threading.local instance (imported above as __local), giving
# each thread its own independent attribute namespace.
local = __local()
| marrow/WebCore | web/core/__init__.py | Python | mit | 424 |
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the MineCoin-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";

# Template plist from the source tree and its destination inside the built
# .app bundle.
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "MineCoin-Qt.app/Contents/Info.plist"
version = "unknown";

# Scrape the VERSION value out of the qmake project file; the relevant line
# looks like "VERSION = x.y.z", so spaces are stripped and the line split
# on '='.
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=");
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "");

# Substitute the $VERSION and $YEAR placeholders in the plist template and
# write the result into the app bundle.
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)

fOut = open(outFile, "w");
fOut.write(newFileContent);

# NOTE(review): Python 2 print statement -- this script does not run under
# Python 3 as-is; file handles are also never closed (acceptable for a
# short-lived build script).
print "Info.plist fresh created"
| CoinProjects/MineCoin | share/qt/clean_mac_info_plist.py | Python | mit | 897 |
# -*- coding: utf-8 -*-
# urls.py ---
#
# Created: Wed Dec 14 23:02:53 2011 (+0200)
# Author: Janne Kuuskeri
#
from django.conf.urls.defaults import patterns, url
import resources
# One module-level instance of each test resource; the instances are wired
# directly into the URLconf below as view callables.
dictresource = resources.MyDictResource()
textresource = resources.MyTextResource()
respresource = resources.MyRespResource()
authresource = resources.MyAuthResource()
anonresource = resources.MyAnonResource()
permresource = resources.MyPermResource()
noneresource = resources.MyNoneResource()
echoresource = resources.MyEchoResource()
personresource = resources.PersonResource()
mapperresource = resources.MyMapperResource()
decimalresource = resources.MyDecimalResource()
scandicresource = resources.MyScandicResource()
validationresource = resources.MyValidationResource()
scandicjsonresource = resources.MyScandicJsonResource()
defaulttxtmapperresource = resources.MyDefaultMapperResource_1()
defaultobjmapperresource = resources.MyDefaultMapperResource_2()
factoryresource = resources.FactoryResource()

# Resources subject to access control -- presumably consumed by the ACL
# test configuration elsewhere; confirm against the test settings.
acl_resources = (
    dictresource,
    textresource,
    respresource,
    authresource,
    anonresource,
    permresource,
)

# URL routing for the test resources above.
urlpatterns = patterns('',
    url(r'^perm', permresource),
    url(r'^auth$', authresource),
    url(r'^person', personresource),
    url(r'^auth/anon', anonresource),
    url(r'^valid', validationresource, name='validation'),
    url(r'^factory', factoryresource),
    url(r'^mapper/dict', dictresource),
    url(r'^mapper/text', textresource),
    url(r'^mapper/resp', respresource),
    url(r'^mapper/none', noneresource),
    url(r'^mapper/echo', echoresource),
    url(r'^mapper/reverse', mapperresource),
    url(r'^mapper/decimal', decimalresource),
    url(r'^mapper/scandic$', scandicresource),
    url(r'^mapper/scandic/json', scandicjsonresource),
    url(r'^mapper/default/txt$', defaulttxtmapperresource),
    url(r'^mapper/default/obj$', defaultobjmapperresource),
)
#
# urls.py ends here
| wuher/devil | test/deviltest/simple/urls.py | Python | mit | 1,935 |
from django.apps import apps
from rest_framework.serializers import SlugRelatedField, ModelSerializer, ValidationError
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from rest_polymorphic.serializers import PolymorphicSerializer
from taxonomy.models import Community, Taxon
from occurrence.models import (
AreaEncounter,
TaxonAreaEncounter,
CommunityAreaEncounter,
Landform,
RockType,
SoilType,
SoilColour,
Drainage,
SurveyMethod,
SoilCondition,
CountAccuracy,
CountMethod,
CountSubject,
PlantCondition,
DetectionMethod,
Confidence,
ReproductiveMaturity,
AnimalHealth,
AnimalSex,
CauseOfDeath,
SecondarySigns,
SampleType,
SampleDestination,
PermitType,
ObservationGroup,
FireHistory,
FileAttachment,
PlantCount,
AssociatedSpecies,
VegetationClassification,
HabitatComposition,
HabitatCondition,
AreaAssessment,
PhysicalSample,
AnimalObservation,
)
class OccurrenceAreaEncounterPolySerializer(GeoFeatureModelSerializer):
    """Serializer for Occurrence AreaEncounter.

    Emits a GeoJSON Feature whose geometry is taken from the ``geom`` field.
    """

    class Meta:
        model = AreaEncounter
        fields = (
            "id", "code", "label", "name", "description", "as_html", "source", "source_id", "status",
            "encountered_on", "encountered_by", "area_type", "accuracy", "northern_extent",
        )
        geo_field = "geom"
class OccurrenceAreaEncounterPointSerializer(GeoFeatureModelSerializer):
    """Serializer for Occurrence Area.

    Same field set as the Poly variant, but the GeoJSON geometry comes from
    the ``point`` field instead of ``geom``.
    """

    class Meta:
        model = AreaEncounter
        fields = (
            "id", "code", "label", "name", "description", "as_html", "source", "source_id", "status",
            "encountered_on", "encountered_by", "area_type", "accuracy", "northern_extent",
        )
        geo_field = "point"
class OccurrenceTaxonAreaEncounterPolyInlineSerializer(GeoFeatureModelSerializer):
    """Serializer for Occurrence TaxonAreaEncounter to be used inline in TaxonSerializer.

    The ``taxon`` field is excluded because the parent serializer already
    provides that context.
    """

    class Meta:
        exclude = ("taxon", )
        model = TaxonAreaEncounter
        id_field = "id"
        geo_field = "geom"
class OccurrenceTaxonAreaEncounterPolySerializer(GeoFeatureModelSerializer):
    """Serializer for Occurrence TaxonAreaEncounter.

    The related taxon is referenced by its ``name_id`` slug rather than a
    primary key.
    """

    taxon = SlugRelatedField(
        queryset=Taxon.objects.all(), slug_field="name_id")

    class Meta:
        model = TaxonAreaEncounter
        fields = (
            "taxon",
            "id",
            "code",
            "label",
            "name",
            "description",
            "as_html",
            "source",
            "source_id",
            "status",
            "encounter_type",
            "encountered_on",
            "encountered_by",
            "area_type",
            "geolocation_capture_method",
            "accuracy",
            "northern_extent",
            "point"
        )
        id_field = "id"
        geo_field = "geom"
class OccurrenceTaxonAreaEncounterPointSerializer(OccurrenceTaxonAreaEncounterPolySerializer):
    """Serializer for Occurrence TaxonAreaEncounter.

    Identical to the Poly variant except the GeoJSON geometry is the
    ``point`` field.
    """

    class Meta(OccurrenceTaxonAreaEncounterPolySerializer.Meta):
        geo_field = "point"
class OccurrenceCommunityAreaEncounterPolyInlineSerializer(GeoFeatureModelSerializer):
    """Serializer for Occurrence CommunityAreaEncounter to be used inline in CommunitySerializer.

    The ``community`` field is excluded because the parent serializer
    already provides that context.
    """

    class Meta:
        model = CommunityAreaEncounter
        exclude = ("community", )
        id_field = "id"
        geo_field = "geom"
class OccurrenceCommunityAreaEncounterPolySerializer(GeoFeatureModelSerializer):
    """GeoJSON serializer for CommunityAreaEncounter keyed on ``geom``.

    The related community is referenced by its ``code`` slug.
    """

    community = SlugRelatedField(
        queryset=Community.objects.all(), slug_field="code")

    class Meta:
        model = CommunityAreaEncounter
        fields = (
            "community", "id", "code", "label", "name", "description", "as_html", "source", "source_id",
            "status", "encountered_on", "encountered_by", "area_type", "accuracy", "northern_extent",
            "point",
        )
        id_field = "id"
        geo_field = "geom"
class OccurrenceCommunityAreaEncounterPointSerializer(OccurrenceCommunityAreaEncounterPolySerializer):
    """Point-geometry variant of the Community Poly serializer."""

    # NOTE(review): this field redeclaration appears redundant -- the parent
    # class declares an identical ``community`` SlugRelatedField; confirm
    # and consider removing.
    community = SlugRelatedField(
        queryset=Community.objects.all(), slug_field="code")

    class Meta(OccurrenceCommunityAreaEncounterPolySerializer.Meta):
        geo_field = "point"
# ---------------------------------------------------------------------------
# Simple lookup-table serializers: each exposes every field of its model.
# ---------------------------------------------------------------------------
class LandformSerializer(ModelSerializer):

    class Meta:
        model = Landform
        fields = "__all__"


class RockTypeSerializer(ModelSerializer):

    class Meta:
        model = RockType
        fields = "__all__"


class SoilTypeSerializer(ModelSerializer):

    class Meta:
        model = SoilType
        fields = "__all__"


class SoilColourSerializer(ModelSerializer):

    class Meta:
        model = SoilColour
        fields = "__all__"


class DrainageSerializer(ModelSerializer):

    class Meta:
        model = Drainage
        fields = "__all__"


class SurveyMethodSerializer(ModelSerializer):

    class Meta:
        model = SurveyMethod
        fields = "__all__"


class SoilConditionSerializer(ModelSerializer):

    class Meta:
        model = SoilCondition
        fields = "__all__"


class CountAccuracySerializer(ModelSerializer):

    class Meta:
        model = CountAccuracy
        fields = "__all__"


class CountMethodSerializer(ModelSerializer):

    class Meta:
        model = CountMethod
        fields = "__all__"


class CountSubjectSerializer(ModelSerializer):

    class Meta:
        model = CountSubject
        fields = "__all__"


class PlantConditionSerializer(ModelSerializer):

    class Meta:
        model = PlantCondition
        fields = "__all__"


class DetectionMethodSerializer(ModelSerializer):

    class Meta:
        model = DetectionMethod
        fields = "__all__"


class ConfidenceSerializer(ModelSerializer):

    class Meta:
        model = Confidence
        fields = "__all__"


class ReproductiveMaturitySerializer(ModelSerializer):

    class Meta:
        model = ReproductiveMaturity
        fields = "__all__"


class AnimalHealthSerializer(ModelSerializer):

    class Meta:
        model = AnimalHealth
        fields = "__all__"


class AnimalSexSerializer(ModelSerializer):

    class Meta:
        model = AnimalSex
        fields = "__all__"


class CauseOfDeathSerializer(ModelSerializer):

    class Meta:
        model = CauseOfDeath
        fields = "__all__"


class SecondarySignsSerializer(ModelSerializer):

    class Meta:
        model = SecondarySigns
        fields = "__all__"


class SampleTypeSerializer(ModelSerializer):

    class Meta:
        model = SampleType
        fields = "__all__"


class SampleDestinationSerializer(ModelSerializer):

    class Meta:
        model = SampleDestination
        fields = "__all__"


class PermitTypeSerializer(ModelSerializer):

    class Meta:
        model = PermitType
        fields = "__all__"
class ObservationGroupSerializer(ModelSerializer):
    """The ObservationGroup serializer resolves its polymorphic subclasses.

    ObservationGroups have polymorphic subclasses.
    A plain DRF serializer would simply return the shared ObservationGroup
    fields, but not the individual fields partial to its subclasses.

    Overriding the `to_representation` method, this serializer tests the
    object to display for its real instance, and calls the `to_representation`
    from the subclasses serializer.

    `Credits <http://stackoverflow.com/a/19976203/2813717>`_
    `Author <http://stackoverflow.com/users/1514427/michael-van-de-waeter>`_

    NOTE(review): no ``to_representation`` override is actually defined on
    this class -- subclass resolution happens via
    ObservationGroupPolymorphicSerializer below; confirm and update this
    docstring.
    """

    # as_latex = ReadOnlyField()
    encounter = OccurrenceAreaEncounterPointSerializer(read_only=True)

    class Meta:
        model = ObservationGroup
        fields = "__all__"

    def validate(self, data):
        """Raise ValidateError on missing AreaEncounter(source, source_id).
        """
        # The encounter is resolved from (source, source_id) in the raw
        # payload rather than from a writable serializer field.
        if not AreaEncounter.objects.filter(
                source=int(self.initial_data["source"]),
                source_id=str(self.initial_data["source_id"])
        ).exists():
            raise ValidationError(
                "AreaEncounter with source {0} and source_id {1}"
                " does not exist, skipping.".format(
                    int(self.initial_data["source"]),
                    str(self.initial_data["source_id"])))
        return data

    def create(self, validated_data):
        """Create one new object, resolve AreaEncounter from source and source_id.
        """
        validated_data["encounter"] = AreaEncounter.objects.get(
            source=int(self.initial_data["source"]),
            source_id=str(self.initial_data["source_id"]))
        return self.Meta.model.objects.create(**validated_data)
# ---------------------------------------------------------------------------
# Thin per-subclass serializers: each inherits the (source, source_id) ->
# encounter resolution from ObservationGroupSerializer and exposes all
# fields of its model.
# ---------------------------------------------------------------------------
class FileAttachmentSerializer(ObservationGroupSerializer):

    class Meta:
        model = FileAttachment
        fields = "__all__"


class HabitatCompositionSerializer(ObservationGroupSerializer):

    class Meta:
        model = HabitatComposition
        fields = "__all__"


class HabitatConditionSerializer(ObservationGroupSerializer):

    class Meta:
        model = HabitatCondition
        fields = "__all__"


class AreaAssessmentSerializer(ObservationGroupSerializer):

    class Meta:
        model = AreaAssessment
        fields = "__all__"


class FireHistorySerializer(ObservationGroupSerializer):

    class Meta:
        model = FireHistory
        fields = "__all__"


class VegetationClassificationSerializer(ObservationGroupSerializer):

    class Meta:
        model = VegetationClassification
        fields = "__all__"


class PlantCountSerializer(ObservationGroupSerializer):

    # Lookup values are referenced by their ``code`` slug, not by PK.
    count_method = SlugRelatedField(
        queryset=CountMethod.objects.all(), slug_field='code', required=False)
    count_accuracy = SlugRelatedField(
        queryset=CountAccuracy.objects.all(), slug_field='code', required=False)

    class Meta:
        model = PlantCount
        fields = "__all__"


class AssociatedSpeciesSerializer(ObservationGroupSerializer):

    class Meta:
        model = AssociatedSpecies
        fields = "__all__"
class AnimalObservationSerializer(ObservationGroupSerializer):
    """Serializer for AnimalObservation, with special handling for the
    M2M ``secondary_signs`` field (accepted as slug codes in several
    query-string shapes).
    """

    class Meta:
        model = AnimalObservation
        fields = "__all__"

    def to_internal_value(self, data):
        """Override to_internal_value and check the value of the optional `secondary_signs` key.

        This key value might be present in a couple of different ways, which all need to be handled:

        - /api/path/?secondary_signs=eggs
        - /api/path/?secondary_signs=eggs,fur
        - /api/path/?secondary_signs=eggs&secondary_signs=fur

        We also need to convert comma-separated strings into a list of PKs for the equivalent
        SecondarySign objects, for the purposes of setting M2M relationships.

        References:

        - https://www.django-rest-framework.org/api-guide/serializers/#read-write-baseserializer-classes
        - https://stackoverflow.com/questions/31281938/overriding-django-rest-framework-serializer-is-valid-method
        """
        data_update = dict(data)
        if 'secondary_signs' in data_update:
            # I.e. ['eggs,fur'] instead of ['eggs', 'fur']
            if len(data_update['secondary_signs']) == 1:
                data_update['secondary_signs'] = data_update[
                    'secondary_signs'][0].split(',')
            # Change secondary_signs from a comma-separated list of strings
            # into a list of PKs.
            data_update['secondary_signs'] = [
                SecondarySigns.objects.get(
                    code=i).pk for i in data_update['secondary_signs']]
            return super(AnimalObservationSerializer, self).to_internal_value(data_update)
        return super(AnimalObservationSerializer, self).to_internal_value(data)

    def create(self, validated_data):
        """Create new object, resolve AreaEncounter from source and source_id.
        """
        validated_data["encounter"] = AreaEncounter.objects.get(
            source=int(self.initial_data["source"]),
            source_id=str(self.initial_data["source_id"]))
        # Pop the secondary_signs list out of validated data so that we can use set() after creating the new object
        # because we can't make the M2M link before the object exists.
        # At this point, it should be a list of PKs.
        secondary_signs = validated_data.pop(
            'secondary_signs') if 'secondary_signs' in validated_data else []
        obj = self.Meta.model.objects.create(**validated_data)
        if secondary_signs:
            obj.secondary_signs.add(*secondary_signs)
        return obj
class PhysicalSampleSerializer(ObservationGroupSerializer):
    """Serializer for PhysicalSample; lookup relations are referenced by
    their ``code`` slug and are optional/nullable.
    """

    sample_type = SlugRelatedField(
        queryset=SampleType.objects.all(), slug_field="code", required=False, allow_null=True)
    sample_destination = SlugRelatedField(
        queryset=SampleDestination.objects.all(), slug_field="code", required=False, allow_null=True)
    permit_type = SlugRelatedField(
        queryset=PermitType.objects.all(), slug_field='code', required=False, allow_null=True)

    class Meta:
        model = PhysicalSample
        fields = "__all__"
class ObservationGroupPolymorphicSerializer(PolymorphicSerializer):
    """Polymorphic seralizer for ObservationGroup.

    Dispatches to the concrete subclass serializer based on the
    ``obstype`` field in the payload.

    https://github.com/apirobot/django-rest-polymorphic
    https://django-polymorphic.readthedocs.io/en/stable/third-party.html#django-rest-framework-support
    """

    model_serializer_mapping = {
        ObservationGroup: ObservationGroupSerializer,
        FireHistory: FireHistorySerializer,
        FileAttachment: FileAttachmentSerializer,
        PlantCount: PlantCountSerializer,
        AssociatedSpecies: AssociatedSpeciesSerializer,
        VegetationClassification: VegetationClassificationSerializer,
        HabitatCondition: HabitatConditionSerializer,
        AreaAssessment: AreaAssessmentSerializer,
        HabitatComposition: HabitatCompositionSerializer,
        PhysicalSample: PhysicalSampleSerializer,
        AnimalObservation: AnimalObservationSerializer
    }
    resource_type_field_name = 'obstype'

    def to_internal_value(self, data):
        """Gate checks for data sanity."""
        # NOTE(review): this override currently adds nothing over the
        # parent implementation -- it only delegates to super(); presumably
        # it exists as a hook for future gate checks. Confirm or remove.
        return super(ObservationGroupPolymorphicSerializer, self).to_internal_value(data)
| parksandwildlife/wastd | occurrence/serializers.py | Python | mit | 14,367 |
#!/usr/bin/python
# filename: basespace.py
#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
from datetime import datetime
import json
import os
import platform
from shutil import copyfile
import sys
import time
from BaseSpacePy.api.BaseSpaceAPI import BaseSpaceAPI
from BaseSpacePy.model.QueryParameters import QueryParameters as qp
from abutils.utils import log
from abutils.utils.pipeline import make_dir
from abutils.utils.progbar import progress_bar
if sys.version_info[0] > 2:
raw_input = input
logger = log.get_logger('basespace')
class BaseSpace(object):
    """Thin wrapper around the BaseSpace API for resolving a project (by id,
    by name, or interactively) and downloading all of its sequence files.

    Credentials are read from ``~/.abstar/basespace_credentials`` (JSON with
    client_id, client_secret, access_token, version and api_server keys).
    """

    def __init__(self, project_id=None, project_name=None, get_all_projects=False):
        super(BaseSpace, self).__init__()
        # BaseSpace credentials
        creds = self._get_credentials()
        self.client_key = creds['client_id']
        self.client_secret = creds['client_secret']
        self.access_token = creds['access_token']
        self.version = creds['version']
        self.api_server = creds['api_server']
        self.api = BaseSpaceAPI(self.client_key, self.client_secret, self.api_server,
                                self.version, AccessToken=self.access_token)
        self.params = qp(pars={'Limit': 1024, 'SortDir': 'Desc'})
        if project_id is not None:
            self.project_id = project_id
            self.project_name = None
        elif project_name is not None:
            self.project_name = project_name
            self.project_id = self._get_project_id_from_name(project_name)
        else:
            # Resolution is deferred until download() prompts the user.
            self.project_id = None
            self.project_name = None
        self._runs = None

    @property
    def runs(self):
        # Lazily fetched, cached list of runs accessible to the user.
        if self._runs is None:
            self._runs = self.api.getAccessibleRunsByUser(queryPars=self.params)
        return self._runs

    def _get_credentials(self):
        """Load and return the JSON credentials dict.

        FIX: the file handle is now closed deterministically (it was
        previously leaked).
        """
        # BaseSpace credentials file should be in JSON format
        cred_file = os.path.expanduser('~/.abstar/basespace_credentials')
        with open(cred_file, 'r') as cred_handle:
            return json.load(cred_handle)

    def _get_project_id_from_name(self, project_name=None):
        """Return the ID of the accessible project named ``project_name``
        (falling back to ``self.project_name``); exit the program if no
        project matches.

        BUGFIX: this method was defined without a ``project_name``
        parameter but called with one from __init__, which raised
        TypeError; it now accepts the name explicitly.
        """
        if project_name is None:
            project_name = self.project_name
        projects = self.api.getProjectByUser(queryPars=self.params)
        for project in projects:
            name = project.Name.encode('ascii', 'ignore')
            if sys.version_info[0] > 2:
                name = name.decode('utf-8')
            if name == project_name:
                return project.Id
        # BUGFIX: the old message used the loop variable ``name``, which is
        # undefined when there are no projects and otherwise holds the last
        # project's name, not the name that was searched for.
        print('No projects matched the given project name ({})'.format(project_name))
        sys.exit(1)

    def _user_selected_project_id(self):
        """Interactively prompt the user to pick a project, 25 at a time;
        return ``(project_id, project_name)``.
        """
        projects = self.api.getProjectByUser(queryPars=self.params)
        self.print_basespace_project()
        offset = 0
        while True:
            for i, project in enumerate(projects[offset * 25:(offset * 25) + 25]):
                project_name = project.Name.encode('ascii', 'ignore')
                if sys.version_info[0] > 2:
                    project_name = project_name.decode('utf-8')
                print('[ {} ] {}'.format(i + (offset * 25), project_name))
            print('')
            project_index = raw_input("Select the project number (or 'next' to see more projects): ")
            try:
                project_index = int(project_index)
            except ValueError:
                # BUGFIX: any non-numeric answer (e.g. 'next') pages
                # forward; the previous except branch immediately indexed
                # the project list with the raw string and crashed.
                offset += 1
                continue
            selected_id = projects[project_index].Id
            selected_name = projects[project_index].Name.encode('ascii', 'ignore')
            if sys.version_info[0] > 2:
                selected_name = selected_name.decode('utf-8')
            return selected_id, selected_name

    def _get_projects(self, start=0):
        """Print the first 25 accessible projects and return the full list.

        NOTE(review): the ``start`` parameter is currently unused -- kept
        for interface compatibility.
        """
        projects = self.api.getProjectByUser(queryPars=self.params)
        self.print_basespace_project()
        for i, project in enumerate(projects[:25]):
            project_name = project.Name.encode('ascii', 'ignore')
            if sys.version_info[0] > 2:
                project_name = project_name.decode('utf-8')
            print('[ {} ] {}'.format(i, project_name))
        print('')
        return projects

    def _get_samples(self, project_id):
        """Return all samples for ``project_id``, paging 1024 at a time."""
        samples = []
        offset = 0
        while True:
            query_params = qp(pars={'Limit': 1024, 'SortDir': 'Asc', 'Offset': offset * 1024})
            s = self.api.getSamplesByProject(project_id, queryPars=query_params)
            if not s:
                break
            samples.extend(s)
            offset += 1
        return samples

    def _get_files(self):
        """Return every file belonging to every sample of the selected project."""
        files = []
        samples = self._get_samples(self.project_id)
        for sample in samples:
            files.extend(self.api.getFilesBySample(sample.Id, queryPars=self.params))
        return files

    def download(self, direc):
        """Download all files of the selected project into ``direc``;
        prompt for a project first if none was selected. Returns the
        number of files downloaded.
        """
        if all([self.project_id is None, self.project_name is None]):
            self.project_id, self.project_name = self._user_selected_project_id()
        files = self._get_files()
        self.print_download_info(files)
        start = time.time()
        for i, f in enumerate(files):
            logger.info('[ {} ] {}'.format(i, str(f)))
            f.downloadFile(self.api, direc)
        end = time.time()
        self.print_completed_download_info(start, end)
        return len(files)

    def print_basespace_project(self):
        """Print the interactive project-selection banner."""
        print('')
        print('')
        print('========================================')
        print('BaseSpace Project Selection')
        print('========================================')
        print('')

    def print_download_info(self, files):
        """Log the download banner and the number of files found."""
        logger.info('')
        logger.info('')
        logger.info('========================================')
        logger.info('Downloading files from BaseSpace')
        logger.info('========================================')
        logger.info('')
        logger.info('Identified {0} files for download.'.format(len(files)))
        logger.info('')

    def print_completed_download_info(self, start, end):
        """Log the elapsed wall-clock time of a completed download."""
        logger.info('')
        logger.info('Download completed in {0} seconds'.format(end - start))
def parse_args():
    """Parse command-line options for the BaseSpace downloader.

    Returns:
        argparse.Namespace with ``download_directory``, ``project_id``
        and ``project_name`` attributes.
    """
    # BUGFIX: the first positional argument of ArgumentParser is ``prog``
    # (the program name shown in usage), not the help blurb -- pass the
    # blurb as ``description`` so ``--help`` displays it correctly.
    parser = argparse.ArgumentParser(
        description="Downloads sequencing data from BaseSpace, Illumina's cloud storage platform.")
    parser.add_argument('-d', '--download-directory',
                        dest='download_directory',
                        required=True,
                        help="Directory into which BaseSpace data will be downloaded.")
    parser.add_argument('--project-id',
                        default=None,
                        help='ID of the project to be downloaded. Optional.')
    parser.add_argument('--project-name',
                        default=None,
                        help='Name of the project to be downloaded. Optional.')
    args = parser.parse_args()
    return args
def download(download_directory, project_id=None, project_name=None):
    '''
    Downloads sequencing data from BaseSpace (Illumina's cloud storage platform).
    Before accessing BaseSpace through the AbStar API, you need to set up a
    credentials file:
    1. You need a BaseSpace access token. The easiest way to do this is to
    set up a BaseSpace developer account following
    `these instructions <https://support.basespace.illumina.com/knowledgebase/articles/403618-python-run-downloader>`_
    2. Make a BaseSpace credentials file using your developer credentials::
    $ make_basespace_credfile
    and follow the instructions.
    Examples:
    If you know the name of the project you'd like to download::
    from abstar.utils import basespace
    basespace.download('/path/to/download_directory', project_name='MyProject')
    If you know the ID of the project you'd like to download::
    basespace.download('/path/to/download_directory', project_id='ABC123')
    If neither ``project_id`` nor ``project_name`` is provided, a list of your available
    BaseSpace projects will be provided and you can select a project from that list::
    basespace.download('/path/to/download_directory')
    Args:
    download_directory (str): Directory into which the raw sequences files should
    be downloaded. If the directory does not exist, it will be created.
    project_id (str): ID of the project to be downloaded.
    project_name (str): Name of the project to be downloaded.
    Returns:
    int: The number of sequence files downloaded.
    '''
    # Ensure the target directory exists, then delegate to the BaseSpace session.
    make_dir(download_directory)
    session = BaseSpace(project_id, project_name)
    return session.download(download_directory)
def copy_from_basemount(basemount_directory, destination_directory):
    """Copy every ``.fastq.gz`` found under a BaseMount mount into *destination_directory*."""
    make_dir(os.path.abspath(destination_directory))
    # Collect all FASTQ paths first so we can report a total and show progress.
    fastqs = [os.path.join(root, name)
              for root, _, names in os.walk(basemount_directory)
              for name in names
              if name.endswith('.fastq.gz')]
    for msg in ('', '',
                '========================================',
                'Copying files from BaseMount',
                '========================================',
                ''):
        logger.info(msg)
    logger.info('Found {0} FASTQ files.'.format(len(fastqs)))
    logger.info('')
    logger.info('Copying:')
    start = datetime.now()
    progress_bar(0, len(fastqs), start_time=start)
    for done, src in enumerate(fastqs, start=1):
        copyfile(src, os.path.join(destination_directory, os.path.basename(src)))
        progress_bar(done, len(fastqs), start_time=start)
    print('\n')
if __name__ == '__main__':
    # Script entry point: parse CLI options, then run the download.
    cli_args = parse_args()
    download(cli_args.download_directory,
             project_id=cli_args.project_id,
             project_name=cli_args.project_name)
| briney/abstar | abstar/utils/basespace.py | Python | mit | 11,356 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import curses
import cumodoro.config as config
import cumodoro.interface as interface
import cumodoro.globals as globals
from cumodoro.cursest import Refresher
import logging
log = logging.getLogger('cumodoro')
def set_title(msg):
    """Set the terminal window title via the xterm OSC 0 escape sequence."""
    print("\x1B]0;" + msg + "\x07")
def get_title():
    """Ask the terminal to report its title (xterm CSI 23t) and return the raw reply."""
    print("\x1B[23t")
    reply = sys.stdin.read()
    return reply
def save_title():
    """Push the current terminal title onto the xterm title stack (CSI 22t)."""
    sys.stdout.write("\x1B[22t\n")
def restore_title():
    """Pop the previously saved terminal title off the xterm title stack (CSI 23t)."""
    sys.stdout.write("\x1B[23t\n")
def main():
    # Entry point: wire up the global services, then hand the terminal to curses.
    globals.refresher = Refresher()      # background screen-refresh thread
    globals.refresher.start()
    globals.database.create()            # ensure the schema exists
    globals.database.load_tasks()
    # Shorten the post-ESC delay so escape-key handling feels responsive in curses.
    os.environ["ESCDELAY"] = "25"
    save_title()                         # remember the user's terminal title
    set_title("Cumodoro")
    curses.wrapper(interface.main)       # run the UI; wrapper restores the tty on exit
    restore_title()
| gisodal/cumodoro | cumodoro/main.py | Python | mit | 765 |
import scrapy
from congress.items import Article
class Example(scrapy.Spider):
    '''Modeled after scrapy tutorial here:
    https://doc.scrapy.org/en/latest/intro/tutorial.html
    '''
    # spider name should be name of website/source
    name = 'example'

    def start_requests(self):
        # Seed the crawl; add further URLs to this tuple as needed.
        seeds = ('http://quotes.toscrape.com/page/1/',)
        for target in seeds:
            yield scrapy.Request(url=target, callback=self.parse)

    def parse(self, response):
        '''to learn more about xpath selectors:
        https://doc.scrapy.org/en/latest/topics/selectors.html
        '''
        page_language = response.xpath('//html/@lang').extract_first()
        page_url = response.url
        spider_source = 'example'  # spider name
        # Each matched quote is itself a selector; ".//" digs inside it.
        for quote_sel in response.xpath('//div[@class="quote"]'):
            text = quote_sel.xpath('.//span/text()').extract_first()
            # [contains(@class, "author")] is used for illustrative purposes;
            # it matches any <small> whose class includes the word "author"
            # (equivalent to @class="author" on this site).
            by = quote_sel.xpath('.//small[contains(@class, "author")]').extract_first()
            # Build the Article item (see items.py for all available fields)
            # and send it off to the pipeline for validation.
            yield Article(
                # Required fields
                language=page_language,
                url=page_url,
                source=spider_source,
                text_blob=text,
                # Optional field (add anything your site provides)
                authors=by,
            )
| jss367/assemble | congress/congress/spiders/example.py | Python | mit | 1,924 |
default_app_config = 'botManager.apps.BotmanagerConfig'
| stefan2904/activismBot | botManager/__init__.py | Python | mit | 56 |
# -*- encoding: UTF-8 -*-
import struct
from time import localtime
import itertools
import codecs
import gevent
from copy import copy
from copy import deepcopy
import screen
from screen import ForegroundColors, BackgroundColors
from screen import Align
from utility import Dimension
from string import lowercase
# Raw byte sequences the terminal sends for each key. A key may have several
# encodings (different terminal emulators / modes), hence the lists.
return_key = ['\r', '\r\x00']
backspace_key = ['\x7f']
arrow_up_key = ['\x1bOA', '\x1b[A']
arrow_down_key = ['\x1bOB', '\x1b[B']
arrow_right_key = ['\x1bOC', '\x1b[C']
arrow_left_key = ['\x1bOD', '\x1b[D']
tab_key = ['\t']
ctrl_x = ['\x18']
shift_key = ''  # it doesn't send
delete_key = ['\x1b[3~']
# Return codes for screenlet.handleData(): repaint the whole screen vs. no-op.
clr_scr = 1
normal = 0
yes = 'Y'
no = 'N'
def isKey(input, key):
    """Return True if *input* matches any of the byte sequences in *key*.

    *key* is one of the module-level key lists (e.g. ``arrow_up_key``).
    """
    # Membership test replaces the hand-rolled linear scan.
    return input in key
def isSingleByte(data):
    """Return True when *data* is a printable single-byte character (0x20-0x7E)."""
    # Return the comparison directly instead of if/else returning True/False.
    return '\x1f' < data < '\x7f'
def isDoubleByte(data):
    """Return True when *data* falls inside one of the two-byte CJK ranges.

    The three windows are compared lexicographically against the raw
    two-character string (rough CJK / Big5-style lead-byte windows).
    """
    # Return the boolean expression directly instead of if/else True/False.
    return ('\x4e\x00' < data < '\x9f\xff'
            or '\x81\x40' < data < '\xfe\xff'
            or '\xa1\x40' < data < '\xf9\xff')
class screenlet(object):
    """A full-screen UI state (login page, menu, ...) composed of controls.

    ``handleData`` returns ``clr_scr`` when the caller should repaint the
    whole screen, or ``normal`` for no special action.
    """

    def __init__(self, routine, dimension):
        self.routine = routine
        self.dimension = dimension
        self.controls = []
        self.focusIndex = None  # index into self.controls, or None for no focus

    def handleData(self, data):
        # Subclasses override this to react to input; the base class is a no-op.
        return normal

    def add(self, control):
        self.controls.append(control)

    def focusedControl(self):
        # BUGFIX: compare against None explicitly -- index 0 is a valid focus,
        # but it is falsy, so ``!= None``-style truthiness checks misfire.
        return self.controls[self.focusIndex] if self.focusIndex is not None else None

    def setFocusedControl(self, control):
        for i, item in enumerate(self.controls):
            item.focused = False
            if item == control:
                self.focusIndex = i
                item.focused = True
                return

    def update(self, data=''):
        # Route the raw input only to the focused control; others get ''.
        for i, item in enumerate(self.controls):
            item.update(data if i == self.focusIndex else '')
        # BUGFIX: ``if not self.focusIndex`` also fired when the focused
        # control sat at index 0, updating every control a second time.
        if self.focusIndex is None:
            for item in self.controls:
                item.update(data)
        return self.handleData(data)

    def draw(self, force=False):
        self.buffer = ""
        # First blank out controls that were just hidden...
        for item in self.controls:
            if item.redrawn and not item.visible:
                self.buffer += screen.fillBlank(item.dimension)
                item.redrawn = False
        # ...then draw visible controls the current user is allowed to see.
        for item in self.controls:
            if (force or item.redrawn) and item.visible and item.minACL <= self.routine.acl:
                self.buffer += item.draw()
                item.redrawn = False
        focus = ''
        if self.focusedControl():
            focus = screen.move_cursor(self.focusedControl().focusLine, self.focusedControl().focusColn)
        else:
            # Park the terminal cursor in the bottom-right corner when nothing has focus.
            focus = screen.move_cursor(self.routine.height+1, self.routine.width+1)
        return self.buffer + focus
class control(object):
    """Base class for every drawable widget placed on a screenlet."""

    def __init__(self, routine, dimension, **kwargs):
        self.routine = routine
        self.dimension = dimension
        # Where the terminal cursor should rest when this control has focus;
        # defaults to the control's own top-left corner.
        self.focusLine = kwargs.get('focusLine', self.dimension.line)
        self.focusColn = kwargs.get('focusColn', self.dimension.coln)
        self.visible = kwargs.get('visible', True)
        self.minACL = kwargs.get('minACL', 0)  # minimum ACL required to see it
        self.redrawn = False
        self.focused = False
        self.buffer = ""

    def setVisibility(self, flag):
        # A visibility change means the control's area must be repainted.
        if flag != self.visible:
            self.redrawn = True
        self.visible = flag

    def update(self, data=''):
        pass  # set redrawn flag if needed

    def draw(self):
        return self.buffer
class selectionMenu(control):
    """Scrollable menu with a highlighted selection bar.

    ``cursor`` is the highlighted row inside the visible window, ``offset``
    the index of the first visible entry, and ``change`` a per-entry dirty
    flag list driving partial repaints.
    """

    def __init__(self, routine, dimension, menu, **kwargs):
        super(selectionMenu, self).__init__(routine, dimension, **kwargs)
        # menu is a list of tuples with the form (id, title) id = -1 if it is not relevant
        self.menu = menu
        self.change = [1]*len(self.menu)
        self.cursor = 0
        self.offset = 0

    def index(self):
        # Absolute index of the highlighted entry within self.menu.
        return self.cursor + self.offset

    def redraw(self):
        # Force every entry to repaint on the next draw().
        self.change = [1]*len(self.menu)

    def update(self, data=''):
        if len(self.menu) == 0: # don't need to update if the list is empty
            return
        # BUGFIX: removed a leftover debug ``print`` statement that wrote
        # straight to stdout and corrupted the rendered terminal screen.
        ind = self.index()
        self.change[self.index()] = 1
        # self.cursor can only go from 0 to self.height
        if isKey(data, arrow_up_key):
            if self.cursor == 0:
                # Already at the top row: scroll the window up instead.
                if self.offset > 0:
                    self.offset -= 1
                    self.redraw()
            else:
                self.cursor -= 1
            self.redrawn = True
        elif isKey(data, arrow_down_key):
            if self.cursor == self.dimension.height-1:
                # At the bottom row: scroll the window down if entries remain.
                if self.offset + self.dimension.height < len(self.menu)-1:
                    self.offset += 1
                    self.redraw()
            else:
                if self.offset + self.cursor < len(self.menu)-1:
                    self.cursor += 1
            self.redrawn = True
        # Only the rows whose highlight state changed need a repaint.
        if self.index() == ind:
            self.change[self.index()] = 0
        else:
            self.change[self.index()] = 1

    def draw(self):
        ind = 0
        self.buffer = ""
        if any(self.change[self.offset:self.offset+self.dimension.height]):
            for i, item in enumerate(self.menu):
                line = item[1]
                if i >= self.offset and ind < self.dimension.height:
                    if self.change[i]:
                        # Highlight the row under the cursor; draw others plainly.
                        if self.cursor == ind:
                            self.buffer = self.buffer + screen.puts(self.dimension.line + ind, self.dimension.coln, line, self.dimension.width, Align.Left, fg=ForegroundColors.White, bg=BackgroundColors.Yellow)
                        else:
                            self.buffer = self.buffer + screen.puts(self.dimension.line + ind, self.dimension.coln, line, self.dimension.width, Align.Left)
                        self.change[i] = 0
                    ind = ind + 1
        return self.buffer
class scrollableMenu(control):
    # Read-only list of lines scrolled with the up/down arrows
    # (no selection bar, unlike selectionMenu).
    def __init__(self, routine, dimension, menu, **kwargs):
        super(scrollableMenu, self).__init__(routine, dimension, **kwargs)
        self.menu = menu
        self.offset = 0  # index of the first visible line
        self.change = [1]*len(self.menu)  # per-line dirty flags
    def index(self):
        # Expose the scroll position under the same name selectionMenu uses.
        return self.offset
    def update(self, data=''):
        if isKey(data, arrow_up_key):
            if self.offset > 0:
                self.offset -= 1
                # Mark the whole visible window dirty so it repaints shifted.
                self.change = self.change[:self.offset] + [1]*self.dimension.height + self.change[self.offset+self.dimension.height+1:]
                self.redrawn = True
        elif isKey(data, arrow_down_key):
            if self.offset + self.dimension.height < len(self.menu)-1:
                self.offset += 1
                self.change = self.change[:self.offset] + [1]*self.dimension.height + self.change[self.offset+self.dimension.height+1:]
                self.redrawn = True
    def draw(self, force=False):
        # NOTE(review): self.buffer is never cleared here, so repeated draws
        # re-send previously rendered bytes -- confirm whether that is intended.
        ind = 0  # screen row within the control
        for i, line in enumerate(self.menu):
            if i >= self.offset and ind < self.dimension.height:
                if self.change[i]:
                    self.buffer = self.buffer + screen.puts(self.dimension.line + ind, self.dimension.coln, line.strip())
                    self.change[i] = 0
                ind = ind + 1
        return self.buffer
class art(control):
    """Static control that renders the contents of a text-art file, clipped
    to the control's width/height."""

    def __init__(self, routine, dimension, file, **kwargs):
        super(art, self).__init__(routine, dimension, **kwargs)
        # BUGFIX: the original read self.line / self.coln, attributes that do
        # not exist on control -- the position lives on the Dimension object.
        self.buffer = screen.move_cursor(self.dimension.line, self.dimension.coln)
        self.file = file
        # BUGFIX: close the file when done instead of leaking the handle.
        with open(self.file) as fh:
            for i, line in enumerate(fh):
                if i < self.dimension.height:
                    self.buffer = (self.buffer + line[:self.dimension.width]
                                   + screen.move_cursor_down(1)
                                   + screen.move_cursor_left(self.dimension.width))
class label(control):
    """Static text control; the rendered bytes are computed once at creation."""

    def __init__(self, routine, dimension, msg, length=None, align=Align.Left, **kwargs):
        super(label, self).__init__(routine, dimension, **kwargs)
        "display a label"
        self.data = msg
        # Pre-render now: a label never changes after construction.
        self.buffer = screen.puts(self.dimension.line, self.dimension.coln,
                                  self.data, length, align, **kwargs)
class input(control):
    # Single-line text-entry control. Widths are measured in big5 bytes so
    # that double-width CJK characters occupy two screen columns.
    # NOTE: the class name shadows the ``input`` builtin at module scope.
    def __init__(self, routine, dimension, **kwargs):
        super(input, self).__init__(routine, dimension, **kwargs)
        "display an input box"
        self.cursor = 0  # insertion point, counted in characters
        self.length = self.dimension.width
        # concealed: render nothing on screen (used for password entry)
        self.concealed = kwargs['concealed'] if 'concealed' in kwargs else False
        self.fg = ForegroundColors.DarkGray if 'fg' not in kwargs else kwargs['fg']
        if 'bg' not in kwargs:
            if self.concealed:
                self.bg = BackgroundColors.Black
            else:
                self.bg = BackgroundColors.White
        else:
            self.bg = kwargs['bg']
        self.data = kwargs['default'] if 'default' in kwargs else ''
    def insert(self):
        # Byte offset (big5) of the cursor, i.e. its on-screen column offset.
        return len(self.data[:self.cursor].encode('big5'))
    def moveRight(self, n):
        if self.cursor < min(self.length - n, len(self.data)):
            self.cursor += n
    def moveLeft(self, n):
        if self.cursor >= n:
            self.cursor -= n
    def backSpace(self):
        if self.cursor == 0:
            # Nothing to delete: queue a BEL so the terminal bell rings on draw.
            self.data = screen.commands['BEL'] + self.data
        elif self.cursor > 0:
            self.data = self.data[:self.cursor-1] + self.data[self.cursor:]
            self.moveLeft(1)
        self.redrawn = True
    def delete(self):
        # Remove the character under the cursor.
        self.data = self.data[:self.cursor] + self.data[self.cursor+1:]
        self.redrawn = True
    def addInput(self, data):
        # Reject input that would overflow the field (minus 1 for the cursor cell).
        if len(self.data.encode('big5')) + len(data.encode('big5')) <= self.length-1: # minus 1 for the cursor
            self.data = self.data[:self.cursor] + data + self.data[self.cursor:]
            self.moveRight(len(data))
            self.redrawn = True
    def update(self, data=''):
        # Drop any bell byte queued by a previous failed backspace.
        if screen.commands['BEL'] in self.data:
            self.data = self.data.replace(screen.commands['BEL'], '')
        if isKey(data, backspace_key):
            self.backSpace()
        elif isKey(data, delete_key):
            self.delete()
        elif isKey(data, arrow_right_key):
            self.moveRight(1)
        elif isKey(data, arrow_left_key):
            self.moveLeft(1)
        elif data > '\x20' or data == ' ':
            "can't be control bytes, but space is allowed"
            self.addInput(data)
        if not self.concealed:
            # Keep the terminal cursor at the insertion point.
            self.focusColn = self.dimension.coln + self.insert()
    def draw(self):
        if not self.concealed:
            return screen.puts(self.dimension.line, self.dimension.coln, self.data, self.length, Align.Left, fg=self.fg, bg=self.bg)
        else:
            return ''
class multilineinput(control):
    """Fixed-height stack of single-line ``input`` controls editing one
    multi-line text buffer.

    ``texts`` holds (line, cursor-position) tuples for the whole document;
    ``bounds`` is the scroll offset and ``index`` the focused visible row.
    Parts of the line-splitting logic are unfinished -- see the TODO below.
    """

    def __init__(self, routine, dimension, **kwargs):
        super(multilineinput, self).__init__(routine, dimension, **kwargs)
        self.index = 0 # this is the focus index for the inputs
        self.bounds = 0 # it is actually just lower bound
        self.inputs = [input(self.routine, Dimension(self.dimension.line + i, self.dimension.coln, self.dimension.width, 1)) for i in xrange(self.dimension.height)]
        self.texts = [("", 0)]*self.dimension.height # tuple: (text, position of cursor)

    def data(self):
        #return '\n'.join([d.data for d in self.inputs])
        return '\n'.join([text[0] for text in self.texts])

    def update(self, data=''):
        # update self.texts
        if isKey(data, return_key):
            # Split the current line at the cursor; the tail moves to a new line.
            coln = self.texts[self.index + self.bounds][1]
            fragment = self.texts[self.index + self.bounds][0][coln:]
            # BUGFIX: removed a leftover debug ``print coln, fragment`` that
            # wrote straight to stdout and corrupted the rendered screen.
            self.texts[self.index + self.bounds] = (self.texts[self.index + self.bounds][0][:coln], self.texts[self.index + self.bounds][1])
            if self.index == self.dimension.height-1:
                self.bounds += 1
            self.index += 1 if self.index < len(self.inputs)-1 else 0
            # TODO:insert fragment to self.texts at self.index + self.bounds + 1
            # this line doesn't work yet
            self.texts.insert(self.index + self.bounds, (fragment, 0))
            self.redrawn = True
        elif isKey(data, arrow_up_key):
            if self.index == 0 and self.bounds > 0:
                self.bounds -= 1
                self.redrawn = True
            self.index -= 1 if self.index > 0 else 0
        elif isKey(data, arrow_down_key):
            if self.index == self.dimension.height-1 and self.index + self.bounds < len(self.texts)-1:
                self.bounds += 1
                self.redrawn = True
            self.index += 1 if self.index < len(self.inputs)-1 else 0
            '''
            for i in xrange(self.dimension.height + self.bounds - len(self.texts)):
                self.texts.append(("", 0))
            '''
        else:
            # need to be fixed
            if isKey(data, backspace_key) and self.texts[self.bounds + self.index][1] == 0:
                # Backspace at column 0: join with the previous line.
                space = self.texts[self.bounds + self.index - 1] if self.bounds + self.index > 0 else 0
                if self.index == 0 and self.bounds > 0:
                    self.bounds -= 1
                    self.redrawn = True
                self.index -= 1 if self.index > 0 else 0
                self.texts[self.bounds + self.index] = (self.texts[self.bounds + self.index][0], len(self.texts[self.bounds + self.index][0]))
            else:
                # update self.inputs: delegate ordinary editing to the focused row.
                self.inputs[self.index].update(data)
                self.texts[self.bounds + self.index] = (self.inputs[self.index].data, self.inputs[self.index].cursor)
                self.redrawn = True
        '''
        # don't need to, can just update the one affected
        for i in xrange(len(self.inputs)):
            ind = i + self.bounds
            aninput = self.inputs[i]
            self.texts[ind] = (aninput.data, aninput.cursor) if aninput.data != "" else ("", 0)
        '''
        # Re-sync every visible row widget with the document window.
        for i in xrange(len(self.inputs)):
            ind = i + self.bounds
            self.inputs[i].data = self.texts[ind][0]
            self.inputs[i].cursor = self.texts[ind][1] if self.inputs[i].data != "" else 0
            self.inputs[i].update()
        self.focusLine = self.inputs[self.index].focusLine
        self.focusColn = self.inputs[self.index].focusColn + 1

    def draw(self):
        self.buffer = ''
        for text in self.inputs:
            self.buffer += text.draw()
        return self.buffer
class optioninput(control):
    # Prompt with lettered options and a tiny input field, e.g.
    # "send (Y), keep editing (N) [N] _".
    def __init__(self, routine, dimension, header, textandoptions=None, **kwargs):
        super(optioninput, self).__init__(routine, dimension, **kwargs)
        "in default display a option(s) inputbox"
        # NOTE(review): the ``header`` argument is immediately overwritten
        # below and never displayed -- confirm whether that is intended.
        self.text = header
        tmp = []
        # textandoptions maps display text -> option letter, with a special
        # 'default' key naming the pre-selected letter.
        # NOTE(review): passing textandoptions=None (the declared default)
        # would crash on .items() -- callers always supply a dict.
        for text, flag in textandoptions.items():
            if text != 'default':
                tmp.append(text + " (" + flag + ")")
        self.text = ", ".join(tmp)
        self.text += " [" + textandoptions['default'] + "] "
        self.label = label(self.routine, Dimension(self.dimension.line, self.dimension.coln, len(self.text.encode('big5')), 1), self.text)
        self.input = input(self.routine, Dimension(self.dimension.line, self.dimension.coln + len(self.text.encode('big5')) + 1, 2, 1), fg=ForegroundColors.White, bg=BackgroundColors.Black)
    def update(self, data=''):
        # Forward input to the embedded field and mirror its focus/redraw state.
        self.input.update(data)
        self.redrawn = self.input.redrawn
        self.focusLine = self.input.focusLine
        self.focusColn = self.input.focusColn
    def draw(self):
        return self.label.draw() + self.input.draw()
# arguments:
# *routine - the routine class
# *location - the location of the screen
# *title - the middle text on the header
# keywords: focusLine, focusColn, visible, minACL
class header(control):
    # Top status bar spanning the full width of row 0.
    def __init__(self, routine, location, title, **kwargs):
        super(header, self).__init__(routine, Dimension(0, 0, routine.width, 1), **kwargs)
        # just for testing
        #self.location = u"【 主功能表 】"
        #self.title = u"批踢踢py實業坊"
        # Two pre-rendered labels on the same row: the centered title first,
        # then the location text drawn over the left edge.
        self.buffer = label(self.routine, self.dimension, title, self.dimension.width, Align.Center, fg=ForegroundColors.White, bg=BackgroundColors.LightBlue).buffer + label(self.routine, self.dimension, location, fg=ForegroundColors.Yellow, bg=BackgroundColors.LightBlue).buffer
# arguments:
# *routine - the routine class
# keywords: focusLine, focusColn, visible, minACL
class anykey(control):
    """Bottom-row prompt telling the user that any key dismisses the screen."""
    def __init__(self, routine, **kwargs):
        super(anykey, self).__init__(routine, Dimension(routine.height, 0, routine.width, 1), **kwargs)
        self.buffer = label(self.routine, self.dimension, u"按隨意鍵跳出", self.dimension.width, Align.Center, fg=ForegroundColors.Yellow, bg=BackgroundColors.LightBlue).buffer
        # BUGFIX: removed a leftover debug ``print repr(self.buffer)`` that
        # wrote straight to stdout and corrupted the rendered screen.
# arguments:
# *routine - the routine class
# keywords: focusLine, focusColn, visible, minACL
class footer(control):
    # Bottom status bar on the last terminal row; rendered once at creation.
    def __init__(self, routine, **kwargs):
        super(footer, self).__init__(routine, Dimension(routine.height, 0, routine.width, 1), **kwargs)
        # Centered site name overlaid with a left-aligned location marker.
        self.buffer = label(self.routine, self.dimension, u"批踢踢py實業坊", self.dimension.width, Align.Center, fg=ForegroundColors.White, bg=BackgroundColors.LightBlue).buffer + label(self.routine, self.dimension, u"【 主功能表 】", fg=ForegroundColors.Yellow, bg=BackgroundColors.LightBlue).buffer
    # Dead prototype for a richer footer (time / constellation / online count):
    '''
    self.title = ''
    self.time = ''
    self.number = ''
    time = self.routine.getTime()
    constellation = "星座"
    online = "線上" + str(self.routine.getOnlineCount()) + "人"
    id = "我是" + self.routine.id
    self.routine.buffer.format_put(screen.height, 0, time, len(time),
    True, Colors.Cyan, Colors.Blue, Align.Center)
    self.routine.buffer.format_put_on_cursor(constellation, len(constellation),
    True, Colors.White, Colors.Magenta, Align.Center)
    self.routine.buffer.format_put_on_cursor(online, len(online) + 10,
    True, Colors.Cyan, Colors.Blue)
    self.routine.buffer.format_put_on_cursor(id, len(id) + 20,
    True, Colors.Cyan, Colors.Blue)
    '''
# arguments:
# *routine - the routine class
# *dimension - a Dimension object
class login(screenlet):
    # Login flow: ask for an id first; "new" -> registration, "guest" -> straight
    # in; otherwise swap in the password field and validate against the backend.
    def __init__(self, routine, dimension):
        super(login, self).__init__(routine, dimension)
        self.id = ''
        self.pw = ''
        title = u'=Welcome to BBS='
        warning = u'帳號或密碼有錯誤,請重新輸入。'
        lenwarning = len(warning.encode('big5'))
        enterid = u'請輸入帳號,或以 guest 參觀,或以 new 註冊: '
        lenenterid = len(enterid.encode('big5'))
        enterpw = u'請輸入密碼:'
        lenenterpw = len(enterpw.encode('big5'))
        #artwork = art(self.routine, 0, 0, self.width, self.height, '../../res/Welcome_birth')
        self.artwork = label(self.routine, Dimension(self.dimension.height / 2, 0, self.dimension.width, self.dimension.height), title, self.dimension.width, Align.Center, fg=ForegroundColors.White, bg=BackgroundColors.White)
        self.idLabel = label(self.routine, Dimension(self.dimension.height / 2 + 5, 0, lenenterid, 1), enterid)
        self.idInput = input(self.routine, Dimension(self.dimension.height / 2 + 5, lenenterid+1, 20, 1))
        # The password widgets start hidden; they replace the id widgets later.
        self.pwLabel = label(self.routine, Dimension(self.dimension.height / 2 + 5, 0, lenenterpw, 1), enterpw, visible=False)
        self.pwInput = input(self.routine, Dimension(self.dimension.height / 2 + 5, lenenterpw+1, 20, 1), concealed=True, visible=False)
        self.warningLabel = label(self.routine, Dimension(self.dimension.height / 2 + 6, 0, lenwarning, 1), warning, visible=False)
        self.add(self.idLabel)
        self.add(self.idInput)
        self.add(self.pwLabel)
        self.add(self.pwInput)
        self.add(self.artwork)
        self.add(self.warningLabel)
        self.setFocusedControl(self.idInput)
    def registrationProcedures(self):
        # Swap this screen for the registration form.
        self.routine.stack.pop()
        self.routine.stack.push(registration(self.routine, self.dimension))
    def loginProcedures(self):
        # Record the id, open a session, and move on to the welcome screen.
        self.routine.id = self.id
        self.routine.createSession()
        self.routine.stack.pop()
        self.routine.stack.push(welcome(self.routine, self.dimension))
    def handleData(self, data=''):
        if isKey(data, return_key):
            self.id = self.idInput.data
            self.pw = self.pwInput.data
            if len(self.id) > 0 and self.idInput.visible:
                # Stage 1: an account name was submitted.
                if self.id == "new":
                    self.registrationProcedures()
                    return clr_scr
                elif self.id == "guest":
                    self.loginProcedures()
                    return clr_scr
                # Ordinary user: switch to the password prompt.
                self.idLabel.setVisibility(False)
                self.idInput.setVisibility(False)
                self.pwLabel.setVisibility(True)
                self.pwInput.setVisibility(True)
                self.warningLabel.setVisibility(False)
                self.setFocusedControl(self.pwInput)
            elif self.pwInput.visible:
                # Stage 2: validate id and pw
                if self.routine.userLookup(self.id, self.pw):
                    self.loginProcedures()
                    return clr_scr
                else:
                    # Bad credentials: reset to stage 1 and show the warning.
                    self.pwInput.data = ""
                    self.idLabel.setVisibility(True)
                    self.idInput.setVisibility(True)
                    self.pwLabel.setVisibility(False)
                    self.pwInput.setVisibility(False)
                    self.setFocusedControl(self.idInput)
                    self.warningLabel.setVisibility(True)
        return normal
class welcome(screenlet):
    """Post-login splash screen; any keypress advances to the main menu."""

    def __init__(self, routine, dimension):
        super(welcome, self).__init__(routine, dimension)
        self.anykey = anykey(self.routine)
        self.add(self.anykey)

    def handleData(self, data=''):
        if not data:
            return normal
        # Replace this screen with the top-level menu.
        self.routine.stack.pop()
        self.routine.stack.push(topMenu(self.routine, self.dimension))
        return clr_scr
class topMenu(screenlet):
    # Main menu of the BBS. Right arrow / Return enters the highlighted item.
    def __init__(self, routine, dimension):
        super(topMenu, self).__init__(routine, dimension)
        self.items = [(-1, "(A)nnouncement"),
        (-1, "(B)oards"),
        (-1, "(C)hatroom"),
        (-1, "(G)ames"),
        (-1, "(S)ettings"),
        (-1, "(Q)uit")]
        self.selectionMenu = selectionMenu(self.routine, Dimension(4, 20, 45, self.dimension.height - 2), self.items)
        self.header = header(self.routine, u"【主功能表】", u"批踢踢py實業坊")
        self.footer = footer(self.routine)
        self.add(self.header)
        self.add(self.footer)
        self.add(self.selectionMenu)
    def handleData(self, data=''):
        if isKey(data, arrow_left_key):
            pass  # already at the top level -- nowhere to go back to
        elif isKey(data, arrow_right_key) or isKey(data, return_key):
            # Mark the whole menu dirty so it repaints when we come back.
            self.selectionMenu.redraw()
            if self.selectionMenu.index() == 0: # Announcement
                self.routine.stack.push(announce(self.routine, self.dimension))
            elif self.selectionMenu.index() == 1: # Boards
                self.routine.stack.push(boardlist(self.routine, self.dimension, None))
            elif self.selectionMenu.index() == 2: # Chatroom
                self.routine.stack.push(notfinished(self.routine, self.dimension))
            elif self.selectionMenu.index() == 3: # Games
                self.routine.stack.push(notfinished(self.routine, self.dimension))
            elif self.selectionMenu.index() == 4: # Settings
                self.routine.stack.push(notfinished(self.routine, self.dimension))
            else: # Quit
                # double check with input textbox
                # confirm
                self.routine.stack.push(quit(self.routine, self.dimension))
            return clr_scr
        return normal
class quit(screenlet):
    # Farewell screen: any keypress disconnects the session.
    # NOTE: the class name shadows the ``quit`` builtin at module scope.
    def __init__(self, routine, dimension):
        super(quit, self).__init__(routine, dimension)
        self.msg = u'Bye bye'
        self.add(footer(self.routine))
        self.add(label(self.routine, Dimension(self.dimension.height / 2, 0, len(self.msg.encode('big5')), 1), self.msg))
    def handleData(self, data=''):
        if len(data) > 0:
            self.routine.disconnect()
            return clr_scr
        return normal
class notfinished(screenlet):
    """Placeholder screen shown for features that are not implemented yet."""

    def __init__(self, routine, dimension):
        super(notfinished, self).__init__(routine, dimension)
        self.msg = u'對不起,請稍後再試!'
        self.add(footer(self.routine))
        msg_width = len(self.msg.encode('big5'))
        self.add(label(self.routine, Dimension(self.dimension.height / 2, 0, msg_width, 1), self.msg))

    def handleData(self, data=''):
        if not data:
            return normal
        # Any keypress returns to the previous screen.
        self.routine.stack.pop()
        return clr_scr
class createBoard(screenlet):
    """Form for creating a child board under ``boardid`` (title prompt only)."""

    def __init__(self, routine, dimension, boardid):
        super(createBoard, self).__init__(routine, dimension)
        self.boardid = boardid
        self.title = ''
        warning = u'標題格式有錯誤,請重新輸入。'
        enterid = u'請輸入看板標題: '
        self.titlelabel = label(self.routine, Dimension(self.dimension.height / 2 + 5, 0, len(enterid.encode('big5')), 1), enterid)
        self.titleinput = input(self.routine, Dimension(self.dimension.height / 2 + 5, len(enterid.encode('big5'))+1, 20, 1))
        self.warninglabel = label(self.routine, Dimension(self.dimension.height / 2 + 6, 0, len(warning.encode('big5')), 1), warning, visible=False)
        self.add(self.titlelabel)
        self.add(self.titleinput)
        self.add(self.warninglabel)
        self.setFocusedControl(self.titleinput)

    def handleData(self, data=''):
        self.title = self.titleinput.data
        if isKey(data, tab_key):
            pass # implement custom cycle
        if isKey(data, return_key):
            if self.routine.addBoard(self.boardid, self.title):
                # BUGFIX: removed a leftover debug ``print`` on success that
                # wrote straight to stdout and corrupted the rendered screen.
                self.routine.stack.pop() #destroy it
                self.routine.stack.push(boardlist(self.routine, self.dimension, self.boardid))
                return clr_scr
            else:
                # Title rejected by the backend: surface the warning label.
                self.warninglabel.setVisibility(True)
        return normal
class createThread(screenlet):
    """Form for composing a new thread on ``boardid``: a title field, a
    multi-line body, and a Y/N confirmation prompt. Tab cycles the focus
    between the three controls."""

    def __init__(self, routine, dimension, boardid):
        super(createThread, self).__init__(routine, dimension)
        self.boardid = boardid
        self.title = ''
        self.content = ''
        warning = u'標題格式有錯誤,請重新輸入。'
        enterid = u'請輸入文章標題: '
        self.titlelabel = label(self.routine, Dimension(3, 0, len(enterid.encode('big5')), 1), enterid)
        self.titleinput = input(self.routine, Dimension(3, len(enterid.encode('big5'))+1, 20, 1))
        self.warninglabel = label(self.routine, Dimension(4, 0, len(warning.encode('big5')), 1), warning, visible=False)
        # need a control for multi-line input
        self.contentinput = multilineinput(self.routine, Dimension(6, 0, self.dimension.width, 5))
        # need a control for confirmation
        self.confirmationinput = optioninput(self.routine, Dimension(self.dimension.height - 3, 0, self.dimension.width, 1), u'請確認', {u'發送' : 'Y', u'繼續編輯' : 'N', 'default' : 'N'})
        self.add(self.titlelabel)
        self.add(self.titleinput)
        self.add(self.warninglabel)
        self.add(self.contentinput)
        self.add(self.confirmationinput)
        self.setFocusedControl(self.titleinput)

    def handleData(self, data=''):
        self.title = self.titleinput.data
        self.content = self.contentinput.data()
        if isKey(data, tab_key):
            # Tab cycles title -> body -> confirmation -> title.
            if self.titleinput.focused:
                self.setFocusedControl(self.contentinput)
            elif self.contentinput.focused:
                self.setFocusedControl(self.confirmationinput)
            elif self.confirmationinput.focused:
                self.setFocusedControl(self.titleinput)
        if isKey(data, return_key):
            if self.confirmationinput.focused and self.confirmationinput.input.data == 'Y':
                if self.routine.addThread(self.boardid, self.title, self.content):
                    # BUGFIX: removed a leftover debug ``print`` on success that
                    # wrote straight to stdout and corrupted the rendered screen.
                    self.routine.stack.pop() #destroy it
                    self.routine.stack.push(threadlist(self.routine, self.dimension, self.boardid))
                    return clr_scr
                else:
                    # Post rejected: send the user back to editing the body.
                    self.setFocusedControl(self.contentinput)
        return normal
class announce(screenlet):
    # Read-only announcements screen backed by a scrollableMenu.
    def __init__(self, routine, dimension):
        super(announce, self).__init__(routine, dimension)
        self.announcements = scrollableMenu(self.routine, Dimension(4, 0, self.dimension.width, self.dimension.height-2), self.routine.loadAnnouncement())
        self.header = header(self.routine, u"【公佈欄】", u"批踢踢py實業坊")
        self.footer = footer(self.routine)
        self.add(self.announcements)
        self.add(self.header)
        self.add(self.footer)
    def handleData(self, data=''):
        # Left arrow backs out to the previous screen.
        if isKey(data, arrow_left_key):
            self.routine.stack.pop()
            return clr_scr
        return normal
class boardlist(screenlet):
    # Board browser: lists child boards of ``boardid`` (None = top level).
    # Right arrow enters a board's thread list, Ctrl-X creates a new board here.
    def __init__(self, routine, dimension, boardid):
        super(boardlist, self).__init__(routine, dimension)
        self.boardid = boardid
        self.boards = selectionMenu(self.routine, Dimension(3, 0, self.dimension.width, self.dimension.height), self.routine.loadBoards(self.boardid))
        self.administration = label(self.routine, Dimension(2, 0, self.dimension.width, 1),
        "Key C-x creates a board here", self.dimension.width, Align.Left)
        self.header = header(self.routine, u"【看板列表】", u"批踢踢py實業坊")
        self.footer = footer(self.routine)
        self.add(self.administration)
        self.add(self.boards)
        self.add(self.header)
        self.add(self.footer)
    def handleData(self, data=''):
        if isKey(data, ctrl_x):
            self.routine.stack.pop() # destroy it first
            self.routine.stack.push(createBoard(self.routine, self.dimension, self.boardid))
            return clr_scr
        elif isKey(data, arrow_left_key):
            self.routine.stack.pop()
            return clr_scr
        elif isKey(data, arrow_right_key):
            if len(self.boards.menu) > 0:
                # Descend into the selected board's thread list.
                self.boards.redraw()
                self.routine.stack.push(threadlist(self.routine, self.dimension, self.boards.menu[self.boards.index()][0]))
                return clr_scr
        return normal
class threadlist(screenlet):
    # Thread browser for a board: right arrow opens the selected thread,
    # Ctrl-X starts composing a new one.
    def __init__(self, routine, dimension, boardid):
        super(threadlist, self).__init__(routine, dimension)
        self.boardid = boardid
        self.threads = selectionMenu(self.routine, Dimension(3, 0, self.dimension.width, self.dimension.height), self.routine.loadThreads(self.boardid))
        self.administration = label(self.routine, Dimension(2, 0, self.dimension.width, 1), "Key C-x creates a thread", self.dimension.width, Align.Left)
        self.header = header(self.routine, u"【版主:】", u"批踢踢py實業坊")
        self.footer = footer(self.routine)
        self.add(self.administration)
        self.add(self.threads)
        self.add(self.header)
        self.add(self.footer)
    def handleData(self, data=''):
        if isKey(data, ctrl_x):
            self.routine.stack.pop() #destroy it first
            self.routine.stack.push(createThread(self.routine, self.dimension, self.boardid))
            return clr_scr
        elif isKey(data, arrow_left_key):
            self.routine.stack.pop()
            return clr_scr
        elif isKey(data, arrow_right_key):
            if len(self.threads.menu) > 0:
                self.threads.redraw()
                # Open the viewer with the selected thread's row id.
                self.routine.stack.push(threadViewer(self.routine, self.dimension, self.threads.menu[self.threads.index()][0])) # just rowid
                return clr_scr
        return normal
class threadViewer(screenlet):
    """Read-only view of one thread's content, scrollable line by line."""

    def __init__(self, routine, dimension, threadid):
        super(threadViewer, self).__init__(routine, dimension)
        self.threadid = threadid
        body_lines = self.routine.loadThread(self.threadid).split("\n")
        self.content = scrollableMenu(
            self.routine,
            Dimension(2, 0, self.dimension.width, self.dimension.height - 3),
            body_lines)
        self.add(self.content)

    def handleData(self, data=''):
        if isKey(data, arrow_left_key):
            # Back to the thread list.
            self.routine.stack.pop()
            return clr_scr
        return normal
class editor(screenlet):
    """(Unfinished) full-screen viewer/editor for a piece of content."""

    def __init__(self, routine, dimension, content):
        super(editor, self).__init__(routine, dimension)
        self.content = content
        # BUGFIX: footer requires the routine argument; the bare ``footer()``
        # call raised TypeError as soon as an editor was constructed.
        self.footer = footer(self.routine)

    def handleData(self, data=''):
        if isKey(data, arrow_left_key):
            self.routine.stack.pop()
            return clr_scr
        return normal
class registration(screenlet):
    """Placeholder screenlet for the account-registration flow."""
    def __init__(self, routine, dimension):
        super(registration, self).__init__(routine, dimension)
    def handleData(self, data=''):
        """Left arrow backs out; any other input falls through."""
        if not isKey(data, arrow_left_key):
            return normal
        self.routine.stack.pop()
        return clr_scr
def evalString(string):
    """Resolve *string* to the module-level object of that name.

    Raises KeyError when no global of that name exists (identical to a
    direct ``globals()[string]`` lookup).
    """
    module_scope = globals()
    return module_scope[string]
| pencilcheck/pttbbs-py | src/pttbbs/screenlets.py | Python | mit | 33,791 |
from Gaudi.Configuration import *
from Configurables import DaVinci
#from Configurables import AlgTool
from Configurables import GaudiSequencer
# Top-level sequencer that collects the tuple-making algorithms built below.
MySequencer = GaudiSequencer('Sequence')
# Detector/conditions database tags: these must match the MC production
# being processed (2012 magnet-down simulation here).
#For 2012 MC
DaVinci.DDDBtag='dddb-20130929-1'
DaVinci.CondDBtag='sim-20130522-1-vc-md100'
#for 2011 MC
#DaVinci.DDDBtag='dddb-20130929'
#DaVinci.CondDBtag='sim-20130522-vc-mu100'
simulation=True
#################################################################
#Rerun with stripping21 applied
if simulation:
    from Configurables import EventNodeKiller
    from StrippingConf.Configuration import StrippingConf, StrippingStream
    from StrippingSettings.Utils import strippingConfiguration
    from StrippingArchive.Utils import buildStreams
    from StrippingArchive import strippingArchive
    # Remove any pre-existing stripping output so it can be rerun cleanly.
    event_node_killer=EventNodeKiller('StripKiller')
    event_node_killer.Nodes=['Event/AllStreams','/Event/Strip']
    from Configurables import PhysConf
    PhysConf().CaloReProcessing=True
    stripping="stripping21"
    config=strippingConfiguration(stripping)
    archive=strippingArchive(stripping)
    streams=buildStreams(stripping=config,archive=archive)
    # Build a private stream containing only the line of interest.
    MyStream= StrippingStream("MyStream")
    MyLines= ["StrippingB2XEtaB2eta3piKstarLine"]
    for stream in streams:
        for line in stream.lines:
            if line.name() in MyLines:
                MyStream.appendLines( [ line ])
    # Reject events flagged as problematic by reconstruction/stripping.
    from Configurables import ProcStatusCheck
    filterBadEvents=ProcStatusCheck()
    sc=StrippingConf( Streams= [ MyStream ],
                      MaxCandidates = 2000,
                      AcceptBadEvents = False,
                      BadEventSelection = filterBadEvents)
    DaVinci().appendToMainSequence([event_node_killer,sc.sequence()])
##################Creating NTuples#####################################
from Configurables import DecayTreeTuple
from Configurables import TupleToolL0Calo
from DecayTreeTuple.Configuration import *
# Stripping line whose candidates are turned into ntuple entries.
line = 'B2XEtaB2eta3piKstarLine'
# NOTE(review): 'tuple' shadows the Python builtin; kept as-is because the
# rest of this file refers to it by this name.
tuple=DecayTreeTuple()
# Decay descriptor for B0 -> K*(892)0 eta, eta -> pi+ pi- pi0; every
# ^-marked particle gets its own set of branches.
tuple.Decay="[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta -> ^pi- ^pi+ ^(pi0 -> ^gamma ^gamma))]CC"
tuple.Branches={"B0":"[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC"}
tuple.Inputs=['/Event/Phys/{0}/Particles'.format(line)]
tuple.addTool(TupleToolL0Calo())
tuple.TupleToolL0Calo.TriggerClusterLocation="/Event/Trig/L0/Calo"
tuple.TupleToolL0Calo.WhichCalo="HCAL"
# Tools run for every particle in the decay tree.
tuple.ToolList += [
    "TupleToolGeometry"
    , "TupleToolDira"
    , "TupleToolAngles"
#    , "TupleToolL0Calo"
    , "TupleToolPid"
    , "TupleToolKinematic"
    , "TupleToolPropertime"
    , "TupleToolPrimaries"
    , "TupleToolEventInfo"
    , "TupleToolTrackInfo"
    , "TupleToolVtxIsoln"
    , "TupleToolPhotonInfo"
    , "TupleToolMCBackgroundInfo"
    , "TupleToolCaloHypo"
    , "TupleToolTrackIsolation"
    , "TupleToolPi0Info"
    ]
# Attach per-branch configuration to the B0 head of the decay tree.
tuple.addTool(TupleToolDecay,name="B0")
from Configurables import TupleToolDecayTreeFitter
# DecayTreeFitter (DTF) is run many times below with different combinations
# of daughter-mass constraints, the primary-vertex constraint, and
# mass-hypothesis substitutions.  In a Substitutions descriptor the ^ marks
# the particle whose hypothesis is replaced by the value string.
#========================================REFIT WITH DAUGHTERS AND PV CONSTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/ConsAll')
tuple.B0.ConsAll.Verbose=True
tuple.B0.ConsAll.constrainToOriginVertex=True
tuple.B0.ConsAll.daughtersToConstrain = ["K*(892)0","eta"]
#==============================REFIT WITH ETA, PI0 AND PV CONSTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpf')
tuple.B0.PVFitpf.Verbose=True
tuple.B0.PVFitpf.constrainToOriginVertex=True
tuple.B0.PVFitpf.daughtersToConstrain = ["eta","pi0"]
#==============================REFIT WITH ONLY ETA AND PV CONSTRAINED==========================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFit')
tuple.B0.PVFit.Verbose=True
tuple.B0.PVFit.constrainToOriginVertex=True
tuple.B0.PVFit.daughtersToConstrain = ["eta"]
#==============================REFIT WITH ETA AND PV CONSTRAINED - K -> pi swap==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitKforpi')
tuple.B0.PVFitKforpi.Verbose=True
tuple.B0.PVFitKforpi.constrainToOriginVertex=True
tuple.B0.PVFitKforpi.daughtersToConstrain = ["eta"]
tuple.B0.PVFitKforpi.Substitutions={
    "B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "pi+" ,
    "B~0 -> (K*(892)~0 -> ^K- pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "pi-" ,
    }
#==============================REFIT WITH ETA AND PV CONSTRAINED - piminus -> K swap ==============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminusforK')
tuple.B0.PVFitpiminusforK.Verbose=True
tuple.B0.PVFitpiminusforK.constrainToOriginVertex=True
tuple.B0.PVFitpiminusforK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminusforK.Substitutions={
    "B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "K-" ,
    "B~0 -> (K*(892)~0 -> K- ^pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "K+" ,
    }
#==============================REFIT WITH ETA AND PV CONSTRAINED - piminus0 -> Kminus swap =============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminus0forK')
tuple.B0.PVFitpiminus0forK.Verbose=True
tuple.B0.PVFitpiminus0forK.constrainToOriginVertex=True
tuple.B0.PVFitpiminus0forK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminus0forK.Substitutions={
    "B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))" : "K-" ,
    "B~0 -> (K*(892)~0 -> K- pi+) (eta -> ^pi+ pi- (pi0 -> gamma gamma))" : "K+" ,
    }
#==============================REFIT WITH ETA AND PV CONSTRAINED - piplus -> Kplus swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiplusforK')
tuple.B0.PVFitpiplusforK.Verbose=True
tuple.B0.PVFitpiplusforK.constrainToOriginVertex=True
tuple.B0.PVFitpiplusforK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiplusforK.Substitutions={
    "B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))" : "K+" ,
    "B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ ^pi- (pi0 -> gamma gamma))" : "K-" ,
    }
#proton swaps
#==============================REFIT WITH ETA AND PV CONSTRAINED - K -> proton swap==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitKforproton')
tuple.B0.PVFitKforproton.Verbose=True
tuple.B0.PVFitKforproton.constrainToOriginVertex=True
tuple.B0.PVFitKforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitKforproton.Substitutions={
    "B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "p+" ,
    "B~0 -> (K*(892)~0 -> ^K- pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "p~-" ,
    }
#==============================REFIT WITH ETA AND PV CONSTRAINED - piminus -> proton swap ==============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminusforproton')
tuple.B0.PVFitpiminusforproton.Verbose=True
tuple.B0.PVFitpiminusforproton.constrainToOriginVertex=True
tuple.B0.PVFitpiminusforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminusforproton.Substitutions={
    "B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "p~-" ,
    "B~0 -> (K*(892)~0 -> K- ^pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "p+" ,
    }
#==============================REFIT WITH ETA AND PV CONSTRAINED - piminus0 -> proton swap =============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminus0forproton')
tuple.B0.PVFitpiminus0forproton.Verbose=True
tuple.B0.PVFitpiminus0forproton.constrainToOriginVertex=True
tuple.B0.PVFitpiminus0forproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminus0forproton.Substitutions={
    "B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))" : "p~-" ,
    "B~0 -> (K*(892)~0 -> K- pi+) (eta -> ^pi+ pi- (pi0 -> gamma gamma))" : "p+" ,
    }
#==============================REFIT WITH ETA AND PV CONSTRAINED - piplus -> proton swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiplusforproton')
tuple.B0.PVFitpiplusforproton.Verbose=True
tuple.B0.PVFitpiplusforproton.constrainToOriginVertex=True
tuple.B0.PVFitpiplusforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiplusforproton.Substitutions={
    "B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))" : "p+" ,
    "B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ ^pi- (pi0 -> gamma gamma))" : "p~-" ,
    }
#==============================REFIT WITH ETA AND PV CONSTRAINED - first gamma -> pi0 swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitgammaforpi0')
tuple.B0.PVFitgammaforpi0.Verbose=True
tuple.B0.PVFitgammaforpi0.constrainToOriginVertex=True
tuple.B0.PVFitgammaforpi0.daughtersToConstrain = ["eta"]
tuple.B0.PVFitgammaforpi0.Substitutions={
    "B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> ^gamma gamma))" : "pi0" ,
    "B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ pi- (pi0 -> ^gamma gamma))" : "pi0" ,
    }
#==============================REFIT WITH ETA AND PV CONSTRAINED - second gamma -> pi0 swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitgamma0forpi0')
tuple.B0.PVFitgamma0forpi0.Verbose=True
tuple.B0.PVFitgamma0forpi0.constrainToOriginVertex=True
tuple.B0.PVFitgamma0forpi0.daughtersToConstrain = ["eta"]
tuple.B0.PVFitgamma0forpi0.Substitutions={
    "B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma ^gamma))" : "pi0" ,
    "B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ pi- (pi0 -> gamma ^gamma))" : "pi0" ,
    }
#==============================REFIT WITH ONLY K* CONSTRAINED===================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/KStarOnly')
tuple.B0.KStarOnly.Verbose=True
tuple.B0.KStarOnly.constrainToOriginVertex=True
tuple.B0.KStarOnly.daughtersToConstrain = ["K*(892)0"]
#==============================REFIT WITH ONLY PV CONSTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVOnly')
tuple.B0.PVOnly.Verbose=True
tuple.B0.PVOnly.constrainToOriginVertex=True
#========================================REFIT WITH JUST DAUGHTERS CONSTRAINED================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Conskstar_eta')
tuple.B0.Conskstar_eta.Verbose=True
tuple.B0.Conskstar_eta.constrainToOriginVertex=False
tuple.B0.Conskstar_eta.daughtersToConstrain = ["K*(892)0","eta"]
#========================================REFIT WITH NOTHING CONSTRAINED========================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Consnothing')
tuple.B0.Consnothing.Verbose=True
tuple.B0.Consnothing.constrainToOriginVertex=False
#========================================LOKI FUNCTOR VARIABLES========================================
# Give each decay product its own named branch so LoKi variables can be
# attached per particle below.
tuple.addBranches({'Kstar' : '[B0 -> ^(K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC',
                   'eta' : '[B0 -> (K*(892)0 -> K+ pi-) ^(eta -> pi- pi+ (pi0 -> gamma gamma))]CC',
                   'Kplus' : '[B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC',
                   'piminus' : '[B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC',
                   'piplus' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))]CC',
                   'piminus0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))]CC',
                   'gamma' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> ^gamma gamma))]CC',
                   'gamma0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma ^gamma))]CC',
                   'pi0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ ^(pi0 -> gamma gamma))]CC'})
from LoKiPhys.decorators import MAXTREE,MINTREE,ISBASIC,HASTRACK,SUMTREE,PT,ABSID,NINTREE,ETA,TRPCHI2
# One LoKi hybrid tuple tool per branch.
B0_hybrid=tuple.B0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_B0')
Kstar_hybrid=tuple.Kstar.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kstar')
eta_hybrid=tuple.eta.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_eta')
Kplus_hybrid=tuple.Kplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kplus')
piminus_hybrid=tuple.piminus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus')
piplus_hybrid=tuple.piplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piplus')
piminus0_hybrid=tuple.piminus0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus0')
gamma_hybrid=tuple.gamma.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_gamma')
gamma0_hybrid=tuple.gamma0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_gamma0')
pi0_hybrid=tuple.pi0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_pi0')
# Helper functors shared by the B0 branch variables below; the SUMTREE
# selections pick out charged pions and kaons by |PID|.
preamble=[
    'TRACK_MAX_PT= MAXTREE(PT, ISBASIC & HASTRACK, -666)',
    'TRACK_MIN_PT= MINTREE(PT, ISBASIC & HASTRACK)',
    'SUMTRACK_PT= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),PT)',
    'SUM_PCHI2= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),TRPCHI2)'
    ]
B0_hybrid.Preambulo=preamble
B0_hybrid.Variables = {
    'max_pt_track' : 'TRACK_MAX_PT',
    'min_pt_track' : 'TRACK_MIN_PT',
    'sum_track_pt' : 'SUMTRACK_PT',
    'sum_pchi2' : 'SUM_PCHI2',
    'n_highpt_tracks' : 'NINTREE(ISBASIC & HASTRACK & (PT>250.0*MeV))',
    'eta' :'ETA'
    }
Kstar_hybrid.Variables ={
    'branch_mass':'MM',
    'eta': 'ETA'
    }
eta_hybrid.Variables ={
    'branch_mass':'MM',
    'eta': 'ETA'
    }
Kplus_hybrid.Variables ={
    'eta': 'ETA'
    }
piminus_hybrid.Variables ={
    'eta': 'ETA'
    }
piplus_hybrid.Variables ={
    'eta': 'ETA'
    }
piminus0_hybrid.Variables ={
    'eta': 'ETA'
    }
gamma_hybrid.Variables = {
    'eta':'ETA'
    }
gamma0_hybrid.Variables = {
    'eta':'ETA'
    }
pi0_hybrid.Variables = {
    'eta':'ETA'
    }
#==============================MassSubs=====================================
from Configurables import TupleToolSubMass
# Recompute the B0 invariant mass under alternative daughter mass
# hypotheses (single and double substitutions) for misID studies.
tuple.B0.addTool(TupleToolSubMass)
tuple.B0.ToolList += ["TupleToolSubMass"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => K-"]
tuple.B0.TupleToolSubMass.Substitution += ["K+ => pi+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => K+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => p+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => p~-"]
tuple.B0.TupleToolSubMass.Substitution += ["K+ => p+"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => pi0"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => e-"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => e+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => mu-"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => mu+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi0 => eta"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["K+/pi- => pi+/K-"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => pi-/pi+"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => mu+/mu-"]
#==============================TRIGGER DECISIONS==============================-
from Configurables import TupleToolTISTOS
# Store TIS/TOS decisions on the B0 for the trigger lines listed below.
tistos=tuple.B0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS")
tistos.VerboseL0=True
tistos.VerboseHlt1=True
tistos.VerboseHlt2=True
tistos.TriggerList=["L0PhotonDecision",
                    "L0ElectronDecision",
                    "Hlt1TrackPhotonDecision",
                    "Hlt1TrackAllL0Decision",
                    "Hlt1TrackMuonDecision",
                    "Hlt1TrackForwardPassThroughDecision",
                    "Hlt1TrackForwardPassThroughLooseDecision",
                    "Hlt1SingleElectronNoIPDecision",
                    "L0HadronDecision",
                    "L0LocalPi0Decision",
                    "L0GlobalPi0Decision",
                    "L0MuonDecision",
                    "Hlt2Topo2BodyBBDTDecision",
                    "Hlt2Topo3BodyBBDTDecision",
                    "Hlt2Topo4BodyBBDTDecision",
                    "Hlt2RadiativeTopoTrackTOSDecision",
                    "Hlt2RadiativeTopoPhotonL0Decision",
                    "Hlt2TopoRad2BodyBBDTDecision",
                    "Hlt2TopoRad2plus1BodyBBDTDecision",
                    "Hlt2Topo2BodySimpleDecision",
                    "Hlt2Topo3BodySimpleDecision",
                    "Hlt2Topo4BodySimpleDecision"]
from Configurables import TupleToolL0Calo
# Per-track HCAL projections of the L0 calorimeter information.
tuple.Kplus.addTool(TupleToolL0Calo,name="KplusL0Calo")
tuple.Kplus.ToolList += ["TupleToolL0Calo/KplusL0Calo"]
tuple.Kplus.KplusL0Calo.WhichCalo="HCAL"
tuple.piplus.addTool(TupleToolL0Calo,name="piplusL0Calo")
tuple.piplus.ToolList += ["TupleToolL0Calo/piplusL0Calo"]
tuple.piplus.piplusL0Calo.WhichCalo="HCAL"
tuple.piminus.addTool(TupleToolL0Calo,name="piminusL0Calo")
tuple.piminus.ToolList += ["TupleToolL0Calo/piminusL0Calo"]
tuple.piminus.piminusL0Calo.WhichCalo="HCAL"
tuple.piminus0.addTool(TupleToolL0Calo,name="piminus0L0Calo")
tuple.piminus0.ToolList += ["TupleToolL0Calo/piminus0L0Calo"]
tuple.piminus0.piminus0L0Calo.WhichCalo="HCAL"
#================================CONFIGURE TUPLETOOLMCTRUTH========================================================
from Configurables import TupleToolMCTruth
# MC-truth matching for every branch of the reconstructed tuple.
tuple.addTool(TupleToolMCTruth)
tuple.ToolList += ["TupleToolMCTruth"]
tuple.TupleToolMCTruth.ToolList += [
    "MCTupleToolHierarchy",
    "MCTupleToolKinematic",
#    "MCTupleToolDecayType",
#    "MCTupleToolReconstructed",
#    "MCTupleToolPID",
#    "MCTupleToolP2VV",
#    "MCTupleToolAngles",
#    "MCTupleToolInteractions",
#    "MCTupleToolPrimaries",
#    "MCTupleToolPrompt"
    ]
# Event-level tuple: one entry per event with basic event information.
# Fixed: EventTuple was used without being imported anywhere in this file,
# which raises a NameError at configuration time (it is a Configurable,
# like DecayTreeTuple / MCDecayTreeTuple imported explicitly above).
from Configurables import EventTuple
etuple=EventTuple()
etuple.ToolList=["TupleToolEventInfo"]
from Configurables import MCDecayTreeTuple
# Generator-level (MC truth) tuple for the same decay chain.
mctuple=MCDecayTreeTuple("mctuple")
mctuple.ToolList+=["MCTupleToolKinematic","MCTupleToolReconstructed","MCTupleToolHierarchy","MCTupleToolDecayType","MCTupleToolPID"]
mctuple.Decay="[[B0]cc => ^(K*(892)0 => ^K+ ^pi-) ^(eta => ^pi- ^pi+ ^(pi0=> ^gamma ^gamma))]CC"
MySequencer.Members.append(etuple)
MySequencer.Members.append(tuple)
MySequencer.Members.append(mctuple)
# Main DaVinci job configuration.
DaVinci().InputType='DST'
DaVinci().UserAlgorithms+=[MySequencer]
DaVinci().TupleFile="Output.root"
DaVinci().HistogramFile="histos.root"
DaVinci().DataType='2012'
DaVinci().EvtMax=-1      # -1 = process all input events
DaVinci().PrintFreq=1000
DaVinci().MoniSequence=[tuple]
DaVinci().Simulation=simulation
#from GaudiConf import IOHelper
# Use the local input data
#IOHelper().inputFiles([
#   '00038851_00000006_2.AllStreams.dst'
#], clear=True)
| Williams224/davinci-scripts | ksteta3pi/NTupleMaker_MagDown.py | Python | mit | 17,928 |
import os
import logging
import pandas as pd
from dataactvalidator.app import createApp
from dataactvalidator.scripts.loaderUtils import LoaderUtils
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import CGAC, ObjectClass, ProgramActivity
from dataactcore.config import CONFIG_BROKER
# Module-level logger.  NOTE(review): calling basicConfig() at import time
# configures root logging as a side effect of importing this module --
# acceptable for a script-style loader, surprising for a library.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def loadCgac(filename):
    """Load the CGAC (high-level agency names) lookup table from *filename*."""
    model = CGAC
    with createApp().app_context():
        sess = GlobalDB.db().session
        # Full refresh: wipe the current contents, then reload from the csv.
        sess.query(model).delete()
        raw = pd.read_csv(filename, dtype=str)
        # Map csv headers onto model columns and zero-pad the agency code.
        cleaned = LoaderUtils.cleanData(
            raw,
            model,
            {"cgac": "cgac_code", "agency": "agency_name"},
            {"cgac_code": {"pad_to_length": 3}}
        )
        cleaned.drop_duplicates(subset=['cgac_code'], inplace=True)
        table_name = model.__table__.name
        inserted = LoaderUtils.insertDataframe(cleaned, table_name, sess.connection())
        sess.commit()
        logger.info('{} records inserted to {}'.format(inserted, table_name))
def loadObjectClass(filename):
    """Load the object class lookup table from *filename*."""
    model = ObjectClass
    with createApp().app_context():
        sess = GlobalDB.db().session
        # Full refresh: wipe the current contents, then reload from the csv.
        sess.query(model).delete()
        raw = pd.read_csv(filename, dtype=str)
        cleaned = LoaderUtils.cleanData(
            raw,
            model,
            {"max_oc_code": "object_class_code",
             "max_object_class_name": "object_class_name"},
            {}
        )
        cleaned.drop_duplicates(subset=['object_class_code'], inplace=True)
        table_name = model.__table__.name
        inserted = LoaderUtils.insertDataframe(cleaned, table_name, sess.connection())
        sess.commit()
        logger.info('{} records inserted to {}'.format(inserted, table_name))
def loadProgramActivity(filename):
    """Load the program activity lookup table from *filename*."""
    model = ProgramActivity
    # csv header -> model column mapping, and per-column cleanup options.
    field_map = {
        "year": "budget_year",
        "agency_id": "agency_id",
        "alloc_id": "allocation_transfer_id",
        "account": "account_number",
        "pa_code": "program_activity_code",
        "pa_name": "program_activity_name"
    }
    field_options = {
        "program_activity_code": {"pad_to_length": 4},
        "agency_id": {"pad_to_length": 3},
        "allocation_transfer_id": {"pad_to_length": 3, "keep_null": True},
        "account_number": {"pad_to_length": 4}
    }
    with createApp().app_context():
        sess = GlobalDB.db().session
        # Full refresh: wipe the current contents, then reload from the csv.
        sess.query(model).delete()
        raw = pd.read_csv(filename, dtype=str)
        cleaned = LoaderUtils.cleanData(raw, model, field_map, field_options)
        # Only a subset of the program activity columns is loaded, so the
        # frame can contain duplicate rows; drop them before the db insert.
        cleaned.drop_duplicates(inplace=True)
        table_name = model.__table__.name
        inserted = LoaderUtils.insertDataframe(cleaned, table_name, sess.connection())
        sess.commit()
        logger.info('{} records inserted to {}'.format(inserted, table_name))
def loadDomainValues(basePath, localProgramActivity=None):
    """Load all domain value files.

    Parameters
    ----------
    basePath : directory that contains the domain values files.
    localProgramActivity : optional location of the program activity file
        (None = use basePath).
    """
    logger.info('Loading CGAC')
    loadCgac(os.path.join(basePath, "cgac.csv"))
    logger.info('Loading object class')
    loadObjectClass(os.path.join(basePath, "object_class.csv"))
    logger.info('Loading program activity')
    pa_file = localProgramActivity
    if pa_file is None:
        pa_file = os.path.join(basePath, "program_activity.csv")
    loadProgramActivity(pa_file)
if __name__ == '__main__':
    # Script entry point: load all lookup tables from the validator's
    # bundled config directory.
    loadDomainValues(
        os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config")
    )
| chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | dataactvalidator/scripts/loadFile.py | Python | cc0-1.0 | 4,378 |
import OOMP

# Register part 9019: HESH, size 03, color L, STAN, index 01.
newPart = OOMP.oompItem(9019)
for tagName, tagValue in (
    ("oompType", "HESH"),
    ("oompSize", "03"),
    ("oompColor", "L"),
    ("oompDesc", "STAN"),
    ("oompIndex", "01"),
):
    newPart.addTag(tagName, tagValue)
OOMP.parts.append(newPart)
| oomlout/oomlout-OOMP | old/OOMPpart_HESH_03_L_STAN_01.py | Python | cc0-1.0 | 241 |
'''Tests for currency.py'''
import unittest
from decimal import Decimal
from mexbtcapi.currency import Currency, ExchangeRate, Amount, CurrencyPair
class CurrencytTest(unittest.TestCase):
    """Tests for Currency construction, equality and hashing.

    (The class name keeps the original 'CurrencytTest' spelling so that
    test selection by name keeps working.)
    """
    def test_create(self):
        self.assertIsInstance(Currency("c1"), Currency)
    def test_equality(self):
        # Currencies compare by name, not identity.
        first, duplicate, other = Currency("c1"), Currency("c1"), Currency("c2")
        self.assertEqual(first, duplicate)
        self.assertNotEqual(first, other)
    def test_hash(self):
        # Equal currencies must collapse in sets and dicts; a Currency is
        # never interchangeable with its plain-string name.
        first, duplicate, other = Currency("c1"), Currency("c1"), Currency("c2")
        self.assertEqual(set((first, duplicate)), set((first,)))
        self.assertEqual(len({first: 1, duplicate: 1}), 1)
        self.assertEqual(len({first: 1, other: 1}), 2)
        self.assertEqual(len({first: 1, "c1": 1}), 2)
class CurrencyPairTest(unittest.TestCase):
    """Tests for CurrencyPair equality and hashing."""
    @staticmethod
    def create_pair():
        """Build and return the fixture currencies and pairs.

        Fixed: the original unpacked two CurrencyPair objects into four
        names (a guaranteed ValueError if ever called) and returned
        nothing; it now builds the same pairs as test_equality and
        returns them.
        """
        c1, c2, c3 = Currency("c1"), Currency("c2"), Currency("c3")
        p1, p1_ = CurrencyPair(c1, c2), CurrencyPair(c1, c2)
        p2, p3 = CurrencyPair(c1, c3), CurrencyPair(c3, c2)
        return c1, c2, c3, p1, p1_, p2, p3
    def test_equality(self):
        # Pairs are equal iff both currencies match in the same positions.
        c1, c1_, c2, c3 = Currency("c1"), Currency("c1"), Currency("c2"), Currency("c3")
        p1, p1_, p2, p3= CurrencyPair(c1,c2), CurrencyPair(c1_,c2), CurrencyPair(c1,c3), CurrencyPair(c3,c2)
        self.assertEqual(p1,p1_)
        self.assertNotEqual(p1,p2)
        self.assertNotEqual(p1,p3)
    def test_equality_reversed(self):
        # Order matters: (c1, c2) is a different pair from (c2, c1).
        c1, c2 = Currency("c1"), Currency("c2")
        self.assertNotEqual(CurrencyPair(c1,c2), CurrencyPair(c2, c1))
    def test_hash(self):
        # Equal pairs must hash equal so they collapse in sets and dicts.
        c1, c1_, c2 = Currency("c1"), Currency("c1"), Currency("c2")
        p1, p1_, p2 = CurrencyPair(c1,c2), CurrencyPair(c1_,c2), CurrencyPair(c2,c1)
        self.assertEqual(set((p1,p1_)), set((p1,)))
        self.assertEqual(len({p1:1, p1_:1}), 1)
        self.assertEqual(len({p1:1, p2:1}), 2)
    # TODO: test all CurrencyPair methods
class AmountTest(unittest.TestCase):
    """Tests for Amount construction, properties and Decimal arithmetic."""
    @staticmethod
    def create_amount():
        # Shared fixture: two distinct currencies and an amount of 1.0 C1.
        c1, c2 = Currency("C1"), Currency("C2")
        amount = Amount('1.0', c1)
        return c1, c2, amount
    def test_create(self):
        c1, _, amount = AmountTest.create_amount()
        self.assertIsInstance(amount, Amount)
        #multiplying a number by a currency should give an Amount
        self.assertIsInstance(1 * c1, Amount)
        self.assertIsInstance('1' * c1, Amount)
        self.assertIsInstance(1.0 * c1, Amount)
        #initializing with different types should have the same result
        self.assertEqual(amount, 1 * c1)
        self.assertEqual(amount, '1' * c1)
        self.assertEqual(amount, 1.0 * c1)
    def test_properties(self):
        c1, _, amount = AmountTest.create_amount()
        self.assertEqual(amount.value, 1.0)
        self.assertEqual(amount.currency, c1)
    def test_decimal_handling(self):
        # Amounts must carry Decimal semantics: 0.1 as a float is not the
        # same value as '0.1', and summing 10000 copies of 0.0001 is exact
        # for Decimal/Amount but accumulates error for float.
        c1, _, _ = AmountTest.create_amount()
        self.assertNotEqual(0.1 * c1, '0.1' * c1)
        small_decimal = Decimal('0.0001')
        small_float = float(small_decimal)
        small_amount = small_decimal * c1
        total_decimal, total_float, total_amount = Decimal(0), 0.0, 0*c1
        for _ in range(10000):
            total_decimal += small_decimal
            total_float += small_float
            total_amount += small_amount
        self.assertEqual(total_decimal, 1.0)
        self.assertNotEqual(total_float, 1.0)
        self.assertEqual(total_amount, 1*c1)
class ExchangeRateTest(unittest.TestCase):
    """Tests for ExchangeRate construction, conversion and reorientation."""
    @staticmethod
    def create_er():
        # Fixture: 1 unit of C1 is worth 2 units of C2 (rate 2.0, C2 per C1).
        c1, c2 = Currency("C1"), Currency("C2")
        er = ExchangeRate(denominator_currency=c1, numerator_currency=c2, rate='2.0')
        return c1, c2, er
    def test_create(self):
        _, _, er = ExchangeRateTest.create_er()
        self.assertIsInstance(er, ExchangeRate)
    def test_create_by_division(self):
        # Dividing an Amount by a Currency builds an ExchangeRate.
        c1, c2, _ = ExchangeRateTest.create_er()
        er1 = (2.0*c1) / c2 #2*c1 == 1*c2
        self.assertEqual(er1.denominator, c2)
        self.assertEqual(er1.numerator, c1)
    def test_properties(self):
        c1, c2, er = ExchangeRateTest.create_er()
        self.assertEqual(er.denominator, c1)
        self.assertEqual(er.numerator, c2)
        self.assertEqual(er.rate, 2.0)
    def test_convert(self):
        # Conversion works in both directions; the optional second argument
        # selects the target currency explicitly, and a currency outside
        # the pair is rejected.
        c1, c2, er = ExchangeRateTest.create_er()
        self.assertEqual(er.convert(Amount(1, c1)), 2*c2)
        self.assertEqual(er.convert(Amount(1, c2)), 0.5*c1)
        self.assertEqual(er.convert(Amount(1, c1), c1), 1*c1)
        self.assertEqual(er.convert(Amount(1, c1), c2), 2*c2)
        self.assertRaises(CurrencyPair.WrongCurrency, lambda: er.convert(Amount(1, c1), Currency('c')))
    def test_reverse(self):
        # reverse() swaps the currency pair and reciprocates the rate.
        _, _, er1 = ExchangeRateTest.create_er()
        er2 = er1.reverse()
        self.assertEqual(er1.denominator, er2.numerator)
        self.assertEqual(er1.numerator, er2.denominator)
        self.assertEqual(er1.rate, 1 / er2.rate)
    def test_convert_exchangerate(self):
        # Chaining rates: (c1 per c2) composed with (c2 per c3) gives
        # (c1 per c3), whichever way round either input is oriented;
        # rates with no common currency cannot be chained.
        c1, c2, c3, c4 = map(Currency, ('c1', 'c2', 'c3', 'c4'))
        er1 = 2 * c1 / c2
        er2 = 3 * c2 / c3
        er3 = 3 * c3 / c4
        self.assertEqual(er1.convert_exchangerate(er2), 6 * c1 / c3)
        self.assertEqual(er1.reverse().convert_exchangerate(er2), 6 * c1 / c3)
        self.assertEqual(er1.convert_exchangerate(er2.reverse()), 6 * c1 / c3)
        self.assertRaises(CurrencyPair.WrongCurrency, lambda: er1.convert_exchangerate(er3))
#    def test_other_currency(self):
#        c1, c2, er = ExchangeRateTest.create_er()
#        self.assertEqual(er.other_currency(c1), c2)
#        self.assertEqual(er.other_currency(c2), c1)
    def test_inverse(self):
        # inverse() swaps the currency pair while keeping the same numeric
        # rate (contrast with reverse(), which reciprocates it).
        c1, c2, er = ExchangeRateTest.create_er()
        self.assertEqual(er.denominator, c1)
        self.assertEqual(er.numerator, c2)
        self.assertEqual(er.rate, 2.0)
        inverse = er.inverse()
        self.assertEqual(inverse.denominator, c2)
        self.assertEqual(inverse.numerator, c1)
        self.assertEqual(inverse.rate, 2.0)
    def test_per_by(self):
        # per()/by() reorient a rate so that the given currency becomes the
        # denominator / numerator respectively; reorienting to the current
        # orientation is a no-op.
        c1, c2, _ = ExchangeRateTest.create_er()
        er = 2 * c2/c1
        #
        self.assertEqual(er.denominator, c1)
        self.assertEqual(er.numerator, c2)
        self.assertEqual(er.rate, 2.0)
        er = er.by(c1)
        self.assertEqual(er.denominator, c2)
        self.assertEqual(er.numerator, c1)
        self.assertEqual(er.rate, 0.5)
        er = er.by(c1)
        self.assertEqual(er.denominator, c2)
        self.assertEqual(er.numerator, c1)
        self.assertEqual(er.rate, 0.5)
        er = er.per(c1)
        self.assertEqual(er.denominator, c1)
        self.assertEqual(er.numerator, c2)
        self.assertEqual(er.rate, 2.0)
        er = er.per(c1)
        self.assertEqual(er.denominator, c1)
        self.assertEqual(er.numerator, c2)
        self.assertEqual(er.rate, 2.0)
        #
        er = er.by(c2)
        self.assertEqual(er.denominator, c1)
        self.assertEqual(er.numerator, c2)
        self.assertEqual(er.rate, 2.0)
        er = er.by(c2)
        self.assertEqual(er.denominator, c1)
        self.assertEqual(er.numerator, c2)
        self.assertEqual(er.rate, 2.0)
        er = er.per(c2)
        self.assertEqual(er.denominator, c2)
        self.assertEqual(er.numerator, c1)
        self.assertEqual(er.rate, 0.5)
        er = er.per(c2)
        self.assertEqual(er.denominator, c2)
        self.assertEqual(er.numerator, c1)
        self.assertEqual(er.rate, 0.5)
if __name__ == '__main__':
    # Allow running this test module directly: python test_currency.py
    unittest.main()
| goncalopp/mexbtcapi | mexbtcapi/test/test_currency.py | Python | cc0-1.0 | 7,419 |