| repo_name (string, 5–100) | ref (string, 12–67) | path (string, 4–244) | copies (string, 1–8) | content (string, 0–1.05M, nullable) |
|---|---|---|---|---|
mambocab/cassandra-dtest
|
refs/heads/master
|
cassandra-thrift/v11/__init__.py
|
66
|
__all__ = ['ttypes', 'constants', 'Cassandra']
|
xinjiguaike/edx-platform
|
refs/heads/master
|
lms/djangoapps/verify_student/migrations/0004_auto__add_verificationcheckpoint__add_unique_verificationcheckpoint_co.py
|
98
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'VerificationCheckpoint'
db.create_table('verify_student_verificationcheckpoint', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
('checkpoint_name', self.gf('django.db.models.fields.CharField')(max_length=32)),
))
db.send_create_signal('verify_student', ['VerificationCheckpoint'])
# Adding M2M table for field photo_verification on 'VerificationCheckpoint'
m2m_table_name = db.shorten_name('verify_student_verificationcheckpoint_photo_verification')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('verificationcheckpoint', models.ForeignKey(orm['verify_student.verificationcheckpoint'], null=False)),
('softwaresecurephotoverification', models.ForeignKey(orm['verify_student.softwaresecurephotoverification'], null=False))
))
db.create_unique(m2m_table_name, ['verificationcheckpoint_id', 'softwaresecurephotoverification_id'])
# Adding unique constraint on 'VerificationCheckpoint', fields ['course_id', 'checkpoint_name']
db.create_unique('verify_student_verificationcheckpoint', ['course_id', 'checkpoint_name'])
# Adding model 'VerificationStatus'
db.create_table('verify_student_verificationstatus', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('checkpoint', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['verify_student.VerificationCheckpoint'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('status', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('response', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('error', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('verify_student', ['VerificationStatus'])
def backwards(self, orm):
# Removing unique constraint on 'VerificationCheckpoint', fields ['course_id', 'checkpoint_name']
db.delete_unique('verify_student_verificationcheckpoint', ['course_id', 'checkpoint_name'])
# Deleting model 'VerificationCheckpoint'
db.delete_table('verify_student_verificationcheckpoint')
# Removing M2M table for field photo_verification on 'VerificationCheckpoint'
db.delete_table(db.shorten_name('verify_student_verificationcheckpoint_photo_verification'))
# Deleting model 'VerificationStatus'
db.delete_table('verify_student_verificationstatus')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reverification.midcoursereverificationwindow': {
'Meta': {'object_name': 'MidcourseReverificationWindow'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'4f091843-1377-4d3b-af5d-3a4ae3d17943'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'window': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reverification.MidcourseReverificationWindow']", 'null': 'True'})
},
'verify_student.verificationcheckpoint': {
'Meta': {'unique_together': "(('course_id', 'checkpoint_name'),)", 'object_name': 'VerificationCheckpoint'},
'checkpoint_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo_verification': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['verify_student.SoftwareSecurePhotoVerification']", 'symmetrical': 'False'})
},
'verify_student.verificationstatus': {
'Meta': {'object_name': 'VerificationStatus'},
'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['verify_student.VerificationCheckpoint']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['verify_student']
|
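The South migration above creates two tables plus an M2M join table. For orientation, here is a hedged sketch of the Django models it corresponds to, reconstructed from the frozen ORM definitions in the migration itself (field names and options come from the migration; the module layout is illustrative):

from django.contrib.auth.models import User
from django.db import models
from xmodule_django.models import CourseKeyField

class VerificationCheckpoint(models.Model):
    # course_id + checkpoint_name are unique together, per create_unique above
    course_id = CourseKeyField(max_length=255, db_index=True)
    checkpoint_name = models.CharField(max_length=32)
    # backed by the shortened ..._verificationcheckpoint_photo_verification table
    photo_verification = models.ManyToManyField('SoftwareSecurePhotoVerification')

    class Meta:
        unique_together = (('course_id', 'checkpoint_name'),)

class VerificationStatus(models.Model):
    checkpoint = models.ForeignKey(VerificationCheckpoint)
    user = models.ForeignKey(User)
    status = models.CharField(max_length=32, db_index=True)
    timestamp = models.DateTimeField(auto_now_add=True, blank=True)
    response = models.TextField(null=True, blank=True)
    error = models.TextField(null=True, blank=True)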
GitAngel/django
|
refs/heads/master
|
tests/bash_completion/management/commands/__init__.py
|
12133432
| |
cjgibson/hkvguqktacuranriagqecvebgwbjnlakvhaqytvtbyuvxt
|
refs/heads/master
|
48097711/043/e.py
|
12133432
| |
XonqNopp/sphinx-git
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
AKSW/LODStats_WWW
|
refs/heads/master
|
rdfstats/tests/functional/test_rdf_classes.py
|
1
|
from rdfstats.tests import *

class TestRdfClassesController(TestController):
    def test_index(self):
        response = self.app.get(url('rdf_classes'))
        # Test response...
    def test_index_as_xml(self):
        response = self.app.get(url('formatted_rdf_classes', format='xml'))
    def test_create(self):
        response = self.app.post(url('rdf_classes'))
    def test_new(self):
        response = self.app.get(url('new_rdf_class'))
    def test_new_as_xml(self):
        response = self.app.get(url('formatted_new_rdf_class', format='xml'))
    def test_update(self):
        response = self.app.put(url('rdf_class', id=1))
    def test_update_browser_fakeout(self):
        response = self.app.post(url('rdf_class', id=1), params=dict(_method='put'))
    def test_delete(self):
        response = self.app.delete(url('rdf_class', id=1))
    def test_delete_browser_fakeout(self):
        response = self.app.post(url('rdf_class', id=1), params=dict(_method='delete'))
    def test_show(self):
        response = self.app.get(url('rdf_class', id=1))
    def test_show_as_xml(self):
        response = self.app.get(url('formatted_rdf_class', id=1, format='xml'))
    def test_edit(self):
        response = self.app.get(url('edit_rdf_class', id=1))
    def test_edit_as_xml(self):
        response = self.app.get(url('formatted_edit_rdf_class', id=1, format='xml'))
|
mistercrunch/airflow
|
refs/heads/master
|
tests/utils/test_log_handlers.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import logging.config
import os
import unittest
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.models import DAG, DagRun, TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import set_context
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
DEFAULT_DATE = datetime(2016, 1, 1)
TASK_LOGGER = 'airflow.task'
FILE_TASK_HANDLER = 'task'
class TestFileTaskLogHandler(unittest.TestCase):
def clean_up(self):
with create_session() as session:
session.query(DagRun).delete()
session.query(TaskInstance).delete()
def setUp(self):
super().setUp()
logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
logging.root.disabled = False
self.clean_up()
# We use file task handler by default.
def tearDown(self):
self.clean_up()
super().tearDown()
def test_default_task_logging_setup(self):
# file task handler is used by default.
logger = logging.getLogger(TASK_LOGGER)
handlers = logger.handlers
self.assertEqual(len(handlers), 1)
handler = handlers[0]
self.assertEqual(handler.name, FILE_TASK_HANDLER)
def test_file_task_handler(self):
def task_callable(ti, **kwargs):
ti.log.info("test")
dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE)
dag.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING, execution_date=DEFAULT_DATE)
task = PythonOperator(
task_id='task_for_testing_file_log_handler',
dag=dag,
python_callable=task_callable,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
logger = ti.log
ti.log.disabled = False
file_handler = next(
(handler for handler in logger.handlers if handler.name == FILE_TASK_HANDLER), None
)
self.assertIsNotNone(file_handler)
set_context(logger, ti)
self.assertIsNotNone(file_handler.handler)
# We expect set_context generates a file locally.
log_filename = file_handler.handler.baseFilename
self.assertTrue(os.path.isfile(log_filename))
self.assertTrue(log_filename.endswith("1.log"), log_filename)
ti.run(ignore_ti_state=True)
file_handler.flush()
file_handler.close()
self.assertTrue(hasattr(file_handler, 'read'))
# Return value of read must be a tuple of list and list.
logs, metadatas = file_handler.read(ti)
self.assertTrue(isinstance(logs, list))
self.assertTrue(isinstance(metadatas, list))
self.assertEqual(len(logs), 1)
self.assertEqual(len(logs), len(metadatas))
self.assertTrue(isinstance(metadatas[0], dict))
target_re = r'\n\[[^\]]+\] {test_log_handlers.py:\d+} INFO - test\n'
# We should expect our log line from the callable above to appear in
# the logs we read back
self.assertRegex(logs[0][0][-1], target_re, "Logs were " + str(logs))
# Remove the generated tmp log file.
os.remove(log_filename)
def test_file_task_handler_running(self):
def task_callable(ti, **kwargs):
ti.log.info("test")
dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE)
task = PythonOperator(
task_id='task_for_testing_file_log_handler',
dag=dag,
python_callable=task_callable,
)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.try_number = 2
ti.state = State.RUNNING
logger = ti.log
ti.log.disabled = False
file_handler = next(
(handler for handler in logger.handlers if handler.name == FILE_TASK_HANDLER), None
)
self.assertIsNotNone(file_handler)
set_context(logger, ti)
self.assertIsNotNone(file_handler.handler)
# We expect set_context generates a file locally.
log_filename = file_handler.handler.baseFilename
self.assertTrue(os.path.isfile(log_filename))
self.assertTrue(log_filename.endswith("2.log"), log_filename)
logger.info("Test")
# Return value of read must be a tuple of list and list.
logs, metadatas = file_handler.read(ti)
self.assertTrue(isinstance(logs, list))
# Logs for running tasks should show up too.
self.assertTrue(isinstance(metadatas, list))
self.assertEqual(len(logs), 2)
self.assertEqual(len(logs), len(metadatas))
self.assertTrue(isinstance(metadatas[0], dict))
# Remove the generated tmp log file.
os.remove(log_filename)
class TestFilenameRendering(unittest.TestCase):
def setUp(self):
dag = DAG('dag_for_testing_filename_rendering', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='task_for_testing_filename_rendering', dag=dag)
self.ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
def test_python_formatting(self):
expected_filename = (
'dag_for_testing_filename_rendering/task_for_testing_filename_rendering/%s/42.log'
% DEFAULT_DATE.isoformat()
)
fth = FileTaskHandler('', '{dag_id}/{task_id}/{execution_date}/{try_number}.log')
rendered_filename = fth._render_filename(self.ti, 42)
self.assertEqual(expected_filename, rendered_filename)
def test_jinja_rendering(self):
expected_filename = (
'dag_for_testing_filename_rendering/task_for_testing_filename_rendering/%s/42.log'
% DEFAULT_DATE.isoformat()
)
fth = FileTaskHandler('', '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log')
rendered_filename = fth._render_filename(self.ti, 42)
self.assertEqual(expected_filename, rendered_filename)
|
steedos/odoo
|
refs/heads/8.0
|
openerp/addons/base/res/ir_property.py
|
193
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
import time
from openerp import models, api
from openerp.osv import osv, orm, fields
from openerp.tools.misc import attrgetter
# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
TYPE2FIELD = {
'char': 'value_text',
'float': 'value_float',
'boolean': 'value_integer',
'integer': 'value_integer',
'text': 'value_text',
'binary': 'value_binary',
'many2one': 'value_reference',
'date': 'value_datetime',
'datetime': 'value_datetime',
'selection': 'value_text',
}
class ir_property(osv.osv):
_name = 'ir.property'
_columns = {
'name': fields.char('Name', select=1),
'res_id': fields.char('Resource', help="If not set, acts as a default value for new resources", select=1),
'company_id': fields.many2one('res.company', 'Company', select=1),
'fields_id': fields.many2one('ir.model.fields', 'Field', ondelete='cascade', required=True, select=1),
'value_float' : fields.float('Value'),
'value_integer' : fields.integer('Value'),
'value_text' : fields.text('Value'), # will contain (char, text)
'value_binary' : fields.binary('Value'),
'value_reference': fields.char('Value'),
'value_datetime' : fields.datetime('Value'),
'type' : fields.selection([('char', 'Char'),
('float', 'Float'),
('boolean', 'Boolean'),
('integer', 'Integer'),
('text', 'Text'),
('binary', 'Binary'),
('many2one', 'Many2One'),
('date', 'Date'),
('datetime', 'DateTime'),
('selection', 'Selection'),
],
'Type',
required=True,
select=1),
}
_defaults = {
'type': 'many2one',
}
def _update_values(self, cr, uid, ids, values):
value = values.pop('value', None)
if not value:
return values
prop = None
type_ = values.get('type')
if not type_:
if ids:
prop = self.browse(cr, uid, ids[0])
type_ = prop.type
else:
type_ = self._defaults['type']
field = TYPE2FIELD.get(type_)
if not field:
raise osv.except_osv('Error', 'Invalid type')
if field == 'value_reference':
if isinstance(value, orm.BaseModel):
value = '%s,%d' % (value._name, value.id)
elif isinstance(value, (int, long)):
field_id = values.get('fields_id')
if not field_id:
if not prop:
raise ValueError()
field_id = prop.fields_id
else:
field_id = self.pool.get('ir.model.fields').browse(cr, uid, field_id)
value = '%s,%d' % (field_id.relation, value)
values[field] = value
return values
def write(self, cr, uid, ids, values, context=None):
return super(ir_property, self).write(cr, uid, ids, self._update_values(cr, uid, ids, values), context=context)
def create(self, cr, uid, values, context=None):
return super(ir_property, self).create(cr, uid, self._update_values(cr, uid, None, values), context=context)
def get_by_record(self, cr, uid, record, context=None):
if record.type in ('char', 'text', 'selection'):
return record.value_text
elif record.type == 'float':
return record.value_float
elif record.type == 'boolean':
return bool(record.value_integer)
elif record.type == 'integer':
return record.value_integer
elif record.type == 'binary':
return record.value_binary
elif record.type == 'many2one':
if not record.value_reference:
return False
model, resource_id = record.value_reference.split(',')
value = self.pool[model].browse(cr, uid, int(resource_id), context=context)
return value.exists()
elif record.type == 'datetime':
return record.value_datetime
elif record.type == 'date':
if not record.value_datetime:
return False
return time.strftime('%Y-%m-%d', time.strptime(record.value_datetime, '%Y-%m-%d %H:%M:%S'))
return False
def get(self, cr, uid, name, model, res_id=False, context=None):
domain = self._get_domain(cr, uid, name, model, context=context)
if domain is not None:
domain = [('res_id', '=', res_id)] + domain
# Search with company_id ascending to make sure that properties specific to a company are returned first
nid = self.search(cr, uid, domain, limit=1, order='company_id asc', context=context)
if not nid: return False
record = self.browse(cr, uid, nid[0], context=context)
return self.get_by_record(cr, uid, record, context=context)
return False
def _get_domain(self, cr, uid, prop_name, model, context=None):
context = context or {}
cr.execute('select id from ir_model_fields where name=%s and model=%s', (prop_name, model))
res = cr.fetchone()
if not res:
return None
cid = context.get('force_company')
if not cid:
company = self.pool.get('res.company')
cid = company._company_default_get(cr, uid, model, res[0], context=context)
return [('fields_id', '=', res[0]), ('company_id', 'in', [cid, False])]
@api.model
def get_multi(self, name, model, ids):
""" Read the property field `name` for the records of model `model` with
the given `ids`, and return a dictionary mapping `ids` to their
corresponding value.
"""
if not ids:
return {}
domain = self._get_domain(name, model)
if domain is None:
return dict.fromkeys(ids, False)
# retrieve the values for the given ids and the default value, too
refs = {('%s,%s' % (model, id)): id for id in ids}
refs[False] = False
domain += [('res_id', 'in', list(refs))]
# note: order by 'company_id asc' will return non-null values first
props = self.search(domain, order='company_id asc')
result = {}
for prop in props:
# for a given res_id, take the first property only
id = refs.pop(prop.res_id, None)
if id is not None:
result[id] = self.get_by_record(prop)
# set the default value to the ids that are not in result
default_value = result.pop(False, False)
for id in ids:
result.setdefault(id, default_value)
return result
@api.model
def set_multi(self, name, model, values):
""" Assign the property field `name` for the records of model `model`
with `values` (dictionary mapping record ids to their value).
"""
def clean(value):
return value.id if isinstance(value, models.BaseModel) else value
if not values:
return
domain = self._get_domain(name, model)
if domain is None:
raise Exception()
# retrieve the default value for the field
default_value = clean(self.get(name, model))
# retrieve the properties corresponding to the given record ids
self._cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", (name, model))
field_id = self._cr.fetchone()[0]
company_id = self.env.context.get('force_company') or self.env['res.company']._company_default_get(model, field_id)
refs = {('%s,%s' % (model, id)): id for id in values}
props = self.search([
('fields_id', '=', field_id),
('company_id', '=', company_id),
('res_id', 'in', list(refs)),
])
# modify existing properties
for prop in props:
id = refs.pop(prop.res_id)
value = clean(values[id])
if value == default_value:
prop.unlink()
elif value != clean(prop.get_by_record(prop)):
prop.write({'value': value})
# create new properties for records that do not have one yet
for ref, id in refs.iteritems():
value = clean(values[id])
if value != default_value:
self.create({
'fields_id': field_id,
'company_id': company_id,
'res_id': ref,
'name': name,
'value': value,
'type': self.env[model]._fields[name].type,
})
@api.model
def search_multi(self, name, model, operator, value):
""" Return a domain for the records that match the given condition. """
default_matches = False
include_zero = False
field = self.env[model]._fields[name]
if field.type == 'many2one':
comodel = field.comodel_name
def makeref(value):
return value and '%s,%s' % (comodel, value)
if operator == "=":
value = makeref(value)
# searching for False means the property is unset; match records with no property
if value is False:
default_matches = True
elif operator in ('!=', '<=', '<', '>', '>='):
value = makeref(value)
elif operator in ('in', 'not in'):
value = map(makeref, value)
elif operator in ('=like', '=ilike', 'like', 'not like', 'ilike', 'not ilike'):
# most probably inefficient... but correct
target = self.env[comodel]
target_names = target.name_search(value, operator=operator, limit=None)
target_ids = map(itemgetter(0), target_names)
operator, value = 'in', map(makeref, target_ids)
elif field.type in ('integer', 'float'):
# No ir.property record is created when an integer or float field is set
# to 0. To match records whose property equals 0, we therefore search with
# the negated operator and return the complement of the matching ids as
# the domain.
if value == 0 and operator == '=':
operator = '!='
include_zero = True
elif value <= 0 and operator == '>=':
operator = '<'
include_zero = True
elif value <= 0 and operator == '>':
operator = '<='
include_zero = True
elif value >= 0 and operator == '<=':
operator = '>'
include_zero = True
elif value >= 0 and operator == '<':
operator = '>='
include_zero = True
# retrieve the properties that match the condition
domain = self._get_domain(name, model)
if domain is None:
raise Exception()
props = self.search(domain + [(TYPE2FIELD[field.type], operator, value)])
# retrieve the records corresponding to the properties that match
good_ids = []
for prop in props:
if prop.res_id:
res_model, res_id = prop.res_id.split(',')
good_ids.append(int(res_id))
else:
default_matches = True
if include_zero:
return [('id', 'not in', good_ids)]
elif default_matches:
# exclude all records with a property that does not match
all_ids = []
props = self.search(domain + [('res_id', '!=', False)])
for prop in props:
res_model, res_id = prop.res_id.split(',')
all_ids.append(int(res_id))
bad_ids = list(set(all_ids) - set(good_ids))
return [('id', 'not in', bad_ids)]
else:
return [('id', 'in', good_ids)]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
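The get_multi/set_multi docstrings above describe the bulk property API. A hedged usage sketch, assuming an Odoo 8.0 new-API Environment `env`; the field name and record ids are illustrative:

# Read one property field for several res.partner records at once.
Property = env['ir.property']
values = Property.get_multi('property_product_pricelist', 'res.partner', [1, 2, 3])
# `values` maps each partner id to its property value, falling back to the
# company-level default when no record-specific property exists.
pricelist = env['product.pricelist'].browse(1)  # illustrative record
Property.set_multi('property_product_pricelist', 'res.partner', {1: pricelist})
# Per the set_multi implementation above, entries equal to the default cause
# the per-record property to be unlinked instead of stored.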
wandec/grr
|
refs/heads/master
|
client/client_actions/grr_rekall.py
|
2
|
#!/usr/bin/env python
"""Execute a Rekall plugin on the client memory.
This module implements the Rekall enabled client actions.
"""
import os
import pdb
import sys
# Initialize the Rekall plugins, so pylint: disable=unused-import
from rekall import addrspace
from rekall import config
from rekall import constants
from rekall import io_manager
from rekall import obj
from rekall import plugins
from rekall import session
from rekall.plugins.addrspaces import standard
from rekall.plugins.renderers import data_export
from rekall.ui import json_renderer
# pylint: enable=unused-import
import logging
from grr.client import actions
from grr.client import vfs
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
class Error(Exception):
pass
class ProfileNotFoundError(ValueError):
pass
class GRRObjectRenderer(data_export.NativeDataExportObjectRenderer):
"""A default object renderer for the GRRRekallRenderer.
GRR renders all Rekall objects using the Rekall DataExportRenderer. By default
we just delegate everything to the DataExportRenderer.
"""
renders_type = "object"
renderers = ["GRRRekallRenderer"]
def _GetDelegateObjectRenderer(self, item):
return self.FromEncoded(item, "DataExportRenderer")(
renderer=self.renderer)
def EncodeToJsonSafe(self, item, **options):
return self._GetDelegateObjectRenderer(item).EncodeToJsonSafe(
item, **options)
def DecodeFromJsonSafe(self, value, options):
return self._GetDelegateObjectRenderer(value).DecodeFromJsonSafe(
value, options)
def RawHTML(self, item, **options):
raise NotImplementedError("Not producing HTML on the client.")
def Summary(self, item, **options):
return self._GetDelegateObjectRenderer(item).Summary(item, **options)
class GRRRekallRenderer(data_export.DataExportRenderer):
"""This renderer sends all messages to the server encoded as JSON.
Note that this renderer is used to encode and deliver Rekall objects to the
server. Additionally Rekall ObjectRenderer implementations specific to GRR
will be attached to this renderer.
"""
name = None
# Maximum number of statements to queue before sending a reply.
RESPONSE_CHUNK_SIZE = 1000
def __init__(self, rekall_session=None, action=None):
"""Collect Rekall rendering commands and send to the server.
Args:
rekall_session: The Rekall session object.
action: The GRR Client Action which owns this renderer. We will use it to
actually send messages back to the server.
"""
try:
sys.stdout.isatty()
except AttributeError:
sys.stdout.isatty = lambda: False
super(GRRRekallRenderer, self).__init__(session=rekall_session)
# A handle to the client action we can use for sending responses.
self.action = action
# The current plugin we are running.
self.plugin = None
self.context_messages = {}
self.new_context_messages = {}
self.robust_encoder = json_renderer.RobustEncoder()
def start(self, plugin_name=None, kwargs=None):
self.plugin = plugin_name
return super(GRRRekallRenderer, self).start(plugin_name=plugin_name,
kwargs=kwargs)
def write_data_stream(self):
"""Prepares a RekallResponse and send to the server."""
if self.data:
response_msg = rdfvalue.RekallResponse(
json_messages=self.robust_encoder.encode(self.data),
json_context_messages=self.robust_encoder.encode(
self.context_messages.items()),
plugin=self.plugin)
self.context_messages = self.new_context_messages
self.new_context_messages = {}
# Queue the response to the server.
self.action.SendReply(response_msg)
def SendMessage(self, statement):
super(GRRRekallRenderer, self).SendMessage(statement)
if statement[0] in ["s", "t"]:
self.new_context_messages[statement[0]] = statement[1]
if len(self.data) > self.RESPONSE_CHUNK_SIZE:
self.flush()
def open(self, directory=None, filename=None, mode="rb"):
result = tempfiles.CreateGRRTempFile(filename=filename, mode=mode)
# The tempfile library created an OS path; we pass it through the VFS to
# normalize it.
with vfs.VFSOpen(rdfvalue.PathSpec(
path=result.name,
pathtype=rdfvalue.PathSpec.PathType.OS)) as vfs_fd:
dict_pathspec = vfs_fd.pathspec.ToPrimitiveDict()
self.SendMessage(["file", dict_pathspec])
return result
def report_error(self, message):
super(GRRRekallRenderer, self).report_error(message)
if flags.FLAGS.debug:
pdb.post_mortem()
# We need to use the Rekall InteractiveSession here so we get sensible default
# values. This should be fixed in Rekall directly at some point by merging
# InteractiveSession into Session.
class GrrRekallSession(session.InteractiveSession):
"""A GRR Specific Rekall session."""
def __init__(self, fhandle=None, action=None, **session_args):
super(GrrRekallSession, self).__init__(**session_args)
self.action = action
# Apply default configuration options to the session state.
with self.state:
for name, options in config.OPTIONS.args.iteritems():
# We don't want to override configuration options passed via
# **session_args.
if name not in self.state:
self.state.Set(name, options.get("default"))
# Ensure the action's Progress() method is called when Rekall reports
# progress.
self.progress.Register(id(self), lambda *_, **__: self.action.Progress())
def LoadProfile(self, name):
"""Wraps the Rekall profile's LoadProfile to fetch profiles from GRR."""
# If the user specified a special profile path we use their choice.
profile = super(GrrRekallSession, self).LoadProfile(name)
if profile:
return profile
# Can't load the profile; we need to ask the server for it.
logging.debug("Asking server for profile %s", name)
self.action.SendReply(
rdfvalue.RekallResponse(
missing_profile=name,
repository_version=constants.PROFILE_REPOSITORY_VERSION,
))
# Wait for the server to wake us up. When we wake up the server should
# have sent the profile over by calling the WriteRekallProfile.
self.action.Suspend()
# Now the server should have sent the data already. We try to load the
# profile one more time.
return super(GrrRekallSession, self).LoadProfile(
name, use_cache=False)
def GetRenderer(self):
# We will use this renderer to push results to the server.
return GRRRekallRenderer(rekall_session=self, action=self.action)
class WriteRekallProfile(actions.ActionPlugin):
"""A client action to write a Rekall profile to the local cache."""
in_rdfvalue = rdfvalue.RekallProfile
def Run(self, args):
output_filename = utils.JoinPath(
config_lib.CONFIG["Client.rekall_profile_cache_path"],
args.version, args.name)
try:
os.makedirs(os.path.dirname(output_filename))
except OSError:
pass
with open(output_filename + ".gz", "wb") as fd:
fd.write(args.data)
class RekallCachingIOManager(io_manager.DirectoryIOManager):
order = io_manager.DirectoryIOManager.order - 1
def CheckInventory(self, name):
path = self._GetAbsolutePathName(name)
result = (os.access(path + ".gz", os.F_OK) or
os.access(path, os.F_OK))
return result
class RekallAction(actions.SuspendableAction):
"""Runs a Rekall command on live memory."""
in_rdfvalue = rdfvalue.RekallRequest
out_rdfvalue = rdfvalue.RekallResponse
def Iterate(self):
"""Run a Rekall plugin and return the result."""
# Create a session and run all the plugins with it.
session_args = self.request.session.ToDict()
if "filename" not in session_args and self.request.device:
session_args["filename"] = self.request.device.path
# If the user has not specified a special profile path, we use the local
# cache directory.
if "profile_path" not in session_args:
session_args["profile_path"] = [config_lib.CONFIG[
"Client.rekall_profile_cache_path"]]
rekal_session = GrrRekallSession(action=self, **session_args)
for plugin_request in self.request.plugins:
# Get the keyword args to this plugin.
plugin_args = plugin_request.args.ToDict()
try:
rekal_session.RunPlugin(plugin_request.plugin, **plugin_args)
except Exception as e: # pylint: disable=broad-except
# The exception has already been logged at this point in the renderer.
logging.info(str(e))
|
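GRRRekallRenderer above batches rendered statements and flushes once more than RESPONSE_CHUNK_SIZE of them are queued. A generic sketch of that chunk-and-flush pattern, with illustrative names that are not part of GRR's API:

class ChunkedSender(object):
    """Buffers statements and ships them in bounded batches."""
    CHUNK_SIZE = 1000

    def __init__(self, send_fn):
        self.send_fn = send_fn  # callback that delivers one batch
        self.buffer = []

    def send_message(self, statement):
        self.buffer.append(statement)
        if len(self.buffer) > self.CHUNK_SIZE:
            self.flush()

    def flush(self):
        # Ship whatever is queued and reset the buffer.
        if self.buffer:
            self.send_fn(self.buffer)
            self.buffer = []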
vadimtk/chrome4sdp
|
refs/heads/master
|
chrome/common/extensions/docs/server2/appengine_url_fetcher.py
|
78
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import logging
import posixpath
import time
from appengine_wrappers import urlfetch
from environment import GetAppVersion
from future import Future
_MAX_RETRIES = 5
_RETRY_DELAY_SECONDS = 30
def _MakeHeaders(username, password, access_token):
headers = {
'User-Agent': 'Chromium docserver %s' % GetAppVersion(),
'Cache-Control': 'max-age=0',
}
if username is not None and password is not None:
headers['Authorization'] = 'Basic %s' % base64.b64encode(
'%s:%s' % (username, password))
if access_token is not None:
headers['Authorization'] = 'OAuth %s' % access_token
return headers
class AppEngineUrlFetcher(object):
"""A wrapper around the App Engine urlfetch module that allows for easy
async fetches.
"""
def __init__(self, base_path=None):
assert base_path is None or not base_path.endswith('/'), base_path
self._base_path = base_path
self._retries_left = _MAX_RETRIES
def Fetch(self, url, username=None, password=None, access_token=None):
"""Fetches a file synchronously.
"""
return urlfetch.fetch(self._FromBasePath(url),
deadline=20,
headers=_MakeHeaders(username,
password,
access_token))
def FetchAsync(self, url, username=None, password=None, access_token=None):
"""Fetches a file asynchronously, and returns a Future with the result.
"""
def process_result(result):
if result.status_code == 429:
if self._retries_left == 0:
logging.error('Still throttled. Giving up.')
return result
self._retries_left -= 1
logging.info('Throttled. Trying again in %s seconds.' %
_RETRY_DELAY_SECONDS)
time.sleep(_RETRY_DELAY_SECONDS)
return self.FetchAsync(url, username, password, access_token).Get()
return result
rpc = urlfetch.create_rpc(deadline=20)
urlfetch.make_fetch_call(rpc,
self._FromBasePath(url),
headers=_MakeHeaders(username,
password,
access_token))
return Future(callback=lambda: process_result(rpc.get_result()))
def _FromBasePath(self, url):
assert not url.startswith('/'), url
if self._base_path is not None:
url = posixpath.join(self._base_path, url) if url else self._base_path
return url
|
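FetchAsync above returns a Future whose callback applies the HTTP 429 retry loop in process_result. A hypothetical usage sketch; the base path, URL, and token are illustrative:

fetcher = AppEngineUrlFetcher('https://chromium.googlesource.com')
future = fetcher.FetchAsync('chromium/src/+/master/README.md', access_token='TOKEN')
result = future.Get()  # blocks; retries up to _MAX_RETRIES times when throttled
if result.status_code == 200:
    print(result.content)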
gsehub/edx-platform
|
refs/heads/gsehub-release
|
openedx/core/djangoapps/site_configuration/admin.py
|
24
|
"""
Django admin page for Site Configuration models
"""
from django.contrib import admin
from .models import SiteConfiguration, SiteConfigurationHistory
class SiteConfigurationAdmin(admin.ModelAdmin):
"""
Admin interface for the SiteConfiguration object.
"""
list_display = ('site', 'enabled', 'values')
search_fields = ('site__domain', 'values')
class Meta(object):
"""
Meta class for SiteConfiguration admin model
"""
model = SiteConfiguration
admin.site.register(SiteConfiguration, SiteConfigurationAdmin)
class SiteConfigurationHistoryAdmin(admin.ModelAdmin):
"""
Admin interface for the SiteConfigurationHistory object.
"""
list_display = ('site', 'enabled', 'created', 'modified')
search_fields = ('site__domain', 'values', 'created', 'modified')
ordering = ['-created']
class Meta(object):
"""
Meta class for SiteConfigurationHistory admin model
"""
model = SiteConfigurationHistory
def has_add_permission(self, request):
"""Don't allow adds"""
return False
def has_delete_permission(self, request, obj=None):
"""Don't allow deletes"""
return False
admin.site.register(SiteConfigurationHistory, SiteConfigurationHistoryAdmin)
|
david-ragazzi/nupic
|
refs/heads/master
|
tests/swarming/nupic/swarming/experiments/delta/permutations.py
|
8
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'value'
permutations = {
'modelParams': {
'sensorParams': {
'encoders': {
'value': PermuteEncoder(fieldName='value',
encoderClass='ScalarSpaceEncoder',
space=PermuteChoices(['delta', 'absolute']),
clipInput=True,
w=21,
n=PermuteInt(28, 521)),
'_classifierInput': dict(fieldname='value',
type='ScalarSpaceEncoder',
classifierOnly=True,
space=PermuteChoices(['delta', 'absolute']),
clipInput=True,
w=21,
n=PermuteInt(28, 521)),
},
},
'tpParams': {
'minThreshold': PermuteInt(9, 12),
'activationThreshold': PermuteInt(12, 16),
'pamLength': PermuteInt(1, 5),
},
'clParams': {
'alpha': PermuteFloat(0.000100, 0.100000),
},
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*value.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=1000:field=value")
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=10:field=value"
minParticlesPerSwarm = None
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
of the variables in the permutations dict. It should return True for a valid
combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True
|
ahmadio/edx-platform
|
refs/heads/master
|
common/lib/symmath/symmath/test_symmath_check.py
|
166
|
from unittest import TestCase
from .symmath_check import symmath_check
class SymmathCheckTest(TestCase):
def test_symmath_check_integers(self):
number_list = [i for i in range(-100, 100)]
self._symmath_check_numbers(number_list)
def test_symmath_check_floats(self):
number_list = [i + 0.01 for i in range(-100, 100)]
self._symmath_check_numbers(number_list)
def test_symmath_check_same_symbols(self):
expected_str = "x+2*y"
dynamath = '''
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mrow>
<mi>x</mi>
<mo>+</mo>
<mn>2</mn>
<mo>*</mo>
<mi>y</mi>
</mrow>
</mstyle>
</math>'''.strip()
# Expect that the exact same symbolic string is marked correct
result = symmath_check(expected_str, expected_str, dynamath=[dynamath])
self.assertTrue('ok' in result and result['ok'])
def test_symmath_check_equivalent_symbols(self):
expected_str = "x+2*y"
input_str = "x+y+y"
dynamath = '''
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mrow>
<mi>x</mi>
<mo>+</mo>
<mi>y</mi>
<mo>+</mo>
<mi>y</mi>
</mrow>
</mstyle>
</math>'''.strip()
# Expect that equivalent symbolic strings are marked correct
result = symmath_check(expected_str, input_str, dynamath=[dynamath])
self.assertTrue('ok' in result and result['ok'])
def test_symmath_check_different_symbols(self):
expected_str = "0"
input_str = "x+y"
dynamath = '''
<math xmlns="http://www.w3.org/1998/Math/MathML">
<mstyle displaystyle="true">
<mrow>
<mi>x</mi>
<mo>+</mo>
<mi>y</mi>
</mrow>
</mstyle>
</math>'''.strip()
# Expect that an incorrect response is marked incorrect
result = symmath_check(expected_str, input_str, dynamath=[dynamath])
self.assertTrue('ok' in result and not result['ok'])
self.assertFalse('fail' in result['msg'])
def _symmath_check_numbers(self, number_list):
for n in number_list:
# expect = ans, so should say the answer is correct
expect = n
ans = n
result = symmath_check(str(expect), str(ans))
self.assertTrue('ok' in result and result['ok'],
"%f should == %f" % (expect, ans))
# Change expect so that it != ans
expect += 0.1
result = symmath_check(str(expect), str(ans))
self.assertTrue('ok' in result and not result['ok'],
"%f should != %f" % (expect, ans))
|
rwarren14/robotframework
|
refs/heads/master
|
src/robot/libraries/dialogs_py.py
|
23
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from threading import currentThread
from Tkinter import (Tk, Toplevel, Frame, Listbox, Label, Button, Entry,
BOTH, END, LEFT, W)
class _TkDialog(Toplevel):
_left_button = 'OK'
_right_button = 'Cancel'
def __init__(self, message, value=None, **extra):
self._prevent_execution_with_timeouts()
self._parent = self._get_parent()
Toplevel.__init__(self, self._parent)
self._initialize_dialog()
self._create_body(message, value, **extra)
self._create_buttons()
self._result = None
def _prevent_execution_with_timeouts(self):
if 'linux' not in sys.platform \
and currentThread().getName() != 'MainThread':
raise RuntimeError('Dialogs library is not supported with '
'timeouts on Python on this platform.')
def _get_parent(self):
parent = Tk()
parent.withdraw()
return parent
def _initialize_dialog(self):
self.title('Robot Framework')
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self._right_button_clicked)
self.bind("<Escape>", self._right_button_clicked)
self.minsize(250, 80)
self.geometry("+%d+%d" % self._get_center_location())
self._bring_to_front()
def _get_center_location(self):
x = (self.winfo_screenwidth() - self.winfo_reqwidth()) / 2
y = (self.winfo_screenheight() - self.winfo_reqheight()) / 2
return x, y
def _bring_to_front(self):
self.attributes('-topmost', True)
self.attributes('-topmost', False)
def _create_body(self, message, value, **extra):
frame = Frame(self)
Label(frame, text=message, anchor=W, justify=LEFT, wraplength=800).pack(fill=BOTH)
selector = self._create_selector(frame, value, **extra)
if selector:
selector.pack(fill=BOTH)
selector.focus_set()
frame.pack(padx=5, pady=5, expand=1, fill=BOTH)
def _create_selector(self, frame, value):
return None
def _create_buttons(self):
frame = Frame(self)
self._create_button(frame, self._left_button,
self._left_button_clicked)
self._create_button(frame, self._right_button,
self._right_button_clicked)
frame.pack()
def _create_button(self, parent, label, callback):
if label:
button = Button(parent, text=label, width=10, command=callback)
button.pack(side=LEFT, padx=5, pady=5)
def _left_button_clicked(self, event=None):
if self._validate_value():
self._result = self._get_value()
self._close()
def _validate_value(self):
return True
def _get_value(self):
return None
def _close(self):
# self.destroy() is not enough on Linux
self._parent.destroy()
def _right_button_clicked(self, event=None):
self._result = self._get_right_button_value()
self._close()
def _get_right_button_value(self):
return None
def show(self):
self.wait_window(self)
return self._result
class MessageDialog(_TkDialog):
_right_button = None
class InputDialog(_TkDialog):
def __init__(self, message, default='', hidden=False):
_TkDialog.__init__(self, message, default, hidden=hidden)
def _create_selector(self, parent, default, hidden):
self._entry = Entry(parent, show='*' if hidden else '')
self._entry.insert(0, default)
self._entry.select_range(0, END)
return self._entry
def _get_value(self):
return self._entry.get()
class SelectionDialog(_TkDialog):
def __init__(self, message, values):
_TkDialog.__init__(self, message, values)
def _create_selector(self, parent, values):
self._listbox = Listbox(parent)
for item in values:
self._listbox.insert(END, item)
return self._listbox
def _validate_value(self):
return bool(self._listbox.curselection())
def _get_value(self):
return self._listbox.get(self._listbox.curselection())
class PassFailDialog(_TkDialog):
_left_button = 'PASS'
_right_button = 'FAIL'
def _get_value(self):
return True
def _get_right_button_value(self):
return False
|
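The _TkDialog subclasses above are normally driven by Robot Framework's Dialogs library keywords, but show() can also be exercised directly. A hedged usage sketch (requires a display; the messages and values are illustrative):

name = InputDialog('Enter your name:', default='robot').show()  # entered text, or None on Cancel
animal = SelectionDialog('Pick one:', ['cat', 'dog']).show()    # selected item
passed = PassFailDialog('Did the test pass?').show()            # True for PASS, False for FAIL
MessageDialog('All done.').show()                               # OK-only dialog, returns None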
cetic/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_iis_webapplication.py
|
22
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_iis_webapplication
version_added: "2.0"
short_description: Configures IIS web applications
description:
- Creates, removes, and configures IIS web applications.
options:
name:
description:
- Name of the web application.
required: true
site:
description:
- Name of the site on which the application is created.
required: true
state:
description:
- State of the web application.
choices: [ absent, present ]
default: present
physical_path:
description:
- The physical path on the remote host to use for the new application.
- The specified folder must already exist.
application_pool:
description:
- The application pool in which the new site executes.
author:
- Henrik Wallström
'''
EXAMPLES = r'''
- name: Add ACME web application on IIS.
win_iis_webapplication:
name: api
site: acme
state: present
physical_path: C:\apps\acme\api
'''
RETURN = r'''
application_pool:
description: The used/implemented application_pool value
returned: success
type: string
sample: DefaultAppPool
physical_path:
description: The used/implemented physical_path value
returned: success
type: string
sample: C:\apps\acme\api
'''
|
Pythonify/awesome
|
refs/heads/master
|
venv/lib/python2.7/site-packages/wheel/test/test_paths.py
|
565
|
import wheel.paths
from distutils.command.install import SCHEME_KEYS

def test_path():
    d = wheel.paths.get_install_paths('wheel')
    assert len(d) == len(SCHEME_KEYS)
|
sysadminmatmoz/ingadhoc
|
refs/heads/8.0
|
project_issue_closure_restrictions/__openerp__.py
|
4
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Issue Closure Restrictions',
'version': '8.0.1.0.0',
'category': 'Projects & Services',
'sequence': 14,
'summary': '',
'description': """
Project Issue Closure Restrictions
==================================
You can only close Issue if:
- They don't have any active task (we consider active task the ones in stages without option "folded")
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'project_task_issues',
],
'data': [
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
|
Leila20/django
|
refs/heads/master
|
tests/admin_scripts/custom_templates/app_template/api.py
|
581
|
# your API code
|
gabrielfalcao/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/contrib/gis/gdal/field.py
|
264
|
from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"A class that wraps an OGR Field, needs to be instantiated from a Feature object."
#### Python 'magic' routines ####
def __init__(self, feat, index):
"""
Initializes on the feature pointer and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat, index)
if not fld_ptr:
raise OGRException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
def __str__(self):
"Returns the string representation of the Field."
return str(self.value).strip()
#### Field Methods ####
def as_double(self):
"Retrieves the Field's value as a double (float)."
return capi.get_field_as_double(self._feat, self._index)
def as_int(self):
"Retrieves the Field's value as an integer."
return capi.get_field_as_integer(self._feat, self._index)
def as_string(self):
"Retrieves the Field's value as a string."
return capi.get_field_as_string(self._feat, self._index)
def as_datetime(self):
"Retrieves the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(self._feat, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise OGRException('Unable to retrieve date & time information from the field.')
#### Field Properties ####
@property
def name(self):
"Returns the name of this Field."
return capi.get_field_name(self.ptr)
@property
def precision(self):
"Returns the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Returns the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Returns the width of this Field."
return capi.get_field_width(self.ptr)
### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
@property
def value(self):
"Returns an integer contained in this field."
return self.as_int()
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Returns a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field): pass
class OFTWideString(Field): pass
class OFTBinary(Field): pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, OGRException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.maptools.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
class OFTTime(Field):
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
# List fields are also just subclasses
class OFTIntegerList(Field): pass
class OFTRealList(Field): pass
class OFTStringList(Field): pass
class OFTWideStringList(Field): pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = { 0 : OFTInteger,
1 : OFTIntegerList,
2 : OFTReal,
3 : OFTRealList,
4 : OFTString,
5 : OFTStringList,
6 : OFTWideString,
7 : OFTWideStringList,
8 : OFTBinary,
9 : OFTDate,
10 : OFTTime,
11 : OFTDateTime,
}
ROGRFieldTypes = dict([(cls, num) for num, cls in OGRFieldTypes.items()])
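# A minimal usage sketch (illustrative only; assumes a local "cities.shp"
# data source exists). Field instances are obtained by indexing a Feature
# rather than being constructed directly:
#
#   from django.contrib.gis.gdal import DataSource
#   ds = DataSource('cities.shp')
#   layer = ds[0]
#   feat = layer[0]                 # first Feature in the layer
#   fld = feat[0]                   # Field subclass chosen via OGRFieldTypes
#   print(fld.name, fld.type_name, fld.value)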
|
40223227/2015cdbg6w0622-40223227-
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/formatter.py
|
751
|
"""Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None
class NullFormatter:
"""A formatter which does nothing.
If the writer parameter is omitted, a NullWriter instance is created.
No methods of the writer are called by NullFormatter instances.
Implementations should inherit from this class if they implement the
formatter interface but don't need to inherit any implementation.
"""
def __init__(self, writer=None):
if writer is None:
writer = NullWriter()
self.writer = writer
def end_paragraph(self, blankline): pass
def add_line_break(self): pass
def add_hor_rule(self, *args, **kw): pass
def add_label_data(self, format, counter, blankline=None): pass
def add_flowing_data(self, data): pass
def add_literal_data(self, data): pass
def flush_softspace(self): pass
def push_alignment(self, align): pass
def pop_alignment(self): pass
def push_font(self, x): pass
def pop_font(self): pass
def push_margin(self, margin): pass
def pop_margin(self): pass
def set_spacing(self, spacing): pass
def push_style(self, *styles): pass
def pop_style(self, n=1): pass
def assert_line_data(self, flag=1): pass
class AbstractFormatter:
"""The standard formatter.
This implementation has demonstrated wide applicability to many writers,
and may be used directly in most circumstances. It has been used to
implement a full-featured World Wide Web browser.
"""
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
def __init__(self, writer):
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
def end_paragraph(self, blankline):
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
def add_line_break(self):
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
def add_hor_rule(self, *args, **kw):
if not self.hard_break:
self.writer.send_line_break()
self.writer.send_hor_rule(*args, **kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
def add_label_data(self, format, counter, blankline = None):
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if isinstance(format, str):
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
def format_counter(self, format, counter):
label = ''
for c in format:
if c == '1':
label = label + ('%d' % counter)
elif c in 'aA':
if counter > 0:
label = label + self.format_letter(c, counter)
elif c in 'iI':
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
# This makes a strong assumption that lowercase letters
# and uppercase letters form two contiguous blocks, with
# letters in order!
s = chr(ord(case) + x)
label = s + label
return label
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
label = ones[index] + ones[index+1] + label
elif x == 4:
label = ones[index] + fives[index] + label
else:
if x >= 5:
s = fives[index]
x = x-5
else:
s = ''
s = s + ones[index]*x
label = s + label
index = index + 1
if case == 'I':
return label.upper()
return label
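# Worked examples (illustrative) of the counter formats above:
#   format_counter('1.', 3)  -> '3.'
#   format_counter('(a)', 2) -> '(b)'
#   format_counter('I.', 4)  -> 'IV.'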
def add_flowing_data(self, data):
if not data: return
prespace = data[:1].isspace()
postspace = data[-1:].isspace()
data = " ".join(data.split())
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
def push_font(self, font):
size, i, b, tt = font
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = [m for m in self.margin_stack if m]
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = [m for m in self.margin_stack if m]
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
"""Minimal writer interface to use in testing & inheritance.
A writer which only provides the interface definition; no actions are
taken on any methods. This should be the base class for all writers
which do not need to inherit any implementation methods.
"""
def __init__(self): pass
def flush(self): pass
def new_alignment(self, align): pass
def new_font(self, font): pass
def new_margin(self, margin, level): pass
def new_spacing(self, spacing): pass
def new_styles(self, styles): pass
def send_paragraph(self, blankline): pass
def send_line_break(self): pass
def send_hor_rule(self, *args, **kw): pass
def send_label_data(self, data): pass
def send_flowing_data(self, data): pass
def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
"""A writer which can be used in debugging formatters, but not much else.
Each method simply announces itself by printing its name and
arguments on standard output.
"""
def new_alignment(self, align):
print("new_alignment(%r)" % (align,))
def new_font(self, font):
print("new_font(%r)" % (font,))
def new_margin(self, margin, level):
print("new_margin(%r, %d)" % (margin, level))
def new_spacing(self, spacing):
print("new_spacing(%r)" % (spacing,))
def new_styles(self, styles):
print("new_styles(%r)" % (styles,))
def send_paragraph(self, blankline):
print("send_paragraph(%r)" % (blankline,))
def send_line_break(self):
print("send_line_break()")
def send_hor_rule(self, *args, **kw):
print("send_hor_rule()")
def send_label_data(self, data):
print("send_label_data(%r)" % (data,))
def send_flowing_data(self, data):
print("send_flowing_data(%r)" % (data,))
def send_literal_data(self, data):
print("send_literal_data(%r)" % (data,))
class DumbWriter(NullWriter):
"""Simple writer class which writes output on the file object passed in
as the file parameter or, if file is omitted, on standard output. The
output is simply word-wrapped to the number of columns specified by
the maxcol parameter. This class is suitable for reflowing a sequence
of paragraphs.
"""
def __init__(self, file=None, maxcol=72):
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
def reset(self):
self.col = 0
self.atbreak = 0
def send_paragraph(self, blankline):
self.file.write('\n'*blankline)
self.col = 0
self.atbreak = 0
def send_line_break(self):
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_hor_rule(self, *args, **kw):
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_literal_data(self, data):
self.file.write(data)
i = data.rfind('\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = data.expandtabs()
self.col = self.col + len(data)
self.atbreak = 0
def send_flowing_data(self, data):
if not data: return
atbreak = self.atbreak or data[0].isspace()
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in data.split():
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1].isspace()
def test(file = None):
w = DumbWriter()
f = AbstractFormatter(w)
if file is not None:
fp = open(file)
elif sys.argv[1:]:
fp = open(sys.argv[1])
else:
fp = sys.stdin
for line in fp:
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
f.end_paragraph(0)
if __name__ == '__main__':
test()
|
OpenTrons/opentrons-api
|
refs/heads/master
|
api/tests/__init__.py
|
12133432
| |
shawnadelic/shuup
|
refs/heads/master
|
shuup/notify/base.py
|
2
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from abc import abstractmethod
import six
from django.utils.encoding import force_text
from django.utils.text import camel_case_to_spaces
from jinja2.exceptions import TemplateError
from shuup.apps.provides import get_identifier_to_object_map
from shuup.notify.enums import (
ConstantUse, TemplateUse, UNILINGUAL_TEMPLATE_LANGUAGE
)
from shuup.notify.template import render_in_context, Template
from shuup.utils.text import snake_case, space_case
from .typology import Type
class BaseMetaclass(type):
def __new__(cls, name, bases, namespace):
variables = []
bindings = []
for key in list(namespace.keys()):
value = namespace[key]
if isinstance(value, Binding):
dest_list = bindings
elif isinstance(value, Variable):
dest_list = variables
else:
dest_list = None
if dest_list is not None:
dest_list.append((key, value))
del namespace[key]
namespace.setdefault("variables", {}).update(variables)
namespace.setdefault("bindings", {}).update(bindings)
# Figure out some sane defaults
if "identifier" not in namespace:
namespace["identifier"] = snake_case(camel_case_to_spaces(name))
if namespace.get("identifier") and not namespace.get("name"):
namespace["name"] = space_case(namespace["identifier"]).title()
return type.__new__(cls, name, bases, namespace)
class Variable(object):
_creation_counter = 0 # For sorting, incremented by `__init__`
def __init__(self, name, type=Type, required=True, help_text=""):
self.position = Variable._creation_counter
Variable._creation_counter += 1
if callable(type):
type = type()
assert isinstance(type, Type), "`type` must be a Type instance"
assert isinstance(required, bool), "`required` must be a bool (it's %r)" % required
self.name = name
self.type = type
self.required = bool(required)
self.help_text = help_text
def get_matching_types(self, variable_dict):
return set(
name
for name, variable
in six.iteritems(variable_dict)
if self.type.is_coercible_from(variable.type)
)
class Binding(Variable):
def __init__(self,
name, type=Type, required=False,
help_text="", constant_use=ConstantUse.VARIABLE_ONLY, default=None):
super(Binding, self).__init__(name=name, type=type, required=required, help_text=help_text)
self.constant_use = constant_use
self.default = default
@property
def accepts_any_type(self):
return (not self.type.identifier)
@property
def allow_constant(self):
return self.constant_use in (ConstantUse.CONSTANT_ONLY, ConstantUse.VARIABLE_OR_CONSTANT)
@property
def allow_variable(self):
return self.constant_use in (ConstantUse.VARIABLE_ONLY, ConstantUse.VARIABLE_OR_CONSTANT)
def get_value(self, context, bind_data):
if bind_data:
assert isinstance(bind_data, dict), "invalid bind data"
if self.allow_constant and "constant" in bind_data:
return self.type.unserialize(bind_data["constant"])
if self.allow_variable and "variable" in bind_data:
return context.get(bind_data["variable"], self.default)
return self.default
class TemplatedBinding(Binding):
def __init__(self, *args, **kwargs):
super(TemplatedBinding, self).__init__(*args, **kwargs)
if self.allow_variable:
raise ValueError("TemplatedBindings may not allow variable binding for security reasons")
def get_value(self, context, bind_data):
value = super(TemplatedBinding, self).get_value(context, bind_data)
try:
return render_in_context(context, value)
except TemplateError:
# Return the unrendered value if there was template trouble.
return value
class Base(six.with_metaclass(BaseMetaclass)):
identifier = None
name = None
description = None
variables = {} # Filled by the metaclass
bindings = {} # Filled by the metaclass
provide_category = None
@classmethod
def class_for_identifier(cls, identifier):
return get_identifier_to_object_map(cls.provide_category).get(identifier)
class Event(Base):
provide_category = "notify_event"
identifier = None
#: The name of the variable to be used as the log target for this event.
#:
#: The target variable must have an `add_log_entry` method.
log_target_variable = None
def __init__(self, **variable_values):
if not self.identifier:
raise ValueError("Attempting to instantiate identifierless event")
self.variable_values = {}
self.load_variables(variable_values)
@property
def log_target(self):
return self.variable_values.get(self.log_target_variable)
def load_variables(self, variable_values):
for key in sorted(variable_values.keys()):
variable = self.variables.get(key)
if not variable:
raise ValueError("Unknown variable %r for event %s" % (key, self.identifier))
self.variable_values[key] = variable.type.unserialize(variable_values.pop(key))
for name, variable in six.iteritems(self.variables):
if variable.required and name not in self.variable_values:
raise ValueError("Required variable %r missing for event %s" % (name, self.identifier))
def run(self):
from .runner import run_event
run_event(event=self)
class ScriptItem(Base):
provide_category = None
def __init__(self, data, validate=True):
if not self.identifier: # pragma: no cover
raise ValueError("Attempting to initialize %s without identifier: %r" % (self.__class__.__name__, self))
self.data = data
if validate:
self.verify_bindings()
def verify_bindings(self):
unbound = set()
for name, binding in six.iteritems(self.bindings):
if binding.required and name not in self.data:
unbound.add(name)
if unbound:
raise ValueError("Bindings unbound for %r: %r" % (self.identifier, unbound))
def get_value(self, context, binding_name):
"""
Get the actual value of a binding from the given script context.
:param context: Script Context
:type context: shuup.notify.script.Context
:param binding_name: Binding name.
:type binding_name: str
:return: The variable value
"""
binding = self.bindings[binding_name]
bind_data = self.data.get(binding_name)
return binding.get_value(context, bind_data)
def get_values(self, context):
"""
Get all binding values in a dict.
:param context: Script Context
:type context: shuup.notify.script.Context
:return: Dict of binding name -> value
:rtype: dict[name, value]
"""
return dict((binding_name, self.get_value(context, binding_name)) for binding_name in self.bindings)
@classmethod
def unserialize(cls, data, validate=True):
data = data.copy()
obj_cls = cls.class_for_identifier(data.pop("identifier"))
assert issubclass(obj_cls, cls)
return obj_cls(data, validate=validate)
def serialize(self):
data = dict(identifier=self.identifier)
data.update(**self.data)
return data
def __eq__(self, other):
return self.identifier == other.identifier and self.data == other.data
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def get_ui_info_map(cls):
map = {}
for identifier, object in six.iteritems(get_identifier_to_object_map(cls.provide_category)):
map[identifier] = {
"identifier": str(identifier),
"name": force_text(object.name),
"description": force_text(getattr(object, "description", None) or ""),
}
return map
class Condition(ScriptItem):
provide_category = "notify_condition"
@abstractmethod
def test(self, context):
return False # pragma: no cover
class Action(ScriptItem):
provide_category = "notify_action"
template_use = TemplateUse.NONE
template_fields = {}
@abstractmethod
def execute(self, context):
"""
:param context: Script Context
:type context: shuup.notify.script.Context
"""
pass # pragma: no cover
def get_template(self, context):
"""
Get this action's template instance, bound in the
context.
:rtype: shuup.notify.template.Template
"""
data = self.data.get("template_data")
if not data:
raise ValueError("No template data in action")
return Template(context, data=data)
def get_template_values(self, context, language_preferences=()):
"""
Render this Action's template with data from the given context.
:param context: Script Context
:type context: shuup.notify.script.Context
:param language_preferences:
Language preference list.
The first language in the template to have values for
all fields will be used.
Has no effect for UNILINGUAL template_use.
:type language_preferences: list[str]
:return: Dict of field name -> rendered template text.
:rtype: dict[str, str]|None
"""
if self.template_use == TemplateUse.NONE:
raise ValueError("Attempting to `get_template_values` on an action with no template use")
template = self.get_template(context)
fields = self.template_fields.keys()
if self.template_use == TemplateUse.UNILINGUAL:
language_preferences = [UNILINGUAL_TEMPLATE_LANGUAGE]
return template.render_first_match(language_preferences, fields)
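# A minimal sketch (hypothetical event, not part of this module) of how the
# metaclass gathers declarations and derives naming defaults:
#
#   class OrderReceived(Event):
#       order = Variable("Order", type=Type, required=True)
#
#   # After class creation:
#   #   OrderReceived.identifier == "order_received"
#   #   OrderReceived.name == "Order Received"
#   #   "order" in OrderReceived.variables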
|
ZachRiegel/scriptbin
|
refs/heads/master
|
pypyjs/modules/_curses_panel.py
|
11
|
"Reimplementation of the standard extension module '_curses_panel' using cffi."
from _curses import _ensure_initialised, _check_ERR, error, ffi, lib
def _call_lib(method_name, *args):
return getattr(lib, method_name)(*args)
def _call_lib_check_ERR(method_name, *args):
return _check_ERR(_call_lib(method_name, *args), method_name)
def _mk_no_arg_no_return(method_name):
def _execute():
_ensure_initialised()
return _call_lib_check_ERR(method_name)
_execute.__name__ = method_name
return _execute
def _mk_no_arg_return_val(method_name):
def _execute():
return _call_lib(method_name)
_execute.__name__ = method_name
return _execute
def _mk_args_no_return(method_name):
def _execute(*args):
return _call_lib_check_ERR(method_name, *args)
_execute.__name__ = method_name
return _execute
# ____________________________________________________________
bottom_panel = _mk_no_arg_no_return("bottom_panel")
hide_panel = _mk_no_arg_no_return("hide_panel")
show_panel = _mk_no_arg_no_return("show_panel")
top_panel = _mk_no_arg_no_return("top_panel")
panel_hidden = _mk_no_arg_return_val("panel_hidden")
move_panel = _mk_args_no_return("move_panel")
_panels = []
def _add_panel(panel):
_panels.insert(0, panel)
def _remove_panel(panel):
_panels.remove(panel)
def _find_panel(pan):
for panel in _panels:
if panel._pan == pan:
return panel
return None
class Panel(object):
def __init__(self, pan, window):
self._pan = pan
self._window = window
_add_panel(self)
def __del__(self):
_remove_panel(self)
lib.del_panel(self._pan)
def above(self):
pan = lib.panel_above(self._pan)
if pan == ffi.NULL:
return None
return _find_panel(pan)
def below(self):
pan = lib.panel_below(self._pan)
if pan == ffi.NULL:
return None
return _find_panel(pan)
def window(self):
return self._window
def replace_panel(self, window):
panel = _find_panel(self._pan)
_check_ERR(lib.replace_panel(self._pan, window._win), "replace_panel")
panel._window = window
return None
def set_panel_userptr(self, obj):
code = lib.set_panel_userptr(self._pan, ffi.cast("void *", obj))
return _check_ERR(code, "set_panel_userptr")
def userptr(self):
# XXX: This is probably wrong.
obj = lib.panel_userptr(self._pan)
if obj == ffi.NULL:
raise error("no userptr set")
return obj
def bottom_panel():
_ensure_initialised()
pan = lib.panel_above(ffi.NULL)
if pan == ffi.NULL:
return None
return _find_panel(pan)
def new_panel(window):
pan = lib.new_panel(window._win)
return Panel(pan, window)
def panel_below():
_ensure_initialised()
pan = lib.panel_below(ffi.NULL)
if pan == ffi.NULL:
return None
return _find_panel(pan)
def update_panels():
_ensure_initialised()
lib.update_panels()
return None
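# A minimal usage sketch (illustrative only; requires an initialised curses
# screen, e.g. via curses.initscr()):
#
#   import curses
#   win = curses.newwin(10, 40, 5, 5)
#   pan = new_panel(win)      # wrap the window in a Panel object
#   update_panels()           # refresh the virtual screen from the stack
#   curses.doupdate()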
|
hexenxp14/django-avatar
|
refs/heads/master
|
avatar/admin.py
|
31
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from avatar.models import Avatar
from avatar.signals import avatar_updated
from avatar.templatetags.avatar_tags import avatar
from avatar.util import get_user_model
class AvatarAdmin(admin.ModelAdmin):
list_display = ('get_avatar', 'user', 'primary', 'date_uploaded')
list_filter = ('primary',)
search_fields = ('user__%s' % getattr(get_user_model(), 'USERNAME_FIELD', 'username'),)
list_per_page = 50
def get_avatar(self, avatar_in):
return avatar(avatar_in.user, 80)
get_avatar.short_description = _('Avatar')
get_avatar.allow_tags = True
def save_model(self, request, obj, form, change):
super(AvatarAdmin, self).save_model(request, obj, form, change)
avatar_updated.send(sender=Avatar, user=request.user, avatar=obj)
admin.site.register(Avatar, AvatarAdmin)
|
hieukypc/ERP
|
refs/heads/master
|
openerp/addons/base/tests/test_func.py
|
30
|
# -*- coding: utf-8 -*-
import functools
import unittest
from openerp.tools.func import compose
from openerp.tools import frozendict
class TestCompose(unittest.TestCase):
def test_basic(self):
str_add = compose(str, lambda a, b: a + b)
self.assertEqual(
str_add(1, 2),
"3")
def test_decorator(self):
""" ensure compose() can be partially applied as a decorator
"""
@functools.partial(compose, unicode)
def mul(a, b):
return a * b
self.assertEqual(mul(5, 42), u"210")
class TestFrozendict(unittest.TestCase):
def test_frozendict_immutable(self):
""" Ensure that a frozendict is immutable. """
vals = {'name': 'Joe', 'age': 42}
frozen_vals = frozendict(vals)
# check __setitem__, __delitem__
with self.assertRaises(Exception):
frozen_vals['surname'] = 'Jack'
with self.assertRaises(Exception):
frozen_vals['name'] = 'Jack'
with self.assertRaises(Exception):
del frozen_vals['name']
# check update, setdefault, pop, popitem, clear
with self.assertRaises(Exception):
frozen_vals.update({'surname': 'Jack'})
with self.assertRaises(Exception):
frozen_vals.update({'name': 'Jack'})
with self.assertRaises(Exception):
frozen_vals.setdefault('surname', 'Jack')
with self.assertRaises(Exception):
frozen_vals.pop('surname', 'Jack')
with self.assertRaises(Exception):
frozen_vals.pop('name', 'Jack')
with self.assertRaises(Exception):
frozen_vals.popitem()
with self.assertRaises(Exception):
frozen_vals.clear()
def test_frozendict_hash(self):
""" Ensure that a frozendict is hashable. """
# dict with simple values
hash(frozendict({'name': 'Joe', 'age': 42}))
# dict with tuples, lists, and embedded dicts
hash(frozendict({
'user_id': (42, 'Joe'),
'line_ids': [(0, 0, {'values': [42]})],
}))
|
unicef-zambia/zambia-ureport
|
refs/heads/master
|
zambiaureport/contrib/sites/migrations/0002_set_site_domain_and_name.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "zambiaureport.org",
"name": "zambiaureport"
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
|
x86Labs/amoco
|
refs/heads/release
|
amoco/arch/x86/spec_fpu.py
|
10
|
# -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2014 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
# spec_xxx files are providers for instruction objects.
from .utils import *
#------------------------------------------------------
# amoco x86 FPU (x87) instruction specs:
#------------------------------------------------------
ISPECS = []
@ispec_ia32("16>[ {0f}{77} ]", mnemonic = "EMMS", type=type_cpu_state)
def ia32_nooperand(obj):
pass
@ispec_ia32("16>[ {d9} reg(3) 0 0011 ]", mnemonic = "FLD") # D9 C0+i
@ispec_ia32("16>[ {d9} reg(3) 1 0011 ]", mnemonic = "FXCH") # D9 C8+i
@ispec_ia32("16>[ {d8} reg(3) 0 1011 ]", mnemonic = "FCOM") # D8 D0+i
@ispec_ia32("16>[ {d8} reg(3) 1 1011 ]", mnemonic = "FCOMP") # D8 D8+i
@ispec_ia32("16>[ {dd} reg(3) 0 0011 ]", mnemonic = "FFREE") # DD C0+i
@ispec_ia32("16>[ {dd} reg(3) 0 1011 ]", mnemonic = "FST") # DD D0+i
@ispec_ia32("16>[ {dd} reg(3) 1 1011 ]", mnemonic = "FSTP") # DD D8+i
@ispec_ia32("16>[ {dd} reg(3) 0 0111 ]", mnemonic = "FUCOM") # DD E0+i
@ispec_ia32("16>[ {dd} reg(3) 1 0111 ]", mnemonic = "FUCOMP") # DD E8+i
def ia32_fpu_reg(obj, reg):
obj.operands = [env.st(reg)]
obj.type = type_data_processing
@ispec_ia32("*>[ {d8} /0 ]", mnemonic = "FADD", _size = 32)
@ispec_ia32("*>[ {d8} /1 ]", mnemonic = "FMUL", _size = 32)
@ispec_ia32("*>[ {d8} /2 ]", mnemonic = "FCOM", _size = 32)
@ispec_ia32("*>[ {d8} /3 ]", mnemonic = "FCOMP", _size = 32)
@ispec_ia32("*>[ {d8} /4 ]", mnemonic = "FSUB", _size = 32)
@ispec_ia32("*>[ {d8} /5 ]", mnemonic = "FSUBR", _size = 32)
@ispec_ia32("*>[ {d8} /6 ]", mnemonic = "FDIV", _size = 32)
@ispec_ia32("*>[ {d8} /7 ]", mnemonic = "FDIVR", _size = 32)
@ispec_ia32("*>[ {d9} /0 ]", mnemonic = "FLD", _size = 32)
@ispec_ia32("*>[ {d9} /2 ]", mnemonic = "FST", _size = 32)
@ispec_ia32("*>[ {d9} /3 ]", mnemonic = "FSTP", _size = 32)
@ispec_ia32("*>[ {d9} /4 ]", mnemonic = "FLDENV", _size = 28*8) #TODO : 16 bits size
@ispec_ia32("*>[ {d9} /5 ]", mnemonic = "FLDCW", _size = 16)
@ispec_ia32("*>[ {d9} /6 ]", mnemonic = "FNSTENV", _size = 28*8)
@ispec_ia32("*>[ {d9} /7 ]", mnemonic = "FNSTCW", _size = 16)
@ispec_ia32("*>[ {da} /0 ]", mnemonic = "FIADD", _size = 32)
@ispec_ia32("*>[ {da} /1 ]", mnemonic = "FIMUL", _size = 32)
@ispec_ia32("*>[ {da} /2 ]", mnemonic = "FICOM", _size = 32)
@ispec_ia32("*>[ {da} /3 ]", mnemonic = "FICOMP", _size = 32)
@ispec_ia32("*>[ {da} /4 ]", mnemonic = "FISUB", _size = 32)
@ispec_ia32("*>[ {da} /5 ]", mnemonic = "FISUBR", _size = 32)
@ispec_ia32("*>[ {da} /6 ]", mnemonic = "FIDIV", _size = 32)
@ispec_ia32("*>[ {da} /7 ]", mnemonic = "FIDIVR", _size = 32)
@ispec_ia32("*>[ {db} /0 ]", mnemonic = "FILD", _size = 32)
@ispec_ia32("*>[ {db} /1 ]", mnemonic = "FISTPP", _size = 32)
@ispec_ia32("*>[ {db} /2 ]", mnemonic = "FIST", _size = 32)
@ispec_ia32("*>[ {db} /3 ]", mnemonic = "FISTP", _size = 32)
@ispec_ia32("*>[ {db} /5 ]", mnemonic = "FLD", _size = 80)
@ispec_ia32("*>[ {db} /7 ]", mnemonic = "FSTP", _size = 80)
@ispec_ia32("*>[ {dc} /0 ]", mnemonic = "FADD", _size = 64)
@ispec_ia32("*>[ {dc} /1 ]", mnemonic = "FMUL", _size = 64)
@ispec_ia32("*>[ {dc} /2 ]", mnemonic = "FCOM", _size = 64)
@ispec_ia32("*>[ {dc} /3 ]", mnemonic = "FCOMP", _size = 64)
@ispec_ia32("*>[ {dc} /4 ]", mnemonic = "FSUB", _size = 64)
@ispec_ia32("*>[ {dc} /5 ]", mnemonic = "FSUBR", _size = 64)
@ispec_ia32("*>[ {dc} /6 ]", mnemonic = "FDIV", _size = 64)
@ispec_ia32("*>[ {dc} /7 ]", mnemonic = "FDIVR", _size = 64)
@ispec_ia32("*>[ {dd} /0 ]", mnemonic = "FLD", _size = 64)
@ispec_ia32("*>[ {dd} /1 ]", mnemonic = "FISTPP", _size = 64)
@ispec_ia32("*>[ {dd} /2 ]", mnemonic = "FST", _size = 64)
@ispec_ia32("*>[ {dd} /3 ]", mnemonic = "FSTP", _size = 64)
@ispec_ia32("*>[ {dd} /4 ]", mnemonic = "FRSTOR", _size = 108*8) #TODO : 16 bits size
@ispec_ia32("*>[ {dd} /6 ]", mnemonic = "FNSAVE", _size = 108*8) #TODO : 16 bits size
@ispec_ia32("*>[ {de} /0 ]", mnemonic = "FIADD", _size = 16)
@ispec_ia32("*>[ {de} /1 ]", mnemonic = "FIMUL", _size = 16)
@ispec_ia32("*>[ {de} /2 ]", mnemonic = "FICOM", _size = 16)
@ispec_ia32("*>[ {de} /3 ]", mnemonic = "FICOMP", _size = 16)
@ispec_ia32("*>[ {de} /4 ]", mnemonic = "FISUB", _size = 16)
@ispec_ia32("*>[ {de} /5 ]", mnemonic = "FISUBR", _size = 16)
@ispec_ia32("*>[ {de} /6 ]", mnemonic = "FIDIV", _size = 16)
@ispec_ia32("*>[ {de} /7 ]", mnemonic = "FIDIVR", _size = 16)
@ispec_ia32("*>[ {df} /0 ]", mnemonic = "FILD", _size = 16)
@ispec_ia32("*>[ {df} /1 ]", mnemonic = "FISTPP", _size = 16)
@ispec_ia32("*>[ {df} /2 ]", mnemonic = "FIST", _size = 16)
@ispec_ia32("*>[ {df} /3 ]", mnemonic = "FISTP", _size = 16)
@ispec_ia32("*>[ {df} /4 ]", mnemonic = "FBLD", _size = 80)
@ispec_ia32("*>[ {df} /5 ]", mnemonic = "FILD", _size = 64)
@ispec_ia32("*>[ {df} /6 ]", mnemonic = "FBSTP", _size = 80)
@ispec_ia32("*>[ {df} /7 ]", mnemonic = "FISTP", _size = 64)
@ispec_ia32("*>[ {9b}{d9} /7 ]", mnemonic = "FSTCW", _size = 16)
@ispec_ia32("*>[ {9b}{d9} /6 ]", mnemonic = "FSTENV", _size = 28*8)
@ispec_ia32("*>[ {9b}{dd} /6 ]", mnemonic = "FSAVE", _size = 108*8) #TODO : 16 bits size
@ispec_ia32("*>[ {0f}{ae} /0 ]", mnemonic = "FXSAVE", _size = 512*8)
@ispec_ia32("*>[ {0f}{ae} /1 ]", mnemonic = "FXRSTOR", _size = 512*8)
def ia32_fpu_mem(obj, Mod, RM, data, _size):
op1, data = getModRM(obj,Mod,RM,data)
if op1._is_reg: raise InstructionError(obj)
op1.size = _size
obj.operands = [op1]
obj.type = type_data_processing
@ispec_ia32("24>[ {9b}{df}{e0} ]", mnemonic = "FSTSW")
@ispec_ia32("16>[ {df}{e0} ]", mnemonic = "FNSTSW")
def ia32_fstsw_ax(obj):
obj.operands = [ env.getreg(0, 16) ]
obj.type = type_data_processing
@ispec_ia32("*>[ {9b}{dd} /7 ]", mnemonic = "FSTSW")
@ispec_ia32("*>[ {dd} /7 ]", mnemonic = "FNSTSW")
def ia32_fstsw(obj, Mod, RM, data):
op1,data = getModRM(obj,Mod,RM,data)
obj.operands = [op1]
obj.type = type_data_processing
@ispec_ia32("16>[ {d9}{d0} ]", mnemonic = "FNOP")
@ispec_ia32("16>[ {d9}{e0} ]", mnemonic = "FCHS")
@ispec_ia32("16>[ {d9}{e1} ]", mnemonic = "FABS")
@ispec_ia32("16>[ {d9}{e4} ]", mnemonic = "FTST")
@ispec_ia32("16>[ {d9}{e5} ]", mnemonic = "FXAM")
@ispec_ia32("16>[ {d9}{e8} ]", mnemonic = "FLD1")
@ispec_ia32("16>[ {d9}{e9} ]", mnemonic = "FLDL2T")
@ispec_ia32("16>[ {d9}{ea} ]", mnemonic = "FLDL2E")
@ispec_ia32("16>[ {d9}{eb} ]", mnemonic = "FLDPI")
@ispec_ia32("16>[ {d9}{ec} ]", mnemonic = "FLDLG2")
@ispec_ia32("16>[ {d9}{ed} ]", mnemonic = "FLDLN2")
@ispec_ia32("16>[ {d9}{ee} ]", mnemonic = "FLDZ")
@ispec_ia32("16>[ {d9}{f0} ]", mnemonic = "F2XM1")
@ispec_ia32("16>[ {d9}{f1} ]", mnemonic = "FYL2X")
@ispec_ia32("16>[ {d9}{f2} ]", mnemonic = "FPTAN")
@ispec_ia32("16>[ {d9}{f3} ]", mnemonic = "FPATAN")
@ispec_ia32("16>[ {d9}{f4} ]", mnemonic = "FXTRACT")
@ispec_ia32("16>[ {d9}{f5} ]", mnemonic = "FPREM1")
@ispec_ia32("16>[ {d9}{f6} ]", mnemonic = "FDECSTP")
@ispec_ia32("16>[ {d9}{f7} ]", mnemonic = "FINCSTP")
@ispec_ia32("16>[ {d9}{f8} ]", mnemonic = "FPREM")
@ispec_ia32("16>[ {d9}{f9} ]", mnemonic = "FYL2XP1")
@ispec_ia32("16>[ {d9}{fa} ]", mnemonic = "FSQRT")
@ispec_ia32("16>[ {d9}{fb} ]", mnemonic = "FSINCOS")
@ispec_ia32("16>[ {d9}{fc} ]", mnemonic = "FRNDINT")
@ispec_ia32("16>[ {d9}{fd} ]", mnemonic = "FSCALE")
@ispec_ia32("16>[ {d9}{fe} ]", mnemonic = "FSIN")
@ispec_ia32("16>[ {d9}{ff} ]", mnemonic = "FCOS")
@ispec_ia32("16>[ {da}{e9} ]", mnemonic = "FUCOMPP")
@ispec_ia32("16>[ {db}{e2} ]", mnemonic = "FNCLEX")
@ispec_ia32("16>[ {db}{e3} ]", mnemonic = "FNINIT")
@ispec_ia32("16>[ {de}{d9} ]", mnemonic = "FCOMPP")
@ispec_ia32("24>[ {9b}{db}{e2} ]", mnemonic = "FCLEX")
@ispec_ia32("24>[ {9b}{db}{e3} ]", mnemonic = "FINIT")
def fld_fpu_noop(obj):
obj.type = type_data_processing
@ispec_ia32("16>[ {d8} reg(3) 0 0111 ]", mnemonic = "FSUB", _src=None, _dest=0) # D8 E0+i
@ispec_ia32("16>[ {dc} reg(3) 1 0111 ]", mnemonic = "FSUB", _src=0, _dest=None) # DC E8+i
@ispec_ia32("16>[ {de} reg(3) 1 0111 ]", mnemonic = "FSUBP", _src=0, _dest=None) # DE E8+i
@ispec_ia32("16>[ {d8} reg(3) 1 0111 ]", mnemonic = "FSUBR", _src=None, _dest=0) # D8 E8+i
@ispec_ia32("16>[ {dc} reg(3) 0 0111 ]", mnemonic = "FSUBR", _src=0, _dest=None) # DC E0+i
@ispec_ia32("16>[ {de} reg(3) 0 0111 ]", mnemonic = "FSUBRP", _src=0, _dest=None) # DE E0+i
@ispec_ia32("16>[ {d8} reg(3) 0 0011 ]", mnemonic = "FADD", _src=None, _dest=0) # D8 C0+i
@ispec_ia32("16>[ {dc} reg(3) 0 0011 ]", mnemonic = "FADD", _src=0, _dest=None) # DC C0+i
@ispec_ia32("16>[ {de} reg(3) 0 0011 ]", mnemonic = "FADDP", _src=0, _dest=None) # DE C0+i
@ispec_ia32("16>[ {d8} reg(3) 0 1111 ]", mnemonic = "FDIV", _src=None, _dest=0) # D8 F0+i
@ispec_ia32("16>[ {dc} reg(3) 1 1111 ]", mnemonic = "FDIV", _src=0, _dest=None) # DC F8+i
@ispec_ia32("16>[ {de} reg(3) 1 1111 ]", mnemonic = "FDIVP", _src=0, _dest=None) # DE F8+i
@ispec_ia32("16>[ {d8} reg(3) 1 1111 ]", mnemonic = "FDIVR", _src=None, _dest=0) # D8 F8+i
@ispec_ia32("16>[ {dc} reg(3) 0 1111 ]", mnemonic = "FDIVR", _src=0, _dest=None) # DC F0+i
@ispec_ia32("16>[ {de} reg(3) 0 1111 ]", mnemonic = "FDIVRP", _src=0, _dest=None) # DE F0+i
@ispec_ia32("16>[ {d8} reg(3) 1 0011 ]", mnemonic = "FMUL", _src=None, _dest=0) # D8 C8+i
@ispec_ia32("16>[ {dc} reg(3) 1 0011 ]", mnemonic = "FMUL", _src=0, _dest=None) # DC C8+i
@ispec_ia32("16>[ {de} reg(3) 1 0011 ]", mnemonic = "FMULP", _src=0, _dest=None) # DE C8+i
@ispec_ia32("16>[ {da} reg(3) 0 0011 ]", mnemonic = "FCMOVB", _src=None, _dest=0) # DA C0+i
@ispec_ia32("16>[ {da} reg(3) 1 0011 ]", mnemonic = "FCMOVE", _src=None, _dest=0) # DA C8+i
@ispec_ia32("16>[ {da} reg(3) 0 1011 ]", mnemonic = "FCMOVBE", _src=None, _dest=0) # DA D0+i
@ispec_ia32("16>[ {da} reg(3) 1 1011 ]", mnemonic = "FCMOVU", _src=None, _dest=0) # DA D8+i
@ispec_ia32("16>[ {db} reg(3) 0 0011 ]", mnemonic = "FCMOVNB", _src=None, _dest=0) # DB C0+i
@ispec_ia32("16>[ {db} reg(3) 1 0011 ]", mnemonic = "FCMOVNE", _src=None, _dest=0) # DB C8+i
@ispec_ia32("16>[ {db} reg(3) 0 1011 ]", mnemonic = "FCMOVNBE", _src=None, _dest=0) # DB D0+i
@ispec_ia32("16>[ {db} reg(3) 1 1011 ]", mnemonic = "FCMOVNU", _src=None, _dest=0) # DB D8+i
@ispec_ia32("16>[ {db} reg(3) 0 1111 ]", mnemonic = "FCOMI", _src=None, _dest=0) # DB F0+i
@ispec_ia32("16>[ {df} reg(3) 0 1111 ]", mnemonic = "FCOMIP", _src=None, _dest=0) # DF F0+i
@ispec_ia32("16>[ {db} reg(3) 1 0111 ]", mnemonic = "FUCOMI", _src=None, _dest=0) # DB E8+i
@ispec_ia32("16>[ {df} reg(3) 1 0111 ]", mnemonic = "FUCOMIP", _src=None, _dest=0) # DF E8+i
def ia32_fpu_st(obj, reg, _dest, _src):
# No explicit st() operands; should not occur with the specs above.
if _dest is None and _src is None:
return
if _dest is None:
_dest = reg
elif _src is None:
_src = reg
op1 = env.st(_dest)
op2 = env.st(_src)
obj.operands = [op1, op2]
obj.type = type_data_processing
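# Worked example (illustrative): for D8 E0+i (FSUB), _dest=0 and _src comes
# from the decoded reg field, so the operands become [st(0), st(i)]; for
# DC E8+i (FSUB), the roles are swapped and the operands become [st(i), st(0)].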
|
cyucheng/skimr
|
refs/heads/master
|
skimr/skimr.py
|
1
|
#!/usr/bin/env python
"""
skimr is a web application for streamlining reading of articles online.
It currently works for articles on Medium.com but could be adapted for articles
on other websites with minor adjustments to the html cleaning function.
The framework of skimr is:
- Use selenium webdriver to scrape full HTML of article from user-input url
- Apply a cleaning function to HTML to get full text of the article
- Calculate feature values for sentences
- Features include topic similarity score between sentence and the article
it's from, sentence length, sentence position, and readability metrics
- Topic distributions calculated using Latent Dirichlet Allocation (LDA)
- Use pre-trained logistic regression model to predict highlighted sentences
- Find sentences to be highlighted in HTML of article
- Apply markup to sentences in HTML
- Display article with markups in the browser
To see the full skimr package, visit https://github.com/cyucheng/skimr
Clarence Cheng, 2017
"""
###############################################################################
# Imports
# WEB
from flask import render_template, request
from skimr import app
from selenium import webdriver
from bs4 import BeautifulSoup
# TOOLS
import re
import sys
import pandas as pd
import numpy as np
from scipy import spatial
import pickle
import string
# ML/NLP
from patsy import dmatrices
sys.path.insert(0, 'readability') # From mmautner/readability on GitHub
from readability import Readability # noqa
import nltk # noqa
import nltk.data # noqa
from nltk.tokenize import RegexpTokenizer # noqa
from nltk.corpus import stopwords # noqa
from nltk.stem.porter import PorterStemmer # noqa
from stop_words import get_stop_words # noqa
# Set up tokenizers/stemmers/stopword lists
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
word_tokenizer = RegexpTokenizer(r'\s+', gaps=True)
p_stemmer = PorterStemmer()
stop_en = get_stop_words('en')
en_words = set(nltk.corpus.words.words())
stopw_en = stopwords.words('english')
all_stopw = set(stopw_en) | set(stop_en)
# Load pickled files
pipe = pd.read_pickle('pkl/model_logreg_std.pkl') # Logistic regression model
ldamodel = pickle.load(open('pkl/lda_model.pkl', 'rb')) # LDA model
commonwords_2 = pickle.load(open('pkl/commonwords.pkl', 'rb')) # Common words
dictionary = pickle.load(open('pkl/lda_dictionary.pkl', 'rb')) # LDA dictionary # noqa
###############################################################################
# Define functions
def webscrape(inputurl):
"""
Retrieves the HTML source for a given URL.
Args:
- inputurl (str): URL for a webpage
Returns:
- html (str): full HTML from webpage
"""
drvr = webdriver.PhantomJS()
drvr.get(inputurl)
html = drvr.execute_script('return document.documentElement.innerHTML;')
return html
###############################################################################
def getfulltext(scrapedhtml):
"""
Gets the full text of the article from the HTML source.
Args:
- scrapedhtml (str): full HTML from webpage
Returns:
- fulltext (str): full text of article from webpage
"""
# Get text from paragraphs inside tag for body of article
lines = []
soup = BeautifulSoup(scrapedhtml, 'lxml')
txt0 = soup.find('div', attrs={'data-source': 'post_page'})
txt1 = txt0.find_all(class_='graf')
# Remove HTML tags
for line in txt1:
txt2 = re.sub('<[^>]+>', '', str(line))
lines.append(txt2)
# Join into full text string
fulltext = ' '.join(lines)
return fulltext
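# Worked example (illustrative): the tag-stripping step above turns
#   '<p class="graf">Hello <em>world</em></p>'  into  'Hello world'.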
###############################################################################
def calc_params(fulltext):
"""
Calculate feature values for each sentence in the article.
Args:
- fulltext (str): full text of article from webpage
Returns:
- data (df): dataframe with feature values for each sentence
Steps:
- Tokenize full text into sentences
- For each sentence, calculate:
- Topic similarity score: cosine similarity of sentence topic
distribution to article topic distribution
- Position within article in fraction of words and sentences (0 = start
of article, 1 = end of article)
- Sentence length
- Readability metrics
- Put feature values in dataframe
"""
# Initialize lists for each feature; will be inputs to dataframe
all_sents = []
all_ARI = []
all_FRE = []
all_FKG = []
all_GFI = []
all_SMG = []
all_CLI = []
all_LIX = []
all_RIX = []
all_lens = []
all_ldadists = []
all_wposes = []
all_sposes = []
# Compute topic vector for the whole article
# Clean full text
fulltext_prep = clean_text(fulltext)
# Convert article to bag-of-words
text_corpus = dictionary.doc2bow(fulltext_prep)
# Calculate document topic distribution
doc_lda = ldamodel[text_corpus]
# Convert topic distribution to vector
vec_lda_art = lda_to_vec(doc_lda)
# Break full text into sentences
ftsents = sent_tokenizer.tokenize(fulltext)
for f in ftsents:
# Get topic similarity score of sentence vs article
f_clean = clean_text(f)
f_corpus = dictionary.doc2bow(f_clean)
sent_lda = ldamodel[f_corpus]
vec_lda = lda_to_vec(sent_lda)
f_lda = 1-spatial.distance.cosine(vec_lda, vec_lda_art)
all_ldadists.append(f_lda)
# Get sentence position (fraction way through article)
f_wpos, f_spos = sent_pos(f, fulltext)
all_wposes.append(float(f_wpos))
all_sposes.append(float(f_spos))
# Get length of sentence
ftwords = word_tokenizer.tokenize(f)
ftlen = len(ftwords)
all_lens.append(int(ftlen))
# Get readability metrics
f_rd = Readability(f)
all_ARI.append(float(f_rd.ARI()))
all_FRE.append(float(f_rd.FleschReadingEase()))
all_FKG.append(float(f_rd.FleschKincaidGradeLevel()))
all_GFI.append(float(f_rd.GunningFogIndex()))
all_SMG.append(float(f_rd.SMOGIndex()))
all_CLI.append(float(f_rd.ColemanLiauIndex()))
all_LIX.append(float(f_rd.LIX()))
all_RIX.append(float(f_rd.RIX()))
# Store the sentence text itself
all_sents.append(f)
# Build pandas dataframe
data = pd.DataFrame({
'dummy': all_lens,
'sentences': all_sents,
'length': all_lens,
'LDAdist': all_ldadists,
'wordPos': all_wposes,
'sentPos': all_sposes,
'ARI': all_ARI,
'FRE': all_FRE,
'FKG': all_FKG,
'GFI': all_GFI,
'SMG': all_SMG,
'CLI': all_CLI,
'LIX': all_LIX,
'RIX': all_RIX,
})
return data
###############################################################################
def clean_text(text):
"""
Clean text of full article or individual sentences so features can be
calculated.
Args:
- text (str): full text of article or individual sentence
Returns:
- stemmed_nocommon (list): list of processed words in text
Steps:
- Remove punctuation
- Split text into words
- Strip single and double quotes from ends of words
- Remove non-English words
- Remove stopwords
- Ensure no quotes in words before stemming
- Stem words
- Remove any quotes remaining after stemming
- Stem words again to account for words 'masked' by quotes
- Final pass to remove any remaining quotes
- Remove common words, post-stemming
- Common words are those appearing in >=60% of documents (calculated
separately in 4_LDA_analysis.ipynb in skimr/jupyter on GitHub)
"""
translator = str.maketrans('', '', string.punctuation)
txt2 = re.sub(u'\u2014', '', text) # Remove em dashes
txt3 = re.sub(r'\d+', '', txt2) # Remove digits
txt4 = txt3.translate(translator) # Remove punctuation
tokens = word_tokenizer.tokenize(txt4.lower())
tokens_strip = [i.strip('”“’‘') for i in tokens]
tokens_en = [i for i in tokens_strip if i in en_words]
nostop_tokens = [i for i in tokens_en if not (i in all_stopw)]
nostop_strip = [i.strip('”“’‘') for i in nostop_tokens]
stemmed = [p_stemmer.stem(i) for i in nostop_strip]
stemmed_strip = [i.strip('”“’‘') for i in stemmed]
stemmed2 = [p_stemmer.stem(i) for i in stemmed_strip]
stemmed2_strip = [i.strip('”“’‘') for i in stemmed2]
stemmed_nocommon = [i for i in stemmed2_strip if not (i in commonwords_2)]
return stemmed_nocommon
###############################################################################
def lda_to_vec(lda_input):
"""
Convert topic distribution from LDA to a numeric vector that can be
compared to others.
Args:
- lda_input (list): list of tuples [topic_id, topic_probability] output by
LDA model
Returns:
- vec (list): list of topic probabilities
"""
num_topics = 10
vec = [0]*num_topics
for i in lda_input:
col = i[0]
val = i[1]
vec[col] = val
return vec
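# Worked example (illustrative): with num_topics = 10,
#   lda_to_vec([(2, 0.6), (7, 0.4)]) -> [0, 0, 0.6, 0, 0, 0, 0, 0.4, 0, 0]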
###############################################################################
def sent_pos(sentence, text):
"""
Calculate position of sentence in article as the fraction of words and
sentences into the text.
Args:
- sentence (str): sentence for which to calculate this
- text (str): full text of article
Returns:
- frc_w (float): fraction of words into the text that sentence begins
- frc_s (float): fraction of sentences into the text that sentence begins
"""
# Break text into sentences and get total sents in full text
full_sents = sent_tokenizer.tokenize(text)
num_sents = len(full_sents)
# Break text into words and get total words in full text
full_words = word_tokenizer.tokenize(text)
num_words = len(full_words)
pos = text.find(sentence)
if pos >= 0:
# Total words in full text before highlight position
b4_words = word_tokenizer.tokenize(text[:pos])
b4_wlen = len(b4_words)
# Sentences in full text before highlight position
b4_sents = sent_tokenizer.tokenize(text[:pos])
b4_slen = len(b4_sents)
frc_w = b4_wlen / num_words
frc_s = b4_slen / num_sents
elif pos < 0:
# If sentence not found in article, set fraction to -1 (there may be a
# better way to do this, e.g. make a categorical variable for missing
# position?)
frc_w = -1
frc_s = -1
return frc_w, frc_s
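# Worked example (illustrative): in a 100-word, 10-sentence article, a
# sentence beginning after 50 words and 5 sentences gives frc_w == 0.5 and
# frc_s == 0.5; a sentence not found in the text gives (-1, -1).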
###############################################################################
def predict(data):
"""
Predict category (0 = non-highlighted, 1 = highlighted) and confidence
score for each sentence.
Args:
- data (df): dataframe with feature values for each sentence
Returns:
- predicted (array): predicted category for each sentence
- decfxn (array): confidence score for each sentence
"""
y, X = dmatrices('dummy ~ length + LDAdist + wordPos + sentPos + ARI + FRE \
+ FKG + GFI + SMG + CLI + LIX + RIX',
data, return_type="dataframe")
y = np.ravel(y)
# Predict value for data
predicted = pipe.predict(X)
# Get confidence score
decfxn = pipe.decision_function(X)
return predicted, decfxn
###############################################################################
def markup(predicted, decfxn, data, scrapedhtml):
"""
Mark up HTML for sentences predicted to be highlighted by the model.
Args:
- predicted (array): predicted category for each sentence
- decfxn (array): confidence score for each sentence
- data (df): dataframe with feature values for each sentence
- scrapedhtml (str): full HTML from webpage
Returns:
- htmlmarkup (BeautifulSoup): Beautiful Soup object for marked-up HTML
"""
soup = BeautifulSoup(scrapedhtml, 'lxml')
predict = list(predicted)
tmpsoup = str(soup)
decision = list(decfxn)
n = 0
for f in data['sentences']:
if predict[n] == 1:
if decision[n] >= 0.1:
# Mark up HTML to highlight sentence
newf = '<span style="background-color: #ffff00">'+f+'</span>'
tmpsoup = tmpsoup.replace(f, newf)
n += 1
outsoup = BeautifulSoup(tmpsoup, 'lxml')
htmlmarkup = outsoup.prettify()
return htmlmarkup
###############################################################################
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html")
@app.route('/output')
def output():
inputtext = request.args.get('inputtext')
if not inputtext.startswith('http'):
return render_template('error.html')
scrapedhtml = webscrape(inputtext)
cleanedtext = getfulltext(scrapedhtml)
data = calc_params(cleanedtext)
predicted, decfxn = predict(data)
htmlmarkup = markup(predicted, decfxn, data, scrapedhtml)
return render_template('output.html', html=htmlmarkup)
|
tobinjt/Flexget
|
refs/heads/develop
|
flexget/components/notify/notifiers/join.py
|
4
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.config_schema import one_or_more
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException
plugin_name = 'join'
log = logging.getLogger(plugin_name)
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('appspot.com', '5 seconds'))
JOIN_URL = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush'
class JoinNotifier(object):
"""
Example::
notify:
entries:
via:
- join:
[api_key: <API_KEY> (your join api key. Only required for 'group' notifications)]
[group: <GROUP_NAME> (name of group of join devices to notify. 'all', 'android', etc.)]
[device: <DEVICE_ID> (can also be a list of device ids)]
[url: <NOTIFICATION_URL>]
[sms_number: <NOTIFICATION_SMS_NUMBER>]
[icon: <NOTIFICATION_ICON>]
"""
schema = {
'type': 'object',
'properties': {
'api_key': {'type': 'string'},
'group': {
'type': 'string',
'enum': ['all', 'android', 'chrome', 'windows10', 'phone', 'tablet', 'pc'],
},
'device': one_or_more({'type': 'string'}),
'device_name': one_or_more({'type': 'string'}),
'url': {'type': 'string'},
'icon': {'type': 'string'},
'sms_number': {'type': 'string'},
'priority': {'type': 'integer', 'minimum': -2, 'maximum': 2},
},
'required': ['api_key'],
'not': {'required': ['device', 'group']},
'error_not': 'Cannot select both \'device\' and \'group\'',
'additionalProperties': False,
}
def notify(self, title, message, config):
"""
Send Join notifications.
"""
notification = {
'title': title,
'text': message,
'url': config.get('url'),
'icon': config.get('icon'),
'priority': config.get('priority'),
'apikey': config['api_key'],
}
if config.get('device'):
if isinstance(config['device'], list):
notification['deviceIds'] = ','.join(config['device'])
else:
notification['deviceId'] = config['device']
elif config.get('group'):
notification['deviceId'] = 'group.' + config['group']
else:
notification['deviceId'] = 'group.all'
if config.get('device_name'):
if isinstance(config['device_name'], list):
notification['deviceNames'] = ','.join(config['device_name'])
else:
notification['deviceNames'] = config['device_name']
if config.get('sms_number'):
notification['smsnumber'] = config['sms_number']
notification['smstext'] = message
try:
response = requests.get(JOIN_URL, params=notification)
except RequestException as e:
raise PluginWarning(e.args[0])
else:
error = response.json().get('errorMessage')
if error:
raise PluginWarning(error)
@event('plugin.register')
def register_plugin():
plugin.register(JoinNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
|
daenamkim/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/ios/ios_ping.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ios_ping
short_description: Tests reachability using ping from IOS switch
description:
- Tests reachability using ping from switch to a remote destination.
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
extends_documentation_fragment: ios
options:
count:
description:
- Number of packets to send.
required: false
default: 5
dest:
description:
- The IP Address or hostname (resolvable by switch) of the remote node.
required: true
source:
description:
- The source IP Address.
required: false
default: null
state:
description:
- Determines if the expected result is success or fail.
choices: [ absent, present ]
default: present
vrf:
description:
- The VRF to use for forwarding.
required: false
default: default
'''
EXAMPLES = r'''
- provider:
host: "{{ ansible_host }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Test reachability to 10.10.10.10 using default vrf
ios_ping:
provider: "{{ provider }}"
dest: 10.10.10.10
- name: Test reachability to 10.20.20.20 using prod vrf
ios_ping:
provider: "{{ provider }}"
dest: 10.20.20.20
vrf: prod
- name: Test unreachability to 10.30.30.30 using default vrf
ios_ping:
provider: "{{ provider }}"
dest: 10.30.30.30
state: absent
- name: Test reachability to 10.40.40.40 using prod vrf and setting count and source
ios_ping:
provider: "{{ provider }}"
dest: 10.40.40.40
source: loopback0
vrf: prod
count: 20
'''
RETURN = '''
commands:
description: Show the command sent.
returned: always
type: list
sample: ["ping vrf prod 10.40.40.40 count 20 source loopback0"]
packet_loss:
description: Percentage of packets lost.
returned: always
type: str
sample: "0%"
packets_rx:
description: Packets successfully received.
returned: always
type: int
sample: 20
packets_tx:
description: Packets successfully transmitted.
returned: always
type: int
sample: 20
rtt:
description: Show RTT stats.
returned: always
type: dict
sample: {"avg": 2, "max": 8, "min": 1}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.ios import run_commands
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
import re
def main():
""" main entry point for module execution
"""
argument_spec = dict(
count=dict(type="int"),
dest=dict(type="str", required=True),
source=dict(type="str"),
state=dict(type="str", choices=["absent", "present"], default="present"),
vrf=dict(type="str")
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec)
count = module.params["count"]
dest = module.params["dest"]
source = module.params["source"]
vrf = module.params["vrf"]
warnings = list()
check_args(module, warnings)
results = {}
if warnings:
results["warnings"] = warnings
results["commands"] = [build_ping(dest, count, source, vrf)]
ping_results = run_commands(module, commands=results["commands"])
ping_results_list = ping_results[0].split("\n")
success, rx, tx, rtt = parse_ping(ping_results_list[3])
loss = abs(100 - int(success))
results["packet_loss"] = str(loss) + "%"
results["packets_rx"] = int(rx)
results["packets_tx"] = int(tx)
# Convert rtt values to int
for k, v in rtt.items():
if rtt[k] is not None:
rtt[k] = int(v)
results["rtt"] = rtt
validate_results(module, loss, results)
module.exit_json(**results)
def build_ping(dest, count=None, source=None, vrf=None):
"""
Function to build the command to send to the terminal for the switch
to execute. All args come from the module's unique params.
"""
if vrf is not None:
cmd = "ping {0} {1}".format(vrf, dest)
else:
cmd = "ping {0}".format(dest)
if count is not None:
cmd += " repeat {0}".format(str(count))
if source is not None:
cmd += " source {0}".format(source)
return cmd
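# Illustrative sketch (not part of the original module): command strings
# produced for a few hypothetical parameter combinations.
#
#   build_ping("10.40.40.40")
#   # -> "ping 10.40.40.40"
#   build_ping("10.40.40.40", count=20, source="loopback0", vrf="prod")
#   # -> "ping vrf prod 10.40.40.40 repeat 20 source loopback0"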
def parse_ping(ping_stats):
"""
Function used to parse the statistical information from the ping response.
Example: "Success rate is 100 percent (5/5), round-trip min/avg/max = 1/2/8 ms"
    Returns the success rate percentage, received packets, transmitted packets, and RTT dict.
"""
rate_re = re.compile(r"^\w+\s+\w+\s+\w+\s+(?P<pct>\d+)\s+\w+\s+\((?P<rx>\d+)/(?P<tx>\d+)\)")
rtt_re = re.compile(r".*,\s+\S+\s+\S+\s+=\s+(?P<min>\d+)/(?P<avg>\d+)/(?P<max>\d+)\s+\w+\s*$|.*\s*$")
rate = rate_re.match(ping_stats)
rtt = rtt_re.match(ping_stats)
return rate.group("pct"), rate.group("rx"), rate.group("tx"), rtt.groupdict()
def validate_results(module, loss, results):
"""
This function is used to validate whether the ping results were unexpected per "state" param.
"""
state = module.params["state"]
if state == "present" and loss == 100:
module.fail_json(msg="Ping failed unexpectedly", **results)
elif state == "absent" and loss < 100:
module.fail_json(msg="Ping succeeded unexpectedly", **results)
if __name__ == "__main__":
main()
|
mogoweb/chromium-crosswalk
|
refs/heads/master
|
native_client_sdk/src/tools/fix_deps.py
|
95
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Fixup GCC-generated dependency files.
Modify GCC generated dependency files so they are more suitable for including
in a GNU Makefile. Without the fixups, deleting or renaming headers can cause
the build to be broken.
See http://mad-scientist.net/make/autodep.html for more details of the problem.
"""
import os
import optparse
import sys
TAG_LINE = '# Updated by fix_deps.py\n'
class Error(Exception):
pass
def ParseLine(line, new_target):
"""Parse one line of a GCC-generated deps file.
Each line contains an optional target and then a list
    of space separated dependencies. Spaces within filenames
are escaped with a backslash.
"""
filenames = []
if new_target and ':' in line:
line = line.split(':', 1)[1]
line = line.strip()
line = line.rstrip('\\')
while True:
# Find the next non-escaped space
        line = line.strip()
        if not line:
            break
        pos = line.find(' ')
while pos > 0 and line[pos-1] == '\\':
pos = line.find(' ', pos+1)
if pos == -1:
filenames.append(line)
break
filenames.append(line[:pos])
line = line[pos+1:]
return filenames
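# Illustrative sketch (not part of the original script): the target prefix is
# stripped when new_target is set, and escaped spaces are kept inside names.
#
#   ParseLine('foo.o: foo.h my\\ lib.h \\', new_target=True)
#   # -> ['foo.h', 'my\\ lib.h']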
def FixupDepFile(filename, output_filename=None):
if not os.path.exists(filename):
raise Error('File not found: %s' % filename)
if output_filename is None:
output_filename = filename
outlines = [TAG_LINE]
deps = []
new_target = True
with open(filename) as infile:
for line in infile:
if line == TAG_LINE:
raise Error('Already processed: %s' % filename)
outlines.append(line)
deps += ParseLine(line, new_target)
            # A new target can only begin after a line that did not end with
            # a continuation backslash.
            new_target = not line.rstrip().endswith('\\')
    # For every dependency found, output a dummy target with no rules
for dep in deps:
outlines.append('%s:\n' % dep)
with open(output_filename, 'w') as outfile:
for line in outlines:
outfile.write(line)
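# Illustrative sketch (not part of the original script): for an input file
# containing the single rule
#
#   foo.o: foo.c foo.h bar.h
#
# the rewritten file gains the tag line plus one empty dummy rule per
# dependency, so make no longer fails when a header is deleted or renamed:
#
#   # Updated by fix_deps.py
#   foo.o: foo.c foo.h bar.h
#   foo.c:
#   foo.h:
#   bar.h: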
def main(argv):
usage = "usage: %prog [options] <dep_file>"
parser = optparse.OptionParser(usage=usage, description=__doc__)
    parser.add_option('-o', '--output', help='Output filename (defaults to '
                      'input name with .deps extension)')
parser.add_option('-c', '--clean', action='store_true',
help='Remove input file after writing output')
options, args = parser.parse_args(argv)
if not args:
raise parser.error('No input file specified')
if len(args) > 1:
raise parser.error('Only one argument supported')
input_filename = args[0]
output_filename = options.output
if not output_filename:
output_filename = os.path.splitext(input_filename)[0] + '.deps'
FixupDepFile(input_filename, output_filename)
if options.clean and input_filename != output_filename:
os.remove(input_filename)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except Error as e:
sys.stderr.write('%s: %s\n' % (os.path.basename(__file__), e))
sys.exit(1)
|
mlavin/django
|
refs/heads/master
|
django/utils/http.py
|
9
|
import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from contextlib import suppress
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_bytes
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(k, [str(i) for i in v] if isinstance(v, (list, tuple)) else str(v))
for k, v in query],
doseq
)
def cookie_date(epoch_seconds=None):
"""
Format the time to ensure compatibility with Netscape's cookie standard.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
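# A minimal sketch (not part of the original module), assuming the standard
# Unix epoch:
#
#   cookie_date(1314872430)  # -> 'Thu, 01-Sep-2011 10:20:30 GMT'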
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
with suppress(Exception):
return parse_http_date(date)
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
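# Illustrative sketch (not part of the original module):
#
#   int_to_base36(125)       # -> '3h' (125 == 3 * 36 + 17)
#   base36_to_int('3h')      # -> 125
#   base36_to_int('z' * 14)  # raises ValueError (more than 13 digits)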
def urlsafe_base64_encode(s):
"""
Encode a bytestring in base64 for use in URLs. Strip any trailing equal
signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
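# Illustrative sketch (not part of the original module): padding is stripped
# on encode and restored on decode.
#
#   urlsafe_base64_encode(b'http')   # -> b'aHR0cA' (instead of b'aHR0cA==')
#   urlsafe_base64_decode('aHR0cA')  # -> b'http'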
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
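# Illustrative sketch (not part of the original module):
#
#   is_same_domain('foo.example.com', '.example.com')      # -> True
#   is_same_domain('example.com', '.example.com')          # -> True
#   is_same_domain('example.com', 'example.com')           # -> True
#   is_same_domain('example.com.evil.com', 'example.com')  # -> False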
def is_safe_url(url, host=None, allowed_hosts=None, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
if host:
warnings.warn(
"The host argument is deprecated, use allowed_hosts instead.",
RemovedInDjango21Warning,
stacklevel=2,
)
# Avoid mutating the passed in allowed_hosts.
allowed_hosts = allowed_hosts | {host}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
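# Illustrative sketch (not part of the original module), with a hypothetical
# allowed host:
#
#   is_safe_url('/next/', allowed_hosts={'example.com'})                # -> True
#   is_safe_url('https://example.com/', allowed_hosts={'example.com'})  # -> True
#   is_safe_url('https://evil.com/', allowed_hosts={'example.com'})     # -> False
#   is_safe_url('http://example.com/', allowed_hosts={'example.com'},
#               require_https=True)                                     # -> False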
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
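# Illustrative sketch (not part of the original module):
#
#   limited_parse_qsl('a=1&b=2&a=3', fields_limit=10)
#   # -> [('a', '1'), ('b', '2'), ('a', '3')]
#   limited_parse_qsl('a=1&b=2&a=3', fields_limit=2)
#   # raises TooManyFieldsSent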
|
aonbyte/projecteuler
|
refs/heads/master
|
src/problem1.py
|
1
|
#Project Euler Problem 1 @ http://projecteuler.net/problem=1
#By Michael H. / Aonbyte @ http://github.com/aonbyte
#Problem 1:
#
#If we list all the natural numbers below 10 that are multiples of 3 or 5, we
#get 3, 5, 6 and 9. The sum of these multiples is 23. Find the sum of all the
#multiples of 3 or 5 below 1000.
def multipleOfThree(number):
"""Returns whether or not the number is a multiple of three"""
    return (number % 3) == 0
def multipleOfFive(number):
"""Returns whether or not the number is a multiple of five"""
    return (number % 5) == 0
listOfNumbers = range(1000)
listOfMultiples = []
for number in listOfNumbers:
if multipleOfThree(number) or multipleOfFive(number):
listOfMultiples.append(number)
sumOfMultiples = sum(listOfMultiples)
print sumOfMultiples
#Solution is 233168
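#As a cross-check (not part of the original), inclusion-exclusion with the
#arithmetic series sum S(k) = k * m * (m + 1) / 2, where m = 999 // k, gives
#S(3) + S(5) - S(15) = 166833 + 99500 - 33165 = 233168, matching the loop.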
|
zrhans/pythonanywhere
|
refs/heads/master
|
.virtualenvs/django19/lib/python3.4/site-packages/numpy/lib/tests/test__version.py
|
126
|
"""Tests for the NumpyVersion class.
"""
from __future__ import division, absolute_import, print_function
from numpy.testing import assert_, run_module_suite, assert_raises
from numpy.lib import NumpyVersion
def test_main_versions():
assert_(NumpyVersion('1.8.0') == '1.8.0')
for ver in ['1.9.0', '2.0.0', '1.8.1']:
assert_(NumpyVersion('1.8.0') < ver)
for ver in ['1.7.0', '1.7.1', '0.9.9']:
assert_(NumpyVersion('1.8.0') > ver)
def test_version_1_point_10():
# regression test for gh-2998.
assert_(NumpyVersion('1.9.0') < '1.10.0')
assert_(NumpyVersion('1.11.0') < '1.11.1')
assert_(NumpyVersion('1.11.0') == '1.11.0')
assert_(NumpyVersion('1.99.11') < '1.99.12')
def test_alpha_beta_rc():
assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
for ver in ['1.8.0', '1.8.0rc2']:
assert_(NumpyVersion('1.8.0rc1') < ver)
for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
assert_(NumpyVersion('1.8.0rc1') > ver)
assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
def test_dev_version():
assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
def test_dev_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
def test_dev0_version():
assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
def test_dev0_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
if __name__ == "__main__":
run_module_suite()
|
QuantCrimAtLeeds/PredictCode
|
refs/heads/master
|
open_cp/gui/predictors/pred_type.py
|
1
|
"""
pred_type
~~~~~~~~~
Describe the top level prediction required. E.g.:
- Produce a prediction for each day in the assessment time range, and score
the prediction using the actual events which occurred that day.
- Or... same, but on a weekly basis.
"""
from . import comparitor
import logging
import tkinter as tk
import tkinter.ttk as ttk
import open_cp.gui.tk.util as util
import open_cp.gui.tk.tooltips as tooltips
import open_cp.gui.tk.richtext as richtext
import datetime
_text = {
"main" : ("Prediction Type\n\n"
+ "Using the selected 'assessment time range' as a base, how often do we want to create predictions, and what time range do we wish to 'score' the prediction on?\n\n"
+ "Currently supported is making predictions for a whole number of days, and then testing each prediction for a whole number of days."
+ "You may mix and match, but be aware of 'multiple testing' issues with setting the score interval to be longer than the repeat interval.\n"
+ "For example, the default setting is 1 day and 1 day. This generates a prediction for every day in the selected assessment time range, and then scores each such prediction by comparing against the actual events for that day.\n"
+ "Setting to 7 days and 1 day would generate a prediction for every seven days (i.e. once a week) but would score for just the next day. This could be used to test predictions for just one day of the week."),
"every" : "Repeat interval:",
"everytt" : "How often do we want to create a prediction? Should be a whole number of days.",
"score" : "Score time interval:",
"scorett" : "How long a period of time should each prediction be compared with reality for? Should be a whole number of days.",
"whole_day_warning" : "Currently we only support making predictions for whole days.",
"wdw_round" : "Rounded {} to {}",
"pp" : "Preview of Prediction ranges",
"pp1" : "Predict for {} and score for the next {} day(s)",
"dtfmt" : "%a %d %b %Y",
}
class PredType(comparitor.Comparitor):
def __init__(self, model):
super().__init__(model)
self._every = datetime.timedelta(days=1)
self._score = datetime.timedelta(days=1)
@staticmethod
def describe():
return "Prediction Type required"
@staticmethod
def order():
return comparitor.TYPE_TOP_LEVEL
def make_view(self, parent):
self._view = PredTypeView(parent, self)
return self._view
@property
def name(self):
return "Predict for every {} days, scoring the next {} days".format(
self._every / datetime.timedelta(days=1),
self._score / datetime.timedelta(days=1)
)
@property
def settings_string(self):
return None
def config(self):
return {"resize" : True}
def to_dict(self):
return { "every_interval" : self.every.total_seconds(),
"score_interval" : self.score_length.total_seconds() }
def from_dict(self, data):
every_seconds = data["every_interval"]
self._every = datetime.timedelta(seconds = every_seconds)
score_seconds = data["score_interval"]
self._score = datetime.timedelta(seconds = score_seconds)
@property
def every(self):
"""Period at which to generate predictions."""
return self._every
@every.setter
def every(self, value):
self._every = value
@property
def score_length(self):
"""Length of time to score the prediction on."""
return self._score
@score_length.setter
def score_length(self, value):
self._score = value
@staticmethod
def _just_date(dt):
return datetime.datetime(year=dt.year, month=dt.month, day=dt.day)
def run(self):
"""Returns a list of pairs `(start_date, score_duration)`"""
_, _, _assess_start, _assess_end = self._model.time_range
logger = logging.getLogger(comparitor.COMPARATOR_LOGGER_NAME)
assess_start = self._just_date(_assess_start)
assess_end = self._just_date(_assess_end)
if assess_start != _assess_start or assess_end != _assess_end:
logger.warn(_text["whole_day_warning"])
if assess_start != _assess_start:
logger.warn(_text["wdw_round"].format(_assess_start, assess_start))
if assess_end != _assess_end:
logger.warn(_text["wdw_round"].format(_assess_end, assess_end))
out = []
start = assess_start
while True:
end = start + self.score_length
if end > assess_end:
break
out.append( (start, self.score_length) )
start += self.every
return out
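# Illustrative sketch (not part of the original class): with an assessment
# range of Jan 1 00:00 to Jan 5 00:00 and the defaults (every=1 day,
# score=1 day), run() returns four pairs:
#   [(Jan 1, 1 day), (Jan 2, 1 day), (Jan 3, 1 day), (Jan 4, 1 day)]
# Jan 5 is excluded because its one-day score window would end after the
# assessment range.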
class PredTypeView(tk.Frame):
def __init__(self, parent, model):
super().__init__(parent)
self._model = model
util.stretchy_rows_cols(self, [0], [0])
self._text = richtext.RichText(self, height=12, scroll="v")
self._text.grid(sticky=tk.NSEW, row=0, column=0)
self._text.add_text(_text["main"])
frame = ttk.Frame(parent)
frame.grid(row=1, column=0, sticky=tk.NSEW)
ttk.Label(frame, text=_text["every"]).grid(row=0, column=0, sticky=tk.E, pady=2)
self._every_var = tk.StringVar()
self._every = ttk.Entry(frame, width=5, textvariable=self._every_var)
self._every.grid(row=0, column=1, sticky=tk.W, pady=2)
util.IntValidator(self._every, self._every_var, self.change)
tooltips.ToolTipYellow(self._every, _text["everytt"])
ttk.Label(frame, text=_text["score"]).grid(row=1, column=0, sticky=tk.E, pady=2)
self._score_var = tk.StringVar()
self._score = ttk.Entry(frame, width=5, textvariable=self._score_var)
self._score.grid(row=1, column=1, sticky=tk.W, pady=2)
util.IntValidator(self._score, self._score_var, self.change)
tooltips.ToolTipYellow(self._score, _text["scorett"])
self._preview_frame = ttk.LabelFrame(frame, text=_text["pp"])
self._preview_frame.grid(row=2, column=0, columnspan=2, sticky=tk.NSEW, padx=5, pady=5)
self._preview_label = ttk.Label(self._preview_frame)
self._preview_label.grid(sticky=tk.NSEW)
self.update()
@staticmethod
def _add_time(text, start, length):
text.append( _text["pp1"].format(start.strftime(_text["dtfmt"]), length.days) )
def update(self):
self._every_var.set( int(self._model.every / datetime.timedelta(days=1)) )
self._score_var.set( int(self._model.score_length / datetime.timedelta(days=1)) )
preds = self._model.run()
text = []
for (start, length), _ in zip(preds, range(2)):
self._add_time(text, start, length)
if len(preds) > 3:
text.append("...")
if len(preds) > 2:
start, length = preds[-1]
self._add_time(text, start, length)
self._preview_label["text"] = "\n".join(text)
def change(self):
every = int(self._every_var.get())
if every > 0:
self._model.every = datetime.timedelta(days=every)
score = int(self._score_var.get())
if score > 0:
self._model.score_length = datetime.timedelta(days=score)
self.update()
|
plxaye/chromium
|
refs/heads/master
|
src/tools/metrics/histograms/update_extension_functions.py
|
57
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates ExtensionFunctions enum in histograms.xml file with values read from
extension_function_histogram_value.h.
If the file was pretty-printed, the updated version is pretty-printed too.
"""
import logging
import re
import sys
from xml.dom import minidom
from diffutil import PromptUserToAcceptDiff
from pretty_print import PrettyPrintNode
HISTOGRAMS_PATH = 'histograms.xml'
ENUM_NAME = 'ExtensionFunctions'
EXTENSION_FUNCTIONS_HISTOGRAM_VALUE_PATH = \
'../../../chrome/browser/extensions/extension_function_histogram_value.h'
ENUM_START_MARKER = "^enum HistogramValue {"
ENUM_END_MARKER = "^ENUM_BOUNDARY"
class UserError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
@property
def message(self):
return self.args[0]
def ExtractRegexGroup(line, regex):
m = re.match(regex, line)
if m:
return m.group(1)
else:
return None
def ReadHistogramValues(filename):
"""Returns a list of pairs (label, value) corresponding to HistogramValue.
    Reads the extension_function_histogram_value.h file, locates the
HistogramValue enum definition and returns a pair for each entry.
"""
# Read the file as a list of lines
with open(filename) as f:
content = f.readlines()
# Locate the enum definition and collect all entries in it
inside_enum = False # We haven't found the enum definition yet
result = []
for line in content:
line = line.strip()
if inside_enum:
            # Exit condition: we reached the last enum value
if re.match(ENUM_END_MARKER, line):
inside_enum = False
else:
# Inside enum: generate new xml entry
label = ExtractRegexGroup(line.strip(), "^([\w]+)")
if label:
result.append((label, enum_value))
enum_value += 1
else:
if re.match(ENUM_START_MARKER, line):
inside_enum = True
enum_value = 0 # Start at 'UNKNOWN'
return result
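# Illustrative sketch (not part of the original script): for a header
# containing
#
#   enum HistogramValue {
#     UNKNOWN = 0,
#     DELETE_DATABASE,
#     ENUM_BOUNDARY
#   };
#
# ReadHistogramValues returns [('UNKNOWN', 0), ('DELETE_DATABASE', 1)].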
def UpdateHistogramDefinitions(histogram_values, document):
"""Sets the children of <enum name="ExtensionFunctions" ...> node in
    |document| to values generated from the extension function entries in
    |histogram_values|.
Args:
histogram_values: A list of pairs (label, value) defining each extension
function
document: A minidom.Document object representing parsed histogram
definitions XML file.
"""
# Find ExtensionFunctions enum.
for enum_node in document.getElementsByTagName('enum'):
if enum_node.attributes['name'].value == ENUM_NAME:
extension_functions_enum_node = enum_node
break
else:
        raise UserError('No ExtensionFunctions enum node found')
# Remove existing values.
while extension_functions_enum_node.hasChildNodes():
extension_functions_enum_node.removeChild(
extension_functions_enum_node.lastChild)
# Add a "Generated from (...)" comment
comment = ' Generated from {0} '.format(
EXTENSION_FUNCTIONS_HISTOGRAM_VALUE_PATH)
extension_functions_enum_node.appendChild(document.createComment(comment))
    # Add values generated from the extension function histogram values.
for (label, value) in histogram_values:
node = document.createElement('int')
node.attributes['value'] = str(value)
node.attributes['label'] = label
extension_functions_enum_node.appendChild(node)
def Log(message):
logging.info(message)
def main():
if len(sys.argv) > 1:
print >>sys.stderr, 'No arguments expected!'
sys.stderr.write(__doc__)
sys.exit(1)
Log('Reading histogram enum definition from "%s".'
% (EXTENSION_FUNCTIONS_HISTOGRAM_VALUE_PATH))
histogram_values = ReadHistogramValues(
EXTENSION_FUNCTIONS_HISTOGRAM_VALUE_PATH)
Log('Reading existing histograms from "%s".' % (HISTOGRAMS_PATH))
with open(HISTOGRAMS_PATH, 'rb') as f:
histograms_doc = minidom.parse(f)
f.seek(0)
xml = f.read()
Log('Comparing histograms enum with new enum definition.')
UpdateHistogramDefinitions(histogram_values, histograms_doc)
Log('Writing out new histograms file.')
new_xml = PrettyPrintNode(histograms_doc)
if PromptUserToAcceptDiff(xml, new_xml, 'Is the updated version acceptable?'):
with open(HISTOGRAMS_PATH, 'wb') as f:
f.write(new_xml)
Log('Done.')
if __name__ == '__main__':
main()
|
Adnn/django
|
refs/heads/master
|
tests/timezones/tests.py
|
165
|
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import connection, connections
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
requires_pytz = skipIf(pytz is None, "this test requires pytz")
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# time zones, which don't have Daylight Saving Time, so we can represent them
# easily with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipIfDBFeature('supports_timezones')
    def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@requires_pytz
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField Event.dt "
"received a naive datetime"))
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
@skipUnlessDBFeature('supports_timezones')
def test_cursor_execute_accepts_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_timezones')
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, timezone.utc)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [utc_naive_dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_timezones')
def test_cursor_execute_returns_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
@skipIfDBFeature('supports_timezones')
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, timezone.utc)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [utc_naive_dt])
self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt)
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class ForcedTimeZoneDatabaseTests(TransactionTestCase):
"""
Test the TIME_ZONE database configuration parameter.
Since this involves reading and writing to the same database through two
connections, this is a TransactionTestCase.
"""
available_apps = ['timezones']
@classmethod
def setUpClass(cls):
# @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The
# outermost takes precedence. Handle skipping manually instead.
if connection.features.supports_timezones:
raise SkipTest("Database has feature(s) supports_timezones")
if not connection.features.test_db_allows_multiple_connections:
raise SkipTest("Database doesn't support feature(s): test_db_allows_multiple_connections")
super(ForcedTimeZoneDatabaseTests, cls).setUpClass()
connections.databases['tz'] = connections.databases['default'].copy()
connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'
@classmethod
def tearDownClass(cls):
connections['tz'].close()
del connections['tz']
del connections.databases['tz']
super(ForcedTimeZoneDatabaseTests, cls).tearDownClass()
def test_read_datetime(self):
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
Event.objects.create(dt=fake_dt)
event = Event.objects.using('tz').get()
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, dt)
def test_write_datetime(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.using('tz').create(dt=dt)
event = Event.objects.get()
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, fake_dt)
@skipUnlessDBFeature('supports_timezones')
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class UnsupportedTimeZoneDatabaseTests(TestCase):
def test_time_zone_parameter_not_supported_if_database_supports_timezone(self):
connections.databases['tz'] = connections.databases['default'].copy()
connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'
tz_conn = connections['tz']
try:
with self.assertRaises(ImproperlyConfigured):
tz_conn.cursor()
finally:
connections['tz'].close() # in case the test fails
del connections['tz']
del connections.databases['tz']
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(SimpleTestCase):
# Backend-specific notes:
# - JSON supports only milliseconds, microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
# but when it loads this representation, it subtracts the offset and
# returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
# Tests are adapted to take these quirks into account.
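# Illustrative sketch of the PyYAML quirk noted above (not part of the test
# suite; assumes PyYAML's default timestamp resolution):
#   >>> import yaml
#   >>> yaml.safe_load("2011-09-01 17:20:30+07:00")
#   datetime.datetime(2011, 9, 1, 10, 20, 30)  # offset subtracted, naive UTC
# This is why the YAML assertions below compare obj.dt.replace(tzinfo=UTC).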
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]['fields']['dt'], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName('field')[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
six.assertRegex(self, yaml,
r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@requires_pytz
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({'dt': None, 'tz': ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({'dt': 'not a date', 'tz': ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
@requires_pytz
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@requires_pytz
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.template.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
context = Context()
self.assertEqual(tpl.render(context), "")
request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
def test_localtime_with_time_zone_setting_set_to_none(self):
# Regression for #17274
tpl = Template("{% load tz %}{{ dt }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
with self.settings(TIME_ZONE=None):
# the actual value depends on the system time zone of the host
self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
@requires_pytz
def test_form_with_non_existent_time(self):
form = EventForm({'dt': '2011-03-27 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
@requires_pytz
def test_form_with_ambiguous_time(self):
form = EventForm({'dt': '2011-10-30 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
@requires_tz_support
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_other_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30'})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_explicit_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
# Datetime input formats don't allow providing a time zone.
self.assertFalse(form.is_valid())
@requires_pytz
def test_form_with_non_existent_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-03-27 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-03-27 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_pytz
def test_form_with_ambiguous_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-10-30 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-10-30 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
@classmethod
def setUpTestData(cls):
# password = "secret"
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
is_superuser=True, username='super', first_name='Super', last_name='User',
email='super@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
)
def setUp(self):
self.client.login(username='super', password='secret')
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(ICT).isoformat())
|
valentin-krasontovitsch/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/aws_ses_identity.py
|
27
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_ses_identity
short_description: Manages SES email and domain identity
description:
- This module allows the user to manage verified email and domain identity for SES.
- This covers verifying and removing identities as well as setting up complaint, bounce
and delivery notification settings.
version_added: "2.5"
author: Ed Costello (@orthanc)
options:
identity:
description:
- This is the email address or domain to verify / delete.
- If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain.
required: true
state:
description: Whether to create (or update) or delete the identity.
default: present
choices: [ 'present', 'absent' ]
bounce_notifications:
description:
- Set up the SNS topic used to report bounce notifications.
- If omitted, bounce notifications will not be delivered to an SNS topic.
- If bounce notifications are not delivered to an SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to an SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
complaint_notifications:
description:
- Set up the SNS topic used to report complaint notifications.
- If omitted, complaint notifications will not be delivered to an SNS topic.
- If complaint notifications are not delivered to an SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to an SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
delivery_notifications:
description:
- Set up the SNS topic used to report delivery notifications.
- If omitted, delivery notifications will not be delivered to an SNS topic.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to an SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
feedback_forwarding:
description:
- Whether or not to enable feedback forwarding.
- This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
type: 'bool'
default: True
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Ensure example@example.com email identity exists
aws_ses_identity:
identity: example@example.com
state: present
- name: Delete example@example.com email identity
aws_ses_identity:
identity: example@example.com
state: absent
- name: Ensure example.com domain identity exists
aws_ses_identity:
identity: example.com
state: present
# Create an SNS topic and send bounce and complaint notifications to it
# instead of emailing the identity owner
- name: Ensure complaints-topic exists
sns_topic:
name: "complaints-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Deliver feedback to topic instead of owner email
aws_ses_identity:
identity: example@example.com
state: present
complaint_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: True
bounce_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: False
feedback_forwarding: False
# Create an SNS topic for delivery notifications and leave complaints
# being forwarded to the identity owner's email
- name: Ensure delivery-notifications-topic exists
sns_topic:
name: "delivery-notifications-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Deliver notifications to topic
aws_ses_identity:
identity: example@example.com
state: present
delivery_notifications:
topic: "{{ topic_info.sns_arn }}"
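# Hedged sketch (follows the option documentation above): omitting the
# notification options reverts the identity to default feedback handling,
# with no SNS topics configured.
- name: Revert example@example.com to default notification handling
  aws_ses_identity:
    identity: example@example.com
    state: present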
'''
RETURN = '''
identity:
description: The identity being modified.
returned: success
type: str
sample: example@example.com
identity_arn:
description: The ARN of the identity being modified.
returned: success
type: str
sample: arn:aws:ses:us-east-1:12345678:identity/example@example.com
verification_attributes:
description: The verification information for the identity.
returned: success
type: complex
sample: {
"verification_status": "Pending",
"verification_token": "...."
}
contains:
verification_status:
description: The verification status of the identity.
type: str
sample: "Pending"
verification_token:
description: The verification token for a domain identity.
type: str
notification_attributes:
description: The notification setup for the identity.
returned: success
type: complex
sample: {
"bounce_topic": "arn:aws:sns:....",
"complaint_topic": "arn:aws:sns:....",
"delivery_topic": "arn:aws:sns:....",
"forwarding_enabled": false,
"headers_in_bounce_notifications_enabled": true,
"headers_in_complaint_notifications_enabled": true,
"headers_in_delivery_notifications_enabled": true
}
contains:
bounce_topic:
description:
- The ARN of the topic bounce notifications are delivered to.
- Omitted if bounce notifications are not delivered to a topic.
type: str
complaint_topic:
description:
- The ARN of the topic complaint notifications are delivered to.
- Omitted if complaint notifications are not delivered to a topic.
type: str
delivery_topic:
description:
- The ARN of the topic delivery notifications are delivered to.
- Omitted if delivery notifications are not delivered to a topic.
type: str
forwarding_enabled:
description: Whether or not feedback forwarding is enabled.
type: bool
headers_in_bounce_notifications_enabled:
description: Whether or not headers are included in messages delivered to the bounce topic.
type: bool
headers_in_complaint_notifications_enabled:
description: Whether or not headers are included in messages delivered to the complaint topic.
type: bool
headers_in_delivery_notifications_enabled:
description: Whether or not headers are included in messages delivered to the delivery topic.
type: bool
'''
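# Usage sketch (hypothetical, not part of this module): for a domain identity,
# SES expects the returned verification_token as a TXT record at
# _amazonses.<domain>, so a playbook could register the module result and feed
# result.verification_attributes.verification_token into a route53 task.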
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
import time
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
# Unpredictably get_identity_verification_attributes doesn't include the identity even when we've
# just registered it. Suspect this is an eventual consistency issue on AWS side.
# Don't want this complexity exposed to users of the module, as they'd have to retry to ensure
# a consistent return from the module.
# To avoid this we have an internal retry that we use only after registering the identity.
for attempt in range(0, retries + 1):
try:
response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
identity_verification = response['VerificationAttributes']
if identity in identity_verification:
break
time.sleep(retryDelay)
if identity not in identity_verification:
return None
return identity_verification[identity]
def get_identity_notifications(connection, module, identity, retries=0, retryDelay=10):
# Unpredictably get_identity_notifications doesn't include the notifications when we've
# just registered the identity.
# Don't want this complexity exposed to users of the module, as they'd have to retry to ensure
# a consistent return from the module.
# To avoid this we have an internal retry that we use only when getting the current notification
# status for return.
for attempt in range(0, retries + 1):
try:
response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
notification_attributes = response['NotificationAttributes']
# No clear AWS docs on when this happens, but it appears sometimes identities are not included
# in the notification attributes when the identity is first registered. Suspect that this is caused by
# eventual consistency within the AWS services. It's been observed in builds so we need to handle it.
#
# When this occurs, just return None and we'll assume no identity notification settings have been changed
# from the default which is reasonable if this is just eventual consistency on creation.
# See: https://github.com/ansible/ansible/issues/36065
if identity in notification_attributes:
break
else:
# Paranoia check for coding errors: we only requested one identity, so if we get a different one
# something has gone very wrong.
if len(notification_attributes) != 0:
module.fail_json(
msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
identity,
notification_attributes.keys(),
)
)
time.sleep(retryDelay)
if identity not in notification_attributes:
return None
return notification_attributes[identity]
def desired_topic(module, notification_type):
arg_dict = module.params.get(notification_type.lower() + '_notifications')
if arg_dict:
return arg_dict.get('topic', None)
else:
return None
def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
topic_key = notification_type + 'Topic'
if identity_notifications is None:
# If there is no notification configuration, no notifications can be sent to topics;
# hence assume None as the current state.
current = None
elif topic_key in identity_notifications:
current = identity_notifications[topic_key]
else:
# If there is information on the notifications setup but no information on the
# particular notification topic, it's pretty safe to assume there's no topic for
# this notification. AWS API docs suggest this information will always be
# included, but it's best to be defensive.
current = None
required = desired_topic(module, notification_type)
if current != required:
try:
if not module.check_mode:
connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
identity=identity,
notification_type=notification_type,
))
return True
return False
def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
arg_dict = module.params.get(notification_type.lower() + '_notifications')
header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
if identity_notifications is None:
# If there is no configuration for topic notifications, headers cannot be
# included in them, hence assume false.
current = False
elif header_key in identity_notifications:
current = identity_notifications[header_key]
else:
# The AWS API docs indicate that the 'HeadersIn...' fields are optional. Unfortunately
# it's not clear what this means, but it's a pretty safe assumption that absence means
# headers are not included, since most API consumers would interpret absence as false.
current = False
if arg_dict is not None and 'include_headers' in arg_dict:
required = arg_dict['include_headers']
else:
required = False
if current != required:
try:
if not module.check_mode:
connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
identity=identity,
notification_type=notification_type,
))
return True
return False
def update_feedback_forwarding(connection, module, identity, identity_notifications):
if identity_notifications is None:
# AWS requires feedback forwarding to be enabled unless bounces and complaints
# are being handled by SNS topics. So in the absence of identity_notifications
# information, feedback forwarding must currently be on.
current = True
elif 'ForwardingEnabled' in identity_notifications:
current = identity_notifications['ForwardingEnabled']
else:
# If there is information on the notifications setup but no information on the
# forwarding state, it's pretty safe to assume forwarding is off. AWS API docs
# suggest this information will always be included, but it's best to be defensive.
current = False
required = module.params.get('feedback_forwarding')
if current != required:
try:
if not module.check_mode:
connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity))
return True
return False
def create_mock_notifications_response(module):
resp = {
"ForwardingEnabled": module.params.get('feedback_forwarding'),
}
for notification_type in ('Bounce', 'Complaint', 'Delivery'):
arg_dict = module.params.get(notification_type.lower() + '_notifications')
if arg_dict is not None and 'topic' in arg_dict:
resp[notification_type + 'Topic'] = arg_dict['topic']
header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
if arg_dict is not None and 'include_headers' in arg_dict:
resp[header_key] = arg_dict['include_headers']
else:
resp[header_key] = False
return resp
def update_identity_notifications(connection, module):
identity = module.params.get('identity')
changed = False
identity_notifications = get_identity_notifications(connection, module, identity)
for notification_type in ('Bounce', 'Complaint', 'Delivery'):
changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
if changed or identity_notifications is None:
if module.check_mode:
identity_notifications = create_mock_notifications_response(module)
else:
identity_notifications = get_identity_notifications(connection, module, identity, retries=4)
return changed, identity_notifications
def validate_params_for_identity_present(module):
if module.params.get('feedback_forwarding') is False:
if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')):
module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
"feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics")
def create_or_update_identity(connection, module, region, account_id):
identity = module.params.get('identity')
changed = False
verification_attributes = get_verification_attributes(connection, module, identity)
if verification_attributes is None:
try:
if not module.check_mode:
if '@' in identity:
connection.verify_email_identity(EmailAddress=identity, aws_retry=True)
else:
connection.verify_domain_identity(Domain=identity, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity))
if module.check_mode:
verification_attributes = {
"VerificationStatus": "Pending",
}
else:
verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
changed = True
elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
verification_attributes=camel_dict_to_snake_dict(verification_attributes))
if verification_attributes is None:
module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
notifications_changed, notification_attributes = update_identity_notifications(connection, module)
changed |= notifications_changed
if notification_attributes is None:
module.fail_json(msg='Unable to load identity notification attributes.')
identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
module.exit_json(
changed=changed,
identity=identity,
identity_arn=identity_arn,
verification_attributes=camel_dict_to_snake_dict(verification_attributes),
notification_attributes=camel_dict_to_snake_dict(notification_attributes),
)
def destroy_identity(connection, module):
identity = module.params.get('identity')
changed = False
verification_attributes = get_verification_attributes(connection, module, identity)
if verification_attributes is not None:
try:
if not module.check_mode:
connection.delete_identity(Identity=identity, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity))
changed = True
module.exit_json(
changed=changed,
identity=identity,
)
def get_account_id(module):
sts = module.client('sts')
try:
caller_identity = sts.get_caller_identity()
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve caller identity')
return caller_identity['Account']
def main():
module = AnsibleAWSModule(
argument_spec={
"identity": dict(required=True, type='str'),
"state": dict(default='present', choices=['present', 'absent']),
"bounce_notifications": dict(type='dict'),
"complaint_notifications": dict(type='dict'),
"delivery_notifications": dict(type='dict'),
"feedback_forwarding": dict(default=True, type='bool'),
},
supports_check_mode=True,
)
for notification_type in ('bounce', 'complaint', 'delivery'):
param_name = notification_type + '_notifications'
arg_dict = module.params.get(param_name)
if arg_dict:
extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
if extra_keys:
module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + '; valid keys are topic or include_headers')
# SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
# Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
# the Ansible build runs multiple instances of the tests in parallel, which has caused throttling
# failures, so apply a jittered backoff to SES calls.
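# Tuning sketch (an assumption; verify the supported kwargs against AWSRetry
# in module_utils): the backoff can be parameterised, e.g.
# AWSRetry.jittered_backoff(retries=10, delay=3).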
connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
state = module.params.get("state")
if state == 'present':
region = module.params.get('region')
account_id = get_account_id(module)
validate_params_for_identity_present(module)
create_or_update_identity(connection, module, region, account_id)
else:
destroy_identity(connection, module)
if __name__ == '__main__':
main()
|
dvitme/odoomrp-wip
|
refs/heads/8.0
|
stock_picking_wave_management/models/stock_pack.py
|
27
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields
class StockPackOperation(models.Model):
_inherit = 'stock.pack.operation'
wave = fields.Many2one('stock.picking.wave', related='picking_id.wave_id',
string='Picking Wave', store=True)
|
ArthurMoore85/rb5-slack-bot
|
refs/heads/master
|
bot/beanstalk/__init__.py
|
12133432
| |
debugger22/sympy
|
refs/heads/master
|
sympy/physics/vector/tests/__init__.py
|
12133432
| |
MobinRanjbar/hue
|
refs/heads/master
|
desktop/core/ext-py/django-nose-1.3/testapp/__init__.py
|
12133432
| |
cosmiclattes/TPBviz
|
refs/heads/master
|
torrent/lib/python2.7/site-packages/django/contrib/gis/gdal/prototypes/__init__.py
|
12133432
| |
riptano/brisk
|
refs/heads/master
|
tools/cookbook/brisk/files/default/tokentool.py
|
4
|
#! /usr/bin/python
import sys, random, time
MAXRANGE = (2**127)
DEBUG = False
# MAXRANGE = 100
# DEBUG = True
originalTokens = {}
dcOffsets = {}
zoom = 2
def readInt(number, exitOnFail=False):
returnVal = None
try:
returnVal = int(number)
except:
print "Please input a valid number."
if exitOnFail:
sys.exit(1)
return returnVal
def splitRing(dc, numOfNodes):
global originalTokens
originalTokens[dc] = {}
for i in range(0, numOfNodes):
token = (i * MAXRANGE / numOfNodes) % MAXRANGE
originalTokens[dc][i] = token
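# Worked example (illustrative): splitRing(0, 4) spaces tokens evenly around
# the ring, giving nodes 0..3 the tokens 0, MAXRANGE/4, MAXRANGE/2 and
# 3*MAXRANGE/4 respectively.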
def findClosestMinAndMax(thisToken, thisRange):
# Double the range for wrap-around tokens
doubleRange = [token + MAXRANGE for token in thisRange]
thisRange = thisRange + doubleRange
minimum = 0
maximum = 0
for token in thisRange:
if token < thisToken:
minimum = token
if token >= thisToken:
maximum = token
break
if not maximum:
maximum = MAXRANGE
return [minimum, thisToken, maximum]
def parseOptions():
global originalTokens, dcOffsets
# Parse Input
if (len(sys.argv) == 1):
print "Command line usage:"
print " tools/tokentool <# of nodes in DC1> [<# of nodes in DC2> <# of nodes in DC3> ...]"
print
sys.exit(0)
elif (len(sys.argv) == 4):
print "Sorry, more than 2 DC's are not yet supported."
print
sys.exit(0)
else:
# Gather the number of datacenters
datacenters = readInt(len(sys.argv) - 1)
# Gather the number of nodes in each datacenter
sizeSet = []
for i in range(0, datacenters):
sizeSet.append(readInt(sys.argv[i + 1], True))
return sizeSet
def initialRingSplit(sizeSet):
# Calculate the initial tokens for each datacenter
for i, node in enumerate(sizeSet):
splitRing(i, node)
# Find the initial DC offsets based solely on the first node
dcs = originalTokens.keys()
dcs.pop(0)
dcOffsets[0] = 0
for dc in dcs:
if len(originalTokens[dc - 1]) > 1:
dcOffsets[dc] = ((originalTokens[dc - 1][1] + dcOffsets[dc - 1]) - (originalTokens[dc - 1][0] + dcOffsets[dc - 1])) / 2
else:
dcOffsets[dc] = MAXRANGE / 2
def noDuplicateTokens(offsetdc=-1, offset=0):
allkeys = []
for dc in originalTokens.keys():
for node in originalTokens[dc].keys():
if dc == offsetdc:
allkeys.append(originalTokens[dc][node] + offset)
else:
allkeys.append(originalTokens[dc][node])
if len(allkeys) == len(set(allkeys)):
return True
else:
return False
def sweepAndFind(dc, otherdc, iteration):
global zoom, dcOffsets
theseTokens = originalTokens[dc].values()
otherTokens = originalTokens[otherdc].values()
fuzzyRange = MAXRANGE / max(len(theseTokens), len(otherTokens))
zoom *= 2
steps = fuzzyRange / zoom
if steps < 1:
return True
if DEBUG:
print "fuzzyRange", fuzzyRange
print "zoom", zoom
print "steps", steps
print
# Starting from the current spot,
# try to gain focus by spinning the ring
# by the current fuzzy interval
currentStep = -(steps * 2)
closestToFocus = MAXRANGE
frozenDCOffset = dcOffsets[dc]
if not iteration:
searchRange = fuzzyRange / (2 ** iteration)
else:
searchRange = steps * 2
while currentStep <= searchRange:
currentFocus = 0
for thisToken in theseTokens:
if False:
print "thisToken", thisToken
print "currentStep", currentStep
print "frozenDCOffset", frozenDCOffset
thisToken = (thisToken + currentStep + frozenDCOffset) % (2 * MAXRANGE)
if False:
print "thisToken", thisToken
minThisMax = findClosestMinAndMax(thisToken, otherTokens)
minimum = minThisMax[0]
maximum = minThisMax[2]
if minimum < maximum:
thisTokenOffset = (maximum - minimum) / 2 + minimum - thisToken
else:
thisTokenOffset = (maximum + MAXRANGE - minimum) / 2 + minimum - thisToken
if DEBUG: print minThisMax, thisTokenOffset
currentFocus += thisTokenOffset
if abs(currentFocus) < closestToFocus:
if DEBUG:
print "dcOffsets[dc]", dcOffsets[dc]
print "currentStep", currentStep
if noDuplicateTokens(dc, currentStep + frozenDCOffset):
dcOffsets[dc] = currentStep + frozenDCOffset
closestToFocus = abs(currentFocus)
currentStep += steps
if DEBUG:
print "currentFocus", currentFocus
print "closestToFocus", closestToFocus
print
if DEBUG:
print "closestToFocus", closestToFocus
def focus():
global originalTokens, dcOffsets, zoom
iteration = 0
doneZooming = False
if len(originalTokens) == 1:
doneZooming = True
while not doneZooming:
# TODO: Confirm no token conflicts
# Loop over all dcs
dcs = originalTokens.keys()
dcs.reverse()
for dc in dcs:
# Allow the first dc to stay in its initial spot
if dc == 0:
continue
for otherdc in dcs:
# Don't compare the dc to itself
if otherdc == dc:
continue
doneZooming = sweepAndFind(dc, otherdc, iteration)
iteration += 1
if DEBUG:
time.sleep(1)
print "dcOffsets", dcOffsets
print '-------'
def calculateTokens():
for dc in originalTokens.keys():
sortedTokens = []
for node in originalTokens[dc].keys():
sortedTokens.append((originalTokens[dc][node] + dcOffsets[dc]) % MAXRANGE)
sortedTokens.sort()
for node in originalTokens[dc].keys():
originalTokens[dc][node] = sortedTokens[node]
return originalTokens
def printResults():
# Calculate the shifted tokens
calculateTokens()
# Print
for dc in originalTokens.keys():
print "DC%d:" % (dc + 1)
for i, token in enumerate(originalTokens[dc].values()):
print "Node %d: %d" % (i, token)
print
def run():
global originalTokens, dcOffsets
initialRingSplit(parseOptions())
focus()
printResults()
if __name__ == '__main__':
run()
|
altsen/diandiyun-platform
|
refs/heads/master
|
lms/djangoapps/wechat/management/commands/tests/test_dump_course.py
|
16
|
"""Tests for Django management commands"""
import json
import shutil
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp
from path import path
from django.core.management import call_command
from django.test.utils import override_settings
from django.test.testcases import TestCase
from courseware.tests.modulestore_config import TEST_DATA_XML_MODULESTORE
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from courseware.tests.modulestore_config import TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_from_xml
DATA_DIR = 'common/test/data/'
TEST_COURSE_ID = 'edX/simple/2012_Fall'
class CommandsTestBase(TestCase):
"""
Base class for testing different Django commands.
Must be subclassed using override_settings set to the modulestore
to be tested.
"""
def setUp(self):
self.loaded_courses = self.load_courses()
def load_courses(self):
"""Load test courses and return list of ids"""
store = modulestore()
courses = store.get_courses()
if TEST_COURSE_ID not in [c.id for c in courses]:
import_from_xml(store, DATA_DIR, ['toy', 'simple'])
return [course.id for course in store.get_courses()]
def call_command(self, name, *args, **kwargs):
"""Call management command and return output"""
out = StringIO() # To capture the output of the command
call_command(name, *args, stdout=out, **kwargs)
out.seek(0)
return out.read()
def test_dump_course_ids(self):
kwargs = {'modulestore': 'default'}
output = self.call_command('dump_course_ids', **kwargs)
dumped_courses = output.strip().split('\n')
self.assertEqual(self.loaded_courses, dumped_courses)
def test_dump_course_structure(self):
args = [TEST_COURSE_ID]
kwargs = {'modulestore': 'default'}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have metadata,
# but not inherited metadata:
for element_name in dump:
element = dump[element_name]
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertNotIn('inherited_metadata', element)
# Check a few elements in the course dump
parent_id = 'i4x://edX/simple/chapter/Overview'
self.assertEqual(dump[parent_id]['category'], 'chapter')
self.assertEqual(len(dump[parent_id]['children']), 3)
child_id = dump[parent_id]['children'][1]
self.assertEqual(dump[child_id]['category'], 'videosequence')
self.assertEqual(len(dump[child_id]['children']), 2)
video_id = 'i4x://edX/simple/video/Welcome'
self.assertEqual(dump[video_id]['category'], 'video')
self.assertEqual(len(dump[video_id]['metadata']), 4)
self.assertIn('youtube_id_1_0', dump[video_id]['metadata'])
# Check that the dump has the right number of elements
self.assertEqual(len(dump), 16)
def test_dump_inherited_course_structure(self):
args = [TEST_COURSE_ID]
kwargs = {'modulestore': 'default', 'inherited': True}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have inherited metadata,
# and that it contains a particular value as well:
for element_name in dump:
element = dump[element_name]
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertIn('inherited_metadata', element)
self.assertIsNone(element['inherited_metadata']['ispublic'])
# ... but does not contain inherited metadata containing a default value:
self.assertNotIn('due', element['inherited_metadata'])
def test_dump_inherited_course_structure_with_defaults(self):
args = [TEST_COURSE_ID]
kwargs = {'modulestore': 'default', 'inherited': True, 'inherited_defaults': True}
output = self.call_command('dump_course_structure', *args, **kwargs)
dump = json.loads(output)
# check that all elements in the course structure have inherited metadata,
# and that it contains a particular value as well:
for element_name in dump:
element = dump[element_name]
self.assertIn('metadata', element)
self.assertIn('children', element)
self.assertIn('category', element)
self.assertIn('inherited_metadata', element)
self.assertIsNone(element['inherited_metadata']['ispublic'])
# ... and contains inherited metadata containing a default value:
self.assertIsNone(element['inherited_metadata']['due'])
def test_export_course(self):
tmp_dir = path(mkdtemp())
filename = tmp_dir / 'test.tar.gz'
try:
self.run_export_course(filename)
with tarfile.open(filename) as tar_file:
self.check_export_file(tar_file)
finally:
shutil.rmtree(tmp_dir)
def test_export_course_stdout(self):
output = self.run_export_course('-')
with tarfile.open(fileobj=StringIO(output)) as tar_file:
self.check_export_file(tar_file)
def run_export_course(self, filename): # pylint: disable=missing-docstring
args = ['edX/simple/2012_Fall', filename]
kwargs = {'modulestore': 'default'}
return self.call_command('export_course', *args, **kwargs)
def check_export_file(self, tar_file): # pylint: disable=missing-docstring
names = tar_file.getnames()
# Check if some of the files are present.
# The rest of the code should be covered by the tests for
# xmodule.modulestore.xml_exporter, used by the dump_course command
assert_in = self.assertIn
assert_in('edX-simple-2012_Fall', names)
assert_in('edX-simple-2012_Fall/policies/2012_Fall/policy.json', names)
assert_in('edX-simple-2012_Fall/html/toylab.html', names)
assert_in('edX-simple-2012_Fall/videosequence/A_simple_sequence.xml', names)
assert_in('edX-simple-2012_Fall/sequential/Lecture_2.xml', names)
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
class CommandsXMLTestCase(CommandsTestBase, ModuleStoreTestCase):
"""
Test case for management commands using the xml modulestore.
"""
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class CommandsMongoTestCase(CommandsTestBase, ModuleStoreTestCase):
"""
Test case for management commands using the mongo modulestore.
"""
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CommandsMixedTestCase(CommandsTestBase, ModuleStoreTestCase):
"""
Test case for management commands. Using the mixed modulestore.
"""
|
Franky666/programmiersprachen-raytracer
|
refs/heads/master
|
external/boost_1_59_0/libs/python/pyste/install/pyste.py
|
13
|
#!/usr/bin/env python
# Copyright Bruno da Silva de Oliveira 2006. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from Pyste import pyste
pyste.main()
|
appsembler/edx-platform
|
refs/heads/appsembler/tahoe/master
|
common/lib/xmodule/setup.py
|
5
|
from setuptools import find_packages, setup
XMODULES = [
"book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"chapter = xmodule.seq_module:SectionDescriptor",
"conditional = xmodule.conditional_module:ConditionalDescriptor",
"course = xmodule.course_module:CourseDescriptor",
"customtag = xmodule.template_module:CustomTagDescriptor",
"discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"html = xmodule.html_module:HtmlDescriptor",
"image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"library_content = xmodule.library_content_module:LibraryContentDescriptor",
"error = xmodule.error_module:ErrorDescriptor",
"poll_question = xmodule.poll_module:PollDescriptor",
"problem = xmodule.capa_module:CapaDescriptor",
"problemset = xmodule.seq_module:SequenceDescriptor",
"randomize = xmodule.randomize_module:RandomizeDescriptor",
"split_test = xmodule.split_test_module:SplitTestDescriptor",
"section = xmodule.backcompat_module:SemanticSectionDescriptor",
"sequential = xmodule.seq_module:SequenceDescriptor",
"slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"video = xmodule.video_module:VideoDescriptor",
"videoalpha = xmodule.video_module:VideoDescriptor",
"videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"videosequence = xmodule.seq_module:SequenceDescriptor",
"course_info = xmodule.html_module:CourseInfoDescriptor",
"static_tab = xmodule.html_module:StaticTabDescriptor",
"custom_tag_template = xmodule.raw_module:RawDescriptor",
"about = xmodule.html_module:AboutDescriptor",
"annotatable = xmodule.annotatable_module:AnnotatableDescriptor",
"textannotation = xmodule.textannotation_module:TextAnnotationDescriptor",
"videoannotation = xmodule.videoannotation_module:VideoAnnotationDescriptor",
"imageannotation = xmodule.imageannotation_module:ImageAnnotationDescriptor",
"word_cloud = xmodule.word_cloud_module:WordCloudDescriptor",
"hidden = xmodule.hidden_module:HiddenDescriptor",
"raw = xmodule.raw_module:RawDescriptor",
"lti = xmodule.lti_module:LTIDescriptor",
]
XBLOCKS = [
"library = xmodule.library_root_xblock:LibraryRoot",
"vertical = xmodule.vertical_block:VerticalBlock",
"wrapper = xmodule.wrapper_module:WrapperBlock",
]
XBLOCKS_ASIDES = [
'tagging_aside = cms.lib.xblock.tagging:StructuredTagsAside',
]
setup(
name="XModule",
version="0.1.1",
packages=find_packages(exclude=["tests"]),
install_requires=[
'setuptools',
'docopt',
'capa',
'path.py',
'webob',
'edx-opaque-keys>=0.4.0,<1.0.0',
],
package_data={
'xmodule': ['js/module/*'],
},
# See http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins
# for a description of entry_points
entry_points={
'xblock.v1': XMODULES + XBLOCKS,
'xmodule.v1': XMODULES,
'xblock_asides.v1': XBLOCKS_ASIDES,
'console_scripts': [
'xmodule_assets = xmodule.static_content:main',
],
},
)
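# Illustrative only (not part of the original setup.py): the 'xblock.v1'
# entry points declared above are what consumers discover at runtime, e.g.
# via pkg_resources:
#
#   import pkg_resources
#   for entry_point in pkg_resources.iter_entry_points('xblock.v1'):
#       block_class = entry_point.load()  # e.g. HtmlDescriptor for 'html'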
|
chennqqi/splayer
|
refs/heads/master
|
Thirdparty/jsoncpp/scons-tools/srcdist.py
|
264
|
import os
import os.path
from fnmatch import fnmatch
import targz
##def DoxyfileParse(file_contents):
## """
## Parse a Doxygen source file and return a dictionary of all the values.
## Values will be strings and lists of strings.
## """
## data = {}
##
## import shlex
## lex = shlex.shlex(instream = file_contents, posix = True)
## lex.wordchars += "*+./-:"
## lex.whitespace = lex.whitespace.replace("\n", "")
## lex.escape = ""
##
## lineno = lex.lineno
## last_backslash_lineno = lineno
## token = lex.get_token()
## key = token # the first token should be a key
## last_token = ""
## key_token = False
## next_key = False
## new_data = True
##
## def append_data(data, key, new_data, token):
## if new_data or len(data[key]) == 0:
## data[key].append(token)
## else:
## data[key][-1] += token
##
## while token:
## if token in ['\n']:
## if last_token not in ['\\']:
## key_token = True
## elif token in ['\\']:
## pass
## elif key_token:
## key = token
## key_token = False
## else:
## if token == "+=":
## if not data.has_key(key):
## data[key] = list()
## elif token == "=":
## data[key] = list()
## else:
## append_data( data, key, new_data, token )
## new_data = True
##
## last_token = token
## token = lex.get_token()
##
## if last_token == '\\' and token != '\n':
## new_data = False
## append_data( data, key, new_data, '\\' )
##
## # compress lists of len 1 into single strings
## for (k, v) in data.items():
## if len(v) == 0:
## data.pop(k)
##
## # items in the following list will be kept as lists and not converted to strings
## if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
## continue
##
## if len(v) == 1:
## data[k] = v[0]
##
## return data
##
##def DoxySourceScan(node, env, path):
## """
## Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
## any files used to generate docs to the list of source files.
## """
## default_file_patterns = [
## '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
## '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
## '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
## '*.py',
## ]
##
## default_exclude_patterns = [
## '*~',
## ]
##
## sources = []
##
## data = DoxyfileParse(node.get_contents())
##
## if data.get("RECURSIVE", "NO") == "YES":
## recursive = True
## else:
## recursive = False
##
## file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
## exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
##
## for node in data.get("INPUT", []):
## if os.path.isfile(node):
## sources.add(node)
## elif os.path.isdir(node):
## if recursive:
## for root, dirs, files in os.walk(node):
## for f in files:
## filename = os.path.join(root, f)
##
## pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
## exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
##
## if pattern_check and not exclude_check:
## sources.append(filename)
## else:
## for pattern in file_patterns:
## sources.extend(glob.glob("/".join([node, pattern])))
## sources = map( lambda path: env.File(path), sources )
## return sources
##
##
##def DoxySourceScanCheck(node, env):
## """Check if we should scan this file"""
## return os.path.isfile(node.path)
def srcDistEmitter(source, target, env):
## """Doxygen Doxyfile emitter"""
## # possible output formats and their default values and output locations
## output_formats = {
## "HTML": ("YES", "html"),
## "LATEX": ("YES", "latex"),
## "RTF": ("NO", "rtf"),
## "MAN": ("YES", "man"),
## "XML": ("NO", "xml"),
## }
##
## data = DoxyfileParse(source[0].get_contents())
##
## targets = []
## out_dir = data.get("OUTPUT_DIRECTORY", ".")
##
## # add our output locations
## for (k, v) in output_formats.items():
## if data.get("GENERATE_" + k, v[0]) == "YES":
## targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
##
## # don't clobber targets
## for node in targets:
## env.Precious(node)
##
## # set up cleaning stuff
## for node in targets:
## env.Clean(node, node)
##
## return (targets, source)
    return (target, source)
def generate(env):
"""
Add builders and construction variables for the
SrcDist tool.
"""
## doxyfile_scanner = env.Scanner(
## DoxySourceScan,
## "DoxySourceScan",
## scan_check = DoxySourceScanCheck,
## )
if targz.exists(env):
srcdist_builder = targz.makeBuilder( srcDistEmitter )
env['BUILDERS']['SrcDist'] = srcdist_builder
def exists(env):
"""
Make sure srcdist exists.
"""
return targz.exists(env)
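# Typical SConstruct usage (illustrative; tool and file names are assumptions):
#
#   env = Environment(tools=['default', 'targz', 'srcdist'],
#                     toolpath=['scons-tools'])
#   env.SrcDist('dist/jsoncpp-src.tar.gz', source_files)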
|
goliveirab/odoo
|
refs/heads/8.0
|
openerp/tools/yaml_tag.py
|
337
|
import yaml
import logging
class YamlTag(object):
"""
Superclass for constructors of custom tags defined in yaml file.
__str__ is overriden in subclass and used for serialization in module recorder.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __getitem__(self, key):
return getattr(self, key)
def __getattr__(self, attr):
return None
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, sorted(self.__dict__.items()))
class Assert(YamlTag):
def __init__(self, model, id=None, severity=logging.WARNING, string="NONAME", **kwargs):
self.model = model
self.id = id
self.severity = severity
self.string = string
super(Assert, self).__init__(**kwargs)
class Record(YamlTag):
    def __init__(self, model, id, use='id', view=True, **kwargs):
        self.model = model
        self.id = id
        self.use = use
        self.view = view
        super(Record, self).__init__(**kwargs)
def __str__(self):
        return '!record {model: %s, id: %s}:' % (str(self.model), str(self.id))
class Python(YamlTag):
def __init__(self, model, severity=logging.ERROR, name="", **kwargs):
        self.model = model
self.severity = severity
self.name = name
super(Python, self).__init__(**kwargs)
def __str__(self):
return '!python {model: %s}: |' % (str(self.model), )
class Menuitem(YamlTag):
def __init__(self, id, name, **kwargs):
self.id = id
self.name = name
super(Menuitem, self).__init__(**kwargs)
class Workflow(YamlTag):
def __init__(self, model, action, ref=None, **kwargs):
self.model = model
self.action = action
self.ref = ref
super(Workflow, self).__init__(**kwargs)
def __str__(self):
        return '!workflow {model: %s, action: %s, ref: %s}' % (str(self.model), str(self.action), str(self.ref))
class ActWindow(YamlTag):
def __init__(self, **kwargs):
super(ActWindow, self).__init__(**kwargs)
class Function(YamlTag):
def __init__(self, model, name, **kwargs):
self.model = model
self.name = name
super(Function, self).__init__(**kwargs)
class Report(YamlTag):
def __init__(self, model, name, string, **kwargs):
self.model = model
self.name = name
self.string = string
super(Report, self).__init__(**kwargs)
class Delete(YamlTag):
def __init__(self, **kwargs):
super(Delete, self).__init__(**kwargs)
class Context(YamlTag):
def __init__(self, **kwargs):
super(Context, self).__init__(**kwargs)
class Url(YamlTag):
def __init__(self, **kwargs):
super(Url, self).__init__(**kwargs)
class Eval(YamlTag):
def __init__(self, expression):
self.expression = expression
super(Eval, self).__init__()
def __str__(self):
return '!eval %s' % str(self.expression)
class Ref(YamlTag):
def __init__(self, expr="False", *args, **kwargs):
self.expr = expr
super(Ref, self).__init__(*args, **kwargs)
def __str__(self):
return 'ref(%s)' % repr(self.expr)
class IrSet(YamlTag):
    def __init__(self, **kwargs):
        super(IrSet, self).__init__(**kwargs)
def assert_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Assert(**kwargs)
def record_constructor(loader, node):
kwargs = loader.construct_mapping(node)
assert "model" in kwargs, "'model' argument is required for !record"
assert "id" in kwargs, "'id' argument is required for !record"
return Record(**kwargs)
def python_constructor(loader, node):
kwargs = loader.construct_mapping(node)
kwargs['first_line'] = node.start_mark.line + 1
return Python(**kwargs)
def menuitem_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Menuitem(**kwargs)
def workflow_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Workflow(**kwargs)
def act_window_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return ActWindow(**kwargs)
def function_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Function(**kwargs)
def report_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Report(**kwargs)
def delete_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Delete(**kwargs)
def context_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Context(**kwargs)
def url_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Url(**kwargs)
def eval_constructor(loader, node):
expression = loader.construct_scalar(node)
return Eval(expression)
def ref_constructor(loader, tag_suffix, node):
if tag_suffix == "id":
kwargs = {"id": loader.construct_scalar(node)}
else:
kwargs = loader.construct_mapping(node)
return Ref(**kwargs)
def ir_set_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return IrSet(**kwargs)
# Registers constructors for custom tags.
# Constructors are actually defined globally: do not redefine them in another
# class/file/package. This means that the module recorder needs to import this file.
def add_constructors():
yaml.add_constructor(u"!assert", assert_constructor)
yaml.add_constructor(u"!record", record_constructor)
yaml.add_constructor(u"!python", python_constructor)
yaml.add_constructor(u"!menuitem", menuitem_constructor)
yaml.add_constructor(u"!workflow", workflow_constructor)
yaml.add_constructor(u"!act_window", act_window_constructor)
yaml.add_constructor(u"!function", function_constructor)
yaml.add_constructor(u"!report", report_constructor)
yaml.add_constructor(u"!context", context_constructor)
yaml.add_constructor(u"!delete", delete_constructor)
yaml.add_constructor(u"!url", url_constructor)
yaml.add_constructor(u"!eval", eval_constructor)
yaml.add_multi_constructor(u"!ref", ref_constructor)
yaml.add_constructor(u"!ir_set", ir_set_constructor)
add_constructors()
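if __name__ == '__main__':
    # Minimal sketch, not part of the original module: round-trip one of the
    # custom tags through PyYAML. The model and id values here are invented.
    demo = yaml.load('!record {model: res.partner, id: partner_demo}')
    print(repr(demo))  # e.g. <Record [('id', 'partner_demo'), ('model', 'res.partner'), ...]>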
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
OpenBfS/dokpool-plone
|
refs/heads/master
|
Plone/src/elan.theme/elan/theme/setuphandlers.py
|
1
|
def setupVarious(context):
# Ordinarily, GenericSetup handlers check for the existence of XML files.
# Here, we are not parsing an XML file, but we use this text file as a
# flag to check that we actually meant for this import step to be run.
# The file is found in profiles/default.
if context.readDataFile('elan.theme_various.txt') is None:
return
# Add additional setup code here
|
Anwesh43/numpy
|
refs/heads/master
|
doc/f2py/collectinput.py
|
111
|
#!/usr/bin/env python
"""
collectinput - Collects all files that are included in a main LaTeX document
               with \input or \include commands. These commands must appear
               on separate lines.
Copyright 1999 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
Usage:
collectinput <infile> <outfile>
collectinput <infile> # <outfile>=inputless_<infile>
collectinput # in and out are stdin and stdout
"""
from __future__ import division, absolute_import, print_function
__version__ = "0.0"
stdoutflag=0
import sys
import fileinput
import re
if sys.version_info[0] >= 3:
from subprocess import getoutput
else:
from commands import getoutput
try: fn=sys.argv[2]
except:
try: fn='inputless_'+sys.argv[1]
except: stdoutflag=1
try: fi=sys.argv[1]
except: fi=()
if not stdoutflag:
sys.stdout=open(fn, 'w')
nonverb=r'[\w\s\\&=\^\*\.\{\(\)\[\?\+\$/]*(?!\\verb.)'
input=re.compile(nonverb+r'\\(input|include)\*?\s*\{?.*}?')
comment=re.compile(r'[^%]*%')
for l in fileinput.input(fi):
l=l[:-1]
l1=''
if comment.match(l):
m=comment.match(l)
l1=l[m.end()-1:]
l=l[:m.end()-1]
m=input.match(l)
if m:
l=l.strip()
if l[-1]=='}': l=l[:-1]
i=m.end()-2
sys.stderr.write('>>>>>>')
while i>-1 and (l[i] not in [' ', '{']): i=i-1
if i>-1:
fn=l[i+1:]
try: f=open(fn, 'r'); flag=1; f.close()
except:
try: f=open(fn+'.tex', 'r'); flag=1;fn=fn+'.tex'; f.close()
except: flag=0
if flag==0:
sys.stderr.write('Could not open a file: '+fn+'\n')
print(l+l1)
continue
elif flag==1:
sys.stderr.write(fn+'\n')
print('%%%%% Begin of '+fn)
print(getoutput(sys.argv[0]+' < '+fn))
print('%%%%% End of '+fn)
else:
sys.stderr.write('Could not extract a file name from: '+l)
print(l+l1)
else:
print(l+l1)
sys.stdout.close()
|
LAsbun/scrapy_practice
|
refs/heads/master
|
scrapy_practice/__init__.py
|
12133432
| |
maestro-hybrid-cloud/ceilometer
|
refs/heads/master
|
ceilometer/tests/unit/energy/__init__.py
|
12133432
| |
saurabh6790/tru_app_back
|
refs/heads/master
|
controllers/__init__.py
|
12133432
| |
harisibrahimkv/django
|
refs/heads/master
|
django/core/cache/backends/__init__.py
|
12133432
| |
PolicyStat/selenium-old
|
refs/heads/master
|
py/selenium/webdriver/common/keys.py
|
39
|
# copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Keys(object):
NULL = u'\ue000'
CANCEL = u'\ue001' # ^break
HELP = u'\ue002'
BACK_SPACE = u'\ue003'
TAB = u'\ue004'
CLEAR = u'\ue005'
RETURN = u'\ue006'
ENTER = u'\ue007'
SHIFT = u'\ue008'
LEFT_SHIFT = u'\ue008' # alias
CONTROL = u'\ue009'
LEFT_CONTROL = u'\ue009' # alias
ALT = u'\ue00a'
LEFT_ALT = u'\ue00a' # alias
PAUSE = u'\ue00b'
ESCAPE = u'\ue00c'
SPACE = u'\ue00d'
PAGE_UP = u'\ue00e'
PAGE_DOWN = u'\ue00f'
END = u'\ue010'
HOME = u'\ue011'
LEFT = u'\ue012'
ARROW_LEFT = u'\ue012' # alias
UP = u'\ue013'
ARROW_UP = u'\ue013' # alias
RIGHT = u'\ue014'
ARROW_RIGHT = u'\ue014' # alias
DOWN = u'\ue015'
ARROW_DOWN = u'\ue015' # alias
INSERT = u'\ue016'
DELETE = u'\ue017'
SEMICOLON = u'\ue018'
EQUALS = u'\ue019'
    NUMPAD0 = u'\ue01a' # number pad keys
NUMPAD1 = u'\ue01b'
NUMPAD2 = u'\ue01c'
NUMPAD3 = u'\ue01d'
NUMPAD4 = u'\ue01e'
NUMPAD5 = u'\ue01f'
NUMPAD6 = u'\ue020'
NUMPAD7 = u'\ue021'
NUMPAD8 = u'\ue022'
NUMPAD9 = u'\ue023'
MULTIPLY = u'\ue024'
ADD = u'\ue025'
SEPARATOR = u'\ue026'
SUBTRACT = u'\ue027'
DECIMAL = u'\ue028'
DIVIDE = u'\ue029'
F1 = u'\ue031' # function keys
F2 = u'\ue032'
F3 = u'\ue033'
F4 = u'\ue034'
F5 = u'\ue035'
F6 = u'\ue036'
F7 = u'\ue037'
F8 = u'\ue038'
F9 = u'\ue039'
F10 = u'\ue03a'
F11 = u'\ue03b'
F12 = u'\ue03c'
META = u'\ue03d'
COMMAND = u'\ue03d'
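# Illustrative usage with a WebDriver element (element lookup is assumed):
#
#   element.send_keys("hello", Keys.ENTER)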
|
edersondisouza/soletta
|
refs/heads/master
|
data/scripts/check-api.py
|
8
|
#!/usr/bin/env python3
# This file is part of the Soletta (TM) Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import sys
functionsPattern = re.compile(r'(\w*\s*\w+)\s\**\s*(\w+)\([\w*,\s\(\).\[\]]+\)[\s\w,\(\)]*(;|\s#ifndef)')
variablesPattern = re.compile(r'extern[\s\w]+?\**(\w+)[\[\d\]]*;')
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description=""" The check-api script checks if all the exported functions/variables
declared in the installed headers are properly present in the version script file""")
argparser.add_argument("--version_script", type=str, help=""" Path to the version script """, required=True)
argparser.add_argument("--src_dir", type=str, help=""" Source directory """, required=True)
args = argparser.parse_args()
exitWithErr = False
missingSymbols = {}
with open(args.version_script) as fData:
versionScriptSymbols = re.findall('(?<!})\s+(\w+);', fData.read())
for root, dirs, files in os.walk(args.src_dir):
if not root.endswith("include"):
continue
for f in files:
contents = ""
with open(os.path.join(root, f)) as fData:
contents = fData.read()
headerExportedFunctions = functionsPattern.findall(contents)
headerExportedFunctions = list(filter(lambda symbol: False if "return" in symbol[0] or
"inline" in symbol[0] else True, headerExportedFunctions))
exportedSymbols = variablesPattern.findall(contents)
exportedSymbols = exportedSymbols + list(map(lambda symbol: symbol[1], headerExportedFunctions))
for exported in exportedSymbols:
if exported.startswith("SOL_FLOW_PACKET_TYPE_"): #A lovely whitelist <3
continue
if not exported in versionScriptSymbols:
if not f in missingSymbols:
missingSymbols[f] = [exported]
else:
missingSymbols[f].append(exported)
else:
versionScriptSymbols.remove(exported)
if len(missingSymbols):
print("Symbols that were not found at '%s'\n\n" % (args.version_script))
for key in missingSymbols:
print("\nFile: %s - Missing symbols: %s" % (key, missingSymbols[key]))
exitWithErr = True
if len(versionScriptSymbols):
print("\n\nSymbols declared at '%s' that were not found in the exported headers: %s" % (args.version_script, versionScriptSymbols))
exitWithErr = True
if exitWithErr:
sys.exit(-1)
print("All exported symbols are present in " + args.version_script)
sys.exit(0)
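# Example invocation (the paths below are illustrative, not from the repo):
#
#   ./check-api.py --version_script soletta.versions --src_dir src/lib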
|
ebu/PlugIt
|
refs/heads/master
|
tests/helpers/pop_server/server.py
|
1
|
"""
Small POP server. Heavilly based on
pypopper: a file-based pop3 server (http://code.activestate.com/recipes/534131-pypopper-python-pop3-server/)
Useage:
python server.py
Will return all mail*.txt in the current folder as mail. Output is also printed.
"""
import logging
import socket
import glob
logging.basicConfig(format="%(message)s")
log = logging.getLogger("pypopper")
log.setLevel(logging.INFO)
class ChatterboxConnection(object):
END = "\r\n"
def __init__(self, conn):
self.conn = conn
def __getattr__(self, name):
return getattr(self.conn, name)
def sendall(self, data, END=END):
if len(data) < 50:
log.debug("send: %r", data)
else:
log.debug("send: %r...", data[:50])
data += END
self.conn.sendall(data)
def recvall(self, END=END):
data = []
while True:
chunk = self.conn.recv(4096)
if END in chunk:
data.append(chunk[:chunk.index(END)])
break
data.append(chunk)
if len(data) > 1:
pair = data[-2] + data[-1]
if END in pair:
data[-2] = pair[:pair.index(END)]
data.pop()
break
log.debug("recv: %r", "".join(data))
return "".join(data)
class Message(object):
def __init__(self, filename):
msg = open(filename, "r")
try:
self.data = data = msg.read()
self.size = len(data)
self.top, bot = data.split("\r\n\r\n", 1)
self.bot = bot.split("\r\n")
self.index = int(filename.split('mail')[1].split('.txt')[0])
finally:
msg.close()
def handleUser(data, msgs):
log.info("USER:%s", data.split()[1])
return "+OK user accepted"
def handlePass(data, msgs):
log.info("PASS:%s", data.split()[1])
return "+OK pass accepted"
def handleStat(data, msgs):
return "+OK %i %i" % (len(msgs), sum([msg.size for msg in msgs]))
def handleList(data, msgs):
return "+OK %i messages (%i octets)\r\n%s\r\n." % (len(msgs), sum([msg.size for msg in msgs]), '\r\n'.join(["%i %i" % (msg.index, msg.size,) for msg in msgs]))
def handleTop(data, msgs):
cmd, num, lines = data.split()
lines = int(lines)
msg = msgs[int(num) - 1]
text = msg.top + "\r\n\r\n" + "\r\n".join(msg.bot[:lines])
return "+OK top of message follows\r\n%s\r\n." % text
def handleRetr(data, msgs):
log.info("RETRIVE:%s", data.split()[1])
msg = msgs[int(data.split()[1]) - 1]
return "+OK %i octets\r\n%s\r\n." % (msg.size, msg.data)
def handleDele(data, msgs):
log.info("DELETE:%s", data.split()[1])
return "+OK message 1 deleted"
def handleNoop(data, msgs):
return "+OK"
def handleQuit(data, msgs):
return "+OK pypopper POP3 server signing off"
dispatch = dict(
USER=handleUser,
PASS=handlePass,
STAT=handleStat,
LIST=handleList,
TOP=handleTop,
RETR=handleRetr,
DELE=handleDele,
NOOP=handleNoop,
QUIT=handleQuit,
)
def serve(host, port, filenames):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
try:
if host:
hostname = host
else:
hostname = "localhost"
log.debug("pypopper POP3 serving '%s' on %s:%s", filenames, hostname, port)
while True:
sock.listen(1)
conn, addr = sock.accept()
log.debug('Connected by %s', addr)
try:
msgs = range(0, len(filenames))
for f in filenames:
msg = Message(f)
msgs[msg.index-1] = msg
conn = ChatterboxConnection(conn)
conn.sendall("+OK pypopper file-based pop3 server ready")
while True:
data = conn.recvall()
command = data.split(None, 1)[0]
try:
cmd = dispatch[command]
except KeyError:
conn.sendall("-ERR unknown command")
else:
conn.sendall(cmd(data, msgs))
if cmd is handleQuit:
return
finally:
conn.close()
msgs = None
except (SystemExit, KeyboardInterrupt):
log.info("pypopper stopped")
except Exception as ex:
log.critical("fatal error", exc_info=ex)
finally:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
if __name__ == "__main__":
filenames = glob.glob("./mail[0-9]*.txt")
serve("127.0.0.1", 22110, filenames)
|
912/M-new
|
refs/heads/master
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/db/migrations/state.py
|
5
|
from __future__ import unicode_literals
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.db import models
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.fields.related import do_pending_lookups
from django.conf import settings
from django.utils import six
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
class InvalidBasesError(ValueError):
pass
class ProjectState(object):
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
self.apps = None
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
def add_model_state(self, model_state):
self.models[(model_state.app_label, model_state.name.lower())] = model_state
def clone(self):
"Returns an exact copy of this ProjectState"
return ProjectState(
models=dict((k, v.clone()) for k, v in self.models.items()),
real_apps=self.real_apps,
)
def render(self, include_real=None, ignore_swappable=False):
"Turns the project state into actual models in a new Apps"
if self.apps is None:
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
real_models = []
for app_label in self.real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
real_models.append(ModelState.from_model(model))
# Populate the app registry with a stub for each application.
app_labels = set(model_state.app_label for model_state in self.models.values())
self.apps = Apps([AppConfigStub(label) for label in sorted(self.real_apps + list(app_labels))])
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop/
# missing base.
unrendered_models = list(self.models.values()) + real_models
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self.apps)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError("Cannot resolve bases for %r" % new_unrendered_models)
unrendered_models = new_unrendered_models
# make sure apps has no dangling references
if self.apps._pending_lookups:
# There's some lookups left. See if we can first resolve them
# ourselves - sometimes fields are added after class_prepared is sent
for lookup_model, operations in self.apps._pending_lookups.items():
try:
model = self.apps.get_model(lookup_model[0], lookup_model[1])
except LookupError:
if "%s.%s" % (lookup_model[0], lookup_model[1]) == settings.AUTH_USER_MODEL and ignore_swappable:
continue
# Raise an error with a best-effort helpful message
# (only for the first issue). Error message should look like:
# "ValueError: Lookup failed for model referenced by
# field migrations.Book.author: migrations.Author"
raise ValueError("Lookup failed for model referenced by field {field}: {model[0]}.{model[1]}".format(
field=operations[0][1],
model=lookup_model,
))
else:
do_pending_lookups(model)
return self.apps
@classmethod
def from_apps(cls, apps):
"Takes in an Apps and returns a ProjectState matching it"
app_models = {}
for model in apps.get_models():
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name.lower())] = model_state
return cls(app_models)
def __eq__(self, other):
if set(self.models.keys()) != set(other.models.keys()):
return False
if set(self.real_apps) != set(other.real_apps):
return False
return all(model == other.models[key] for key, model in self.models.items())
def __ne__(self, other):
return not (self == other)
class AppConfigStub(AppConfig):
"""
Stubs a Django AppConfig. Only provides a label, and a dict of models.
"""
# Not used, but required by AppConfig.__init__
path = ''
def __init__(self, label):
super(AppConfigStub, self).__init__(label, None)
def import_models(self, all_models):
self.models = all_models
class ModelState(object):
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None):
self.app_label = app_label
self.name = force_text(name)
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model, )
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
@classmethod
def from_model(cls, model):
"""
Feed me a model, get a ModelState representing it out.
"""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
name, path, args, kwargs = field.deconstruct()
field_class = import_string(path)
try:
fields.append((name, field_class(*args, **kwargs)))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s.%s: %s" % (
name,
model._meta.app_label,
model._meta.object_name,
e,
))
for field in model._meta.local_many_to_many:
name, path, args, kwargs = field.deconstruct()
field_class = import_string(path)
try:
fields.append((name, field_class(*args, **kwargs)))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_together(ut))
elif name == "index_together":
it = model._meta.original_attrs["index_together"]
options[name] = set(normalize_together(it))
else:
options[name] = model._meta.original_attrs[name]
def flatten_bases(model):
bases = []
for base in model.__bases__:
if hasattr(base, "_meta") and base._meta.abstract:
bases.extend(flatten_bases(base))
else:
bases.append(base)
return bases
# We can't rely on __mro__ directly because we only want to flatten
# abstract models and not the whole tree. However by recursing on
# __bases__ we may end up with duplicates and ordering issues, we
# therefore discard any duplicates and reorder the bases according
# to their index in the MRO.
flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
# Make our record
bases = tuple(
(
"%s.%s" % (base._meta.app_label, base._meta.model_name)
if hasattr(base, "_meta") else
base
)
for base in flattened_bases
)
# Ensure at least one base inherits from models.Model
if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
bases = (models.Model,)
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
)
def clone(self):
"Returns an exact copy of this ModelState"
# We deep-clone the fields using deconstruction
fields = []
for name, field in self.fields:
_, path, args, kwargs = field.deconstruct()
field_class = import_string(path)
fields.append((name, field_class(*args, **kwargs)))
# Now make a copy
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=fields,
options=dict(self.options),
bases=self.bases,
)
def render(self, apps):
"Creates a Model object from our current state into the given apps"
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "apps": apps}
meta_contents.update(self.options)
if "unique_together" in meta_contents:
meta_contents["unique_together"] = list(meta_contents["unique_together"])
meta = type(str("Meta"), tuple(), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(base) if isinstance(base, six.string_types) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Turn fields into a dict for the body, add other bits
body = dict(self.fields)
body['Meta'] = meta
body['__module__'] = "__fake__"
# Then, make a Model object
return type(
str(self.name),
bases,
body,
)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:])) for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
(self.options == other.options) and
(self.bases == other.bases)
)
def __ne__(self, other):
return not (self == other)
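# Minimal sketch (not part of Django itself) of the intended round trip:
# capture the live app registry as migration state, clone it, and render
# the clone back into model classes.
#
#   from django.apps import apps as global_apps
#   state = ProjectState.from_apps(global_apps)
#   model_classes = state.clone().render().get_models()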
|
diego-d5000/MisValesMd
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/conf/locale/az/__init__.py
|
12133432
| |
wwf5067/statsmodels
|
refs/heads/master
|
docs/sphinxext/numpy_ext/__init__.py
|
12133432
| |
kuiwei/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/tests/__init__.py
|
12133432
| |
flipjack/django
|
refs/heads/master
|
oscar/models/__init__.py
|
12133432
| |
blighj/django
|
refs/heads/master
|
tests/queries/__init__.py
|
12133432
| |
SusanJL/iris
|
refs/heads/master
|
docs/iris/example_tests/test_SOI_filtering.py
|
11
|
# (C) British Crown Copyright 2012 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import Iris tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from .extest_util import (add_examples_to_path,
show_replaced_by_check_graphic,
fail_any_deprecation_warnings)
class TestSOIFiltering(tests.GraphicsTest):
"""Test the SOI_filtering example code."""
def test_soi_filtering(self):
with fail_any_deprecation_warnings():
with add_examples_to_path():
import SOI_filtering
with show_replaced_by_check_graphic(self):
SOI_filtering.main()
if __name__ == '__main__':
tests.main()
|
drakuna/odoo
|
refs/heads/master
|
addons/payment_ogone/models/__init__.py
|
895
|
# -*- coding: utf-8 -*-
import ogone
|
modulexcite/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_contacts/test_layer.py
|
68
|
from yowsup.layers import YowProtocolLayerTest
from yowsup.layers.protocol_contacts import YowContactsIqProtocolLayer
from yowsup.layers.protocol_contacts.protocolentities.test_notification_contact_add import entity as addEntity
from yowsup.layers.protocol_contacts.protocolentities.test_notification_contact_update import entity as updateEntity
from yowsup.layers.protocol_contacts.protocolentities.test_notification_contact_remove import entity as removeEntity
from yowsup.layers.protocol_contacts.protocolentities.test_iq_sync_result import entity as syncResultEntity
from yowsup.layers.protocol_contacts.protocolentities.test_iq_sync_get import entity as syncGetEntity
class YowContactsIqProtocolLayerTest(YowProtocolLayerTest, YowContactsIqProtocolLayer):
def setUp(self):
YowContactsIqProtocolLayer.__init__(self)
def test_sync(self):
self.assertSent(syncGetEntity)
def test_syncResult(self):
self.assertReceived(syncResultEntity)
def test_notificationAdd(self):
self.assertReceived(addEntity)
def test_notificationUpdate(self):
self.assertReceived(updateEntity)
def test_notificationRemove(self):
self.assertReceived(removeEntity)
|
gdgellatly/OCB1
|
refs/heads/7.0
|
addons/web_api/__init__.py
|
12133432
| |
BQLQ/BQLQ
|
refs/heads/master
|
mezzanine-4.2.3/mezzanine/core/migrations/__init__.py
|
12133432
| |
knifenomad/django
|
refs/heads/master
|
django/conf/locale/et/__init__.py
|
12133432
| |
waustin/django-simple-faq
|
refs/heads/master
|
faq/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table(u'faq_category', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=150)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=75)),
('display_order', self.gf('django.db.models.fields.PositiveIntegerField')(default=1, db_index=True)),
))
db.send_create_signal(u'faq', ['Category'])
# Adding model 'Question'
db.create_table(u'faq_question', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question', self.gf('django.db.models.fields.TextField')()),
('answer', self.gf('django.db.models.fields.TextField')()),
('display_order', self.gf('django.db.models.fields.PositiveIntegerField')(default=1, db_index=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(related_name='questions', null=True, on_delete=models.SET_NULL, to=orm['faq.Category'])),
))
db.send_create_signal(u'faq', ['Question'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table(u'faq_category')
# Deleting model 'Question'
db.delete_table(u'faq_question')
models = {
u'faq.category': {
'Meta': {'ordering': "('display_order',)", 'object_name': 'Category'},
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '75'})
},
u'faq.question': {
'Meta': {'ordering': "('display_order',)", 'object_name': 'Question'},
'answer': ('django.db.models.fields.TextField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['faq.Category']"}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['faq']
|
piyushroshan/xen-4.3
|
refs/heads/master
|
tools/python/xen/web/protocol.py
|
52
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd.
#============================================================================
class Protocol:
def __init__(self):
self.transport = None
def setTransport(self, transport):
self.transport = transport
def dataReceived(self, data):
raise NotImplementedError()
def write(self, data):
if self.transport:
return self.transport.write(data)
else:
return 0
def read(self):
if self.transport:
return self.transport.read()
else:
return None
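# Minimal illustrative subclass (not part of the original module): echo any
# received data straight back over the transport.
#
#   class EchoProtocol(Protocol):
#       def dataReceived(self, data):
#           self.write(data)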
|
bsmr-eve/Pyfa
|
refs/heads/master
|
eos/effects/fighterabilitylaunchbomb.py
|
1
|
# fighterAbilityLaunchBomb
#
# Used by:
# Fighters from group: Heavy Fighter (16 of 34)
"""
Since fighter abilities do not have any sort of item entity in the EVE database, we must derive the abilities from the
effects, and thus this effect file contains some custom information useful only to fighters.
"""
# User-friendly name for the ability
displayName = "Bomb"
# Attribute prefix that this ability targets
prefix = "fighterAbilityLaunchBomb"
type = "active"
# This flag is required for effects that use charges in order to properly calculate reload time
hasCharges = True
def handler(fit, src, context):
pass
|
yceruto/django
|
refs/heads/master
|
django/conf/locale/sr/__init__.py
|
12133432
| |
mcardillo55/django
|
refs/heads/master
|
tests/template_backends/apps/importerror/templatetags/__init__.py
|
12133432
| |
gaddman/ansible
|
refs/heads/devel
|
lib/ansible/modules/remote_management/redfish/__init__.py
|
12133432
| |
stephentyrone/swift
|
refs/heads/master
|
benchmark/scripts/generate_harness/generate_harness.py
|
17
|
#!/usr/bin/env python
# ===--- generate_harness.py ---------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
# Generate boilerplate, CMakeLists.txt and utils/main.swift from templates.
from __future__ import print_function
import argparse
import os
import subprocess
script_dir = os.path.dirname(os.path.realpath(__file__))
perf_dir = os.path.realpath(os.path.join(script_dir, "../.."))
gyb = os.path.realpath(os.path.join(perf_dir, "../utils/gyb"))
parser = argparse.ArgumentParser()
parser.add_argument(
"--output-dir", help="Output directory (for validation test)", default=perf_dir
)
args = parser.parse_args()
output_dir = args.output_dir
def all_files(directory, extension): # matching: [directory]/**/*[extension]
return [
os.path.join(root, f)
for root, _, files in os.walk(directory)
for f in files
if f.endswith(extension)
]
def will_write(filename): # ensure path to file exists before writing
print(filename)
output_path = os.path.split(filename)[0]
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == "__main__":
# Generate Your Boilerplate
# Make sure longer paths are done first as CMakeLists.txt and main.swift
# depend on the other gybs being generated first.
gyb_files = sorted(all_files(perf_dir, ".gyb"), key=len, reverse=True)
for f in gyb_files:
relative_path = os.path.relpath(f[:-4], perf_dir)
out_file = os.path.join(output_dir, relative_path)
will_write(out_file)
subprocess.call([gyb, "--line-directive", "", "-o", out_file, f])
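# Example invocation (illustrative): regenerate into a scratch directory
# instead of overwriting the benchmark tree in place:
#
#   ./generate_harness.py --output-dir /tmp/perf-gen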
|
jf87/smap
|
refs/heads/master
|
python/smap/drivers/file.py
|
1
|
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
from smap import actuate, driver
from smap.authentication import authenticated
class _Actuator(actuate.SmapActuator):
"""Example Acutator which implements actuation by writing
to a file
"""
def __init__(self, filename=None):
self.file = os.path.expanduser(filename)
def get_state(self, request):
try:
with open(self.file, 'r') as fp:
return self.parse_state(fp.read())
except IOError:
return None
# @authenticated(['__has_ssl__'])
def set_state(self, request, state):
with open(self.file, 'w') as fp:
fp.write(str(state))
return state
class BinaryActuator(_Actuator, actuate.BinaryActuator):
def __init__(self, filename=None, range=None):
_Actuator.__init__(self, filename)
actuate.BinaryActuator.__init__(self)
class ContinuousActuator(_Actuator, actuate.ContinuousActuator):
def __init__(self, filename=None, range=None):
_Actuator.__init__(self, filename)
actuate.ContinuousActuator.__init__(self, range)
class DiscreteActuator(_Actuator, actuate.NStateActuator):
def __init__(self, filename=None, states=None):
_Actuator.__init__(self, filename)
actuate.NStateActuator.__init__(self, states)
class FileDriver(driver.SmapDriver):
"""Driver which creates a single point backed by a file. You
could use this, for instance, to expose flags in /proc"""
def setup(self, opts):
# set up an appropriate actuator
filename = opts.pop('Filename', '~/FileActuatorFile')
data_type = 'long'
        if 'model' not in opts or opts['model'] == 'binary':
act = BinaryActuator(filename)
elif opts['model'] == 'discrete':
act = DiscreteActuator(filename=filename, states=['cat', 'dog'])
elif opts['model'] == 'continuous':
act = ContinuousActuator(filename=filename, range=map(float, opts.pop('range')))
data_type = 'double'
else:
raise ValueError("Invalid actuator model: " + opts['model'])
self.add_actuator('/point0', 'Switch Position',
act, data_type=data_type, write_limit=0)
self.set_metadata('/point0', {
'Metadata/PointName' : 'SDH.S4-15.AI 3'
})
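# Illustrative direct use of the file-backed actuator (not part of the
# original driver); the path is arbitrary:
#
#   act = BinaryActuator(filename='/tmp/flag')
#   act.set_state(None, 1)        # writes "1" to /tmp/flag
#   print(act.get_state(None))    # reads the file and parses the state back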
|
jumpstarter-io/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/routers/ports/tabs.py
|
86
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
class OverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
template_name = "project/networks/ports/_detail_overview.html"
failure_url = 'horizon:project:routers:index'
def get_context_data(self, request):
port_id = self.tab_group.kwargs['port_id']
try:
port = api.neutron.port_get(self.request, port_id)
except Exception:
redirect = reverse(self.failure_url)
msg = _('Unable to retrieve port details.')
exceptions.handle(request, msg, redirect=redirect)
return {'port': port}
class PortDetailTabs(tabs.TabGroup):
slug = "port_details"
tabs = (OverviewTab,)
|
yashsharan/sympy
|
refs/heads/master
|
doc/src/conf.py
|
7
|
# -*- coding: utf-8 -*-
#
# SymPy documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 22 19:34:32 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import sympy
# If your extensions are in another directory, add it here.
sys.path = ['../sympy', 'ext'] + sys.path
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'numpydoc', 'sympylive', 'sphinx.ext.graphviz', ]
# Use this to use pngmath instead
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.pngmath', ]
# MathJax file, which is free to use. See http://www.mathjax.org/docs/2.0/start.html
mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML-full'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SymPy'
copyright = '2017 SymPy Development Team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = sympy.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
html_theme = 'classic'
html_logo = '_static/sympylogo.png'
html_favicon = '../_build/logo/sympy-notailtext-favicon.ico'
# See http://sphinx-doc.org/theming.html#builtin-themes.
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
html_domain_indices = ['py-modindex']
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'SymPydoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual], toctree_only).
# toctree_only is set to True so that the start file document itself is not included in the
# output, only the documents referenced by it via TOC trees. The extra stuff in the master
# document is intended to show up in the HTML, but doesn't really belong in the LaTeX output.
latex_documents = [('index', 'sympy-%s.tex' % release, 'SymPy Documentation',
'SymPy Development Team', 'manual', True)]
# Additional stuff for the LaTeX preamble.
# Tweaked to work with XeTeX.
latex_elements = {
'babel': '',
'fontenc': r'''
\usepackage{bm}
\usepackage{amssymb}
\usepackage{fontspec}
\usepackage[english]{babel}
\defaultfontfeatures{Mapping=tex-text}
\setmainfont{DejaVu Serif}
\setsansfont{DejaVu Sans}
\setmonofont{DejaVu Sans Mono}
''',
'fontpkg': '',
'inputenc': '',
'utf8extra': '',
'preamble': r'''
% redefine \LaTeX to be usable in math mode
\expandafter\def\expandafter\LaTeX\expandafter{\expandafter\text\expandafter{\LaTeX}}
'''
}
# SymPy logo on title page
html_logo = '_static/sympylogo.png'
latex_logo = '_static/sympylogo_big.png'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Show page numbers next to internal references
latex_show_pagerefs = True
# We use False otherwise the module index gets generated twice.
latex_use_modindex = False
default_role = 'math'
pngmath_divpng_args = ['-gamma 1.5', '-D 110']
# Note, this is ignored by the mathjax extension
# Any \newcommand should be defined in the file
pngmath_latex_preamble = '\\usepackage{amsmath}\n' \
'\\usepackage{bm}\n' \
'\\usepackage{amsfonts}\n' \
'\\usepackage{amssymb}\n' \
'\\setlength{\\parindent}{0pt}\n'
texinfo_documents = [
(master_doc, 'sympy', 'SymPy Documentation', 'SymPy Development Team',
'SymPy', 'Computer algebra system (CAS) in Python', 'Programming', 1),
]
# Use svg for graphviz
graphviz_output_format = 'svg'
|
clld/clldutils
|
refs/heads/master
|
tests/test_markup.py
|
1
|
import io
from operator import itemgetter
import pytest
from clldutils.markup import Table, iter_markdown_tables, iter_markdown_sections
def test_Table():
t = Table()
assert t.render() == ''
t = Table('a', 'b', rows=[[1, 2], [3, 4]])
assert t.render() == \
'| a | b |\n|----:|----:|\n| 1 | 2 |\n| 3 | 4 |'
assert t.render(condensed=False) == \
'| a | b |\n|----:|----:|\n| 1 | 2 |\n| 3 | 4 |'
assert t.render(verbose=True) == \
'| a | b |\n|----:|----:|\n| 1 | 2 |\n| 3 | 4 |\n\n(2 rows)\n\n'
assert t.render(sortkey=itemgetter(1), reverse=True) == \
'| a | b |\n|----:|----:|\n| 3 | 4 |\n| 1 | 2 |'
def test_Table_context(capsys):
with Table('a', 'b', tablefmt='simple') as t:
t.append([1, 2.345])
out, _ = capsys.readouterr()
assert out == ' a b\n--- ----\n 1 2.35\n'
f = io.StringIO()
with Table('a', 'b', tablefmt='simple', file=f) as t:
t.append([1, 2.345])
assert f.getvalue() == ' a b\n--- ----\n 1 2.35\n'
def test_iter_markdown_tables():
header, rows = ['a', 'b'], [[1, 2], [3, 4]]
text = Table(*header, **dict(rows=rows)).render() + '\nabcd'
assert list(iter_markdown_tables(text))[0] == \
(header, [[str(v) for v in r] for r in rows])
assert list(iter_markdown_tables('a|b\n---|---\n1|2'))[0] == (header, [['1', '2']])
@pytest.mark.parametrize(
'text',
[
'leading\n# title\n\n## sec1\nsec1 content\n\n## sec2\n\n',
'\n# title\n\n## sec1\nsec1 content\n\n## sec2',
]
)
def test_iter_markdown_sections(text):
res = []
for _, header, content in iter_markdown_sections(text):
res.extend(t for t in [header, content] if t is not None)
assert ''.join(res) == text
|
andresguisado/andresguisado.github.io
|
refs/heads/master
|
node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/manni.py
|
364
|
# -*- coding: utf-8 -*-
"""
pygments.styles.manni
~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ManniStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
background_color = '#f0f3f3'
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #0099FF',
Comment.Preproc: 'noitalic #009999',
Comment.Special: 'bold',
Keyword: 'bold #006699',
Keyword.Pseudo: 'nobold',
Keyword.Type: '#007788',
Operator: '#555555',
Operator.Word: 'bold #000000',
Name.Builtin: '#336666',
Name.Function: '#CC00FF',
Name.Class: 'bold #00AA88',
Name.Namespace: 'bold #00CCFF',
Name.Exception: 'bold #CC0000',
Name.Variable: '#003333',
Name.Constant: '#336600',
Name.Label: '#9999FF',
Name.Entity: 'bold #999999',
Name.Attribute: '#330099',
Name.Tag: 'bold #330099',
Name.Decorator: '#9999FF',
String: '#CC3300',
String.Doc: 'italic',
String.Interpol: '#AA0000',
String.Escape: 'bold #CC3300',
String.Regex: '#33AAAA',
String.Symbol: '#FFCC33',
String.Other: '#CC3300',
Number: '#FF6600',
Generic.Heading: 'bold #003300',
Generic.Subheading: 'bold #003300',
Generic.Deleted: 'border:#CC0000 bg:#FFCCCC',
Generic.Inserted: 'border:#00CC00 bg:#CCFFCC',
Generic.Error: '#FF0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: 'bold #000099',
Generic.Output: '#AAAAAA',
Generic.Traceback: '#99CC66',
Error: 'bg:#FFAAAA #AA0000'
}
|
gdos/legume
|
refs/heads/master
|
tests/test_keepalive.py
|
2
|
# legume. Copyright 2009-2013 Dale Reidy. All rights reserved.
# See LICENSE for details.
import sys
sys.path.append('..')
import legume.timing as time
import unittest
import legume
from greenbar import GreenBarRunner
def getRandomPort():
import random
return random.randint(16000, 50000)
class TestKeepAlive(unittest.TestCase):
def setUp(self):
self.port = getRandomPort()
self.server = legume.Server()
self.client = legume.Client()
def initEndpoints(self):
self.server.listen(('', self.port))
self.client.connect(('localhost', self.port))
def performUpdateLoop(self):
for i in range(60):
self.server.update()
self.client.update()
time.sleep(0.01)
def testKeepAliveClientWillDisconnect(self):
'''
Client will connect to Server but connection will timeout
and Client will go into an errored state.
'''
self.initEndpoints()
# Mismatched timeout causes client to bail on the connection early.
self.server.setTimeout(1.0)
self.client.setTimeout(0.25)
self.performUpdateLoop()
self.assertTrue(self.client.errored)
def testKeepAliveClientWillStayConnected(self):
'''
Client will stay connected to the server
'''
self.initEndpoints()
self.server.setTimeout(0.25)
self.client.setTimeout(0.25)
self.performUpdateLoop()
self.assertTrue(self.client.connected)
if __name__ == '__main__':
mytests = unittest.TestLoader().loadTestsFromTestCase(TestKeepAlive)
GreenBarRunner(verbosity=2).run(mytests)
|
franciscogarate/pyliferisk
|
refs/heads/master
|
Examples/Example_4_5b.py
|
1
|
#!/usr/bin/python
from pyliferisk import *
from pyliferisk.mortalitytables import INM05
import numpy as np
import pandas as pd
rfr = pd.read_excel('EIOPA_RFR_20161231_Term_Structures.xlsx', sheet_name='RFR_spot_no_VA',
skiprows=9, usecols='C:C', names=['Euro'])
tariff = Actuarial(nt=INM05, i=0.05)
reserve = MortalityTable(nt=INM05)
x = 32 # age
Cd = 3000 # capital death
Premium = Cd * Ax(tariff, 25) / annuity(tariff, 25, 'w', 0) #fixed at age 25
qx_vector = []
px_vector = []
for i in range(x,reserve.w + 1):
qx = ((reserve.lx[i]-reserve.lx[i+1]) / reserve.lx[x])
qx_vector.append(qx)
qx_sum = sum(qx_vector)
px_vector.append(1 - qx_sum)
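# Note (added for clarity, not in the original script): with the loop above,
# qx_vector[k] is the probability that a life aged x dies in policy year k+1,
# (lx[x+k] - lx[x+k+1]) / lx[x], and px_vector[k] = lx[x+k+1] / lx[x] is the
# matching probability of surviving those k+1 years, both measured from age x.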
def Reserve(i):
discount_factor = []
for y in range(0, reserve.w - x + 1):
if isinstance(i,float):
discount_factor.append(1 / (1 + i) ** (y + 1))
elif i == 'rfr':
discount_factor.append(1 / (1 + rfr['Euro'][y]) ** (y + 1))
APV_Premium = np.dot(Premium, px_vector)
APV_Claims = np.dot(Cd, qx_vector)
return np.dot(discount_factor, np.subtract(APV_Claims, APV_Premium)).round(2)
print(Reserve(0.0191))
print(Reserve(0.0139))
print(Reserve('rfr'))
|
hockeybuggy/pyactiveresource
|
refs/heads/master
|
pyactiveresource/__init__.py
|
386048
| |
sergeii/swat4stats.com
|
refs/heads/master
|
tracker/management/commands/importdata.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import)
import os
import sys
import datetime
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from django import db
from django.utils import timezone
import mock
from julia import shortcuts, node
from julia.parse import QueryString
from cacheops.invalidation import invalidate_all
from tracker.models import Server
from tracker.definitions import stream_pattern_node
from tracker.signals import stream_data_received
from tracker import models
class InvalidServer(Exception):
pass
class Command(BaseCommand):
servers = {
'-==MYT Team Svr==-': models.Server.objects.get_or_create(ip='81.19.209.212', port=10480)[0],
'-==MYT Co-op Svr==-': models.Server.objects.get_or_create(ip='81.19.209.212', port=10880)[0],
'[C=EF2929]||ESA|| [C=A90E0E]Starship! [C=ffffff]2VK=Kick!': models.Server.objects.get_or_create(ip='193.192.58.147', port=10480)[0],
}
def handle(self, *args, **options):
self.count = 0
if not args:
raise CommandError('provide path to file')
# invalidate redis cache
invalidate_all()
with open(args[0]) as f:
for line in f:
line = line.strip()
if line:
self.parse_line(line)
def parse_line(self, line):
qs = QueryString().parse(line)
# expand querystring with either method
qs = (QueryString.expand_dots if any('.' in key for key in qs) else QueryString.expand_array)(qs)
try:
data = stream_pattern_node.parse(qs)
except node.ValueNodeError as e:
self.stdout.write(str(e))
else:
try:
models.Game.objects.get(tag=data['tag'].value)
except ObjectDoesNotExist:
pass
else:
self.stdout.write('%s exists' % data['tag'].value)
return
try:
round_date = self.parse_datetime(data['timestamp'].value)
with mock.patch.object(timezone, 'now') as mock_now:
mock_now.return_value = round_date
# fix distance
self.fix_distance(data)
# emit signal
stream_data_received.send(sender=None, data=data, server=self.servers[data['hostname'].value], raw=line)
except (db.utils.IntegrityError, KeyError) as e:
self.stdout.write(str(e))
else:
self.count += 1
self.stdout.write('#%d' % self.count)
db.reset_queries()
def parse_datetime(self, timestamp):
date = datetime.datetime.fromtimestamp(timestamp).replace(tzinfo=timezone.utc)
if date > datetime.datetime(2014, 3, 30, 1, 0, 0, tzinfo=timezone.utc):
date -= datetime.timedelta(seconds=3600)
return date
def fix_distance(self, data):
if data.get('players', None) is None:
return
if data['version'].value.split('.') > ['0', '1']:
return
for player in data['players']:
if player.get('weapons', None):
for weapon in player['weapons']:
weapon['distance'].value *= 100
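# Note (an assumption, not stated in the original): fix_distance rescales
# weapon distances by 100 only for streams reported by tracker versions
# <= 0.1, which presumably logged distances in meters rather than the
# centimeters expected downstream.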
|
HiSPARC/publicdb
|
refs/heads/master
|
publicdb/inforecords/migrations/0006_increase_country_name_length.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-01-29 23:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inforecords', '0005_fix_ip_fields_bug'),
]
operations = [
migrations.AlterField(
model_name='country',
name='name',
field=models.CharField(max_length=70, unique=True),
),
]
|
rtindru/django
|
refs/heads/master
|
tests/admin_views/models.py
|
9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
import tempfile
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
@property
def name_property(self):
"""
A property that simply returns the name. Used to test #24461
"""
return self.name
@python_2_unicode_compatible
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section, null=True, blank=True)
sub_section = models.ForeignKey(Section, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
def __str__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
model_year.short_description = ''
def model_year_reversed(self):
return self.date.year
model_year_reversed.admin_order_field = '-date'
model_year_reversed.short_description = ''
@python_2_unicode_compatible
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name='¿Name?')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name='¿Name?')
book = models.ForeignKey(Book)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name='¿Title?')
content = models.TextField()
book = models.ForeignKey(Book)
def __str__(self):
return self.title
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = '¿Chapter?'
@python_2_unicode_compatible
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra1: %s' % self.xtra
@python_2_unicode_compatible
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, verbose_name='¿Chap?')
xtra = models.CharField(max_length=100, verbose_name='¿Xtra?')
def __str__(self):
return '¿Xtra2: %s' % self.xtra
class RowLevelChangePermissionModel(models.Model):
name = models.CharField(max_length=100, blank=True)
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
@python_2_unicode_compatible
class ModelWithStringPrimaryKey(models.Model):
string_pk = models.CharField(max_length=255, primary_key=True)
def __str__(self):
return self.string_pk
def get_absolute_url(self):
return '/dummy/%s/' % self.string_pk
@python_2_unicode_compatible
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField(default=False)
def __str__(self):
return self.value
# we replicate Color to register with another ModelAdmin
class Color2(Color):
class Meta:
proxy = True
@python_2_unicode_compatible
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, limit_choices_to={'warm': True})
pub_date = models.DateField(blank=True, null=True)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Actor(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
title = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Inquisition(models.Model):
expected = models.BooleanField(default=False)
leader = models.ForeignKey(Actor)
country = models.CharField(max_length=20)
def __str__(self):
return "by %s from %s" % (self.leader, self.country)
@python_2_unicode_compatible
class Sketch(models.Model):
title = models.CharField(max_length=100)
inquisition = models.ForeignKey(Inquisition, limit_choices_to={'leader__name': 'Palin',
'leader__age': 27,
'expected': False,
})
defendant0 = models.ForeignKey(Actor, limit_choices_to={'title__isnull': False}, related_name='as_defendant0')
defendant1 = models.ForeignKey(Actor, limit_choices_to={'title__isnull': True}, related_name='as_defendant1')
def __str__(self):
return self.title
def today_callable_dict():
return {"last_action__gte": datetime.datetime.today()}
def today_callable_q():
return models.Q(last_action__gte=datetime.datetime.today())
@python_2_unicode_compatible
class Character(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
def __str__(self):
return self.username
@python_2_unicode_compatible
class StumpJoke(models.Model):
variation = models.CharField(max_length=100)
most_recently_fooled = models.ForeignKey(Character, limit_choices_to=today_callable_dict, related_name="+")
has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
def __str__(self):
return self.variation
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
@python_2_unicode_compatible
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
age = models.IntegerField(default=21)
alive = models.BooleanField(default=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, related_name="accounts")
servicename = 'generic service'
def __str__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = 'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = 'bar'
@python_2_unicode_compatible
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __str__(self):
return "%s (%s)" % (self.name, self.email)
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class Meta:
ordering = ('release_date',) # overridden in PodcastAdmin
class Vodcast(Media):
media = models.OneToOneField(Media, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class Parent(models.Model):
name = models.CharField(max_length=128)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
class Child(models.Model):
parent = models.ForeignKey(Parent, editable=False)
name = models.CharField(max_length=30, blank=True)
def clean(self):
if self.name == '_invalid':
raise ValidationError('invalid')
@python_2_unicode_compatible
class EmptyModel(models.Model):
def __str__(self):
return "Primary key = %s" % self.id
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, related_name="pictures")
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
recommender = models.ForeignKey(Recommender)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector)
expensive = models.BooleanField(default=True)
@python_2_unicode_compatible
class Category(models.Model):
collector = models.ForeignKey(Collector)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __str__(self):
return '%s:o%s' % (self.id, self.order)
class Link(models.Model):
posted = models.DateField(
default=lambda: datetime.date.today() - datetime.timedelta(days=7)
)
url = models.URLField()
post = models.ForeignKey("Post")
class PrePopulatedPost(models.Model):
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
slug = models.SlugField()
class PrePopulatedSubPost(models.Model):
post = models.ForeignKey(PrePopulatedPost)
subtitle = models.CharField(max_length=100)
subslug = models.SlugField()
class Post(models.Model):
title = models.CharField(max_length=100, help_text="Some help text for the title (with unicode ŠĐĆŽćžšđ)")
content = models.TextField(help_text="Some help text for the content (with unicode ŠĐĆŽćžšđ)")
posted = models.DateField(
default=datetime.date.today,
help_text="Some help text for the date (with unicode ŠĐĆŽćžšđ)"
)
public = models.NullBooleanField()
def awesomeness_level(self):
return "Very awesome."
# Proxy model to test overridden fields attrs on Post model so as not to
# interfere with other tests.
class FieldOverridePost(Post):
class Meta:
proxy = True
@python_2_unicode_compatible
class Gadget(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Villain(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class SuperVillain(Villain):
pass
@python_2_unicode_compatible
class FunkyTag(models.Model):
"Because we all know there's only one real use case for GFKs."
name = models.CharField(max_length=25)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Plot(models.Model):
name = models.CharField(max_length=100)
team_leader = models.ForeignKey(Villain, related_name='lead_plots')
contact = models.ForeignKey(Villain, related_name='contact_plots')
tags = GenericRelation(FunkyTag)
def __str__(self):
return self.name
@python_2_unicode_compatible
class PlotDetails(models.Model):
details = models.CharField(max_length=100)
plot = models.OneToOneField(Plot)
def __str__(self):
return self.details
@python_2_unicode_compatible
class SecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
villain = models.ForeignKey(Villain)
def __str__(self):
return self.location
@python_2_unicode_compatible
class SuperSecretHideout(models.Model):
""" Secret! Not registered with the admin! """
location = models.CharField(max_length=100)
supervillain = models.ForeignKey(SuperVillain)
def __str__(self):
return self.location
@python_2_unicode_compatible
class CyclicOne(models.Model):
name = models.CharField(max_length=25)
two = models.ForeignKey('CyclicTwo')
def __str__(self):
return self.name
@python_2_unicode_compatible
class CyclicTwo(models.Model):
name = models.CharField(max_length=25)
one = models.ForeignKey(CyclicOne)
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=20)
class Pizza(models.Model):
name = models.CharField(max_length=20)
toppings = models.ManyToManyField('Topping', related_name='pizzas')
class Album(models.Model):
owner = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL)
title = models.CharField(max_length=30)
class Employee(Person):
code = models.CharField(max_length=20)
class WorkHour(models.Model):
datum = models.DateField()
employee = models.ForeignKey(Employee)
class Question(models.Model):
question = models.CharField(max_length=20)
@python_2_unicode_compatible
class Answer(models.Model):
question = models.ForeignKey(Question, on_delete=models.PROTECT)
answer = models.CharField(max_length=20)
def __str__(self):
return self.answer
class Reservation(models.Model):
start_date = models.DateTimeField()
price = models.IntegerField()
DRIVER_CHOICES = (
('bill', 'Bill G'),
('steve', 'Steve J'),
)
RESTAURANT_CHOICES = (
('indian', 'A Taste of India'),
('thai', 'Thai Pography'),
('pizza', 'Pizza Mama'),
)
class FoodDelivery(models.Model):
reference = models.CharField(max_length=100)
driver = models.CharField(max_length=100, choices=DRIVER_CHOICES, blank=True)
restaurant = models.CharField(max_length=100, choices=RESTAURANT_CHOICES, blank=True)
class Meta:
unique_together = (("driver", "restaurant"),)
@python_2_unicode_compatible
class CoverLetter(models.Model):
author = models.CharField(max_length=30)
date_written = models.DateField(null=True, blank=True)
def __str__(self):
return self.author
class Paper(models.Model):
title = models.CharField(max_length=30)
author = models.CharField(max_length=30, blank=True, null=True)
class ShortMessage(models.Model):
content = models.CharField(max_length=140)
timestamp = models.DateTimeField(null=True, blank=True)
@python_2_unicode_compatible
class Telegram(models.Model):
title = models.CharField(max_length=30)
date_sent = models.DateField(null=True, blank=True)
def __str__(self):
return self.title
class Story(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class OtherStory(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
class ComplexSortedPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
is_employee = models.NullBooleanField()
class PluggableSearchPerson(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
class PrePopulatedPostLargeSlug(models.Model):
"""
Regression test for #15938: a large max_length for the slugfield must not
be localized in prepopulated_fields_js.html or it might end up breaking
the JavaScript (i.e., using THOUSAND_SEPARATOR ends up with maxLength=1,000)
"""
title = models.CharField(max_length=100)
published = models.BooleanField(default=False)
# `db_index=False` because MySQL cannot index large CharField (#21196).
slug = models.SlugField(max_length=1000, db_index=False)
class AdminOrderedField(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedModelMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
def some_order(self):
return self.order
some_order.admin_order_field = 'order'
class AdminOrderedAdminMethod(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
class AdminOrderedCallable(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
@python_2_unicode_compatible
class Report(models.Model):
title = models.CharField(max_length=100)
def __str__(self):
return self.title
class MainPrepopulated(models.Model):
name = models.CharField(max_length=100)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(blank=True)
slug2 = models.SlugField(blank=True)
slug3 = models.SlugField(blank=True, allow_unicode=True)
class RelatedPrepopulated(models.Model):
parent = models.ForeignKey(MainPrepopulated)
name = models.CharField(max_length=75)
pubdate = models.DateField()
status = models.CharField(
max_length=20,
choices=(('option one', 'Option One'),
('option two', 'Option Two')))
slug1 = models.SlugField(max_length=50)
slug2 = models.SlugField(max_length=60)
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #16819.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
class UndeletableObject(models.Model):
"""
Model whose show_delete in admin change_view has been disabled
Refs #10057.
"""
name = models.CharField(max_length=255)
class UnchangeableObject(models.Model):
"""
Model whose change_view is disabled in admin
Refs #20640.
"""
class UserMessenger(models.Model):
"""
Dummy class for testing message_user functions on ModelAdmin
"""
class Simple(models.Model):
"""
Simple model with nothing on it for use in testing
"""
class Choice(models.Model):
choice = models.IntegerField(blank=True, null=True,
choices=((1, 'Yes'), (0, 'No'), (None, 'No opinion')))
class ParentWithDependentChildren(models.Model):
"""
Issue #20522
Model where the validation of child foreign-key relationships depends
on validation of the parent
"""
some_required_info = models.PositiveIntegerField()
family_name = models.CharField(max_length=255, blank=False)
class DependentChild(models.Model):
"""
Issue #20522
Model that depends on validation of the parent class for one of its
fields to validate during clean
"""
parent = models.ForeignKey(ParentWithDependentChildren)
family_name = models.CharField(max_length=255)
class _Manager(models.Manager):
def get_queryset(self):
return super(_Manager, self).get_queryset().filter(pk__gt=1)
class FilteredManager(models.Model):
def __str__(self):
return "PK=%d" % self.pk
pk_gt_1 = _Manager()
objects = models.Manager()
class EmptyModelVisible(models.Model):
""" See ticket #11277. """
class EmptyModelHidden(models.Model):
""" See ticket #11277. """
class EmptyModelMixin(models.Model):
""" See ticket #11277. """
class State(models.Model):
name = models.CharField(max_length=100)
class City(models.Model):
state = models.ForeignKey(State)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Restaurant(models.Model):
city = models.ForeignKey(City)
name = models.CharField(max_length=100)
def get_absolute_url(self):
return '/dummy/%s/' % self.pk
class Worker(models.Model):
work_at = models.ForeignKey(Restaurant)
name = models.CharField(max_length=50)
surname = models.CharField(max_length=50)
# Models for #23329
class ReferencedByParent(models.Model):
name = models.CharField(max_length=20, unique=True)
class ParentWithFK(models.Model):
fk = models.ForeignKey(
ReferencedByParent, to_field='name', related_name='hidden+',
)
class ChildOfReferer(ParentWithFK):
pass
# Models for #23431
class ReferencedByInline(models.Model):
name = models.CharField(max_length=20, unique=True)
class InlineReference(models.Model):
fk = models.ForeignKey(
ReferencedByInline, to_field='name', related_name='hidden+',
)
class InlineReferer(models.Model):
refs = models.ManyToManyField(InlineReference)
# Models for #23604 and #23915
class Recipe(models.Model):
rname = models.CharField(max_length=20, unique=True)
class Ingredient(models.Model):
iname = models.CharField(max_length=20, unique=True)
recipes = models.ManyToManyField(Recipe, through='RecipeIngredient')
class RecipeIngredient(models.Model):
ingredient = models.ForeignKey(Ingredient, to_field='iname')
recipe = models.ForeignKey(Recipe, to_field='rname')
# Model for #23839
class NotReferenced(models.Model):
# Don't point any FK at this model.
pass
# Models for #23934
class ExplicitlyProvidedPK(models.Model):
name = models.IntegerField(primary_key=True)
class ImplicitlyGeneratedPK(models.Model):
name = models.IntegerField(unique=True)
|
monkeyi/monkeyfarm
|
refs/heads/master
|
www/app.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging; logging.basicConfig(level=logging.INFO)
#import pdb
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
import orm
from coroweb import add_routes, add_static, add_route
from handlers import cookie2user, COOKIE_NAME
def init_jinja2(app, **kw):
logging.info('init jinja2..., kw = %s', kw)
options = dict(
autoescape = kw.get('autoescape', True),
block_start_string = kw.get('block_start_string', '{%'),
block_end_string = kw.get('block_end_string', '%}'),
variable_start_string = kw.get('variable_start_string', '{{'),
variable_end_string = kw.get('variable_end_string', '}}'),
auto_reload = kw.get('auto_reload', True)
)
path = kw.get('path', None)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
logging.info('set jinja2 template path: %s' % path)
env = Environment(loader=FileSystemLoader(path), **options)
filters = kw.get('filters', None)
if filters is not None:
for name, f in filters.items():
env.filters[name] = f
app['__templating__'] = env
# Logging middleware: the app calls this factory for each request, passing the
# next handler in the chain (ultimately response_factory).
# Logs the request method and path before the URL handler runs.
async def logger_factory(app, handler):
async def logger(request):
logging.info('logger_factory, Request: %s %s, handler: %s app: %s' % (request.method, request.path, handler, app))
return (await handler(request))
return logger
async def auth_factory(app, handler):
async def auth(request):
logging.info('check user: %s %s' % (request.method, request.path))
request.__user__ = None
cookie_str = request.cookies.get(COOKIE_NAME)
logging.info('cookie_str = %s', cookie_str)
if cookie_str:
user = await cookie2user(cookie_str)
logging.info('await cookie2user,user = %s', user)
if user:
logging.info('set current user: %s' % user.email)
request.__user__ = user
if request.path.startswith('/manage/') and (request.__user__ is None or not request.__user__.admin):
return web.HTTPFound('/signin')
return await handler(request)
return auth
# Request-body middleware: called by the app when a request arrives.
# Intercepts POST requests and stores the parsed body on request.__data__.
async def data_factory(app, handler):
async def parse_data(request):
if request.method == 'POST':
if request.content_type.startswith('application/json'):
request.__data__ = await request.json()
logging.info('request json: %s' % str(request.__data__))
elif request.content_type.startswith('application/x-www-form-urlencoded'):
request.__data__ = await request.post()
logging.info('request form: %s' % str(request.__data__))
return (await handler(request))
return parse_data
# Response middleware: called by the app after routing, with the RequestHandler
# registered via add_route passed in as `handler`.
# Converts the handler's return value (typically a dict) into the web.Response
# object that the aiohttp framework expects.
async def response_factory(app, handler):
async def response(request):
logging.info('response_factory run, handler = %s, request=%s' % (handler, request))
r = await handler(request)
logging.info('response_factory after handler result = %s', r)
if isinstance(r, web.StreamResponse):
return r
if isinstance(r, bytes):
resp = web.Response(body=r)
resp.content_type = 'application/octet-stream'
return resp
if isinstance(r, str):
if r.startswith('redirect:'):
return web.HTTPFound(r[9:])
resp = web.Response(body=r.encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, dict):
template = r.get('__template__')
if template is None:
resp = web.Response(body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))
resp.content_type = 'application/json;charset=utf-8'
return resp
else:
r['__user__'] = request.__user__
resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, int) and r >= 100 and r < 600:
return web.Response(status=r)
if isinstance(r, tuple) and len(r) == 2:
t, m = r
if isinstance(t, int) and t >= 100 and t < 600:
return web.Response(status=t, text=str(m))
#default:
resp = web.Response(body=str(r).encode('utf-8'))
resp.content_type = 'text/plain;charset=utf-8'
return resp
return response
def datetime_filter(t):
delta = int(time.time() - t)
if delta < 60:
return u'1 minute ago'
if delta < 3600:
return u'%s minutes ago' % (delta // 60)
if delta < 86400:
return u'%s hours ago' % (delta // 3600)
if delta < 604800:
return u'%s days ago' % (delta // 86400)
dt = datetime.fromtimestamp(t)
return u'%s-%s-%s' % (dt.year, dt.month, dt.day)
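# Illustrative behaviour of the filter above (input values assumed):
#   datetime_filter(time.time() - 120)       -> u'2 minutes ago'
#   datetime_filter(time.time() - 7200)      -> u'2 hours ago'
#   datetime_filter(time.time() - 86400 * 3) -> u'3 days ago'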
def index(request):
# pdb.set_trace()
return web.Response(body=b'<h1>Welcome to MonkeyFarm</h1>', content_type='text/html', charset='UTF-8')
async def init(loop):
await orm.create_pool(loop=loop, host='127.0.0.1', port=3306, user='root', password='111111', db='monkeyfarm')
# Create the application object and register the middlewares.
# app = web.Application(loop=loop)
# Execution flow for an incoming request:
# request -> logger_factory(app, handler=response_factory), which mainly adds logging -> response_factory(app, handler=RequestHandler), where RequestHandler is the handler registered via app.add_router and parses the request arguments for the actual URL handler. response_factory first calls handler(request) to get the result (a dict at this point), then converts that dict into the web.Response object required by the aiohttp framework; this web.Response carries the rendered jinja2 template data.
app = web.Application(loop=loop, middlewares=[
logger_factory, auth_factory, response_factory
])
# Initialize the jinja2 templates.
init_jinja2(app, filters=dict(datetime=datetime_filter))
# Register the URL handlers from the handlers module with the app.
add_routes(app, 'handlers')
#index.__route__ = '/'
#index.__method__ = 'GET'
#add_route(app, index)
add_static(app)
# Create the TCP server.
srv = await loop.create_server(app.make_handler(), '127.0.0.1', 9000)
logging.info('server started at http://127.0.0.1:9000')
return srv
# app = web.Application(loop=loop)
# app.router.add_route('GET', '/', index)
# srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 9000)
# logging.info('server started at http://127.0.0.1:9000...')
# return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
|
xfournet/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyAugmentAssignmentQuickFixTest/simple_after.py
|
83
|
var += 3
|
nebril/fuel-web
|
refs/heads/master
|
nailgun/nailgun/settings.py
|
7
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from nailgun.logger import logger
class NailgunSettings(object):
def __init__(self):
settings_files = []
logger.debug("Looking for settings.yaml package config "
"using old style __file__")
project_path = os.path.dirname(__file__)
project_settings_file = os.path.join(project_path, 'settings.yaml')
settings_files.append(project_settings_file)
settings_files.append('/etc/nailgun/settings.yaml')
version_paths = ["/etc/fuel/version.yaml",
"/etc/fuel/nailgun/version.yaml",
"/etc/nailgun/version.yaml"]
for path in version_paths:
if os.access(path, os.R_OK):
settings_files.append(path)
break
else:
logger.error("'version.yaml' config file is not found")
test_config = os.environ.get('NAILGUN_CONFIG')
if test_config:
settings_files.append(test_config)
self.config = {}
for sf in settings_files:
try:
logger.debug("Trying to read config file %s" % sf)
self.update_from_file(sf)
except Exception as e:
logger.error("Error while reading config file %s: %s" %
(sf, str(e)))
if int(self.config.get("DEVELOPMENT") or 0):
logger.info("DEVELOPMENT MODE ON:")
here = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..')
)
self.config.update({
'STATIC_DIR': os.path.join(here, 'static'),
'TEMPLATE_DIR': os.path.join(here, 'static')
})
logger.info("Static dir is %s" % self.config.get("STATIC_DIR"))
logger.info("Template dir is %s" % self.config.get("TEMPLATE_DIR"))
def update(self, dct):
self.config.update(dct)
def update_from_file(self, path):
with open(path, "r") as custom_config:
self.config.update(
yaml.safe_load(custom_config.read())
)
def dump(self):
return yaml.dump(self.config)
def __getattr__(self, name):
return self.config.get(name, None)
def __repr__(self):
return "<settings object>"
settings = NailgunSettings()
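# Illustrative usage (keys assumed): any key from the merged YAML files is
# available as an attribute, e.g. settings.STATIC_DIR; unknown keys fall
# through __getattr__ and return None rather than raising AttributeError.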
|
narthollis/eve-shoppinglist
|
refs/heads/master
|
shoppinglist/dns/reversename.py
|
75
|
# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Reverse Map Names.
@var ipv4_reverse_domain: The DNS IPv4 reverse-map domain, in-addr.arpa.
@type ipv4_reverse_domain: dns.name.Name object
@var ipv6_reverse_domain: The DNS IPv6 reverse-map domain, ip6.arpa.
@type ipv6_reverse_domain: dns.name.Name object
"""
import dns.name
import dns.ipv6
import dns.ipv4
import dns.exception
ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.')
ipv6_reverse_domain = dns.name.from_text('ip6.arpa.')
def from_address(text):
"""Convert an IPv4 or IPv6 address in textual form into a Name object whose
value is the reverse-map domain name of the address.
@param text: an IPv4 or IPv6 address in textual form (e.g. '127.0.0.1',
'::1')
@type text: str
@rtype: dns.name.Name object
"""
try:
parts = list(dns.ipv6.inet_aton(text).encode('hex_codec'))
origin = ipv6_reverse_domain
except Exception:
parts = ['%d' % ord(byte) for byte in dns.ipv4.inet_aton(text)]
origin = ipv4_reverse_domain
parts.reverse()
return dns.name.from_text('.'.join(parts), origin=origin)
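# Illustrative examples (added for clarity, not part of the original module):
#   from_address('127.0.0.1') yields the name '1.0.0.127.in-addr.arpa.'
#   to_address() below inverts the mapping, returning '127.0.0.1' for that name.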
def to_address(name):
"""Convert a reverse map domain name into textual address form.
@param name: an IPv4 or IPv6 address in reverse-map form.
@type name: dns.name.Name object
@rtype: str
"""
if name.is_subdomain(ipv4_reverse_domain):
name = name.relativize(ipv4_reverse_domain)
labels = list(name.labels)
labels.reverse()
text = '.'.join(labels)
# run through inet_aton() to check syntax and make pretty.
return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))
elif name.is_subdomain(ipv6_reverse_domain):
name = name.relativize(ipv6_reverse_domain)
labels = list(name.labels)
labels.reverse()
parts = []
i = 0
l = len(labels)
while i < l:
parts.append(''.join(labels[i:i+4]))
i += 4
text = ':'.join(parts)
# run through inet_aton() to check syntax and make pretty.
return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))
else:
raise dns.exception.SyntaxError('unknown reverse-map address family')
|
JorgeCoock/django
|
refs/heads/master
|
setup.py
|
195
|
import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
# We have to try also with an explicit prefix of /usr/local in order to
# catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
setup(
name='Django',
version=version,
url='http://www.djangoproject.com/',
author='Django Software Foundation',
author_email='foundation@djangoproject.com',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
license='BSD',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['django/bin/django-admin.py'],
entry_points={'console_scripts': [
'django-admin = django.core.management:execute_from_command_line',
]},
extras_require={
"bcrypt": ["bcrypt"],
},
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
|
AnishShah/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tensor_forest/python/ops/tensor_forest_ops.py
|
166
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom ops used by tensorforest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.tensor_forest.python.ops.gen_tensor_forest_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
_tensor_forest_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_tensor_forest_ops.so'))
|
ejeschke/ginga
|
refs/heads/master
|
ginga/opengl/glsl/req.py
|
3
|
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
# OpenGL version requirements to use these shaders
major = 4
minor = 5
|
Deepakkothandan/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_team.py
|
15
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_team
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower team.
description:
- Create, update, or destroy Ansible Tower teams. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the team.
required: True
default: null
organization:
description:
- Organization the team should be made a member of.
required: True
default: null
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Create tower team
tower_team:
name: Team Name
description: Team Description
organization: test-org
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
description=dict(),
organization=dict(required=True),
tower_host=dict(),
tower_username=dict(),
tower_password=dict(no_log=True),
tower_verify_ssl=dict(type='bool', default=True),
tower_config_file=dict(type='path'),
state=dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
description = module.params.get('description')
organization = module.params.get('organization')
state = module.params.get('state')
json_output = {'team': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
team = tower_cli.get_resource('team')
try:
org_res = tower_cli.get_resource('organization')
org = org_res.get(name=organization)
if state == 'present':
result = team.modify(name=name, organization=org['id'],
description=description, create_on_missing=True)
json_output['id'] = result['id']
elif state == 'absent':
result = team.delete(name=name, organization=org['id'])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update team, organization not found: {0}'.format(excinfo), changed=False)
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to update team: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
sigma-random/asuswrt-merlin
|
refs/heads/master
|
release/src/router/asusnatnl/pjproject-1.12/tests/pjsua/scripts-run/100_simple.py
|
59
|
# $Id: 100_simple.py 2028 2008-06-16 13:04:44Z bennylp $
#
# Just about the simple pjsua command line parameter, which should
# never fail in any circumstances
from inc_cfg import *
test_param = TestParam(
"Basic run",
[
InstanceParam("pjsua", "--null-audio --rtp-port 0")
]
)
|
praba230890/frappe
|
refs/heads/develop
|
frappe/model/docfield.py
|
61
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""docfield utililtes"""
import frappe
def rename(doctype, fieldname, newname):
"""rename docfield"""
df = frappe.db.sql("""select * from tabDocField where parent=%s and fieldname=%s""",
(doctype, fieldname), as_dict=1)
if not df:
return
df = df[0]
if frappe.db.get_value('DocType', doctype, 'issingle'):
update_single(df, newname)
else:
update_table(df, newname)
update_parent_field(df, newname)
def update_single(f, new):
"""update in tabSingles"""
frappe.db.begin()
frappe.db.sql("""update tabSingles set field=%s where doctype=%s and field=%s""",
(new, f['parent'], f['fieldname']))
frappe.db.commit()
def update_table(f, new):
"""update table"""
query = get_change_column_query(f, new)
if query:
frappe.db.sql(query)
def update_parent_field(f, new):
"""update 'parentfield' in tables"""
if f['fieldtype']=='Table':
frappe.db.begin()
frappe.db.sql("""update `tab%s` set parentfield=%s where parentfield=%s""" \
% (f['options'], '%s', '%s'), (new, f['fieldname']))
frappe.db.commit()
def get_change_column_query(f, new):
"""generate change fieldname query"""
desc = frappe.db.sql("desc `tab%s`" % f['parent'])
for d in desc:
if d[0] == f['fieldname']:
return 'alter table `tab%s` change `%s` `%s` %s' % \
(f['parent'], f['fieldname'], new, d[1])
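# Illustrative example (hypothetical doctype and column type): renaming field
# `old_name` to `new_name` on parent "Task", where `desc` reports the column
# type as varchar(140), produces:
#   alter table `tabTask` change `old_name` `new_name` varchar(140)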
|
40123210/w17b_exam
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/string.py
|
734
|
"""A collection of string constants.
Public module variables:
whitespace -- a string containing all ASCII whitespace
ascii_lowercase -- a string containing all ASCII lowercase letters
ascii_uppercase -- a string containing all ASCII uppercase letters
ascii_letters -- a string containing all ASCII letters
digits -- a string containing all ASCII decimal digits
hexdigits -- a string containing all ASCII hexadecimal digits
octdigits -- a string containing all ASCII octal digits
punctuation -- a string containing all ASCII punctuation characters
printable -- a string containing all ASCII characters considered printable
"""
import _string
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
"""capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.
"""
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
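# Illustrative usage (not part of the original module):
#   capwords('  hello   world  ')  -> 'Hello World'
#   capwords('a-b-c', sep='-')     -> 'A-B-C'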
####################################################################
import re as _re
from collections import ChainMap
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, cls.flags | _re.VERBOSE)
class Template(metaclass=_TemplateMetaclass):
"""A string class for supporting $-substitutions."""
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
flags = _re.IGNORECASE
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(keepends=True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = ChainMap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named') or mo.group('braced')
if named is not None:
try:
# We use this idiom instead of str() because the latter
# will fail if val is a Unicode containing non-ASCII
return '%s' % (mapping[named],)
except KeyError:
return mo.group()
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return mo.group()
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
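# Illustrative usage of Template (mirrors the stdlib documentation):
#   Template('$who likes $what').substitute(who='tim', what='kung pao')
#     -> 'tim likes kung pao'
#   Template('$who likes $what').safe_substitute(who='tim')
#     -> 'tim likes $what'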
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str.
# The overall parser is implemented in _string.formatter_parser.
# The field name parser is implemented in _string.formatter_field_name_split
class Formatter:
def format(self, format_string, *args, **kwargs):
return self.vformat(format_string, args, kwargs)
def vformat(self, format_string, args, kwargs):
used_args = set()
result = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(used_args, args, kwargs)
return result
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# expand the format spec, if needed
format_spec = self._vformat(format_spec, args, kwargs,
used_args, recursion_depth-1)
# format the object and append to the result
result.append(self.format_field(obj, format_spec))
return ''.join(result)
def get_value(self, key, args, kwargs):
if isinstance(key, int):
return args[key]
else:
return kwargs[key]
def check_unused_args(self, used_args, args, kwargs):
pass
def format_field(self, value, format_spec):
return format(value, format_spec)
def convert_field(self, value, conversion):
# do any conversion on the resulting object
if conversion is None:
return value
elif conversion == 's':
return str(value)
elif conversion == 'r':
return repr(value)
elif conversion == 'a':
return ascii(value)
raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
# literal_text can be zero length
# field_name can be None, in which case there's no
# object to format and output
# if field_name is not None, it is looked up, formatted
# with format_spec and conversion and then used
def parse(self, format_string):
return _string.formatter_parser(format_string)
# given a field_name, find the object it references.
# field_name: the field being looked up, e.g. "0.name"
# or "lookup[3]"
# used_args: a set of which args have been used
# args, kwargs: as passed in to vformat
def get_field(self, field_name, args, kwargs):
first, rest = _string.formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
# loop through the rest of the field_name, doing
# getattr or getitem as needed
for is_attr, i in rest:
if is_attr:
obj = getattr(obj, i)
else:
obj = obj[i]
return obj, first
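# Illustrative usage of Formatter (values assumed):
#   Formatter().format('{0} has {1[count]} items', 'cart', {'count': 3})
#     -> 'cart has 3 items'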
|
camptocamp/ngo-addons-backport
|
refs/heads/master
|
addons/account_check_writing/__openerp__.py
|
58
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Check Writing',
'version': '1.1',
'author': 'OpenERP SA, NovaPoint Group',
'category': 'Generic Modules/Accounting',
'description': """
Module for the Check Writing and Check Printing.
================================================
""",
'website': 'http://www.openerp.com',
'depends' : ['account_voucher'],
'data': [
'wizard/account_check_batch_printing_view.xml',
'account_check_writing_report.xml',
'account_view.xml',
'account_voucher_view.xml',
'account_check_writing_data.xml',
],
'demo': ['account_demo.xml'],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|