| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null ⌀) |
|---|---|---|---|---|
bijanbrahma/SARA
|
refs/heads/master
|
Pi_navigatorClient.py
|
1
|
import socket
import time
import RPi.GPIO as gpio
import sys
host = 'xxx.xxx.xx.xxx' #address of the Pi on the LAN; find it with ifconfig
port = xxxx #random unused port, i.e. above 1000
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host,port)) #socket for receiving data (the centroid) from the SURF program
host2 = 'xxx.xxx.xx.xx' #address of the Pi on the LAN; find it with ifconfig
port2 = xxx #random unused port, different from the previous one, i.e. above 1000
e = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
e.bind((host2,port2)) #socket for receiving distance readings from the ultrasonic sensor program
e.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1) #shrink the receive buffer so stale readings are not queued (only the latest value matters; the kernel may enforce a larger minimum)
def init():
gpio.setmode(gpio.BOARD)
gpio.setup(19, gpio.OUT)
gpio.setup(11, gpio.OUT)
gpio.setup(13, gpio.OUT)
gpio.setup(15, gpio.OUT)
def rotateleft(tf):
init()
gpio.output(19,True)
gpio.output(11,False)
gpio.output(13,True)
gpio.output(15,False)
time.sleep(tf)
gpio.cleanup()
def rotateright(tf):
init()
gpio.output(19,False)
gpio.output(11,True)
gpio.output(13,False)
gpio.output(15,True)
time.sleep(tf)
gpio.cleanup()
def foreward(tf):
init()
gpio.output(19,False)
gpio.output(11,False)
gpio.output(13,True)
gpio.output(15,True)
time.sleep(tf)
gpio.cleanup()
def search():
while True:
print "searching .."
rotateright(0.030)
data, addr = s.recvfrom(1024) #get centroid from the SURF program as a string
end = len(data)
clon = data[1:(end-1)] #drop the first and last characters (the enclosing delimiters)
p, q = map(float, clon.split(',')) #parse the string into floats
if p == -1 and q == -1:
pass #if object not found, do nothing and go to the next iteration
else: #if object found start to verify
count = 0
r = time.time()
t = time.time()
while t-r < 1.5: #verification period = 1.5 sec, t=stop, r=start times respectively
print "verifying"
data, addr = s.recvfrom(1024)
end = len(data)
clon = data[1:(end-1)]
p, q = map(float, clon.split(','))
if p!=-1 and q!=-1: #object found
count = count+1 #increment count if obj found
t = time.time() #update stop time; the verification loop runs until t-r reaches 1.5 sec
if count > 5: #if object found more than 5 times in the verify period, search phase is complete
print "verified"
break
else:
pass
return 2
def allignment():
flag = 0
lag = 0
while True:
data, addr = s.recvfrom(1024)
end = len(data)
clon = data[1:(end-1)]
p, q = map(float, clon.split(','))
if p ==-1 and flag==0: #if the object disappears during alignment, start the timer
start = time.time()
flag = 1
if p ==-1 and flag!=0: #if the object is still not found, update the elapsed time
stop = time.time()
lag = stop - start # lag is the time elapsed without the object appearing even once
if lag > 5: #if the object disappears for more than 5 sec, search again
return 1
if p<283.0 and p!=-1:
print "rotating left"
rotateleft(0.030)
start = time.time()
flag = 0 #if the object appears even once reset timer
if p>339.0:
print "rotating right"
rotateright(0.030)
start = time.time()
flag = 0
if 283 < p < 339:
print 'aligned'
start = time.time()
break #if aligned, break and go to ram() to move closer to the target
return 3
def ram():
datak, adres= e.recvfrom(1024)
d = float(datak) #get distance from ultrasonic sensor program
print d
flag=0
lg=0
while d>10:
data, addr = s.recvfrom(1024)
end = len(data)
clon = data[1:(end-1)]
p, q = map(float, clon.split(','))
datak, adres = e.recvfrom(1024)
d = float(datak)
print d
if 283 < p < 339: #if still aligned, move forward
foreward(0.030)
flag=0
elif not(283 < p < 339) and p!=-1: #if alignment is lost, go back to allignment()
r = allignment()
flag=0
else: #if object lost for more than 5 sec go back to search()
if flag==0:
st = time.time()
flag=1
else:
en = time.time()
lg = en-st
if lg>5:
return 1
return 4
def pickup(): #pickup routine to be written
print "pickup routine"
time.sleep(5)
return 5
def fetch():
print "fetching routine"
time.sleep(5)
return 6
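# Overview (descriptive, derived from the functions above): Main() runs a
# simple state machine keyed on return codes:
#   1 = search(), 2 = allignment(), 3 = ram(), 4 = pickup(), 5 = fetch(),
#   6 = done (close both sockets, clean up GPIO, exit).
# allignment() and ram() return 1 (back to search) when the target has been
# lost for more than 5 seconds.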
def Main():
c = 1
try:
while True:
if c==1:
c = search()
if c==2:
c = allignment()
if c==3:
c = ram()
if c==4:
c = pickup()
if c==5:
c = fetch()
if c==6:
s.close()
e.close()
gpio.cleanup()
print "port closed"
break
except KeyboardInterrupt:
s.close()
e.close()
gpio.cleanup()
print "port closed"
if __name__=='__main__':
Main()
|
endlessm/chromium-browser
|
refs/heads/master
|
tools/style_variable_generator/views_generator_test.py
|
1
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from views_generator import ViewsStyleGenerator
import unittest
class ViewsStyleGeneratorTest(unittest.TestCase):
def setUp(self):
self.generator = ViewsStyleGenerator()
def assertEqualToFile(self, value, filename):
with open(filename) as f:
contents = f.read()
self.assertEqual(
value, contents,
'\n>>>>>\n%s<<<<<\n\ndoes not match\n\n>>>>>\n%s<<<<<' %
(value, contents))
def testColorTestJSON(self):
self.generator.AddJSONFileToModel('colors_test.json5')
self.generator.out_file_path = (
'tools/style_variable_generator/colors_test_expected.h')
self.assertEqualToFile(self.generator.Render(),
'colors_test_expected.h')
if __name__ == '__main__':
unittest.main()
|
dmitriy0611/django
|
refs/heads/master
|
scripts/manage_translations.py
|
277
|
#!/usr/bin/env python
#
# This python file contains utility scripts to manage Django translations.
# It has to be run inside the django git root directory.
#
# The following commands are available:
#
# * update_catalogs: check for new strings in core and contrib catalogs, and
# output how many strings are new/changed.
#
# * lang_stats: output statistics for each catalog/language combination
#
# * fetch: fetch translations from transifex.com
#
# Each command supports the --languages and --resources options to limit its
# operation to the specified language or resource. For example, to get stats
# for Spanish in contrib.admin, run:
#
# $ python scripts/manage_translations.py lang_stats --language=es --resources=admin
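#
# Another illustrative example: fetch only the French translations of
# contrib.admin:
#
# $ python scripts/manage_translations.py fetch --languages=fr --resources=admin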
import os
from argparse import ArgumentParser
from subprocess import PIPE, Popen, call
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(resources, include_core=True):
"""
Return a tuple (contrib name, absolute path) for all locale directories,
optionally including the django core catalog.
If the resources list is not None, filter the directories to those named in it.
"""
contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
dirs = []
# Collect all locale directories
for contrib_name in os.listdir(contrib_dir):
path = os.path.join(contrib_dir, contrib_name, 'locale')
if os.path.isdir(path):
dirs.append((contrib_name, path))
if contrib_name in HAVE_JS:
dirs.append(("%s-js" % contrib_name, path))
if include_core:
dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
# Filter by resources, if any
if resources is not None:
res_names = [d[0] for d in dirs]
dirs = [ld for ld in dirs if ld[0] in resources]
if len(resources) > len(dirs):
print("You have specified some unknown resources. "
"Available resource names are: %s" % (', '.join(res_names),))
exit(1)
return dirs
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django.core"
else:
return "django.contrib-%s" % name
def _check_diff(cat_name, base_path):
"""
Output the approximate number of changed/added strings in the en catalog.
"""
po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {
'path': base_path, 'ext': 'js' if cat_name.endswith('-js') else ''}
p = Popen("git diff -U0 %s | egrep '^[-+]msgid' | wc -l" % po_path,
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
num_changes = int(output.strip())
print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
"""
Update the en/LC_MESSAGES/django.po (main and contrib) files with
new/updated translatable strings.
"""
if resources is not None:
print("`update_catalogs` will always process all resources.")
contrib_dirs = _get_locale_dirs(None, include_core=False)
os.chdir(os.path.join(os.getcwd(), 'django'))
print("Updating en catalogs for Django and contrib apps...")
call_command('makemessages', locale=['en'])
print("Updating en JS catalogs for Django and contrib apps...")
call_command('makemessages', locale=['en'], domain='djangojs')
# Output changed stats
_check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
for name, dir_ in contrib_dirs:
_check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
"""
Output language statistics of committed translation files for each
Django catalog.
If resources is provided, it should be a list of translation resources to
limit the output (e.g. ['core', 'gis']).
"""
locale_dirs = _get_locale_dirs(resources)
for name, dir_ in locale_dirs:
print("\nShowing translations stats for '%s':" % name)
langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_')])
for lang in langs:
if languages and lang not in languages:
continue
# TODO: merge first with the latest en catalog
p = Popen("msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''},
stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
if p.returncode == 0:
# msgfmt output stats on stderr
print("%s: %s" % (lang, errors.strip()))
else:
print("Errors happened when checking %s translation for %s:\n%s" % (
lang, name, errors))
def fetch(resources=None, languages=None):
"""
Fetch translations from Transifex, wrap long lines, generate mo files.
"""
locale_dirs = _get_locale_dirs(resources)
errors = []
for name, dir_ in locale_dirs:
# Transifex pull
if languages is None:
call('tx pull -r %(res)s -a -f --minimum-perc=5' % {'res': _tx_resource_for_name(name)}, shell=True)
target_langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_') and d != 'en'])
else:
for lang in languages:
call('tx pull -r %(res)s -f -l %(lang)s' % {
'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
target_langs = languages
# msgcat to wrap lines and msgfmt for compilation of .mo file
for lang in target_langs:
po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
if not os.path.exists(po_path):
print("No %(lang)s translation for resource %(name)s" % {
'lang': lang, 'name': name})
continue
call('msgcat --no-location -o %s %s' % (po_path, po_path), shell=True)
res = call('msgfmt -c -o %s.mo %s' % (po_path[:-3], po_path), shell=True)
if res != 0:
errors.append((name, lang))
if errors:
print("\nWARNING: Errors have occurred in following cases:")
for resource, lang in errors:
print("\tResource %s for language %s" % (resource, lang))
exit(1)
if __name__ == "__main__":
RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')
parser = ArgumentParser()
parser.add_argument('cmd', nargs=1)
parser.add_argument("-r", "--resources", action='append',
help="limit operation to the specified resources")
parser.add_argument("-l", "--languages", action='append',
help="limit operation to the specified languages")
options = parser.parse_args()
if options.cmd[0] in RUNABLE_SCRIPTS:
eval(options.cmd[0])(options.resources, options.languages)
else:
print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
|
noironetworks/python-neutronclient
|
refs/heads/master
|
neutronclient/tests/unit/bgp/test_cli20_dragentscheduler.py
|
6
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.neutron.v2_0.bgp import dragentscheduler as bgp_drsched
from neutronclient.tests.unit import test_cli20
from neutronclient.tests.unit import test_cli20_agentschedulers as test_as
BGP_DRAGENT_ID = 'bgp_dragent_id1'
BGP_SPEAKER = 'bgp_speaker_id1'
class CLITestV20DRAgentScheduler(test_as.CLITestV20AgentScheduler):
def test_add_bgp_speaker_to_dragent(self):
resource = 'agent'
cmd = bgp_drsched.AddBGPSpeakerToDRAgent(
test_cli20.MyApp(sys.stdout), None)
args = (BGP_DRAGENT_ID, BGP_SPEAKER)
body = {'bgp_speaker_id': BGP_SPEAKER}
result = {'bgp_speaker_id': 'bgp_speaker_id', }
self._test_add_to_agent(resource, cmd, args,
self.client.BGP_DRINSTANCES,
body, result)
def test_remove_bgp_speaker_from_dragent(self):
resource = 'agent'
cmd = bgp_drsched.RemoveBGPSpeakerFromDRAgent(
test_cli20.MyApp(sys.stdout), None)
args = (BGP_DRAGENT_ID, BGP_SPEAKER)
self._test_remove_from_agent(resource, cmd, args,
self.client.BGP_DRINSTANCES)
def test_list_bgp_speakers_on_dragent(self):
resources = 'bgp_speakers'
cmd = bgp_drsched.ListBGPSpeakersOnDRAgent(
test_cli20.MyApp(sys.stdout), None)
path = ((self.client.agent_path + self.client.BGP_DRINSTANCES) %
BGP_DRAGENT_ID)
self._test_list_resources(resources, cmd, base_args=[BGP_DRAGENT_ID],
path=path)
def test_list_dragents_hosting_bgp_speaker(self):
resources = 'agent'
cmd = bgp_drsched.ListDRAgentsHostingBGPSpeaker(
test_cli20.MyApp(sys.stdout), None)
path = ((self.client.bgp_speaker_path + self.client.BGP_DRAGENTS) %
BGP_DRAGENT_ID)
contents = {self.id_field: 'myid1', 'alive': True}
self._test_list_resources(resources, cmd, base_args=[BGP_DRAGENT_ID],
path=path, response_contents=contents)
|
ShineFan/odoo
|
refs/heads/8.0
|
addons/mrp/wizard/stock_move.py
|
186
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_move_consume(osv.osv_memory):
_name = "stock.move.consume"
_description = "Consume Products"
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot'),
}
#TOFIX: product_uom should not have a different category than the product's default UoM. Qty should be converted into the UoM of the original move line before consuming and scrapping
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(stock_move_consume, self).default_get(cr, uid, fields, context=context)
move = self.pool.get('stock.move').browse(cr, uid, context['active_id'], context=context)
if 'product_id' in fields:
res.update({'product_id': move.product_id.id})
if 'product_uom' in fields:
res.update({'product_uom': move.product_uom.id})
if 'product_qty' in fields:
res.update({'product_qty': move.product_uom_qty})
if 'location_id' in fields:
res.update({'location_id': move.location_id.id})
return res
def do_move_consume(self, cr, uid, ids, context=None):
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
uom_obj = self.pool.get('product.uom')
production_obj = self.pool.get('mrp.production')
move_ids = context['active_ids']
move = move_obj.browse(cr, uid, move_ids[0], context=context)
production_id = move.raw_material_production_id.id
production = production_obj.browse(cr, uid, production_id, context=context)
precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')
for data in self.browse(cr, uid, ids, context=context):
qty = uom_obj._compute_qty(cr, uid, data['product_uom'].id, data.product_qty, data.product_id.uom_id.id)
remaining_qty = move.product_qty - qty
#check that the quantity to consume does not exceed what was previously planned
if float_compare(remaining_qty, 0, precision_digits=precision) >= 0:
move_obj.action_consume(cr, uid, move_ids, qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id, context=context)
else:
consumed_qty = min(move.product_qty, qty)
new_moves = move_obj.action_consume(cr, uid, move_ids, consumed_qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id, context=context)
#more was consumed in the wizard than previously planned
extra_more_qty = qty - consumed_qty
#create a new line for the remaining qty of the product
extra_move_id = production_obj._make_consume_line_from_data(cr, uid, production, data.product_id, data.product_id.uom_id.id, extra_more_qty, False, 0, context=context)
move_obj.write(cr, uid, [extra_move_id], {'restrict_lot_id': data.restrict_lot_id.id}, context=context)
move_obj.action_done(cr, uid, [extra_move_id], context=context)
return {'type': 'ir.actions.act_window_close'}
|
wojons/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/many-actions/gyptest-many-actions-unsorted.py
|
244
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure lots of actions in the same target don't cause exceeding command
line length.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('many-actions-unsorted.gyp')
test.build('many-actions-unsorted.gyp', test.ALL)
for i in range(15):
test.built_file_must_exist('generated_%d.h' % i)
# Make sure the optimized cygwin setup doesn't cause problems for incremental
# builds.
test.touch('file1')
test.build('many-actions-unsorted.gyp', test.ALL)
test.touch('file0')
test.build('many-actions-unsorted.gyp', test.ALL)
test.touch('file2')
test.touch('file3')
test.touch('file4')
test.build('many-actions-unsorted.gyp', test.ALL)
test.pass_test()
|
thdtjsdn/FreeCAD
|
refs/heads/master
|
src/Mod/MeshPart/Init.py
|
55
|
# FreeCAD init script of the MeshPart module
# (c) 2001 Juergen Riegel
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
|
gurneyalex/OpenUpgrade
|
refs/heads/master
|
addons/mrp_operations/report/mrp_code_barcode.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class code_barcode(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(code_barcode, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bitcity/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/__init__.py
|
12133432
| |
delhivery/django
|
refs/heads/master
|
django/contrib/redirects/migrations/__init__.py
|
12133432
| |
dsajkl/123
|
refs/heads/master
|
lms/djangoapps/shoppingcart/tests/__init__.py
|
12133432
| |
nutztherookie/wagtail
|
refs/heads/master
|
wagtail/wagtailsnippets/views/__init__.py
|
12133432
| |
ericzolf/ansible
|
refs/heads/devel
|
hacking/build_library/build_ansible/__init__.py
|
12133432
| |
cgstudiomap/cgstudiomap
|
refs/heads/develop
|
main/eggs/Shapely-1.5.9-py2.7.egg/shapely/algorithms/__init__.py
|
12133432
| |
mpvoss/RickAndMortyWeatherTweets
|
refs/heads/master
|
env/lib/python3.5/site-packages/pip/_vendor/cachecontrol/filewrapper.py
|
346
|
from io import BytesIO
class CallbackFileWrapper(object):
"""
Small wrapper around a fp object which will tee everything read into a
buffer, and when that file is closed it will execute a callback with the
contents of that buffer.
All attributes are proxied to the underlying file object.
This class uses members with a double underscore (__) leading prefix so as
not to accidentally shadow an attribute.
"""
def __init__(self, fp, callback):
self.__buf = BytesIO()
self.__fp = fp
self.__callback = callback
def __getattr__(self, name):
# The vagaries of garbage collection mean that self.__fp is
# not always set. Using __getattribute__ with the mangled
# private name [0] looks up the attribute value and raises an
# AttributeError when it doesn't exist. This stops things from
# infinitely recursing into getattr in the case where
# self.__fp hasn't been set.
#
# [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
fp = self.__getattribute__('_CallbackFileWrapper__fp')
return getattr(fp, name)
def __is_fp_closed(self):
try:
return self.__fp.fp is None
except AttributeError:
pass
try:
return self.__fp.closed
except AttributeError:
pass
# We just don't cache it then.
# TODO: Add some logging here...
return False
def _close(self):
if self.__callback:
self.__callback(self.__buf.getvalue())
# We assign this to None here, because otherwise we can get into
# really tricky problems where the CPython interpreter deadlocks
# because the callback is holding a reference to something which
# has a __del__ method. Setting this to None breaks the cycle
# and allows the garbage collector to do its thing normally.
self.__callback = None
def read(self, amt=None):
data = self.__fp.read(amt)
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
def _safe_read(self, amt):
data = self.__fp._safe_read(amt)
if amt == 2 and data == b'\r\n':
# urllib executes this read to toss the CRLF at the end
# of the chunk.
return data
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
|
thomasgilgenast/gilgistatus-nonrel
|
refs/heads/master
|
django/contrib/localflavor/it/it_province.py
|
406
|
# -*- coding: utf-8 -*
PROVINCE_CHOICES = (
('AG', 'Agrigento'),
('AL', 'Alessandria'),
('AN', 'Ancona'),
('AO', 'Aosta'),
('AR', 'Arezzo'),
('AP', 'Ascoli Piceno'),
('AT', 'Asti'),
('AV', 'Avellino'),
('BA', 'Bari'),
('BT', 'Barletta-Andria-Trani'), # active starting from 2009
('BL', 'Belluno'),
('BN', 'Benevento'),
('BG', 'Bergamo'),
('BI', 'Biella'),
('BO', 'Bologna'),
('BZ', 'Bolzano/Bozen'),
('BS', 'Brescia'),
('BR', 'Brindisi'),
('CA', 'Cagliari'),
('CL', 'Caltanissetta'),
('CB', 'Campobasso'),
('CI', 'Carbonia-Iglesias'),
('CE', 'Caserta'),
('CT', 'Catania'),
('CZ', 'Catanzaro'),
('CH', 'Chieti'),
('CO', 'Como'),
('CS', 'Cosenza'),
('CR', 'Cremona'),
('KR', 'Crotone'),
('CN', 'Cuneo'),
('EN', 'Enna'),
('FM', 'Fermo'), # active starting from 2009
('FE', 'Ferrara'),
('FI', 'Firenze'),
('FG', 'Foggia'),
('FC', 'Forlì-Cesena'),
('FR', 'Frosinone'),
('GE', 'Genova'),
('GO', 'Gorizia'),
('GR', 'Grosseto'),
('IM', 'Imperia'),
('IS', 'Isernia'),
('SP', 'La Spezia'),
('AQ', u'L’Aquila'),
('LT', 'Latina'),
('LE', 'Lecce'),
('LC', 'Lecco'),
('LI', 'Livorno'),
('LO', 'Lodi'),
('LU', 'Lucca'),
('MC', 'Macerata'),
('MN', 'Mantova'),
('MS', 'Massa-Carrara'),
('MT', 'Matera'),
('VS', 'Medio Campidano'),
('ME', 'Messina'),
('MI', 'Milano'),
('MO', 'Modena'),
('MB', 'Monza e Brianza'), # active starting from 2009
('NA', 'Napoli'),
('NO', 'Novara'),
('NU', 'Nuoro'),
('OG', 'Ogliastra'),
('OT', 'Olbia-Tempio'),
('OR', 'Oristano'),
('PD', 'Padova'),
('PA', 'Palermo'),
('PR', 'Parma'),
('PV', 'Pavia'),
('PG', 'Perugia'),
('PU', 'Pesaro e Urbino'),
('PE', 'Pescara'),
('PC', 'Piacenza'),
('PI', 'Pisa'),
('PT', 'Pistoia'),
('PN', 'Pordenone'),
('PZ', 'Potenza'),
('PO', 'Prato'),
('RG', 'Ragusa'),
('RA', 'Ravenna'),
('RC', 'Reggio Calabria'),
('RE', 'Reggio Emilia'),
('RI', 'Rieti'),
('RN', 'Rimini'),
('RM', 'Roma'),
('RO', 'Rovigo'),
('SA', 'Salerno'),
('SS', 'Sassari'),
('SV', 'Savona'),
('SI', 'Siena'),
('SR', 'Siracusa'),
('SO', 'Sondrio'),
('TA', 'Taranto'),
('TE', 'Teramo'),
('TR', 'Terni'),
('TO', 'Torino'),
('TP', 'Trapani'),
('TN', 'Trento'),
('TV', 'Treviso'),
('TS', 'Trieste'),
('UD', 'Udine'),
('VA', 'Varese'),
('VE', 'Venezia'),
('VB', 'Verbano Cusio Ossola'),
('VC', 'Vercelli'),
('VR', 'Verona'),
('VV', 'Vibo Valentia'),
('VI', 'Vicenza'),
('VT', 'Viterbo'),
)
|
0x7678/gr-gsm
|
refs/heads/master
|
python/qa_msg_to_tag.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @file
# @author (C) 2016 by Piotr Krysik <ptrkrysik@gmail.com>
# @section LICENSE
#
# Gr-gsm is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# Gr-gsm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gr-gsm; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import grgsm_swig as grgsm
class qa_msg_to_tag (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_msg_to_tag)
|
fly19890211/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/studio/index.py
|
102
|
"""
Studio Home page
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
class DashboardPage(PageObject):
"""
Studio Home page
"""
url = BASE_URL + "/course/"
def is_browser_on_page(self):
return self.q(css='body.view-dashboard').present
@property
def course_runs(self):
"""
The list of course run metadata for all displayed courses
Returns an empty list if there are none
"""
return self.q(css='.course-run>.value').text
@property
def has_processing_courses(self):
return self.q(css='.courses-processing').present
def create_rerun(self, display_name):
"""
Clicks the create rerun link of the course specified by display_name.
"""
name = self.q(css='.course-title').filter(lambda el: el.text == display_name)[0]
name.find_elements_by_xpath('../..')[0].find_elements_by_class_name('rerun-button')[0].click()
def click_course_run(self, run):
"""
Clicks on the course with run given by run.
"""
self.q(css='.course-run .value').filter(lambda el: el.text == run)[0].click()
def has_new_library_button(self):
"""
(bool) is the "New Library" button present?
"""
return self.q(css='.new-library-button').present
def click_new_library(self):
"""
Click on the "New Library" button
"""
self.q(css='.new-library-button').click()
def is_new_library_form_visible(self):
"""
Is the new library form visible?
"""
return self.q(css='.wrapper-create-library').visible
def fill_new_library_form(self, display_name, org, number):
"""
Fill out the form to create a new library.
Must have called click_new_library() first.
"""
field = lambda fn: self.q(css='.wrapper-create-library #new-library-{}'.format(fn))
field('name').fill(display_name)
field('org').fill(org)
field('number').fill(number)
def is_new_library_form_valid(self):
"""
Is the new library form ready to submit?
"""
return (
self.q(css='.wrapper-create-library .new-library-save:not(.is-disabled)').present and
not self.q(css='.wrapper-create-library .wrap-error.is-shown').present
)
def submit_new_library_form(self):
"""
Submit the new library form.
"""
self.q(css='.wrapper-create-library .new-library-save').click()
def list_courses(self):
"""
List all the courses found on the page's list of courses.
"""
# Workaround Selenium/Firefox bug: `.text` property is broken on invisible elements
course_tab_link = self.q(css='#course-index-tabs .courses-tab a')
if course_tab_link:
course_tab_link.click()
div2info = lambda element: {
'name': element.find_element_by_css_selector('.course-title').text,
'org': element.find_element_by_css_selector('.course-org .value').text,
'number': element.find_element_by_css_selector('.course-num .value').text,
'run': element.find_element_by_css_selector('.course-run .value').text,
'url': element.find_element_by_css_selector('a.course-link').get_attribute('href'),
}
return self.q(css='.courses li.course-item').map(div2info).results
def list_libraries(self):
"""
Click the tab to display the available libraries, and return detail of them.
"""
# Workaround Selenium/Firefox bug: `.text` property is broken on invisible elements
self.q(css='#course-index-tabs .libraries-tab a').click()
if self.q(css='.list-notices.libraries-tab').present:
# No libraries are available.
self.wait_for_element_visibility('.libraries-tab .new-library-button', "Switch to library tab")
return []
div2info = lambda element: {
'name': element.find_element_by_css_selector('.course-title').text,
'org': element.find_element_by_css_selector('.course-org .value').text,
'number': element.find_element_by_css_selector('.course-num .value').text,
'url': element.find_element_by_css_selector('a.library-link').get_attribute('href'),
}
self.wait_for_element_visibility('.libraries li.course-item', "Switch to library tab")
return self.q(css='.libraries li.course-item').map(div2info).results
def has_library(self, **kwargs):
"""
Does the page's list of libraries include a library matching kwargs?
"""
for lib in self.list_libraries():
if all([lib[key] == kwargs[key] for key in kwargs]):
return True
return False
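# Illustrative bok-choy usage of this page object (names outside this file,
# such as the browser fixture, are assumptions):
#   page = DashboardPage(browser)
#   page.visit()
#   if page.has_new_library_button():
#       page.click_new_library()
#       page.fill_new_library_form('My Library', 'OrgX', 'LIB101')
#       if page.is_new_library_form_valid():
#           page.submit_new_library_form()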
|
theheros/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/multiprocessing/util.py
|
3
|
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import itertools
import weakref
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging, atexit
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
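# Illustrative: log_to_stderr(DEBUG) makes messages appear on stderr as
# '[DEBUG/MainProcess] ...' per DEFAULT_LOGGING_FORMAT above.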
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
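# Illustrative usage (this mirrors the module's own call in get_temp_dir()):
#   Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
# registers shutil.rmtree(tempdir) to run from _run_finalizers() at process
# exit; passing a live object instead of None additionally ties the callback
# to that object's lifetime via a weakref.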
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
_finalizer_registry[self._key] = self
def __call__(self, wr=None):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function():
global _exiting
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
register_after_fork(self, ForkAwareThreadLock.__init__)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
|
sirmmo/umarells.org
|
refs/heads/master
|
cityfix/migrations/0006_auto__chg_field_pics_pic.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Pics.pic'
db.alter_column(u'cityfix_pics', 'pic', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
def backwards(self, orm):
# Changing field 'Pics.pic'
db.alter_column(u'cityfix_pics', 'pic', self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cityfix.cityfix': {
'Meta': {'object_name': 'CityFix'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fixtype': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cityfix.FixType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'infrastructure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cityfix.Infrastructure']"}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cityfix.Operator']"}),
'sent': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sitetype': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cityfix.SiteType']"}),
'umarell': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Umarell']", 'null': 'True'}),
'uuid': ('django.db.models.fields.TextField', [], {'default': "'f6c6b105-ddc8-40e2-977d-6de930f8d093'", 'unique': 'True'})
},
u'cityfix.fixtype': {
'Meta': {'object_name': 'FixType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'name_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'cityfix.infrastructure': {
'Meta': {'object_name': 'Infrastructure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'name_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'cityfix.operator': {
'Meta': {'object_name': 'Operator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'name_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'cityfix.pics': {
'Meta': {'object_name': 'Pics'},
'fix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pics'", 'to': u"orm['cityfix.CityFix']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pic': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'})
},
u'cityfix.sitetype': {
'Meta': {'object_name': 'SiteType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'name_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.topic': {
'Meta': {'object_name': 'Topic'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
u'core.umarell': {
'Meta': {'object_name': 'Umarell'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Topic']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['cityfix']
|
MakeHer/edx-platform
|
refs/heads/dashboard.2
|
common/djangoapps/external_auth/login_and_register.py
|
150
|
"""Intercept login and registration requests.
This module contains legacy code originally from `student.views`.
"""
import re
from django.conf import settings
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
import external_auth.views
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
# pylint: disable=fixme
# TODO: This function is kind of gnarly/hackish/etc and is only used in one location.
# It'd be awesome if we could get rid of it; manually parsing course_id strings from larger strings
# seems Probably Incorrect
def _parse_course_id_from_string(input_str):
"""
Helper function to determine if input_str (typically the queryparam 'next') contains a course_id.
@param input_str:
@return: the course_id if found, None if not
"""
m_obj = re.match(r'^/courses/{}'.format(settings.COURSE_ID_PATTERN), input_str)
if m_obj:
return CourseKey.from_string(m_obj.group('course_id'))
return None
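# Illustrative: with edx-platform's default COURSE_ID_PATTERN, an input such
# as '/courses/course-v1:edX+DemoX+Demo_Course/info' would match and yield
# the corresponding CourseKey (the exact pattern is settings-dependent, so
# treat this as an assumption).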
def _get_course_enrollment_domain(course_id):
"""
Helper function to get the enrollment domain set for a course with id course_id
@param course_id:
@return:
"""
course = modulestore().get_course(course_id)
if course is None:
return None
return course.enrollment_domain
def login(request):
"""Allow external auth to intercept and handle a login request.
Arguments:
request (Request): A request for the login page.
Returns:
Response or None
"""
# Default to a `None` response, indicating that external auth
# is not handling the request.
response = None
if settings.FEATURES['AUTH_USE_CERTIFICATES'] and external_auth.views.ssl_get_cert_from_request(request):
# SSL login doesn't require a view, so redirect
# branding and allow that to process the login if it
# is enabled and the header is in the request.
response = external_auth.views.redirect_with_get('root', request.GET)
elif settings.FEATURES.get('AUTH_USE_CAS'):
# If CAS is enabled, redirect auth handling to there
response = redirect(reverse('cas-login'))
elif settings.FEATURES.get('AUTH_USE_SHIB'):
redirect_to = request.GET.get('next')
if redirect_to:
course_id = _parse_course_id_from_string(redirect_to)
if course_id and _get_course_enrollment_domain(course_id):
response = external_auth.views.course_specific_login(request, course_id.to_deprecated_string())
return response
def register(request):
"""Allow external auth to intercept and handle a registration request.
Arguments:
request (Request): A request for the registration page.
Returns:
Response or None
"""
response = None
if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'):
# Redirect to branding to process their certificate if SSL is enabled
# and registration is disabled.
response = external_auth.views.redirect_with_get('root', request.GET)
return response
|
sunqb/oa_qian
|
refs/heads/master
|
flask/Lib/site-packages/flask_sqlalchemy/_compat.py
|
175
|
# -*- coding: utf-8 -*-
"""
flaskext.sqlalchemy._compat
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Internal Python 2.x/3.x compatibility layer.
:copyright: (c) 2013 by Daniel Neuhäuser
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
if PY2:
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
xrange = xrange
string_types = (unicode, bytes)
else:
def iteritems(d):
return iter(d.items())
def itervalues(d):
return iter(d.values())
xrange = range
string_types = (str, )
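# Illustrative: iteritems({'a': 1}) yields the pair ('a', 1) on both
# Python 2 (via dict.iteritems) and Python 3 (via iter(dict.items())).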
|
studoret/miclooper
|
refs/heads/master
|
miclooper/utils/task.py
|
1
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
task
############
Copyright 2016 Stephane Tudoret
This file is part of miclooper, a python micro looper application.
miclooper is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
miclooper is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with miclooper. If not, see http://www.gnu.org/licenses/.
"""
import sys
import os
import subprocess
class Task(object):
"""
Task.
:Args:
command : Array of string
Command to execute in the background.
e.g : ['/usr/bin/mplayer', '-loop', '0', 'toto.raw'])
debug : boolean
If true then show stderr of the command
else hide stderr
"""
def __init__(self, command, debug=False):
self._command = command
self._process = None
self._fdevnull = open(os.devnull, 'w')
if debug is True:
self._fstderr = None
else:
self._fstderr = subprocess.STDOUT
def start(self):
""" Execute the task in background """
try:
self._process = subprocess.Popen(
self._command,
bufsize=-1,
stdout=self._fdevnull,
stderr=self._fstderr
)
except OSError as err:
print 'error: %s' % err
sys.exit(1)
def stop(self):
""" Terminate the task """
if self._process is not None:
self._process.terminate()
self._process = None
|
skeenp/Roam
|
refs/heads/master
|
src/configmanager/ui/__init__.py
|
25
|
__author__ = 'nathan.woodrow'
|
schleichdi2/OPENNFR-6.0-CORE
|
refs/heads/master
|
opennfr-openembedded-core/scripts/pybootchartgui/pybootchartgui.py
|
8
|
#!/usr/bin/env python
#
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import sys
from pybootchartgui.main import main
if __name__ == '__main__':
sys.exit(main())
|
viniciusgama/blog_gae
|
refs/heads/master
|
django/contrib/webdesign/lorem_ipsum.py
|
439
|
"""
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [u' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = u', '.join(sections)
# Convert to sentence case and add end punctuation.
return u'%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return u' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return u' '.join(word_list)
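# --- Illustrative usage sketch (not part of the original module). It only
# touches names defined above; sentence output varies because the words are
# sampled randomly. ---
if __name__ == '__main__':
    print(words(5, common=True))   # u'lorem ipsum dolor sit amet'
    print(sentence())              # e.g. u'Neque odit sed, ipsum vero...'
    print(len(paragraphs(3)))      # 3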
|
google-code/android-scripting
|
refs/heads/master
|
python/src/Lib/plat-mac/Audio_mac.py
|
73
|
QSIZE = 100000
error='Audio_mac.error'
from warnings import warnpy3k
warnpy3k("In 3.x, the Play_Audio_mac module is removed.", stacklevel=2)
class Play_Audio_mac:
def __init__(self, qsize=QSIZE):
self._chan = None
self._qsize = qsize
self._outrate = 22254
self._sampwidth = 1
self._nchannels = 1
self._gc = []
self._usercallback = None
def __del__(self):
self.stop()
self._usercallback = None
def wait(self):
import time
while self.getfilled():
time.sleep(0.1)
self._chan = None
self._gc = []
def stop(self, quietNow = 1):
##chan = self._chan
self._chan = None
##chan.SndDisposeChannel(1)
self._gc = []
def setoutrate(self, outrate):
self._outrate = outrate
def setsampwidth(self, sampwidth):
self._sampwidth = sampwidth
def setnchannels(self, nchannels):
self._nchannels = nchannels
def writeframes(self, data):
import time
from Carbon.Sound import bufferCmd, callBackCmd, extSH
import struct
import MacOS
if not self._chan:
from Carbon import Snd
self._chan = Snd.SndNewChannel(5, 0, self._callback)
nframes = len(data) / self._nchannels / self._sampwidth
if len(data) != nframes * self._nchannels * self._sampwidth:
raise error, 'data is not a whole number of frames'
while self._gc and \
self.getfilled() + nframes > \
self._qsize / self._nchannels / self._sampwidth:
time.sleep(0.1)
if self._sampwidth == 1:
import audioop
data = audioop.add(data, '\x80'*len(data), 1)
h1 = struct.pack('llHhllbbl',
id(data)+MacOS.string_id_to_buffer,
self._nchannels,
self._outrate, 0,
0,
0,
extSH,
60,
nframes)
h2 = 22*'\0'
h3 = struct.pack('hhlll',
self._sampwidth*8,
0,
0,
0,
0)
header = h1+h2+h3
self._gc.append((header, data))
self._chan.SndDoCommand((bufferCmd, 0, header), 0)
self._chan.SndDoCommand((callBackCmd, 0, 0), 0)
def _callback(self, *args):
del self._gc[0]
if self._usercallback:
self._usercallback()
def setcallback(self, callback):
self._usercallback = callback
def getfilled(self):
filled = 0
for header, data in self._gc:
filled = filled + len(data)
return filled / self._nchannels / self._sampwidth
def getfillable(self):
return (self._qsize / self._nchannels / self._sampwidth) - self.getfilled()
def ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
def test():
import aifc
import EasyDialogs
fn = EasyDialogs.AskFileForOpen(message="Select an AIFF soundfile", typeList=("AIFF",))
if not fn: return
af = aifc.open(fn, 'r')
print af.getparams()
p = Play_Audio_mac()
p.setoutrate(af.getframerate())
p.setsampwidth(af.getsampwidth())
p.setnchannels(af.getnchannels())
BUFSIZ = 10000
while 1:
data = af.readframes(BUFSIZ)
if not data: break
p.writeframes(data)
print 'wrote', len(data), 'space', p.getfillable()
p.wait()
if __name__ == '__main__':
test()
|
michellab/Sire
|
refs/heads/devel
|
wrapper/Mol/special_code.py
|
1
|
###############################################
#
# This file contains special code to help
# with the wrapping of SireMol classes
#
#
import sys
import pickle
from pyplusplus.module_builder import call_policies
sys.path.append("../AutoGenerate")
from scanheaders import *
atomprops = pickle.load( open("atomprops.data", "rb") )
chainprops = pickle.load( open("chainprops.data", "rb") )
cgprops = pickle.load( open("cgprops.data", "rb") )
resprops = pickle.load( open("resprops.data", "rb") )
segprops = pickle.load( open("segprops.data", "rb") )
beadprops = pickle.load( open("beadprops.data", "rb") )
active_headers = pickle.load( open("active_headers.data", "rb") )
return_const = "bp::return_value_policy<bp::copy_const_reference>()"
return_self = "bp::return_self< >()"
def fix_MolView(c, molview, props):
#now add in all of the header files
for header in props.dependencies():
c.add_declaration_code( "#include %s" % header )
#add accessor functions for all of the view properties
for property in props.properties():
p = property[0]
prop = property[1].replace("::","_").replace("<","_").replace(">","_")
c.add_registration_code( "def( \"_get_property_%s\", &%s::property< %s >, %s)" \
% (prop, molview, p, return_const) )
c.add_registration_code( "def( \"_get_metadata_%s\", get_Metadata_%s_function1, %s)" \
% (prop, prop, return_const) )
c.add_registration_code( "def( \"_get_metadata_%s\", &get_Metadata_%s_function2, %s)" \
% (prop, prop, return_const) )
c.add_declaration_code( """ const %s& get_Metadata_%s_function1(const %s &atom,
const QString &metakey){ return atom.metadata< %s >(metakey); }""" \
% (p, prop, molview, p) )
c.add_declaration_code( """ const %s& get_Metadata_%s_function2(const %s &atom,
const QString &key, const QString &metakey){
return atom.metadata< %s >(key, metakey); }""" \
% (p, prop, molview, p) )
def fix_Atom(c):
fix_MolView(c, "SireMol::Atom", atomprops)
def fix_Chain(c):
fix_MolView(c, "SireMol::Chain", chainprops)
def fix_CutGroup(c):
fix_MolView(c, "SireMol::CutGroup", cgprops)
def fix_Residue(c):
fix_MolView(c, "SireMol::Residue", resprops)
def fix_Segment(c):
fix_MolView(c, "SireMol::Segment", segprops)
def fix_Bead(c):
fix_MolView(c, "SireMol::Bead", beadprops)
def fix_MolEditorBase(c):
c.decls( "removeProperty" ).call_policies = call_policies.return_self()
c.decls( "removeMetadata" ).call_policies = call_policies.return_self()
c.add_registration_code( """def( \"setProperty\",
&SireMol::MolEditorBase::setProperty<SireBase::Property>, %s )""" \
% (return_self ) )
c.add_registration_code( "def( \"setMetadata\", &set_Metadata_function1, %s)" \
% (return_self) )
c.add_registration_code( "def( \"setMetadata\", &set_Metadata_function2, %s)" \
% (return_self) )
c.add_declaration_code( """SireMol::MolEditorBase& set_Metadata_function1(
SireMol::MolEditorBase &molview,
const QString &metakey, const SireBase::Property &p)
{ return molview.setMetadata<SireBase::Property>(metakey, p); }""" )
c.add_declaration_code( """SireMol::MolEditorBase& set_Metadata_function2(
SireMol::MolEditorBase &molview,
const QString &key, const QString &metakey,
const SireBase::Property &p)
{ return molview.setMetadata<SireBase::Property>(key, metakey, p); }""" )
def fix_MolViewEditorBase(c, molview, props):
c.decls( "removeProperty" ).call_policies = call_policies.return_self()
c.decls( "removeMetadata" ).call_policies = call_policies.return_self()
#now add the code to set properties and metadata
for header in props.dependencies():
c.add_declaration_code( "#include %s" % header )
#add accessor functions for all of the atom properties
for property in props.properties():
p = property[0]
p_rep = p.replace("::","_").replace("<","_").replace(">","_")
prop = property[1].replace("::","_").replace("<","_").replace(">","_")
c.add_registration_code( """def( \"_set_property_%s\",
&%s::setProperty< %s >, %s )""" \
% (p_rep, molview, p, return_self ) )
c.add_registration_code( "def( \"_set_metadata_%s\", &set_Metadata_%s_function1, %s)" \
% (p_rep, prop, return_self) )
c.add_registration_code( "def( \"_set_metadata_%s\", &set_Metadata_%s_function2, %s)" \
% (p_rep, prop, return_self) )
c.add_declaration_code( """%s& set_Metadata_%s_function1(
%s &molview,
const QString &metakey, const %s &p)
{ return molview.setMetadata< %s >(metakey, p); }""" \
% (molview, prop, molview, p, p) )
c.add_declaration_code( """%s& set_Metadata_%s_function2(
%s &molview,
const QString &key, const QString &metakey, const %s &p)
{ return molview.setMetadata< %s >(key, metakey, p); }""" \
% (molview, prop, molview, p, p) )
def fix_AtomEditorBase(c):
fix_MolViewEditorBase(c, "SireMol::AtomEditorBase", atomprops)
def fix_ChainEditorBase(c):
fix_MolViewEditorBase(c, "SireMol::ChainEditorBase", chainprops)
def fix_CGEditorBase(c):
fix_MolViewEditorBase(c, "SireMol::CGEditorBase", cgprops)
def fix_ResEditorBase(c):
fix_MolViewEditorBase(c, "SireMol::ResEditorBase", resprops)
def fix_SegEditorBase(c):
fix_MolViewEditorBase(c, "SireMol::SegEditorBase", segprops)
def fix_BeadEditorBase(c):
fix_MolViewEditorBase(c, "SireMol::BeadEditorBase", beadprops)
def fix_AtomEditor(c):
c.decls( "rename" ).call_policies = call_policies.return_self()
c.decls( "renumber" ).call_policies = call_policies.return_self()
def fix_AtomStructureEditor(c):
fix_AtomEditor(c)
c.decls( "reindex" ).call_policies = call_policies.return_self()
c.decls( "reparent" ).call_policies = call_policies.return_self()
def fix_AtomSelection(c):
c.decls( "selectAll" ).call_policies = call_policies.return_self()
c.decls( "deselectAll" ).call_policies = call_policies.return_self()
c.decls( "selectNone" ).call_policies = call_policies.return_self()
c.decls( "select" ).call_policies = call_policies.return_self()
c.decls( "deselect" ).call_policies = call_policies.return_self()
c.decls( "selectOnly" ).call_policies = call_policies.return_self()
c.decls( "invert" ).call_policies = call_policies.return_self()
c.decls( "intersect" ).call_policies = call_policies.return_self()
c.decls( "unite" ).call_policies = call_policies.return_self()
c.decls( "subtract" ).call_policies = call_policies.return_self()
c.decls( "mask" ).call_policies = call_policies.return_self()
def fix_CGEditor(c):
c.decls( "rename" ).call_policies = call_policies.return_self()
def fix_CGStructureEditor(c):
fix_CGEditor(c)
c.decls( "reindex" ).call_policies = call_policies.return_self()
c.decls( "remove" ).call_policies = call_policies.return_self()
c.decls( "transfer" ).call_policies = call_policies.return_self()
c.decls( "transferAll" ).call_policies = call_policies.return_self()
fix_ChainEditor = fix_CGEditor
fix_ChainStructureEditor = fix_CGStructureEditor
fix_SegEditor = fix_CGEditor
fix_SegStructureEditor = fix_CGStructureEditor
def fix_ResEditor(c):
c.decls( "renumber" ).call_policies = call_policies.return_self()
c.decls( "rename" ).call_policies = call_policies.return_self()
def fix_ResStructureEditor(c):
fix_ResEditor(c)
c.decls( "reindex" ).call_policies = call_policies.return_self()
c.decls( "reparent" ).call_policies = call_policies.return_self()
c.decls( "remove" ).call_policies = call_policies.return_self()
c.decls( "transfer" ).call_policies = call_policies.return_self()
c.decls( "transferAll" ).call_policies = call_policies.return_self()
def fix_MolEditor(c):
c.decls( "renumber" ).call_policies = call_policies.return_self()
c.decls( "rename" ).call_policies = call_policies.return_self()
def fix_MolStructureEditor(c):
fix_MolEditor(c)
c.decls( "remove" ).call_policies = call_policies.return_self()
c.decls( "removeAllAtoms" ).call_policies = call_policies.return_self()
c.decls( "removeAllCutGroups" ).call_policies = call_policies.return_self()
c.decls( "removeAllResidues" ).call_policies = call_policies.return_self()
c.decls( "removeAllChains" ).call_policies = call_policies.return_self()
c.decls( "removeAllSegments" ).call_policies = call_policies.return_self()
def fix_ConnectivityEditor(c):
c.decls( "connect" ).call_policies = call_policies.return_self()
c.decls( "disconnect" ).call_policies = call_policies.return_self()
c.decls( "disconnectAll" ).call_policies = call_policies.return_self()
def fix_MGNum(c):
c.add_declaration_code( "#include \"mgid.h\"" )
c.add_declaration_code( "#include \"mgidx.h\"" )
c.add_declaration_code( "#include \"mgname.h\"" )
c.add_declaration_code( "#include \"mgnum.h\"" )
c.add_declaration_code( "#include \"moleculegroups.h\"")
fix_MGIdx = fix_MGNum
fix_MGName = fix_MGNum
def fix_MolNum(c):
c.add_declaration_code( "#include \"molid.h\"" )
c.add_declaration_code( "#include \"molidx.h\"" )
c.add_declaration_code( "#include \"molnum.h\"" )
c.add_declaration_code( "#include \"molname.h\"" )
c.add_declaration_code( "#include \"moleculegroup.h\"" )
c.add_declaration_code( "#include \"moleculegroups.h\"" )
c.add_declaration_code( "#include \"mover.hpp\"" )
fix_MolName = fix_MolNum
fix_MolIdx = fix_MolNum
def fix_MolInfo(c):
c.add_declaration_code( "#include \"moleculeinfodata.h\"" )
c.add_declaration_code( "#include \"atomselection.h\"" )
def fix_Mover(c):
c.decls("mapInto").call_policies = call_policies.return_self()
c.decls("transform").call_policies = call_policies.return_self()
c.decls("translate").call_policies = call_policies.return_self()
c.decls("rotate").call_policies = call_policies.return_self()
c.decls("transform").call_policies = call_policies.return_self()
c.decls("changeFrame").call_policies = call_policies.return_self()
c.decls("change").call_policies = call_policies.return_self()
c.decls("set").call_policies = call_policies.return_self()
c.decls("setAll").call_policies = call_policies.return_self()
c.decls("alignTo").call_policies = call_policies.return_self()
c.decls("align").call_policies = call_policies.return_self()
#also include all of the header files included in mover.cpp
for header in active_headers["mover.h"].dependencies():
c.add_declaration_code( "#include %s" % header )
def fix_MolViewProperty(c):
c.add_declaration_code( "#include \"SireMaths/vector.h\"" )
c.add_declaration_code( "#include \"SireMol/moleculeview.h\"" )
c.decls("set").call_policies = call_policies.return_self()
def fix_AtomCoords(c):
fix_MolViewProperty(c)
c.add_declaration_code("#include \"SireMaths/quaternion.h\"")
c.add_declaration_code("#include \"SireMaths/matrix.h\"")
c.add_declaration_code("#include \"SireVol/aabox.h\"")
c.add_declaration_code("#include \"SireMaths/axisset.h\"")
def fix_CGAtomIdx(c):
c.add_declaration_code("#include \"cgidx.h\"")
c.add_declaration_code("#include \"SireID/index.h\"")
def fix_CGIdx(c):
c.add_declaration_code("#include \"SireID/index.h\"")
c.add_declaration_code("#include \"cgatomidx.h\"")
c.add_registration_code("def( other<SireID::Index>() + self )")
def fix_AtomID(c):
#also include all of the header files included in atomid.cpp
for header in active_headers["atomid.h"].dependencies():
c.add_declaration_code( "#include %s" % header )
def fix_CGID(c):
#also include all of the header files included in cgid.cpp
for header in active_headers["cgid.h"].dependencies():
c.add_declaration_code( "#include %s" % header )
def fix_ChainID(c):
#also include all of the header files included in chainid.cpp
for header in active_headers["chainid.h"].dependencies():
c.add_declaration_code( "#include %s" % header )
def fix_ResID(c):
#also include all of the header files included in resid.cpp
for header in active_headers["resid.h"].dependencies():
c.add_declaration_code( "#include %s" % header )
def fix_SegID(c):
#also include all of the header files included in segid.cpp
for header in active_headers["segid.h"].dependencies():
c.add_declaration_code( "#include %s" % header )
def fix_BeadID(c):
#also include all of the header files included in segid.cpp
for header in active_headers["beadid.h"].dependencies():
c.add_declaration_code( "#include %s" % header )
def fix_PerturbationSymbols(c):
c.mem_funs("lambda").rename("Lambda")
def fix_SelectResultMover(c):
c.decls("translate").call_policies = call_policies.return_self()
special_code = { "SireMol::Atom" : fix_Atom,
"SireMol::Editor<SireMol::AtomEditor, SireMol::Atom>" : fix_AtomEditorBase,
"SireMol::AtomEditor" : fix_AtomEditor,
"SireMol::AtomSelection" : fix_AtomSelection,
"SireMol::AtomStructureEditor" : fix_AtomStructureEditor,
"SireMol::Mover<SireMol::Atom>" : fix_Mover,
"SireMol::Mover<SireMol::Selector<SireMol::Atom> >" : fix_Mover,
"SireMol::AtomIdx" : fix_AtomID,
"SireMol::AtomName" : fix_AtomID,
"SireMol::AtomNum" : fix_AtomID,
"SireMol::CGIdx" : fix_CGID,
"SireMol::CGName" : fix_CGID,
"SireMol::ChainIdx" : fix_ChainID,
"SireMol::ChainName" : fix_ChainID,
"SireMol::ResIdx" : fix_ResID,
"SireMol::ResName" : fix_ResID,
"SireMol::ResNum" : fix_ResID,
"SireMol::SegIdx" : fix_SegID,
"SireMol::SegName" : fix_SegID,
"SireMol::BeadIdx" : fix_BeadID,
"SireMol::BeadNum" : fix_BeadID,
"SireMol::Bead" : fix_Bead,
"SireMol::Editor<SireMol::BeadEditor, SireMol::Bead>" : fix_BeadEditorBase,
"SireMol::Mover<SireMol::Bead>" : fix_Mover,
"SireMol::Mover<SireMol::Beads>" : fix_Mover,
"SireMol::CutGroup" : fix_CutGroup,
"SireMol::Editor<SireMol::CGEditor, SireMol::CutGroup>" : fix_CGEditorBase,
"SireMol::CGEditor" : fix_CGEditor,
"SireMol::CGStructureEditor" : fix_CGStructureEditor,
"SireMol::Mover<SireMol::CutGroup>" : fix_Mover,
"SireMol::Mover<SireMol::Selector<SireMol::CutGroup> >" : fix_Mover,
"SireMol::Chain" : fix_Chain,
"SireMol::Editor<SireMol::ChainEditor, SireMol::Chain>" : fix_ChainEditorBase,
"SireMol::ChainEditor" : fix_ChainEditor,
"SireMol::ChainStructureEditor" : fix_ChainStructureEditor,
"SireMol::Mover<SireMol::Chain>" : fix_Mover,
"SireMol::Mover<SireMol::Selector<SireMol::Chain> >" : fix_Mover,
"SireMol::Residue" : fix_Residue,
"SireMol::Editor<SireMol::ResEditor, SireMol::Residue>" : fix_ResEditorBase,
"SireMol::ResEditor" : fix_ResEditor,
"SireMol::ResStructureEditor" : fix_ResStructureEditor,
"SireMol::Mover<SireMol::Residue>" : fix_Mover,
"SireMol::Mover<SireMol::Selector<SireMol::Residue> >" : fix_Mover,
"SireMol::Segment" : fix_Segment,
"SireMol::Editor<SireMol::SegEditor, SireMol::Segment>" : fix_SegEditorBase,
"SireMol::SegEditor" : fix_SegEditor,
"SireMol::SegStructureEditor" : fix_SegStructureEditor,
"SireMol::Mover<SireMol::Segment>" : fix_Mover,
"SireMol::Mover<SireMol::Selector<SireMol::Segment> >" : fix_Mover,
"SireMol::SelectResultMover" : fix_SelectResultMover,
"SireMol::MolEditor" : fix_MolEditor,
"SireMol::Editor<SireMol::MolEditor, SireMol::Molecule>" : fix_MolEditorBase,
"SireMol::MolStructureEditor" : fix_MolStructureEditor,
"SireMol::Mover<SireMol::Molecule>" : fix_Mover,
"SireMol::Mover<SireMol::PartialMolecule>" : fix_Mover,
"AtomStringProperty" : fix_MolViewProperty,
"AtomIntProperty" : fix_MolViewProperty,
"AtomFloatProperty" : fix_MolViewProperty,
"AtomVariantProperty" : fix_MolViewProperty,
"BeadStringProperty" : fix_MolViewProperty,
"BeadIntProperty" : fix_MolViewProperty,
"BeadFloatProperty" : fix_MolViewProperty,
"BeadVariantProperty" : fix_MolViewProperty,
"CGStringProperty" : fix_MolViewProperty,
"CGIntProperty" : fix_MolViewProperty,
"CGFloatProperty" : fix_MolViewProperty,
"CGVariantProperty" : fix_MolViewProperty,
"ResStringProperty" : fix_MolViewProperty,
"ResIntProperty" : fix_MolViewProperty,
"ResFloatProperty" : fix_MolViewProperty,
"ResVariantProperty" : fix_MolViewProperty,
"ChainStringProperty" : fix_MolViewProperty,
"ChainIntProperty" : fix_MolViewProperty,
"ChainFloatProperty" : fix_MolViewProperty,
"ChainVariantProperty" : fix_MolViewProperty,
"SegStringProperty" : fix_MolViewProperty,
"SegIntProperty" : fix_MolViewProperty,
"SegFloatProperty" : fix_MolViewProperty,
"SegVariantProperty" : fix_MolViewProperty,
"AtomBeads" : fix_MolViewProperty,
"AtomCoords" : fix_AtomCoords,
"AtomCharges" : fix_MolViewProperty,
"AtomElements" : fix_MolViewProperty,
"AtomEnergies" : fix_MolViewProperty,
"AtomForces" : fix_MolViewProperty,
"AtomMasses" : fix_MolViewProperty,
"AtomVelocities" : fix_MolViewProperty,
"AtomPolarisabilities" : fix_MolViewProperty,
"AtomRadii" : fix_MolViewProperty,
"SireMol::ConnectivityEditor" : fix_ConnectivityEditor,
"SireMol::MGName" : fix_MGName,
"SireMol::MGIdx" : fix_MGIdx,
"SireMol::MGNum" : fix_MGNum,
"SireMol::MolNum" : fix_MolNum,
"SireMol::MolName" : fix_MolName,
"SireMol::MolIdx" : fix_MolIdx,
"SireMol::MolInfo" : fix_MolInfo,
"SireMol::MoleculeInfo" : fix_MolInfo,
"SireMol::PerturbationSymbols" : fix_PerturbationSymbols,
"SireMol::CGIdx" : fix_CGIdx,
"SireMol::CGAtomIdx" : fix_CGAtomIdx }
implicitly_convertible = [ ("SireMol::AtomID", "SireMol::AtomIdentifier"),
("SireMol::CGID", "SireMol::CGIdentifier"),
("SireMol::ChainID", "SireMol::ChainIdentifier"),
("SireMol::ResID", "SireMol::ResIdentifier"),
("SireMol::SegID", "SireMol::SegIdentifier"),
("SireMol::MolID", "SireMol::MolIdentifier"),
("SireMol::MGID", "SireMol::MGIdentifier"),
("SireMol::MoleculeView", "SireMol::MoleculeData"),
("SireMol::MoleculeView", "SireMol::PartialMolecule"),
("SireMol::MoleculeInfoData", "SireMol::MoleculeInfo"),
("SireMol::MoleculeInfo", "SireMol::MoleculeInfoData") ]
def fixMB(mb):
mb.add_declaration_code("#include \"SireMol/moleculedata.h\"")
mb.add_declaration_code("#include \"SireMol/moleculeview.h\"")
mb.add_declaration_code("#include \"SireMol/partialmolecule.h\"")
mb.add_declaration_code("#include \"SireMol/mover.hpp\"")
mb.add_declaration_code("#include \"SireMol/mgidentifier.h\"")
mb.add_declaration_code("#include \"SireMol/moleculeinfo.h\"")
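# --- Hedged sketch (assumption: this is NOT the real AutoGenerate driver,
# just an illustration of how the tables above could be consumed by a
# wrapper-generation script). ---
# for c in mb.classes():
#     if c.decl_string in special_code:
#         special_code[c.decl_string](c)      # apply the per-class fix
# for base, derived in implicitly_convertible:
#     mb.add_registration_code(
#         "bp::implicitly_convertible< %s, %s >();" % (base, derived))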
|
codesparkle/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/savefrom.py
|
185
|
# coding: utf-8
from __future__ import unicode_literals
import os.path
import re
from .common import InfoExtractor
class SaveFromIE(InfoExtractor):
IE_NAME = 'savefrom.net'
_VALID_URL = r'https?://[^.]+\.savefrom\.net/\#url=(?P<url>.*)$'
_TEST = {
'url': 'http://en.savefrom.net/#url=http://youtube.com/watch?v=UlVRAPW2WJY&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=ssyoutube.com',
'info_dict': {
'id': 'UlVRAPW2WJY',
'ext': 'mp4',
'title': 'About Team Radical MMA | MMA Fighting',
'upload_date': '20120816',
'uploader': 'Howcast',
'uploader_id': 'Howcast',
'description': 're:(?s).* Hi, my name is Rene Dreifuss\. And I\'m here to show you some MMA.*',
},
'params': {
'skip_download': True
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = os.path.splitext(url.split('/')[-1])[0]
return {
'_type': 'url',
'id': video_id,
'url': mobj.group('url'),
}
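# --- Illustrative sketch (hypothetical standalone use; youtube-dl's
# extractor framework normally drives this class). What _VALID_URL captures
# for the _TEST URL above: ---
# m = re.match(SaveFromIE._VALID_URL,
#              'http://en.savefrom.net/#url=http://youtube.com/watch?v=UlVRAPW2WJY')
# m.group('url')   # -> 'http://youtube.com/watch?v=UlVRAPW2WJY'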
|
Zhongqilong/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/xml/dom/xmlbuilder.py
|
873
|
"""Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
import xml.dom
from xml.dom.NodeFilter import NodeFilter
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
class Options:
"""Features object that has variables set for each DOMBuilder feature.
The DOMBuilder class uses an instance of this class to pass settings to
the ExpatBuilder class.
"""
# Note that the DOMBuilder class in LoadSave constrains which of these
# values can be set using the DOM Level 3 LoadSave feature.
namespaces = 1
namespace_declarations = True
validation = False
external_parameter_entities = True
external_general_entities = True
external_dtd_subset = True
validate_if_schema = False
validate = False
datatype_normalization = False
create_entity_ref_nodes = True
entities = True
whitespace_in_element_content = True
cdata_sections = True
comments = True
charset_overrides_xml_encoding = True
infoset = False
supported_mediatypes_only = False
errorHandler = None
filter = None
class DOMBuilder:
entityResolver = None
errorHandler = None
filter = None
ACTION_REPLACE = 1
ACTION_APPEND_AS_CHILDREN = 2
ACTION_INSERT_AFTER = 3
ACTION_INSERT_BEFORE = 4
_legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)
def __init__(self):
self._options = Options()
def _get_entityResolver(self):
return self.entityResolver
def _set_entityResolver(self, entityResolver):
self.entityResolver = entityResolver
def _get_errorHandler(self):
return self.errorHandler
def _set_errorHandler(self, errorHandler):
self.errorHandler = errorHandler
def _get_filter(self):
return self.filter
def _set_filter(self, filter):
self.filter = filter
def setFeature(self, name, state):
if self.supportsFeature(name):
state = state and 1 or 0
try:
settings = self._settings[(_name_xform(name), state)]
except KeyError:
raise xml.dom.NotSupportedErr(
"unsupported feature: %r" % (name,))
else:
for name, value in settings:
setattr(self._options, name, value)
else:
raise xml.dom.NotFoundErr("unknown feature: " + repr(name))
def supportsFeature(self, name):
return hasattr(self._options, _name_xform(name))
def canSetFeature(self, name, state):
key = (_name_xform(name), state and 1 or 0)
return key in self._settings
# This dictionary maps from (feature,value) to a list of
# (option,value) pairs that should be set on the Options object.
# If a (feature,value) setting is not in this dictionary, it is
# not supported by the DOMBuilder.
#
_settings = {
("namespace_declarations", 0): [
("namespace_declarations", 0)],
("namespace_declarations", 1): [
("namespace_declarations", 1)],
("validation", 0): [
("validation", 0)],
("external_general_entities", 0): [
("external_general_entities", 0)],
("external_general_entities", 1): [
("external_general_entities", 1)],
("external_parameter_entities", 0): [
("external_parameter_entities", 0)],
("external_parameter_entities", 1): [
("external_parameter_entities", 1)],
("validate_if_schema", 0): [
("validate_if_schema", 0)],
("create_entity_ref_nodes", 0): [
("create_entity_ref_nodes", 0)],
("create_entity_ref_nodes", 1): [
("create_entity_ref_nodes", 1)],
("entities", 0): [
("create_entity_ref_nodes", 0),
("entities", 0)],
("entities", 1): [
("entities", 1)],
("whitespace_in_element_content", 0): [
("whitespace_in_element_content", 0)],
("whitespace_in_element_content", 1): [
("whitespace_in_element_content", 1)],
("cdata_sections", 0): [
("cdata_sections", 0)],
("cdata_sections", 1): [
("cdata_sections", 1)],
("comments", 0): [
("comments", 0)],
("comments", 1): [
("comments", 1)],
("charset_overrides_xml_encoding", 0): [
("charset_overrides_xml_encoding", 0)],
("charset_overrides_xml_encoding", 1): [
("charset_overrides_xml_encoding", 1)],
("infoset", 0): [],
("infoset", 1): [
("namespace_declarations", 0),
("validate_if_schema", 0),
("create_entity_ref_nodes", 0),
("entities", 0),
("cdata_sections", 0),
("datatype_normalization", 1),
("whitespace_in_element_content", 1),
("comments", 1),
("charset_overrides_xml_encoding", 1)],
("supported_mediatypes_only", 0): [
("supported_mediatypes_only", 0)],
("namespaces", 0): [
("namespaces", 0)],
("namespaces", 1): [
("namespaces", 1)],
}
def getFeature(self, name):
xname = _name_xform(name)
try:
return getattr(self._options, xname)
except AttributeError:
if name == "infoset":
options = self._options
return (options.datatype_normalization
and options.whitespace_in_element_content
and options.comments
and options.charset_overrides_xml_encoding
and not (options.namespace_declarations
or options.validate_if_schema
or options.create_entity_ref_nodes
or options.entities
or options.cdata_sections))
raise xml.dom.NotFoundErr("feature %s not known" % repr(name))
def parseURI(self, uri):
if self.entityResolver:
input = self.entityResolver.resolveEntity(None, uri)
else:
input = DOMEntityResolver().resolveEntity(None, uri)
return self.parse(input)
def parse(self, input):
options = copy.copy(self._options)
options.filter = self.filter
options.errorHandler = self.errorHandler
fp = input.byteStream
if fp is None and options.systemId:
import urllib.request
fp = urllib.request.urlopen(input.systemId)
return self._parse_bytestream(fp, options)
def parseWithContext(self, input, cnode, action):
if action not in self._legal_actions:
raise ValueError("not a legal action")
raise NotImplementedError("Haven't written this yet...")
def _parse_bytestream(self, stream, options):
import xml.dom.expatbuilder
builder = xml.dom.expatbuilder.makeBuilder(options)
return builder.parseFile(stream)
def _name_xform(name):
return name.lower().replace('-', '_')
class DOMEntityResolver(object):
__slots__ = '_opener',
def resolveEntity(self, publicId, systemId):
assert systemId is not None
source = DOMInputSource()
source.publicId = publicId
source.systemId = systemId
source.byteStream = self._get_opener().open(systemId)
# determine the encoding if the transport provided it
source.encoding = self._guess_media_encoding(source)
        # determine the base URI if we can
import posixpath, urllib.parse
parts = urllib.parse.urlparse(systemId)
scheme, netloc, path, params, query, fragment = parts
# XXX should we check the scheme here as well?
if path and not path.endswith("/"):
path = posixpath.dirname(path) + "/"
parts = scheme, netloc, path, params, query, fragment
source.baseURI = urllib.parse.urlunparse(parts)
return source
def _get_opener(self):
try:
return self._opener
except AttributeError:
self._opener = self._create_opener()
return self._opener
def _create_opener(self):
import urllib.request
return urllib.request.build_opener()
def _guess_media_encoding(self, source):
info = source.byteStream.info()
if "Content-Type" in info:
for param in info.getplist():
if param.startswith("charset="):
return param.split("=", 1)[1].lower()
class DOMInputSource(object):
__slots__ = ('byteStream', 'characterStream', 'stringData',
'encoding', 'publicId', 'systemId', 'baseURI')
def __init__(self):
self.byteStream = None
self.characterStream = None
self.stringData = None
self.encoding = None
self.publicId = None
self.systemId = None
self.baseURI = None
def _get_byteStream(self):
return self.byteStream
def _set_byteStream(self, byteStream):
self.byteStream = byteStream
def _get_characterStream(self):
return self.characterStream
def _set_characterStream(self, characterStream):
self.characterStream = characterStream
def _get_stringData(self):
return self.stringData
def _set_stringData(self, data):
self.stringData = data
def _get_encoding(self):
return self.encoding
def _set_encoding(self, encoding):
self.encoding = encoding
def _get_publicId(self):
return self.publicId
def _set_publicId(self, publicId):
self.publicId = publicId
def _get_systemId(self):
return self.systemId
def _set_systemId(self, systemId):
self.systemId = systemId
def _get_baseURI(self):
return self.baseURI
def _set_baseURI(self, uri):
self.baseURI = uri
class DOMBuilderFilter:
"""Element filter which can be used to tailor construction of
a DOM instance.
"""
# There's really no need for this class; concrete implementations
# should just implement the endElement() and startElement()
# methods as appropriate. Using this makes it easy to only
# implement one of them.
FILTER_ACCEPT = 1
FILTER_REJECT = 2
FILTER_SKIP = 3
FILTER_INTERRUPT = 4
whatToShow = NodeFilter.SHOW_ALL
def _get_whatToShow(self):
return self.whatToShow
def acceptNode(self, element):
return self.FILTER_ACCEPT
def startContainer(self, element):
return self.FILTER_ACCEPT
del NodeFilter
class DocumentLS:
"""Mixin to create documents that conform to the load/save spec."""
async = False
def _get_async(self):
return False
def _set_async(self, async):
if async:
raise xml.dom.NotSupportedErr(
"asynchronous document loading is not supported")
def abort(self):
# What does it mean to "clear" a document? Does the
# documentElement disappear?
raise NotImplementedError(
"haven't figured out what this means yet")
def load(self, uri):
raise NotImplementedError("haven't written this yet")
def loadXML(self, source):
raise NotImplementedError("haven't written this yet")
def saveXML(self, snode):
if snode is None:
snode = self
elif snode.ownerDocument is not self:
raise xml.dom.WrongDocumentErr()
return snode.toxml()
class DOMImplementationLS:
MODE_SYNCHRONOUS = 1
MODE_ASYNCHRONOUS = 2
def createDOMBuilder(self, mode, schemaType):
if schemaType is not None:
raise xml.dom.NotSupportedErr(
"schemaType not yet supported")
if mode == self.MODE_SYNCHRONOUS:
return DOMBuilder()
if mode == self.MODE_ASYNCHRONOUS:
raise xml.dom.NotSupportedErr(
"asynchronous builders are not supported")
raise ValueError("unknown value for mode")
def createDOMWriter(self):
raise NotImplementedError(
"the writer interface hasn't been written yet!")
def createDOMInputSource(self):
return DOMInputSource()
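# --- Minimal usage sketch (assumes a parseable XML document at `uri`;
# network access goes through DOMEntityResolver's urllib opener). ---
# impl = DOMImplementationLS()
# builder = impl.createDOMBuilder(DOMImplementationLS.MODE_SYNCHRONOUS, None)
# builder.setFeature("entities", 0)   # expand entity references in the tree
# doc = builder.parseURI(uri)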
|
jimklo/LearningRegistry
|
refs/heads/master
|
data-pumps/del-contents.py
|
2
|
# Copyright 2011 SRI International
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Simple script to delete all non-design docs out of couch.
Created on Feb 15, 2011
@author: jklo
'''
uri="http://localhost:5984"
path="/lrdemo/_design/envelope/_view/docs"
from restkit.resource import Resource
import json
def main():
res = Resource(uri)
clientResponse = res.get(path=path, headers={"Content-Type":"application/json"})
body = clientResponse.body_string()
docs = json.loads(body)
deletable = []
for result in docs["rows"]:
doc = result[u'value']
        if u'_id' in doc and u'_rev' in doc:
            deletable.append({ "_id": doc[u'_id'], "_rev": doc[u'_rev'], "_deleted": True })
clientResponse = res.post("/lrdemo/_bulk_docs", payload=json.dumps({"all_or_nothing": True, "docs": deletable}), headers={"Content-Type":"application/json"})
body = clientResponse.body_string()
print(body)
print ("done")
if __name__ == '__main__':
main()
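# --- Shape of the _bulk_docs payload built above (illustrative values): ---
# {"all_or_nothing": true,
#  "docs": [{"_id": "doc1", "_rev": "1-abc", "_deleted": true}, ...]}
# CouchDB treats each entry carrying "_deleted": true as a deletion of that
# document revision.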
|
mycelial/spore
|
refs/heads/master
|
jni/pjsip/sources/tests/pjsua/scripts-sendto/252_multipart_ok_clutter.py
|
57
|
# $Id: 252_multipart_ok_clutter.py 3243 2010-08-01 09:48:51Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
body = \
"""
This is the preamble. It is to be ignored, though it
is a handy place for composition agents to include an
explanatory note to non-MIME conformant readers.
--123:45
Content-Type: text/plain
The first part is definitely not SDP
--123:45
This is implicitly typed plain US-ASCII text.
It does NOT end with a linebreak.
--123:45
Content-Type: application/sdp
v=0
o=- 0 0 IN IP4 127.0.0.1
s=pjmedia
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
--123:45--
This is the epilogue. It is also to be ignored.
"""
args = "--null-audio --auto-answer 200 --max-calls 1"
extra_headers = "Content-Type: multipart/mixed; boundary=\"123:45\""
include = ["v=0", "m=audio"]
exclude = []
sendto_cfg = sip.SendtoCfg( "Valid but cluttered multipart/mixed body containing SDP",
pjsua_args=args, sdp="", resp_code=200,
extra_headers=extra_headers, body=body,
resp_inc=include, resp_exc=exclude)
|
shams169/pythonProject
|
refs/heads/master
|
env/lib/python3.6/site-packages/pip/_vendor/requests/packages/urllib3/util/request.py
|
780
|
from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
|
dezynetechnologies/odoo
|
refs/heads/8.0
|
addons/sale_layout/models/sale_layout.py
|
180
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
"""Return lines from a specified invoice or sale order grouped by category"""
grouped_lines = []
for key, valuesiter in groupby(ordered_lines, sortkey):
group = {}
group['category'] = key
group['lines'] = list(v for v in valuesiter)
if 'subtotal' in key and key.subtotal is True:
group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
grouped_lines.append(group)
return grouped_lines
class SaleLayoutCategory(osv.Model):
_name = 'sale_layout.category'
_order = 'sequence, id'
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True),
'subtotal': fields.boolean('Add subtotal'),
'separator': fields.boolean('Add separator'),
'pagebreak': fields.boolean('Add pagebreak')
}
_defaults = {
'subtotal': True,
'separator': True,
'pagebreak': False,
'sequence': 10
}
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
"""
Returns invoice lines from a specified invoice ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'invoice_id' (int): specify the concerned invoice.
"""
ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
        # Group by category record; lines without a category fall into one unnamed ('') group
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
import openerp
class AccountInvoiceLine(osv.Model):
_inherit = 'account.invoice.line'
_order = 'invoice_id, categ_sequence, sequence, id'
sale_layout_cat_id = openerp.fields.Many2one('sale_layout.category', string='Section')
categ_sequence = openerp.fields.Integer(related='sale_layout_cat_id.sequence',
string='Layout Sequence', store=True)
_defaults = {
'categ_sequence': 0
}
class SaleOrder(osv.Model):
_inherit = 'sale.order'
def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
"""
Returns order lines from a specified sale ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'order_id' (int): specify the concerned sale order.
"""
ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class SaleOrderLine(osv.Model):
_inherit = 'sale.order.line'
_columns = {
'sale_layout_cat_id': fields.many2one('sale_layout.category',
string='Section'),
'categ_sequence': fields.related('sale_layout_cat_id',
'sequence', type='integer',
string='Layout Sequence', store=True)
# Store is intentionally set in order to keep the "historic" order.
}
_defaults = {
'categ_sequence': 0
}
_order = 'order_id, categ_sequence, sale_layout_cat_id, sequence, id'
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Save the layout when converting to an invoice line."""
invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
if line.sale_layout_cat_id:
invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
if line.categ_sequence:
invoice_vals['categ_sequence'] = line.categ_sequence
return invoice_vals
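# --- Shape of the grouplines() output used by both models above
# (illustrative values, not real records): ---
# [{'category': <sale_layout.category record or ''>,
#   'lines': [<invoice/order line record>, ...],
#   'subtotal': 42.0},   # present only when the category has subtotal=True
#  ...]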
|
janusnic/main-pyflakes
|
refs/heads/master
|
pyflakes/messages.py
|
14
|
"""
Provide the class Message and its subclasses.
"""
class Message(object):
message = ''
message_args = ()
def __init__(self, filename, loc):
self.filename = filename
self.lineno = loc.lineno
self.col = getattr(loc, 'col_offset', 0)
def __str__(self):
return '%s:%s: %s' % (self.filename, self.lineno,
self.message % self.message_args)
class UnusedImport(Message):
message = '%r imported but unused'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class RedefinedWhileUnused(Message):
message = 'redefinition of unused %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class RedefinedInListComp(Message):
message = 'list comprehension redefines %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportShadowedByLoopVar(Message):
message = 'import %r from line %r shadowed by loop variable'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportStarUsed(Message):
message = "'from %s import *' used; unable to detect undefined names"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
class UndefinedName(Message):
message = 'undefined name %r'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class DoctestSyntaxError(Message):
message = 'syntax error in doctest'
def __init__(self, filename, loc, position=None):
Message.__init__(self, filename, loc)
if position:
(self.lineno, self.col) = position
self.message_args = ()
class UndefinedExport(Message):
message = 'undefined name %r in __all__'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class UndefinedLocal(Message):
message = ('local variable %r (defined in enclosing scope on line %r) '
'referenced before assignment')
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class DuplicateArgument(Message):
message = 'duplicate argument %r in function definition'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class Redefined(Message):
message = 'redefinition of %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class LateFutureImport(Message):
message = 'future import(s) %r after other statements'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class UnusedVariable(Message):
"""
    Indicates that a variable has been explicitly assigned to but not actually
used.
"""
message = 'local variable %r is assigned to but never used'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
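# --- Illustrative demo (not part of pyflakes); _DemoLoc is a hypothetical
# stand-in for an AST node carrying location info. ---
if __name__ == '__main__':
    class _DemoLoc(object):
        lineno = 3
        col_offset = 7
    print(UnusedImport('example.py', _DemoLoc(), 'os'))
    # -> example.py:3: 'os' imported but unused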
|
ewdurbin/raven-python
|
refs/heads/master
|
tests/__init__.py
|
63
|
VERSION = 1.0
|
hgl888/chromium-crosswalk-efl
|
refs/heads/efl/crosswalk-10/39.0.2171.19
|
third_party/cython/src/Cython/Tempita/__init__.py
|
101
|
# The original Tempita implements all of its templating code here.
# Moved it to _tempita.py to make the compilation portable.
from _tempita import *
|
Adnn/django
|
refs/heads/master
|
django/core/mail/backends/base.py
|
577
|
"""Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
open() and close() can be called indirectly by using a backend object as a
context manager:
with backend as connection:
# do something with connection
pass
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError('subclasses of BaseEmailBackend must override send_messages() method')
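# --- Minimal concrete backend sketch (illustrative only; Django's real
# backends live in django.core.mail.backends.*). ---
class _PrintingBackend(BaseEmailBackend):
    """Hypothetical backend that 'sends' by printing message subjects."""
    def send_messages(self, email_messages):
        count = 0
        for message in email_messages:
            print(message.subject)  # stand-in for actual delivery
            count += 1
        return count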
|
lizardsystem/lizard5-apps
|
refs/heads/master
|
lizard_wms/testsettings.py
|
1
|
import logging
import os
from lizard_ui.settingshelper import STATICFILES_FINDERS
logging.basicConfig(
level=logging.DEBUG,
format='%(name)s %(levelname)s %(message)s')
DEBUG = True
TEMPLATE_DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'test.db'
SITE_ID = 1
INSTALLED_APPS = [
'lizard_wms',
'lizard_ui',
'lizard_map',
'lizard_maptree',
'lizard_security',
'south',
'staticfiles',
'compressor',
'django_nose',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
]
ROOT_URLCONF = 'lizard_wms.urls'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
DATABASES = {
'default': {'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db'},
}
TEMPLATE_CONTEXT_PROCESSORS = (
# Default items.
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
# Needs to be added for django-staticfiles to allow you to use
# {{ STATIC_URL }}myapp/my.css in your templates.
'staticfiles.context_processors.static_url',
# For lizard-map
"django.core.context_processors.request",
)
# SETTINGS_DIR allows media paths and so to be relative to this settings file
# instead of hardcoded to c:\only\on\my\computer.
SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__))
# BUILDOUT_DIR is for access to the "surrounding" buildout, for instance for
# BUILDOUT_DIR/var/static files to give django-staticfiles a proper place
# to place all collected static files.
BUILDOUT_DIR = os.path.abspath(os.path.join(SETTINGS_DIR, '..'))
# Absolute path to the directory that holds user-uploaded media.
MEDIA_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'media')
# Absolute path to the directory where django-staticfiles'
# "bin/django build_static" places all collected static files from all
# applications' /media directory.
STATIC_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'static')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
MEDIA_URL = '/media/'
# URL for the per-application /media static files collected by
# django-staticfiles. Use it in templates like
# "{{ MEDIA_URL }}mypackage/my.css".
STATIC_URL = '/static_media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash. Uses STATIC_URL as django-staticfiles nicely collects
# admin's static media into STATIC_ROOT/admin.
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
# Almere base layer
MAP_SETTINGS = {
'base_layer_type': 'WMS', # OSM or WMS
'projection': 'EPSG:28992', # EPSG:900913, EPSG:28992
'display_projection': 'EPSG:28992', # EPSG:900913/28992/4326
'startlocation_x': '127000',
'startlocation_y': '473000',
'startlocation_zoom': '4',
'base_layer_wms': (
'http://kaart.almere.nl/wmsconnector/com.esri.wms.Esrimap?'
'SERVICENAME=AIKWMS&'),
'base_layer_wms_layers': (
'KLHOpenbaargebied,KLHBebouwing,KLHBedrijventerrein,KLHBos,'
'KLHWater,KLHStrand,KLHHoofdweg,KLHWeg,KLHVliegveld,KLHSnelweg,'
'KLHGemeentegrens,KLHBusbaan,KLHSpoorlijn,KLHTeksten,'
'KLTAutosnelweg,KLTBebouwingCentrum,KLTBedrijven,'
'KLTBedrijventerrein,KLTBijzondereBebouwing,KLTBosvak,KLTCentrum,'
'KLTFietspad,KLTGras,KLTHoofdweg,KLTLandbouwVeeteelt,KLTMoerasNatuur,'
'KLTOVbaan,KLTOverigePaden,KLTSpoorlijnWit,KLTSpoorlijnZwart,'
'KLTSportvelden,KLTSteiger,KLTStrand,KLTWater,KLTWijkwegen,'
'KLTWoningen,KLTWoongebied,KLTGemeentegrens,KLTHoogspanningsleiding,'
'KLTHoogspanningsmasten,KLTInOntwerp,KLTKabelbaan,KLTKavelsloot,'
'KLTWijknamen,KLTParknamen,KLTOpenwaternamen,KLTIndustrienamen,'
'KLTDreefnamen,GBKAWater,GBKAGras,GBKAPlantvak,GBKABeton,GBKABosvak,'
'GBKABraakLiggend,GBKAAsfalt'),
}
# MAP_SETTINGS = {
# 'base_layer_type': 'WMS', # OSM or WMS
# 'projection': 'EPSG:28992', # EPSG:900913, EPSG:28992
# 'display_projection': 'EPSG:28992', # EPSG:900913/28992/4326
# 'startlocation_x': '127000',
# 'startlocation_y': '473000',
# 'startlocation_zoom': '4',
# 'base_layer_wms': (
# 'http://nederlandwms.risicokaart.nl/wmsconnector/'
# 'com.esri.wms.Esrimap?'
# 'SERVICENAME=risicokaart_pub_nl_met_ondergrond&'),
# 'base_layer_wms_layers': (
# 'Outline_nederland,Dissolve_provincies,0,2,12,3,38,5,4,9,10'),
# }
SKIP_SOUTH_TESTS = True
SOUTH_TESTS_MIGRATE = False
LIZARD_WMS_STANDALONE = True
try:
# Import local settings that aren't stored in svn.
from lizard_wms.local_testsettings import *
except ImportError:
pass
|
jgoclawski/django
|
refs/heads/master
|
django/contrib/gis/db/backends/spatialite/models.py
|
510
|
"""
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.contrib.gis.db.backends.spatialite.base import DatabaseWrapper
from django.db import connection, models
from django.db.backends.signals import connection_created
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SpatialiteGeometryColumns(models.Model):
"""
The 'geometry_columns' table from SpatiaLite.
"""
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
spatial_index_enabled = models.IntegerField()
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialiteSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from SpatiaLite.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
ref_sys_name = models.CharField(max_length=256)
proj4text = models.CharField(max_length=2048)
@property
def wkt(self):
if hasattr(self, 'srtext'):
return self.srtext
from django.contrib.gis.gdal import SpatialReference
return SpatialReference(self.proj4text).wkt
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
def add_spatial_version_related_fields(sender, **kwargs):
"""
Adds fields after establishing a database connection to prevent database
operations at compile time.
"""
if connection_created.disconnect(add_spatial_version_related_fields, sender=DatabaseWrapper):
spatial_version = connection.ops.spatial_version[0]
if spatial_version >= 4:
SpatialiteSpatialRefSys.add_to_class('srtext', models.CharField(max_length=2048))
SpatialiteGeometryColumns.add_to_class('type', models.IntegerField(db_column='geometry_type'))
else:
SpatialiteGeometryColumns.add_to_class('type', models.CharField(max_length=30))
connection_created.connect(add_spatial_version_related_fields, sender=DatabaseWrapper)
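# --- The self-disconnecting signal pattern used above, in miniature
# (illustrative; `my_handler` is hypothetical): ---
# def my_handler(sender, **kwargs):
#     if connection_created.disconnect(my_handler, sender=DatabaseWrapper):
#         ...  # body runs exactly once, on the first successful connection
# connection_created.connect(my_handler, sender=DatabaseWrapper)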
|
morreene/tradenews
|
refs/heads/master
|
venv/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py
|
328
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, binary_type
from pip._vendor.six.moves import http_client, urllib
import codecs
import re
from pip._vendor import webencodings
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import ReparseException
from . import _utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa
if _utils.supports_lone_surrogates:
# Use one extra step of indirection and create surrogates with
# eval. Not using this indirection would introduce an illegal
# unicode literal on platforms not supporting such lone
# surrogates.
assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used
"]")
else:
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, **kwargs):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
if (isinstance(source, http_client.HTTPResponse) or
# Also check for addinfourl wrapping HTTPResponse
(isinstance(source, urllib.response.addbase) and
isinstance(source.fp, http_client.HTTPResponse))):
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
encodings = [x for x in kwargs if x.endswith("_encoding")]
if encodings:
raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
return HTMLUnicodeInputStream(source, **kwargs)
else:
return HTMLBinaryInputStream(source, **kwargs)
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
if not _utils.supports_lone_surrogates:
# Such platforms will have already checked for such
# surrogate errors, so no need to do this checking.
self.reportCharacterErrors = None
elif len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
else:
self.reportCharacterErrors = self.characterErrorsUCS2
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (lookupEncoding("utf-8"), "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
if self.reportCharacterErrors:
self.reportCharacterErrors(data)
# Replace invalid characters
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for _ in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if _utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, override_encoding=None, transport_encoding=None,
same_origin_parent_encoding=None, likely_encoding=None,
default_encoding="windows-1252", useChardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 1024
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Things from args
self.override_encoding = override_encoding
self.transport_encoding = transport_encoding
self.same_origin_parent_encoding = same_origin_parent_encoding
self.likely_encoding = likely_encoding
self.default_encoding = default_encoding
# Determine encoding
self.charEncoding = self.determineEncoding(useChardet)
assert self.charEncoding[0] is not None
# Call superclass
self.reset()
def reset(self):
self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except: # pylint:disable=bare-except
stream = BufferedStream(stream)
return stream
def determineEncoding(self, chardet=True):
# BOMs take precedence over everything
# This will also read past the BOM if present
charEncoding = self.detectBOM(), "certain"
if charEncoding[0] is not None:
return charEncoding
        # If we've been overridden, we've been overridden
charEncoding = lookupEncoding(self.override_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Now check the transport layer
charEncoding = lookupEncoding(self.transport_encoding), "certain"
if charEncoding[0] is not None:
return charEncoding
# Look for meta elements with encoding information
charEncoding = self.detectEncodingMeta(), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Parent document encoding
charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
return charEncoding
# "likely" encoding
charEncoding = lookupEncoding(self.likely_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Guess with chardet, if available
if chardet:
try:
from chardet.universaldetector import UniversalDetector
except ImportError:
pass
else:
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = lookupEncoding(detector.result['encoding'])
self.rawStream.seek(0)
if encoding is not None:
return encoding, "tentative"
# Try the default encoding
charEncoding = lookupEncoding(self.default_encoding), "tentative"
if charEncoding[0] is not None:
return charEncoding
# Fallback to html5lib's default if even that hasn't worked
return lookupEncoding("windows-1252"), "tentative"
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = lookupEncoding(newEncoding)
if newEncoding is None:
return
if newEncoding.name in ("utf-16be", "utf-16le"):
newEncoding = lookupEncoding("utf-8")
assert newEncoding is not None
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.charEncoding = (newEncoding, "certain")
self.reset()
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
if encoding:
self.rawStream.seek(seek)
return lookupEncoding(encoding)
else:
self.rawStream.seek(0)
return None
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
encoding = lookupEncoding("utf-8")
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
# pylint:disable=unused-argument
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for _ in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
            # <meta is not followed by a space, so just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = lookupEncoding(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = lookupEncoding(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def lookupEncoding(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, binary_type):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding is not None:
try:
return webencodings.lookup(encoding)
except AttributeError:
return None
else:
return None
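# ----------------------------------------------------------------------
# A minimal usage sketch (not part of html5lib itself; added for
# illustration). The sample markup is a made-up assumption; only
# HTMLInputStream and charsUntil come from this module.
if __name__ == "__main__":
    stream = HTMLInputStream(b'<meta charset="utf-8"><p>hi</p>')
    # The binary stream sniffs its encoding from the meta element.
    print(stream.charEncoding)
    # charsUntil() consumes characters up to, but not including, any
    # character in the given set.
    print(stream.charsUntil(frozenset([">"])))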
|
VirusTotal/misp-modules
|
refs/heads/master
|
misp_modules/modules/import_mod/ocr.py
|
2
|
import sys
import json
import base64
from io import BytesIO
import logging
log = logging.getLogger('ocr')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
misperrors = {'error': 'Error'}
userConfig = {}
inputSource = ['file']
moduleinfo = {'version': '0.2', 'author': 'Alexandre Dulaunoy',
'description': 'Optical Character Recognition (OCR) module for MISP',
'module-type': ['import']}
moduleconfig = []
def handler(q=False):
# try to import modules and return errors if module not found
try:
from PIL import Image
except ImportError:
misperrors['error'] = "Please pip(3) install pillow"
return misperrors
try:
# Official ImageMagick module
from wand.image import Image as WImage
except ImportError:
misperrors['error'] = "Please pip(3) install wand"
return misperrors
try:
from pytesseract import image_to_string
except ImportError:
misperrors['error'] = "Please pip(3) install pytesseract"
return misperrors
if q is False:
return False
r = {'results': []}
request = json.loads(q)
document = base64.b64decode(request["data"])
document = WImage(blob=document)
if document.format == 'PDF':
with document as pdf:
# Get number of pages
pages = len(pdf.sequence)
log.debug("PDF with {} page(s) detected".format(pages))
            # Create a new image whose height is the page height times the number of pages. With huge PDFs this will overflow, break, consume silly memory etc…
img = WImage(width=pdf.width, height=pdf.height * pages)
# Cycle through pages and stitch it together to one big file
for p in range(pages):
log.debug("Stitching page {}".format(p + 1))
                # composite() draws onto img in place; its return value is None
                img.composite(pdf.sequence[p], top=pdf.height * p, left=0)
# Create a png blob
image = img.make_blob('png')
log.debug("Final image size is {}x{}".format(pdf.width, pdf.height * (p + 1)))
else:
image = document
image_file = BytesIO(image)
image_file.seek(0)
try:
im = Image.open(image_file)
except IOError:
misperrors['error'] = "Corrupt or not an image file."
return misperrors
ocrized = image_to_string(im)
freetext = {}
freetext['values'] = ocrized
freetext['types'] = ['freetext']
r['results'].append(freetext)
return r
def introspection():
modulesetup = {}
try:
userConfig
modulesetup['userConfig'] = userConfig
except NameError:
pass
try:
inputSource
modulesetup['inputSource'] = inputSource
except NameError:
pass
return modulesetup
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
if __name__ == '__main__':
    with open('test.json', 'r') as x:
        handler(q=x.read())
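# A hypothetical shape for the 'test.json' used above (an assumption,
# inferred from how handler() reads the request): the file payload must
# be base64-encoded under the "data" key, e.g.
#
#     {"data": "<base64-encoded image or PDF bytes>"}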
|
atodorov/anaconda
|
refs/heads/master
|
pyanaconda/modules/storage/storage_interface.py
|
6
|
#
# DBus interface for the storage.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from dasbus.server.property import emits_properties_changed
from pyanaconda.modules.common.constants.services import STORAGE
from pyanaconda.modules.common.base import KickstartModuleInterface
from dasbus.server.interface import dbus_interface
from dasbus.typing import * # pylint: disable=wildcard-import
from pyanaconda.modules.common.containers import PartitioningContainer, TaskContainer
from pyanaconda.modules.storage.partitioning.constants import PartitioningMethod
@dbus_interface(STORAGE.interface_name)
class StorageInterface(KickstartModuleInterface):
"""DBus interface for Storage module."""
def connect_signals(self):
"""Connect the signals."""
super().connect_signals()
self.watch_property(
"CreatedPartitioning", self.implementation.created_partitioning_changed
)
self.watch_property(
"AppliedPartitioning", self.implementation.applied_partitioning_changed
)
def ScanDevicesWithTask(self) -> ObjPath:
"""Scan all devices with a task.
:return: a path to a task
"""
return TaskContainer.to_object_path(
self.implementation.scan_devices_with_task()
)
@emits_properties_changed
def CreatePartitioning(self, method: Str) -> ObjPath:
"""Create a new partitioning.
Allowed values:
AUTOMATIC
CUSTOM
MANUAL
INTERACTIVE
BLIVET
:param method: a partitioning method
:return: a path to a partitioning
"""
return PartitioningContainer.to_object_path(
self.implementation.create_partitioning(PartitioningMethod(method))
)
@property
def CreatedPartitioning(self) -> List[ObjPath]:
"""List of all created partitioning modules.
:return: a list of DBus paths
"""
return PartitioningContainer.to_object_path_list(
self.implementation.created_partitioning
)
@emits_properties_changed
def ApplyPartitioning(self, partitioning: ObjPath):
"""Apply the partitioning.
:param partitioning: a path to a partitioning
"""
self.implementation.apply_partitioning(
PartitioningContainer.from_object_path(partitioning)
)
@property
def AppliedPartitioning(self) -> Str:
"""The applied partitioning.
An empty string is not a valid object path, so
the return type has to be a string in this case.
:return: a DBus path or an empty string
"""
partitioning = self.implementation.applied_partitioning
if not partitioning:
return ""
return PartitioningContainer.to_object_path(partitioning)
@emits_properties_changed
def ResetPartitioning(self):
"""Reset the scheduled partitioning.
Reset the applied partitioning and reset the storage models of all
partitioning modules to the latest model of the system’s storage
configuration.
This method will not rescan the system.
"""
self.implementation.reset_partitioning()
def WriteConfigurationWithTask(self) -> ObjPath:
"""Write the storage configuration with a task.
FIXME: This is a temporary workaround.
:return: an installation task
"""
return TaskContainer.to_object_path(
self.implementation.write_configuration_with_task()
)
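# A hypothetical client-side sketch (get_proxy() is the usual pyanaconda
# pattern and an assumption here; the method names and the allowed
# partitioning values come from this interface):
#
#     storage = STORAGE.get_proxy()
#     task_path = storage.ScanDevicesWithTask()
#     partitioning_path = storage.CreatePartitioning("AUTOMATIC")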
|
eugene1g/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/__init__.py
|
135
|
# Required for Python to search this directory for module files
from .checkout import Checkout
|
harlanhaskins/swift
|
refs/heads/master
|
utils/pass-pipeline/src/pass_pipeline_library.py
|
77
|
import pass_pipeline as ppipe
import passes as p
def simplifycfg_silcombine_passlist():
return ppipe.PassList([
p.SimplifyCFG,
p.SILCombine,
p.SimplifyCFG,
])
def highlevel_loopopt_passlist():
return ppipe.PassList([
p.LowerAggregateInstrs,
p.SILCombine,
p.SROA,
p.Mem2Reg,
p.DCE,
p.SILCombine,
simplifycfg_silcombine_passlist(),
p.LoopRotate,
p.DCE,
p.CSE,
p.SILCombine,
p.SimplifyCFG,
p.ABCOpt,
p.DCE,
p.COWArrayOpts,
p.DCE,
p.SwiftArrayOpts,
])
def lowlevel_loopopt_passlist():
return ppipe.PassList([
p.LICM,
p.DCE,
p.CSE,
p.SILCombine,
p.SimplifyCFG,
])
def inliner_for_optlevel(optlevel):
if optlevel == 'high':
return p.EarlyInliner
elif optlevel == 'mid':
return p.PerfInliner
elif optlevel == 'low':
return p.LateInliner
else:
raise RuntimeError('Unknown opt level')
def ssapass_passlist(optlevel):
return ppipe.PassList([
simplifycfg_silcombine_passlist(),
p.AllocBoxToStack,
p.CopyForwarding,
p.LowerAggregateInstrs,
p.SILCombine,
p.SROA,
p.Mem2Reg,
p.PerformanceConstantPropagation,
p.DCE,
p.CSE,
p.SILCombine,
simplifycfg_silcombine_passlist(),
p.GlobalLoadStoreOpts,
# Need to add proper argument here
p.CodeMotion,
p.GlobalARCOpts,
p.SpeculativeDevirtualizer,
p.SILLinker,
inliner_for_optlevel(optlevel),
p.SimplifyCFG,
p.CodeMotion,
p.GlobalARCOpts,
])
def lower_passlist():
return ppipe.PassList([
p.DeadFunctionElimination,
p.DeadObjectElimination,
p.GlobalOpt,
p.CapturePropagation,
p.ClosureSpecializer,
p.SpeculativeDevirtualizer,
p.FunctionSignatureOpts,
])
def normal_passpipelines():
result = []
x = ppipe.PassPipeline('HighLevel', {'name': 'run_n_times', 'count': 2})
x.add_pass(ssapass_passlist('high'))
result.append(x)
x = ppipe.PassPipeline('EarlyLoopOpt', {'name': 'run_n_times', 'count': 1})
x.add_pass(highlevel_loopopt_passlist())
result.append(x)
x = ppipe.PassPipeline('MidLevelOpt', {'name': 'run_n_times', 'count': 2})
x.add_pass(ssapass_passlist('mid'))
result.append(x)
x = ppipe.PassPipeline('Lower', {'name': 'run_to_fixed_point'})
x.add_pass(lower_passlist())
result.append(x)
x = ppipe.PassPipeline('LowLevel', {'name': 'run_n_times', 'count': 1})
x.add_pass(ssapass_passlist('low'))
result.append(x)
x = ppipe.PassPipeline('LateLoopOpt', {'name': 'run_n_times', 'count': 1})
x.add_pass([lowlevel_loopopt_passlist(), p.DeadFunctionElimination])
result.append(x)
return result
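# A minimal usage sketch (an illustration only, not part of the original
# module): build the standard pipelines defined above and print them.
if __name__ == '__main__':
    for pipeline in normal_passpipelines():
        print(pipeline)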
|
habibmasuro/kivy
|
refs/heads/master
|
kivy/uix/spinner.py
|
24
|
'''
Spinner
=======
.. versionadded:: 1.4.0
.. image:: images/spinner.jpg
:align: right
Spinner is a widget that provides a quick way to select one value from a set.
In the default state, a spinner shows its currently selected value.
Touching the spinner displays a dropdown menu with all the other available
values from which the user can select a new one.
Example::
from kivy.base import runTouchApp
from kivy.uix.spinner import Spinner
spinner = Spinner(
# default value shown
text='Home',
# available values
values=('Home', 'Work', 'Other', 'Custom'),
# just for positioning in our example
size_hint=(None, None),
size=(100, 44),
pos_hint={'center_x': .5, 'center_y': .5})
def show_selected_value(spinner, text):
        print('The spinner', spinner, 'has text', text)
spinner.bind(text=show_selected_value)
runTouchApp(spinner)
'''
__all__ = ('Spinner', 'SpinnerOption')
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.properties import ListProperty, ObjectProperty, BooleanProperty
from kivy.uix.button import Button
from kivy.uix.dropdown import DropDown
class SpinnerOption(Button):
'''Special button used in the :class:`Spinner` dropdown list. By default,
this is just a :class:`~kivy.uix.button.Button` with a size_hint_y of None
and a height of :meth:`48dp <kivy.metrics.dp>`.
'''
pass
class Spinner(Button):
'''Spinner class, see module documentation for more information.
'''
values = ListProperty()
'''Values that can be selected by the user. It must be a list of strings.
:attr:`values` is a :class:`~kivy.properties.ListProperty` and defaults to
[].
'''
option_cls = ObjectProperty(SpinnerOption)
'''Class used to display the options within the dropdown list displayed
under the Spinner. The `text` property of the class will be used to
represent the value.
The option class requires:
- a `text` property, used to display the value.
- an `on_release` event, used to trigger the option when pressed/touched.
- a :attr:`~kivy.uix.widget.Widget.size_hint_y` of None.
- the :attr:`~kivy.uix.widget.Widget.height` to be set.
:attr:`option_cls` is an :class:`~kivy.properties.ObjectProperty` and
defaults to :class:`SpinnerOption`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
dropdown_cls = ObjectProperty(DropDown)
'''Class used to display the dropdown list when the Spinner is pressed.
:attr:`dropdown_cls` is an :class:`~kivy.properties.ObjectProperty` and
defaults to :class:`~kivy.uix.dropdown.DropDown`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
is_open = BooleanProperty(False)
'''By default, the spinner is not open. Set to True to open it.
:attr:`is_open` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
.. versionadded:: 1.4.0
'''
def __init__(self, **kwargs):
self._dropdown = None
super(Spinner, self).__init__(**kwargs)
fbind = self.fbind
build_dropdown = self._build_dropdown
fbind('on_release', self._toggle_dropdown)
fbind('dropdown_cls', build_dropdown)
fbind('option_cls', build_dropdown)
fbind('values', self._update_dropdown)
build_dropdown()
def _build_dropdown(self, *largs):
if self._dropdown:
self._dropdown.unbind(on_select=self._on_dropdown_select)
self._dropdown.unbind(on_dismiss=self._close_dropdown)
self._dropdown.dismiss()
self._dropdown = None
cls = self.dropdown_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
self._dropdown = cls()
self._dropdown.bind(on_select=self._on_dropdown_select)
self._dropdown.bind(on_dismiss=self._close_dropdown)
self._update_dropdown()
def _update_dropdown(self, *largs):
dp = self._dropdown
cls = self.option_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
dp.clear_widgets()
for value in self.values:
item = cls(text=value)
item.bind(on_release=lambda option: dp.select(option.text))
dp.add_widget(item)
def _toggle_dropdown(self, *largs):
self.is_open = not self.is_open
def _close_dropdown(self, *largs):
self.is_open = False
def _on_dropdown_select(self, instance, data, *largs):
self.text = data
self.is_open = False
def on_is_open(self, instance, value):
if value:
self._dropdown.open(self)
else:
if self._dropdown.attach_to:
self._dropdown.dismiss()
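if __name__ == '__main__':
    # A small self-contained demo (a sketch mirroring the docstring
    # example above); BigOption is a made-up illustration of the
    # option_cls requirements documented earlier.
    from kivy.base import runTouchApp
    from kivy.metrics import dp
    class BigOption(SpinnerOption):
        def __init__(self, **kwargs):
            super(BigOption, self).__init__(**kwargs)
            self.height = dp(72)  # an explicit height, as option_cls requires
    spinner = Spinner(
        text='Home',
        values=('Home', 'Work', 'Other', 'Custom'),
        option_cls=BigOption,
        size_hint=(None, None),
        size=(100, 44),
        pos_hint={'center_x': .5, 'center_y': .5})
    runTouchApp(spinner)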
|
bsmrstu-warriors/Moytri--The-Drone-Aider
|
refs/heads/master
|
Lib/site-packages/numpy/doc/glossary.py
|
81
|
"""
========
Glossary
========
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
   Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
     # an integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called `ufuncs`_, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
BLAS
`Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
[4]])
>>> x + y
array([[4, 5],
[5, 6]])
See `doc.broadcasting`_ for more information.
C order
See `row-major`
column-major
   A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
... def new_logging_func(*args, **kwargs):
     ...         print("Logging call with parameters:", args, kwargs)
... return f(*args, **kwargs)
...
... return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
... return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <http://docs.python.org/tut>`_.
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
immutable
   An object that cannot be modified after construction is called
   immutable. Two common examples are strings and tuples.
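   For example::
     >>> x = (1, 2)
     >>> x[0] = 3
     Traceback (most recent call last):
       ...
     TypeError: 'tuple' object does not support item assignment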
instance
A class definition gives the blueprint for constructing an object::
>>> class House(object):
... wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
   It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
     ...     print("Key %d: %s" % (n, k))
...
Key 0: a
Key 1: b
Key 2: c
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
   The list `x` contains 4 items, each of which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
For more information, read the section on lists in the `Python
tutorial <http://docs.python.org/tut>`_. For a mapping
type (key-value), see *dictionary*.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True], dtype=bool)
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
   An array that suppresses values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
<BLANKLINE>
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
[3, 4]])
>>> x**2
matrix([[ 7, 10],
[15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
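   For example::
     >>> x = [1, 2]
     >>> y = x
     >>> y is x
     True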
row-major
   A way to represent items in an N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New Numpy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush(object):
... color = 'blue'
...
... def paint(self):
     ...         print("Painting the city %s!" % self.color)
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence::
     >>> x = list(range(5))
     >>> x
     [0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
   Arrays may have more than one dimension, each of which can be sliced
individually::
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
[3, 4]])
>>> x[:, 1]
array([2, 4])
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
... return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise array operation. Examples include
``add``, ``sin`` and ``logical_or``.
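   For example::
     >>> np.add([1, 2], [3, 4])
     array([4, 6])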
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
high and the low level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
"""
|
zedr/django
|
refs/heads/master
|
docs/_ext/djangodocs.py
|
15
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes, __version__ as sphinx_ver
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
from sphinx.util.nodes import set_source_info
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_description_unit(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_description_unit(
directivename="django-admin-option",
rolename="djadminopt",
indextemplate="pair: %s; django-admin command-line option",
parse_node=parse_django_adminopt_node,
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
# register the snippet directive
app.add_directive('snippet', SnippetWithFilename)
# register a node for snippet directive so that the xml parser
# knows how to handle the enter/exit parsing event
app.add_node(snippet_with_filename,
html=(visit_snippet, depart_snippet_literal),
latex=(visit_snippet_latex, depart_snippet_latex),
man=(visit_snippet_literal, depart_snippet_literal),
text=(visit_snippet_literal, depart_snippet_literal),
texinfo=(visit_snippet_literal, depart_snippet_literal))
class snippet_with_filename(nodes.literal_block):
"""
Subclass the literal_block to override the visit/depart event handlers
"""
pass
def visit_snippet_literal(self, node):
"""
default literal block handler
"""
self.visit_literal_block(node)
def depart_snippet_literal(self, node):
"""
default literal block handler
"""
self.depart_literal_block(node)
def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang,
warn=warner,
linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s' % lang)
self.body.append(starttag)
    self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode
def visit_snippet_latex(self, node):
"""
Latex document generator visit handler
"""
self.verbatim = ''
def depart_snippet_latex(self, node):
"""
Latex document generator depart handler.
"""
code = self.verbatim.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.curfilestack[-1], node.line))
hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
linenos=linenos,
**highlight_args)
self.body.append('\n{\\colorbox[rgb]{0.9,0.9,0.9}'
'{\\makebox[\\textwidth][l]'
'{\\small\\texttt{%s}}}}\n' % (fname,))
if self.table:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{OriginalVerbatim}')
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
hlcode = hlcode.rstrip() + '\n'
self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
(self.table and 'Original' or ''))
self.verbatim = None
class SnippetWithFilename(Directive):
"""
    The 'snippet' directive, which allows adding the (optional) filename
of a code snippet in the document. This is modeled after CodeBlock.
"""
has_content = True
optional_arguments = 1
option_spec = {'filename': directives.unchanged_required}
def run(self):
code = '\n'.join(self.content)
literal = snippet_with_filename(code, code)
if self.arguments:
literal['language'] = self.arguments[0]
literal['filename'] = self.options['filename']
set_source_info(self, literal)
return [literal]
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
def depart_desc_parameterlist(self, node):
self.body.append(')')
if sphinx_ver < '1.0.8':
#
# Don't apply smartypants to literal blocks
#
def visit_literal_block(self, node):
self.no_smarty += 1
SmartyPantsHTMLTranslator.visit_literal_block(self, node)
def depart_literal_block(self, node):
SmartyPantsHTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'deprecated': 'Deprecated in Django %s',
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
title = "%s%s" % (
self.version_text[node['type']] % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
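# A hypothetical reST usage sketch for the 'snippet' directive registered
# above (the snippet body is invented for illustration; the directive
# name, the optional language argument and the required :filename: option
# come from SnippetWithFilename):
#
#     .. snippet:: python
#         :filename: myapp/models.py
#
#         class Example(models.Model):
#             pass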
|
mikebryant/django-autoconfig
|
refs/heads/master
|
django_autoconfig/app_settings.py
|
1
|
'''Application settings for django-autoconfig.'''
from django.conf import settings
#: Extra URLs for autourlconf
AUTOCONFIG_EXTRA_URLS = getattr(settings, 'AUTOCONFIG_EXTRA_URLS', ())
#: A list/tuple of apps to exclude from the autourlconf
AUTOCONFIG_URLCONF_EXCLUDE_APPS = getattr(settings, 'AUTOCONFIG_URLCONF_EXCLUDE_APPS', ())
#: A view name (suitable for reverse()) that the base / will redirect to.
AUTOCONFIG_INDEX_VIEW = getattr(settings, 'AUTOCONFIG_INDEX_VIEW', None)
#: A dictionary from app name to the prefix it should be mapped to
#: The default for each app is the app name itself, with _ replaced by -
AUTOCONFIG_URL_PREFIXES = getattr(settings, 'AUTOCONFIG_URL_PREFIXES', {})
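# A brief sketch (an assumption about typical usage, not part of this
# module) of how a project might override these in its settings.py:
#
#     AUTOCONFIG_INDEX_VIEW = 'home'
#     AUTOCONFIG_URL_PREFIXES = {'my_app': 'custom-prefix'}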
|
potash/scikit-learn
|
refs/heads/master
|
examples/covariance/plot_robust_vs_empirical_covariance.py
|
73
|
r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, which has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
        # draw offsets from the seeded rng so each repetition is reproducible
        outliers_offset = 10. * \
            (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
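# A minimal standalone sketch (not part of the original example) of the two
# estimators compared above; the contamination scheme here is illustrative.
def _mcd_vs_empirical_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    X[:10] += 10.  # shift the first ten observations to act as outliers
    robust_err = MinCovDet().fit(X).error_norm(np.eye(5))
    empirical_err = EmpiricalCovariance().fit(X).error_norm(np.eye(5))
    return robust_err, empirical_err  # robust_err should be much smaller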
|
romankagan/DDBWorkbench
|
refs/heads/master
|
python/testData/refactoring/introduceVariable/multilineString.py
|
83
|
for line in <selection>"""A
B
C"""</selection>:
print line
|
Linutux/Gourmet
|
refs/heads/master
|
gourmet/exporters/recipe_emailer.py
|
7
|
#!/usr/bin/env python
import urllib, StringIO, os.path
import exporter, html_exporter, pdf_exporter
from gourmet import gglobals
from gettext import gettext as _
import gourmet.gtk_extras.dialog_extras as de
from gourmet.gdebug import debug
class StringIOfaker (StringIO.StringIO):
def __init__ (self, *args, **kwargs):
StringIO.StringIO.__init__(self, *args, **kwargs)
def close (self, *args):
pass
def close_really (self):
StringIO.StringIO.close(self)
class Emailer:
    def __init__ (self, emailaddress=None, subject=None, body=None, attachments=None):
        self.emailaddress=emailaddress
        self.subject=subject
        self.body=body
        # avoid a shared mutable default for attachments
        self.attachments=attachments if attachments is not None else []
self.connector_string = "?"
def send_email (self):
self.url = "mailto:"
if self.emailaddress: self.url += self.emailaddress
if self.subject:
self.url_append('subject',self.subject)
if self.body:
self.url_append('body',self.body)
for a in self.attachments:
self.url_append('attachment',a)
debug('launching URL %s'%self.url,0)
gglobals.launch_url(self.url)
def url_append (self, attr, value):
self.url += "%s%s=%s"%(self.connector(),attr,urllib.quote(value.encode('utf-8','replace')))
def connector (self):
retval = self.connector_string
self.connector_string = "&"
return retval
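# A hedged usage sketch (not in the original module): build the mailto: URL
# the way send_email() does, without launching it. The address is made up.
def _emailer_url_sketch():
    e = Emailer(emailaddress='someone@example.com',
                subject='Dinner', body='See the attached recipe')
    e.url = "mailto:" + e.emailaddress  # mirrors the start of send_email()
    e.url_append('subject', e.subject)  # first call uses '?', later ones '&'
    e.url_append('body', e.body)
    return e.url  # mailto:someone@example.com?subject=Dinner&body=See%20...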
class RecipeEmailer (Emailer):
def __init__ (self, recipes, rd, conv=None, change_units=True):
Emailer.__init__(self)
self.recipes = recipes
self.rd = rd
self.conv = conv
self.change_units=change_units
if len(recipes) > 1:
self.subject = _("Recipes")
elif recipes:
self.subject = recipes[0].title
def write_email_text (self):
s = StringIOfaker()
first = True
e=exporter.ExporterMultirec(self.rd,
self.recipes,
s,
conv=self.conv,
padding="\n\n-----\n")
e.run()
if not self.body: self.body=""
self.body += s.getvalue()
s.close_really()
def write_email_html (self):
for r in self.recipes:
fi = os.path.join(gglobals.tmpdir,"%s.htm"%r.title)
ofi = open(fi,'w')
e=html_exporter.html_exporter(self.rd,
r,
ofi,
conv=self.conv,
embed_css=True,
imagedir="")
ofi.close()
self.attachments.append(fi)
for i in e.images:
self.attachments.append(i)
def write_email_pdf (self):
prefs = pdf_exporter.get_pdf_prefs()
for r in self.recipes:
fi = os.path.join(gglobals.tmpdir,"%s.pdf"%r.title)
ofi = open(fi,'w')
e = pdf_exporter.PdfExporter(self.rd,
r,
ofi,
conv=self.conv,
change_units=self.change_units,
pdf_args=prefs)
ofi.close()
self.attachments.append(fi)
def send_email_html (self, emailaddress=None, include_plain_text=True):
if include_plain_text: self.write_email_text()
else: self.body = None
if emailaddress: self.emailaddress=emailaddress
self.write_email_html()
self.send_email()
def send_email_text (self, emailaddress=None):
if emailaddress: self.emailaddress=emailaddress
self.write_email_text()
self.send_email()
class EmailerDialog (RecipeEmailer):
def __init__ (self, recipes, rd, prefs, conv=None):
RecipeEmailer.__init__(self, recipes, rd, conv=conv, change_units=prefs.get('readableUnits',True))
self.prefs = prefs
self.option_list = {'':''}
self.options = {
_('Include Recipe in Body of E-mail (A good idea no matter what)'):('email_include_body',True),
_('E-mail Recipe as HTML Attachment'):('email_include_html',False),
_('E-mail Recipe as PDF Attachment'):('email_include_pdf',True),
}
self.option_list = []
self.email_options = {}
for k,v in self.options.items():
            self.email_options[v[0]]=self.prefs.get(*v)
self.option_list.append([k,self.email_options[v[0]]])
def dont_ask_cb (self, widget, *args):
if widget.get_active():
self.prefs['emailer_dont_ask']=True
else:
self.prefs['emailer_dont_ask']=False
def setup_dialog (self, force = False):
if force or not self.prefs.get('emailer_dont_ask',False):
d=de.PreferencesDialog(options=self.option_list,
option_label=_("Email Options"),
value_label="",
dont_ask_cb=self.dont_ask_cb,
dont_ask_custom_text=_("Don't ask before sending e-mail."))
retlist = d.run()
if retlist:
for o in retlist:
k = o[0]
v = o[1]
pref = self.options[k][0]
self.email_options[pref]=v
self.prefs[pref]=v
def email (self, address=None):
if address: self.emailaddress=address
if self.email_options['email_include_body']:
self.write_email_text()
if self.email_options['email_include_html']:
self.write_email_html()
if self.email_options['email_include_pdf']:
self.write_email_pdf()
        if not (self.email_options['email_include_body']
                or self.email_options['email_include_html']
                or self.email_options['email_include_pdf']):
de.show_message(_("E-mail not sent"),
sublabel=_("You have not chosen to include the recipe in the body of the message or as an attachment.")
)
else:
self.send_email()
if __name__ == '__main__':
import gourmet.recipeManager
rd = gourmet.recipeManager.default_rec_manager()
rec = rd.fetch_one(rd.recipe_table)
ed = EmailerDialog([rec],rd,{})
ed.setup_dialog()
ed.email()
#ed.run()
#e.write_email_text()
#e.write_email_pdf()
#e.write_email_html()
#e.send_email()
|
13916688528/themort
|
refs/heads/master
|
retrieval/mortgages/mc_util.py
|
4
|
# vim: set fileencoding=utf-8
# Utility functions
import mc_db
import main
import urllib2
import html5lib
import re
import string
import themortgagemeter_utils
import themortgagemeter_db
# Takes a string like "5 years fixed" and returns the mortgage type code:
# 'F' for fixed deals, 'T' for tracker/variable/discount deals.
def get_mortgage_type(s,logger):
    str_lower = s.lower()
    res = re.match(r'^.*(fixed|tracker|variable|discount).*$',themortgagemeter_utils.remove_non_ascii(str_lower))
    if res is None:
        logger.critical('unable to determine mortgage_type from str: ' + str_lower)
        themortgagemeter_utils.record_alert('ERROR: unable to determine mortgage_type from str',logger,themortgagemeter_db.db_connection,themortgagemeter_db.cursor)
        return None
    type_str = res.group(1)
    if type_str == 'fixed':
        mortgage_type = 'F'
    elif type_str in ('tracker', 'variable', 'discount'):
        mortgage_type = 'T'
    else:
        # unreachable given the regex alternatives, but kept defensive
        logger.critical('unable to determine mortgage_type from str: ' + type_str)
        themortgagemeter_utils.record_alert('ERROR: unable to determine mortgage_type from str',logger,themortgagemeter_db.db_connection,themortgagemeter_db.cursor)
        mortgage_type = None
    return mortgage_type
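# Hedged examples (inputs are illustrative) of the classification above; a
# real logger from the calling scraper is assumed.
def _mortgage_type_examples(logger):
    assert get_mortgage_type('5 year fixed', logger) == 'F'
    assert get_mortgage_type('2 year tracker', logger) == 'T'
    assert get_mortgage_type('standard variable rate', logger) == 'T'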
def check_data(rate_percent,booking_fee,ltv_percent,apr_percent,initial_period,logger):
# Now we check that the values we have are the right type:
if themortgagemeter_utils.isnumber(rate_percent) != True:
logger.critical('problem with rate_percent:' + rate_percent)
themortgagemeter_utils.record_alert('ERROR: problem with rate_percent',logger,themortgagemeter_db.db_connection,themortgagemeter_db.cursor)
exit()
elif booking_fee.isdigit() != True:
logger.critical('problem with booking_fee:' + booking_fee)
themortgagemeter_utils.record_alert('ERROR: problem with booking_fee',logger,themortgagemeter_db.db_connection,themortgagemeter_db.cursor)
exit()
elif themortgagemeter_utils.isnumber(ltv_percent) != True:
logger.critical('problem with ltv_percent: ' + ltv_percent)
themortgagemeter_utils.record_alert('ERROR: problem with ltv_percent',logger,themortgagemeter_db.db_connection,themortgagemeter_db.cursor)
exit()
elif themortgagemeter_utils.isnumber(apr_percent) != True:
logger.critical('problem with apr_percent: ' + apr_percent)
themortgagemeter_utils.record_alert('ERROR: problem with apr_percent',logger,themortgagemeter_db.db_connection,themortgagemeter_db.cursor)
exit()
# mortgage type must be ok
elif str(initial_period).isdigit() != True:
logger.critical('problem with initial_period: ' + str(initial_period))
themortgagemeter_utils.record_alert('ERROR: problem with initial_period',logger,themortgagemeter_db.db_connection,themortgagemeter_db.cursor)
exit()
# Handle the insert of mortgage details, not adding if already there and inserting retrieval record.
#
# percents are expected to be a string, and decimal, eg 4.50
#
def handle_mortgage_insert(institution_code, mortgage_type, rate_percent, svr_percent, apr_percent, ltv_percent, initial_period, booking_fee, term, url, eligibility, logger):
logger.debug(institution_code)
logger.debug(mortgage_type)
logger.debug(rate_percent)
logger.debug(svr_percent)
logger.debug(apr_percent)
logger.debug(ltv_percent)
logger.debug(initial_period)
logger.debug(booking_fee)
logger.debug(term)
logger.debug(url)
logger.debug(eligibility)
check_data(rate_percent,booking_fee,ltv_percent,apr_percent,initial_period,logger)
rate_percent_int = int(float(rate_percent) * 100)
apr_percent_int = int(float(apr_percent) * 100)
ltv_percent_int = int(float(ltv_percent) * 100)
svr_percent_int = int(float(svr_percent) * 100)
if mc_db.is_mortgage_there(institution_code, mortgage_type, rate_percent_int, svr_percent_int, apr_percent_int, ltv_percent_int, initial_period, booking_fee, term, eligibility) == 0:
logger.debug('Mortgage being added')
mortgage_id = mc_db.insert_mortgage(institution_code, mortgage_type, rate_percent_int, svr_percent_int, apr_percent_int, ltv_percent_int, initial_period, booking_fee, term, eligibility)
main.update_changes(True,institution_code,logger)
else:
logger.debug('Mortgage already there')
mortgage_id = mc_db.get_mortgage_id(institution_code, mortgage_type, rate_percent_int, svr_percent_int, apr_percent_int, ltv_percent_int, initial_period, booking_fee, term, eligibility)
# Get the url id:
url_id = mc_db.get_url_id_insert_if_there(url)
mc_db.update_jrnl(main.today,mortgage_id,url_id,institution_code)
# Returns a basic eligibility dict
# See conversions.py in shared
# <type> = Truth value of: existing_customer,ftb,moving_home,borrowing_more,remortgage,switching
# NFTB = F,T,F,F,F,F
# NMH = F,F,T,F,F,F
# NRM = F,F,F,F,T,F
# EDE = T,F,F,F,T,F
# EMH = T,F,T,F,F,F
# EBM = T,F,F,T,F,F
# EED = T,F,F,F,F,T
# Return a raw mortgage eligibility dict
def get_mortgage_eligibility_dict():
return {'existing_customer' : 'F', 'ftb' : 'F', 'moving_home' : 'F', 'borrowing_more' : 'F', 'remortgage' : 'F', 'switching' : 'F'}
# Takes a list of eligibility data - and returns list of matching eligibilities.
# eg if a deal is open to existing and new customers, it will return the E* and N* codes
# eg if a deal if true for all it will return all codes.
# B = "Both"
# T = "True"
# F = "False"
def validate_eligibility_dict(eligibility_dict,list_so_far):
#print eligibility_dict
#print list_so_far
for key in eligibility_dict.keys():
if eligibility_dict[key] == 'B':
a = eligibility_dict.copy()
b = eligibility_dict.copy()
a[key] = 'T'
b[key] = 'F'
return validate_eligibility_dict(a,list_so_far) + validate_eligibility_dict(b,list_so_far)
if eligibility_dict == {'existing_customer' : 'F', 'ftb' : 'T', 'moving_home' : 'F', 'borrowing_more' : 'F', 'remortgage' : 'F', 'switching' : 'F'}:
return list_so_far + ['NFTB']
elif eligibility_dict == {'existing_customer' : 'F', 'ftb' : 'F', 'moving_home' : 'T', 'borrowing_more' : 'F', 'remortgage' : 'F', 'switching' : 'F'}:
return list_so_far + ['NMH']
elif eligibility_dict == {'existing_customer' : 'F', 'ftb' : 'F', 'moving_home' : 'F', 'borrowing_more' : 'F', 'remortgage' : 'T', 'switching' : 'F'}:
return list_so_far + ['NRM']
elif eligibility_dict == {'existing_customer' : 'T', 'ftb' : 'F', 'moving_home' : 'F', 'borrowing_more' : 'F', 'remortgage' : 'T', 'switching' : 'F'}:
return list_so_far + ['EDE']
elif eligibility_dict == {'existing_customer' : 'T', 'ftb' : 'F', 'moving_home' : 'T', 'borrowing_more' : 'F', 'remortgage' : 'F', 'switching' : 'F'}:
return list_so_far + ['EMH']
elif eligibility_dict == {'existing_customer' : 'T', 'ftb' : 'F', 'moving_home' : 'F', 'borrowing_more' : 'T', 'remortgage' : 'F', 'switching' : 'F'}:
return list_so_far + ['EBM']
elif eligibility_dict == {'existing_customer' : 'T', 'ftb' : 'F', 'moving_home' : 'F', 'borrowing_more' : 'F', 'remortgage' : 'F', 'switching' : 'T'}:
return list_so_far + ['EED']
else:
return list_so_far
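# Hedged example: a deal open to both existing and new remortgagers ('B' on
# existing_customer) expands to both the new and existing remortgage codes.
def _eligibility_example():
    d = get_mortgage_eligibility_dict()
    d['remortgage'] = 'T'
    d['existing_customer'] = 'B'
    return validate_eligibility_dict(d, [])  # -> ['EDE', 'NRM']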
|
gminds/rapidnewsng
|
refs/heads/master
|
django/db/backends/postgresql_psycopg2/base.py
|
103
|
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import logging
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
from django.db.backends.postgresql_psycopg2.client import DatabaseClient
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.encoding import force_str
from django.utils.safestring import SafeText, SafeBytes
from django.utils import six
from django.utils.timezone import utc
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
logger = logging.getLogger('django.db.backends')
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
class CursorWrapper(object):
"""
A thin wrapper around psycopg2's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
"""
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
except Database.DatabaseError as e:
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
except Database.DatabaseError as e:
six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = True
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
has_select_for_update = True
has_select_for_update_nowait = True
has_bulk_insert = True
supports_tablespaces = True
supports_transactions = True
can_distinct_on_fields = True
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
if autocommit:
level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
else:
level = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
self._set_isolation_level(level)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
self._pg_version = None
def check_constraints(self, table_names=None):
"""
        To check constraints, we set them to immediate. Then, when we're
        done, we must ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def close(self):
self.validate_thread_sharing()
if self.connection is None:
return
try:
self.connection.close()
self.connection = None
except Database.Error:
# In some cases (database restart, network connection lost etc...)
# the connection to the database is lost without giving Django a
# notification. If we don't set self.connection to None, the error
            # will occur at every request.
self.connection = None
logger.warning('psycopg2 error while closing the connection.',
exc_info=sys.exc_info()
)
raise
def _get_pg_version(self):
if self._pg_version is None:
self._pg_version = get_version(self.connection)
return self._pg_version
pg_version = property(_get_pg_version)
def _cursor(self):
settings_dict = self.settings_dict
if self.connection is None:
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
self.connection = Database.connect(**conn_params)
self.connection.set_client_encoding('UTF8')
tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
if tz:
try:
get_parameter_status = self.connection.get_parameter_status
except AttributeError:
# psycopg2 < 2.0.12 doesn't have get_parameter_status
conn_tz = None
else:
conn_tz = get_parameter_status('TimeZone')
if conn_tz != tz:
# Set the time zone in autocommit mode (see #17062)
self.connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.connection.cursor().execute(
self.ops.set_time_zone_sql(), [tz])
self.connection.set_isolation_level(self.isolation_level)
self._get_pg_version()
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return CursorWrapper(cursor)
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in range(5)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError as e:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
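# A minimal sketch (not part of the backend) of the reraise pattern used by
# CursorWrapper and _commit above: trap the driver exception and rethrow it
# as the matching Django-level type, preserving the original traceback.
def _reraise_as_django_error(cursor, query, args=None):
    try:
        return cursor.execute(query, args)
    except Database.IntegrityError as e:
        six.reraise(utils.IntegrityError,
                    utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
    except Database.DatabaseError as e:
        six.reraise(utils.DatabaseError,
                    utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])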
|
Tehsmash/ironic
|
refs/heads/staging/kiloplus
|
ironic/tests/db/utils.py
|
2
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic test utilities."""
from oslo.utils import timeutils
from ironic.common import states
from ironic.db import api as db_api
def get_test_ipmi_info():
return {
"ipmi_address": "1.2.3.4",
"ipmi_username": "admin",
"ipmi_password": "fake"
}
def get_test_ipmi_bridging_parameters():
return {
"ipmi_bridging": "dual",
"ipmi_local_address": "0x20",
"ipmi_transit_channel": "0",
"ipmi_transit_address": "0x82",
"ipmi_target_channel": "7",
"ipmi_target_address": "0x72"
}
def get_test_ssh_info(auth_type='password'):
result = {
"ssh_address": "1.2.3.4",
"ssh_username": "admin",
"ssh_port": 22,
"ssh_virt_type": "vbox",
}
if 'password' == auth_type:
result['ssh_password'] = 'fake'
elif 'file' == auth_type:
result['ssh_key_filename'] = '/not/real/file'
elif 'key' == auth_type:
result['ssh_key_contents'] = '--BEGIN PRIVATE ...blah'
elif 'too_many' == auth_type:
result['ssh_password'] = 'fake'
result['ssh_key_filename'] = '/not/real/file'
else:
# No auth details (is invalid)
pass
return result
def get_test_pxe_driver_info():
return {
"pxe_deploy_kernel": "glance://deploy_kernel_uuid",
"pxe_deploy_ramdisk": "glance://deploy_ramdisk_uuid",
}
def get_test_pxe_instance_info():
return {
"image_source": "glance://image_uuid",
"root_gb": 100,
}
def get_test_seamicro_info():
return {
"seamicro_api_endpoint": "http://1.2.3.4",
"seamicro_username": "admin",
"seamicro_password": "fake",
"seamicro_server_id": "0/0",
}
def get_test_ilo_info():
return {
"ilo_address": "1.2.3.4",
"ilo_username": "admin",
"ilo_password": "fake",
}
def get_test_drac_info():
return {
"drac_host": "1.2.3.4",
"drac_port": "443",
"drac_path": "/wsman",
"drac_protocol": "https",
"drac_username": "admin",
"drac_password": "fake",
}
def get_test_agent_instance_info():
return {
'image_source': 'fake-image',
'image_url': 'http://image',
'image_checksum': 'checksum',
'image_disk_format': 'qcow2',
'image_container_format': 'bare',
}
def get_test_agent_driver_info():
return {
'agent_url': 'http://127.0.0.1/foo',
'deploy_kernel': 'glance://deploy_kernel_uuid',
'deploy_ramdisk': 'glance://deploy_ramdisk_uuid',
}
def get_test_iboot_info():
return {
"iboot_address": "1.2.3.4",
"iboot_username": "admin",
"iboot_password": "fake",
}
def get_test_snmp_info(**kw):
result = {
"snmp_driver": kw.get("snmp_driver", "teltronix"),
"snmp_address": kw.get("snmp_address", "1.2.3.4"),
"snmp_port": kw.get("snmp_port", "161"),
"snmp_outlet": kw.get("snmp_outlet", "1"),
"snmp_version": kw.get("snmp_version", "1")
}
if result["snmp_version"] in ("1", "2c"):
result["snmp_community"] = kw.get("snmp_community", "public")
elif result["snmp_version"] == "3":
result["snmp_security"] = kw.get("snmp_security", "public")
return result
def get_test_node(**kw):
properties = {
"cpu_arch": "x86_64",
"cpus": "8",
"local_gb": "10",
"memory_mb": "4096",
}
fake_info = {"foo": "bar"}
return {
'id': kw.get('id', 123),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'),
'chassis_id': kw.get('chassis_id', 42),
'conductor_affinity': kw.get('conductor_affinity', None),
'power_state': kw.get('power_state', states.NOSTATE),
'target_power_state': kw.get('target_power_state', states.NOSTATE),
'provision_state': kw.get('provision_state', states.NOSTATE),
'target_provision_state': kw.get('target_provision_state',
states.NOSTATE),
'provision_updated_at': kw.get('provision_updated_at'),
'last_error': kw.get('last_error'),
'instance_uuid': kw.get('instance_uuid'),
'instance_info': kw.get('instance_info', fake_info),
'driver': kw.get('driver', 'fake'),
'driver_info': kw.get('driver_info', fake_info),
'properties': kw.get('properties', properties),
'reservation': kw.get('reservation'),
'maintenance': kw.get('maintenance', False),
'maintenance_reason': kw.get('maintenance_reason'),
'console_enabled': kw.get('console_enabled', False),
'extra': kw.get('extra', {}),
'updated_at': kw.get('updated_at'),
'created_at': kw.get('created_at'),
}
def create_test_node(**kw):
"""Create test node entry in DB and return Node DB object.
Function to be used to create test Node objects in the database.
:param kw: kwargs with overriding values for node's attributes.
:returns: Test Node DB object.
"""
node = get_test_node(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del node['id']
dbapi = db_api.get_instance()
return dbapi.create_node(node)
def get_test_port(**kw):
return {
'id': kw.get('id', 987),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c781'),
'node_id': kw.get('node_id', 123),
'address': kw.get('address', '52:54:00:cf:2d:31'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def create_test_port(**kw):
"""Create test port entry in DB and return Port DB object.
Function to be used to create test Port objects in the database.
:param kw: kwargs with overriding values for port's attributes.
:returns: Test Port DB object.
"""
port = get_test_port(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del port['id']
dbapi = db_api.get_instance()
return dbapi.create_port(port)
def get_test_chassis(**kw):
return {
'id': kw.get('id', 42),
'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'extra': kw.get('extra', {}),
'description': kw.get('description', 'data-center-1-chassis'),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at'),
}
def get_test_conductor(**kw):
return {
'id': kw.get('id', 6),
'hostname': kw.get('hostname', 'test-conductor-node'),
'drivers': kw.get('drivers', ['fake-driver', 'null-driver']),
'created_at': kw.get('created_at', timeutils.utcnow()),
'updated_at': kw.get('updated_at', timeutils.utcnow()),
}
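# Hedged usage sketch (driver name is illustrative): build a test node dict
# with overrides, letting every other attribute fall back to the defaults.
def _test_node_sketch():
    node = get_test_node(driver='pxe_ipmitool',
                         driver_info=get_test_ipmi_info())
    assert node['driver'] == 'pxe_ipmitool'
    assert node['driver_info']['ipmi_address'] == '1.2.3.4'
    return node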
|
toninhofpt/my-first-blog1
|
refs/heads/master
|
myven/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/fields.py
|
200
|
from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
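# A hedged sketch (not part of the module) of the two branches above: ASCII
# values pass through quoted, non-ASCII values fall back to RFC 2231.
def _format_header_param_examples():
    ascii_case = format_header_param('filename', 'report.txt')
    # -> 'filename="report.txt"'
    rfc2231_case = format_header_param('filename', u'r\xe9sum\xe9.txt')
    # -> "filename*=utf-8''r%C3%A9sum%C3%A9.txt"
    return ascii_case, rfc2231_case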
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
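# A minimal usage sketch (assumed, not from upstream docs): build a field as
# from_tuples() would and render its multipart headers.
def _request_field_sketch():
    field = RequestField('attachment', 'file contents', filename='notes.txt')
    field.make_multipart(content_type='text/plain')
    # render_headers() yields lines such as:
    #   Content-Disposition: form-data; name="attachment"; filename="notes.txt"
    #   Content-Type: text/plain
    return field.render_headers()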
|
pellaeon/bsd-cloudinit
|
refs/heads/master
|
cloudbaseinit/openstack/common/rpc/amqp.py
|
5
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on
AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
uses AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
import six
from cloudbaseinit.openstack.common import excutils
from cloudbaseinit.openstack.common.gettextutils import _, _LE
from cloudbaseinit.openstack.common import local
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.openstack.common.rpc import common as rpc_common
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_name='rabbit_durable_queues',
deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug('Pool creating new connection')
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
    The class also catches when an instance of this class is about to be
    deleted. With that we can return Connections to the pool on exceptions
    and so forth without making the caller responsible for catching them.
    If possible the class makes sure to return a connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
ack_on_error=True):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name,
ack_on_error)
def consume_in_thread(self):
return self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
"""Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % self._call_waiters)
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
LOG.warn(_('Number of call waiters is greater than warning '
'threshold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshold)
self._num_call_waiters_wrn_threshold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
msg = {'result': reply, 'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
# Otherwise use the msg_id for backward compatibility.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
if isinstance(context, dict):
context_d = dict([('_context_%s' % key, value)
for (key, value) in six.iteritems(context)])
else:
context_d = dict([('_context_%s' % key, value)
for (key, value) in
six.iteritems(context.to_dict())])
msg.update(context_d)
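# Illustrative sketch (values are hypothetical): pack_context() flattens the
# context into '_context_*' keys stored alongside the message payload.
def _pack_context_sketch():
    msg = {'method': 'echo', 'args': {'value': 42}}
    pack_context(msg, {'user_id': 'u1', 'project_id': 'p1'})
    # msg now also contains '_context_user_id' and '_context_project_id'
    return msg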
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
# NOTE: This value is considered can be a configuration item, but
# it is not necessary to change its value in most cases,
# so let this value as static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
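# Hedged sketch of the duplicate check above: a second delivery of the same
# UNIQUE_ID raises DuplicateMessageError.
def _msg_id_cache_sketch():
    cache = _MsgIdCache()
    message = {UNIQUE_ID: 'abc123', 'method': 'echo'}
    cache.check_duplicate_message(message)       # first sight: accepted
    try:
        cache.check_duplicate_message(message)   # replay: rejected
    except rpc_common.DuplicateMessageError:
        return True
    return False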
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug('UNIQUE_ID is %s.' % (unique_id))
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager.
Used by the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback.
Allows it to be invoked in a green thread.
"""
def __init__(self, conf, callback, connection_pool,
wait_for_consumers=False):
"""Initiates CallbackWrapper object.
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
:param wait_for_consumers: wait for all green threads to
complete and raise the last
caught exception, if any.
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
self.wait_for_consumers = wait_for_consumers
self.exc_info = None
def _wrap(self, message_data, **kwargs):
"""Wrap the callback invocation to catch exceptions.
"""
try:
self.callback(message_data, **kwargs)
except Exception:
self.exc_info = sys.exc_info()
def __call__(self, message_data):
self.exc_info = None
self.pool.spawn_n(self._wrap, message_data)
if self.wait_for_consumers:
self.pool.waitall()
if self.exc_info:
                # re-raise the saved exception with its original traceback
                six.reraise(*self.exc_info)
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, 'received %s', message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version')
namespace = message_data.get('namespace')
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method,
namespace, args)
def _process_data(self, ctxt, version, method, namespace, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, namespace,
**args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug('Expected exception during message handling (%s)' %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_LE('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending' flag."""
if self._done:
            return
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
                return
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection."""
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug('Making synchronous call on %s ...', topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug('MSG_ID is %s' % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug('Making asynchronous cast on %s...', topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug('Making asynchronous fanout cast...')
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug('Sending %(event_type)s on %(topic)s',
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
return conf.control_exchange
|
gauribhoite/personfinder
|
refs/heads/master
|
rtd/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py
|
2754
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
            # we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
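    # Worked example (an editorial illustration with assumed counts): with
    # 1000 total characters, 800 of them in the frequent set, and a typical
    # distribution ratio of 0.75, r = 800 / (200 * 0.75) ~= 5.33, which is
    # above SURE_YES and therefore clamped to 0.99 by the branch above.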
def got_enough_data(self):
        # It is not necessary to receive all data to draw a conclusion.
        # For charset detection, a certain amount of data is enough.
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
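# Worked example for the Big5 order arithmetic above (an editorial note, not
# upstream code): the pair 0xA4 0x40 maps to 157*(0xA4-0xA4) + 0x40 - 0x40 = 0,
# while 0xA4 0xA1 falls in the high second-byte row: 157*0 + 0xA1 - 0xA1 + 63 = 63.
# Each lead byte thus owns 157 slots: 63 for 0x40-0x7E plus 94 for 0xA1-0xFE.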
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
        # for sjis encoding, we are interested
        # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xef
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
            return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
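# Minimal usage sketch (an editorial addition, not part of upstream chardet;
# the sample bytes are an illustrative assumption):
def _demo_distribution_analysis():
    analyser = EUCKRDistributionAnalysis()
    for _ in range(10):
        # '\xb0\xa1' encodes U+AC00 (the first EUC-KR hangul syllable)
        analyser.feed(b'\xb0\xa1', 2)
    # returns a float between SURE_NO (0.01) and SURE_YES (0.99)
    return analyser.get_confidence()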
|
lavvy/xbmc
|
refs/heads/master
|
lib/gtest/xcode/Scripts/versiongenerate.py
|
3088
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
     surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line arguments (the input directory and the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
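# Illustrative check (an editorial sketch with an assumed AC_INIT line): for a
# configure.ac beginning
#     AC_INIT([Google C++ Testing Framework], [1.7.0], [gtest@example.com])
# the regex groups would be ('1', '7', '0'), giving major=1, minor=7, fix=0.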
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
|
zmalltalker/pytrafikanten
|
refs/heads/master
|
setup.py
|
1
|
from distutils.core import setup
import trafikanten
author, email = trafikanten.__author__[:-1].split(' <')
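# The line above assumes __author__ is formatted "Full Name <user@host>" (an
# editorial note): [:-1] strips the trailing ">" and ' <' splits the name from
# the address, e.g. "Jane Doe <jane@example.com>" -> ("Jane Doe", "jane@example.com").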
setup(name='trafikanten',
version=trafikanten.__version__,
description="Interface for realtime data from Norwegian public transportation",
long_description=trafikanten.__doc__,
author=author,
author_email=email,
url=trafikanten.__url__,
packages=['trafikanten'],
license=trafikanten.__license__,
scripts=['scripts/tf_search', 'scripts/tf_realtime'],
classifiers=[
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
],
)
|
synasius/django
|
refs/heads/master
|
tests/unmanaged_models/tests.py
|
23
|
from __future__ import unicode_literals
from django.db import connection
from django.test import TestCase
from .models import A01, A02, B01, B02, C01, C02, Managed1, Unmanaged2
class SimpleTests(TestCase):
def test_simple(self):
"""
        The main test here is that all the models can be created without
        any database errors. We can also do some simple insertion and
        lookup tests while we're here to show that the second set of models
        does refer to the tables from the first set.
"""
# Insert some data into one set of models.
a = A01.objects.create(f_a="foo", f_b=42)
B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
c = C01.objects.create(f_a="barney", f_b=1)
c.mm_a.set([a])
# ... and pull it out via the other set.
a2 = A02.objects.all()[0]
self.assertIsInstance(a2, A02)
self.assertEqual(a2.f_a, "foo")
b2 = B02.objects.all()[0]
self.assertIsInstance(b2, B02)
self.assertEqual(b2.f_a, "fred")
self.assertIsInstance(b2.fk_a, A02)
self.assertEqual(b2.fk_a.f_a, "foo")
self.assertEqual(list(C02.objects.filter(f_a=None)), [])
resp = list(C02.objects.filter(mm_a=a.id))
self.assertEqual(len(resp), 1)
self.assertIsInstance(resp[0], C02)
self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):
def test_many_to_many_between_unmanaged(self):
"""
The intermediary table between two unmanaged models should not be created.
"""
table = Unmanaged2._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertNotIn(table, tables, "Table '%s' should not exist, but it does." % table)
def test_many_to_many_between_unmanaged_and_managed(self):
"""
An intermediary table between a managed and an unmanaged model should be created.
"""
table = Managed1._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertIn(table, tables, "Table '%s' does not exist." % table)
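# For reference (a hedged sketch, not part of this test module): the unmanaged
# models imported above are declared with Meta.managed = False, so Django never
# creates or drops their tables. The related model and field shown here are
# illustrative assumptions:
#
#     class Unmanaged2(models.Model):
#         mm = models.ManyToManyField(Unmanaged1)
#
#         class Meta:
#             managed = False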
|
alexthered/kienhoc-platform
|
refs/heads/master
|
common/djangoapps/student/models.py
|
9
|
"""
Models for User Information (students, staff, etc)
Migration Notes
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration student --auto description_of_your_change
3. Add the migration file created in edx-platform/common/djangoapps/student/migrations/
"""
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from functools import total_ordering
import hashlib
from importlib import import_module
import json
import logging
from pytz import UTC
from urllib import urlencode
import uuid
import analytics
from config_models.models import ConfigurationModel
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db import models, IntegrityError, transaction
from django.db.models import Count
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver, Signal
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_noop
from django.core.cache import cache
from django_countries.fields import CountryField
import dogstats_wrapper as dog_stats_api
from eventtracking import tracker
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from simple_history.models import HistoricalRecords
from south.modelsinspector import add_introspection_rules
from track import contexts
from xmodule_django.models import CourseKeyField, NoneToEmptyManager
from certificates.models import GeneratedCertificate
from course_modes.models import CourseMode
import lms.lib.comment_client as cc
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, ECOMMERCE_DATE_FORMAT
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from util.model_utils import emit_field_changed_events, get_changed_fields_dict
from util.query import use_read_replica_if_available
from util.milestones_helpers import is_entrance_exams_enabled
UNENROLL_DONE = Signal(providing_args=["course_enrollment", "skip_refund"])
log = logging.getLogger(__name__)
AUDIT_LOG = logging.getLogger("audit")
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore # pylint: disable=invalid-name
UNENROLLED_TO_ALLOWEDTOENROLL = 'from unenrolled to allowed to enroll'
ALLOWEDTOENROLL_TO_ENROLLED = 'from allowed to enroll to enrolled'
ENROLLED_TO_ENROLLED = 'from enrolled to enrolled'
ENROLLED_TO_UNENROLLED = 'from enrolled to unenrolled'
UNENROLLED_TO_ENROLLED = 'from unenrolled to enrolled'
ALLOWEDTOENROLL_TO_UNENROLLED = 'from allowed to enroll to unenrolled'
UNENROLLED_TO_UNENROLLED = 'from unenrolled to unenrolled'
DEFAULT_TRANSITION_STATE = 'N/A'
TRANSITION_STATES = (
(UNENROLLED_TO_ALLOWEDTOENROLL, UNENROLLED_TO_ALLOWEDTOENROLL),
(ALLOWEDTOENROLL_TO_ENROLLED, ALLOWEDTOENROLL_TO_ENROLLED),
(ENROLLED_TO_ENROLLED, ENROLLED_TO_ENROLLED),
(ENROLLED_TO_UNENROLLED, ENROLLED_TO_UNENROLLED),
(UNENROLLED_TO_ENROLLED, UNENROLLED_TO_ENROLLED),
(ALLOWEDTOENROLL_TO_UNENROLLED, ALLOWEDTOENROLL_TO_UNENROLLED),
(UNENROLLED_TO_UNENROLLED, UNENROLLED_TO_UNENROLLED),
(DEFAULT_TRANSITION_STATE, DEFAULT_TRANSITION_STATE)
)
class AnonymousUserId(models.Model):
"""
    This table contains user, course_id and anonymous_user_id.
    Its purpose is to provide lookup of a user by anonymous_user_id.
    We generate anonymous_user_id using the md5 algorithm
    and use the result in hex form, so its length is 32 characters.
"""
objects = NoneToEmptyManager()
user = models.ForeignKey(User, db_index=True)
anonymous_user_id = models.CharField(unique=True, max_length=32)
course_id = CourseKeyField(db_index=True, max_length=255, blank=True)
    class Meta(object):
        unique_together = (('user', 'course_id'),)
def anonymous_id_for_user(user, course_id, save=True):
"""
Return a unique id for a (user, course) pair, suitable for inserting
into e.g. personalized survey links.
If user is an `AnonymousUser`, returns `None`
Keyword arguments:
save -- Whether the id should be saved in an AnonymousUserId object.
"""
# This part is for ability to get xblock instance in xblock_noauth handlers, where user is unauthenticated.
if user.is_anonymous():
return None
cached_id = getattr(user, '_anonymous_id', {}).get(course_id)
if cached_id is not None:
return cached_id
# include the secret key as a salt, and to make the ids unique across different LMS installs.
hasher = hashlib.md5()
hasher.update(settings.SECRET_KEY)
hasher.update(unicode(user.id))
if course_id:
hasher.update(course_id.to_deprecated_string().encode('utf-8'))
digest = hasher.hexdigest()
if not hasattr(user, '_anonymous_id'):
user._anonymous_id = {} # pylint: disable=protected-access
user._anonymous_id[course_id] = digest # pylint: disable=protected-access
if save is False:
return digest
try:
anonymous_user_id, __ = AnonymousUserId.objects.get_or_create(
defaults={'anonymous_user_id': digest},
user=user,
course_id=course_id
)
if anonymous_user_id.anonymous_user_id != digest:
log.error(
u"Stored anonymous user id %r for user %r "
u"in course %r doesn't match computed id %r",
user,
course_id,
anonymous_user_id.anonymous_user_id,
digest
)
except IntegrityError:
# Another thread has already created this entry, so
# continue
pass
return digest
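# Equivalent hashing recipe in isolation (an editorial sketch; the secret key,
# user id and course id below are illustrative assumptions, not real values):
#
#     hasher = hashlib.md5()
#     hasher.update('some-secret-key')                # settings.SECRET_KEY as salt
#     hasher.update(u'42')                            # unicode(user.id)
#     hasher.update('edX/Demo/2014'.encode('utf-8'))  # deprecated course id string
#     digest = hasher.hexdigest()                     # 32 hex characters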
def user_by_anonymous_id(uid):
"""
Return user by anonymous_user_id using AnonymousUserId lookup table.
Do not raise `django.ObjectDoesNotExist` exception,
if there is no user for anonymous_student_id,
because this function will be used inside xmodule w/o django access.
"""
if uid is None:
return None
try:
return User.objects.get(anonymoususerid__anonymous_user_id=uid)
except ObjectDoesNotExist:
return None
class UserStanding(models.Model):
"""
This table contains a student's account's status.
Currently, we're only disabling accounts; in the future we can imagine
taking away more specific privileges, like forums access, or adding
more specific karma levels or probationary stages.
"""
ACCOUNT_DISABLED = "disabled"
ACCOUNT_ENABLED = "enabled"
USER_STANDING_CHOICES = (
(ACCOUNT_DISABLED, u"Account Disabled"),
(ACCOUNT_ENABLED, u"Account Enabled"),
)
user = models.ForeignKey(User, db_index=True, related_name='standing', unique=True)
account_status = models.CharField(
blank=True, max_length=31, choices=USER_STANDING_CHOICES
)
changed_by = models.ForeignKey(User, blank=True)
standing_last_changed_at = models.DateTimeField(auto_now=True)
class UserProfile(models.Model):
"""This is where we store all the user demographic fields. We have a
separate table for this rather than extending the built-in Django auth_user.
Notes:
* Some fields are legacy ones from the first run of 6.002, from which
we imported many users.
* Fields like name and address are intentionally open ended, to account
for international variations. An unfortunate side-effect is that we
cannot efficiently sort on last names for instance.
Replication:
* Only the Portal servers should ever modify this information.
* All fields are replicated into relevant Course databases
Some of the fields are legacy ones that were captured during the initial
MITx fall prototype.
"""
# cache key format e.g user.<user_id>.profile.country = 'SG'
PROFILE_COUNTRY_CACHE_KEY = u"user.{user_id}.profile.country"
class Meta(object):
db_table = "auth_userprofile"
# CRITICAL TODO/SECURITY
# Sanitize all fields.
# This is not visible to other users, but could introduce holes later
user = models.OneToOneField(User, unique=True, db_index=True, related_name='profile')
name = models.CharField(blank=True, max_length=255, db_index=True)
meta = models.TextField(blank=True) # JSON dictionary for future expansion
courseware = models.CharField(blank=True, max_length=255, default='course.xml')
# Location is no longer used, but is held here for backwards compatibility
# for users imported from our first class.
language = models.CharField(blank=True, max_length=255, db_index=True)
location = models.CharField(blank=True, max_length=255, db_index=True)
# Optional demographic data we started capturing from Fall 2012
this_year = datetime.now(UTC).year
VALID_YEARS = range(this_year, this_year - 120, -1)
year_of_birth = models.IntegerField(blank=True, null=True, db_index=True)
GENDER_CHOICES = (
('m', ugettext_noop('Male')),
('f', ugettext_noop('Female')),
# Translators: 'Other' refers to the student's gender
('o', ugettext_noop('Other/Prefer Not to Say'))
)
gender = models.CharField(
blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES
)
# [03/21/2013] removed these, but leaving comment since there'll still be
# p_se and p_oth in the existing data in db.
# ('p_se', 'Doctorate in science or engineering'),
# ('p_oth', 'Doctorate in another field'),
LEVEL_OF_EDUCATION_CHOICES = (
('p', ugettext_noop('Doctorate')),
('m', ugettext_noop("Master's or professional degree")),
('b', ugettext_noop("Bachelor's degree")),
('a', ugettext_noop("Associate degree")),
('hs', ugettext_noop("Secondary/high school")),
('jhs', ugettext_noop("Junior secondary/junior high/middle school")),
('el', ugettext_noop("Elementary/primary school")),
# Translators: 'None' refers to the student's level of education
('none', ugettext_noop("No Formal Education")),
# Translators: 'Other' refers to the student's level of education
('other', ugettext_noop("Other Education"))
)
level_of_education = models.CharField(
blank=True, null=True, max_length=6, db_index=True,
choices=LEVEL_OF_EDUCATION_CHOICES
)
mailing_address = models.TextField(blank=True, null=True)
city = models.TextField(blank=True, null=True)
country = CountryField(blank=True, null=True)
goals = models.TextField(blank=True, null=True)
allow_certificate = models.BooleanField(default=1)
bio = models.CharField(blank=True, null=True, max_length=3000, db_index=False)
profile_image_uploaded_at = models.DateTimeField(null=True)
@property
def has_profile_image(self):
"""
Convenience method that returns a boolean indicating whether or not
this user has uploaded a profile image.
"""
return self.profile_image_uploaded_at is not None
@property
def age(self):
""" Convenience method that returns the age given a year_of_birth. """
year_of_birth = self.year_of_birth
year = datetime.now(UTC).year
if year_of_birth is not None:
return year - year_of_birth
@property
def level_of_education_display(self):
""" Convenience method that returns the human readable level of education. """
if self.level_of_education:
return self.__enumerable_to_display(self.LEVEL_OF_EDUCATION_CHOICES, self.level_of_education)
@property
def gender_display(self):
""" Convenience method that returns the human readable gender. """
if self.gender:
return self.__enumerable_to_display(self.GENDER_CHOICES, self.gender)
def get_meta(self): # pylint: disable=missing-docstring
js_str = self.meta
if not js_str:
js_str = dict()
else:
js_str = json.loads(self.meta)
return js_str
def set_meta(self, meta_json): # pylint: disable=missing-docstring
self.meta = json.dumps(meta_json)
def set_login_session(self, session_id=None):
"""
Sets the current session id for the logged-in user.
If session_id doesn't match the existing session,
deletes the old session object.
"""
meta = self.get_meta()
old_login = meta.get('session_id', None)
if old_login:
SessionStore(session_key=old_login).delete()
meta['session_id'] = session_id
self.set_meta(meta)
self.save()
def requires_parental_consent(self, date=None, age_limit=None, default_requires_consent=True):
"""Returns true if this user requires parental consent.
Args:
date (Date): The date for which consent needs to be tested (defaults to now).
age_limit (int): The age limit at which parental consent is no longer required.
This defaults to the value of the setting 'PARENTAL_CONTROL_AGE_LIMIT'.
default_requires_consent (bool): True if users require parental consent if they
have no specified year of birth (default is True).
Returns:
True if the user requires parental consent.
"""
if age_limit is None:
age_limit = getattr(settings, 'PARENTAL_CONSENT_AGE_LIMIT', None)
if age_limit is None:
return False
# Return True if either:
# a) The user has a year of birth specified and that year is fewer years in the past than the limit.
# b) The user has no year of birth specified and the default is to require consent.
#
# Note: we have to be conservative using the user's year of birth as their birth date could be
# December 31st. This means that if the number of years since their birth year is exactly equal
# to the age limit then we have to assume that they might still not be old enough.
year_of_birth = self.year_of_birth
if year_of_birth is None:
return default_requires_consent
if date is None:
age = self.age
else:
age = date.year - year_of_birth
return age <= age_limit
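    # Worked example (editorial, with assumed values): with age_limit=13, a
    # user born in 2005 checked during 2018 has age 13, and 13 <= 13 is True,
    # so consent is still required -- the year-only arithmetic conservatively
    # treats a possible December 31st birthday as not yet passed.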
def __enumerable_to_display(self, enumerables, enum_value):
""" Get the human readable value from an enumerable list of key-value pairs. """
return dict(enumerables)[enum_value]
@classmethod
def country_cache_key_name(cls, user_id):
"""Return cache key name to be used to cache current country.
Args:
user_id(int): Id of user.
Returns:
Unicode cache key
"""
return cls.PROFILE_COUNTRY_CACHE_KEY.format(user_id=user_id)
@receiver(models.signals.post_save, sender=UserProfile)
def invalidate_user_profile_country_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""Invalidate the cache of country in UserProfile model. """
changed_fields = getattr(instance, '_changed_fields', {})
if 'country' in changed_fields:
cache_key = UserProfile.country_cache_key_name(instance.user_id)
cache.delete(cache_key)
log.info("Country changed in UserProfile for %s, cache deleted", instance.user_id)
@receiver(pre_save, sender=UserProfile)
def user_profile_pre_save_callback(sender, **kwargs):
"""
Ensure consistency of a user profile before saving it.
"""
user_profile = kwargs['instance']
# Remove profile images for users who require parental consent
if user_profile.requires_parental_consent() and user_profile.has_profile_image:
user_profile.profile_image_uploaded_at = None
# Cache "old" field values on the model instance so that they can be
# retrieved in the post_save callback when we emit an event with new and
# old field values.
user_profile._changed_fields = get_changed_fields_dict(user_profile, sender)
@receiver(post_save, sender=UserProfile)
def user_profile_post_save_callback(sender, **kwargs):
"""
Emit analytics events after saving the UserProfile.
"""
user_profile = kwargs['instance']
# pylint: disable=protected-access
emit_field_changed_events(
user_profile,
user_profile.user,
sender._meta.db_table,
excluded_fields=['meta']
)
@receiver(pre_save, sender=User)
def user_pre_save_callback(sender, **kwargs):
"""
Capture old fields on the user instance before save and cache them as a
private field on the current model for use in the post_save callback.
"""
user = kwargs['instance']
user._changed_fields = get_changed_fields_dict(user, sender)
@receiver(post_save, sender=User)
def user_post_save_callback(sender, **kwargs):
"""
Emit analytics events after saving the User.
"""
user = kwargs['instance']
# pylint: disable=protected-access
emit_field_changed_events(
user,
user,
sender._meta.db_table,
excluded_fields=['last_login', 'first_name', 'last_name'],
hidden_fields=['password']
)
class UserSignupSource(models.Model):
"""
This table contains information about users registering
via Micro-Sites
"""
user = models.ForeignKey(User, db_index=True)
site = models.CharField(max_length=255, db_index=True)
def unique_id_for_user(user, save=True):
"""
Return a unique id for a user, suitable for inserting into
e.g. personalized survey links.
Keyword arguments:
save -- Whether the id should be saved in an AnonymousUserId object.
"""
# Setting course_id to '' makes it not affect the generated hash,
# and thus produce the old per-student anonymous id
return anonymous_id_for_user(user, None, save=save)
# TODO: Should be renamed to generic UserGroup, and possibly
# Given an optional field for type of group
class UserTestGroup(models.Model):
users = models.ManyToManyField(User, db_index=True)
name = models.CharField(blank=False, max_length=32, db_index=True)
description = models.TextField(blank=True)
class Registration(models.Model):
''' Allows us to wait for e-mail before user is registered. A
registration profile is created when the user creates an
account, but that account is inactive. Once the user clicks
on the activation key, it becomes active. '''
class Meta(object):
db_table = "auth_registration"
user = models.ForeignKey(User, unique=True)
activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
def register(self, user):
# MINOR TODO: Switch to crypto-secure key
self.activation_key = uuid.uuid4().hex
self.user = user
self.save()
def activate(self):
self.user.is_active = True
self.user.save()
class PendingNameChange(models.Model):
user = models.OneToOneField(User, unique=True, db_index=True)
new_name = models.CharField(blank=True, max_length=255)
rationale = models.CharField(blank=True, max_length=1024)
class PendingEmailChange(models.Model):
user = models.OneToOneField(User, unique=True, db_index=True)
new_email = models.CharField(blank=True, max_length=255, db_index=True)
activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
def request_change(self, email):
"""Request a change to a user's email.
Implicitly saves the pending email change record.
Arguments:
email (unicode): The proposed new email for the user.
Returns:
unicode: The activation code to confirm the change.
"""
self.new_email = email
self.activation_key = uuid.uuid4().hex
self.save()
return self.activation_key
EVENT_NAME_ENROLLMENT_ACTIVATED = 'edx.course.enrollment.activated'
EVENT_NAME_ENROLLMENT_DEACTIVATED = 'edx.course.enrollment.deactivated'
EVENT_NAME_ENROLLMENT_MODE_CHANGED = 'edx.course.enrollment.mode_changed'
class PasswordHistory(models.Model):
"""
This model will keep track of past passwords that a user has used
    as well as providing constraints (e.g. can't reuse passwords)
"""
user = models.ForeignKey(User)
password = models.CharField(max_length=128)
time_set = models.DateTimeField(default=timezone.now)
def create(self, user):
"""
This will copy over the current password, if any of the configuration has been turned on
"""
if not (PasswordHistory.is_student_password_reuse_restricted() or
PasswordHistory.is_staff_password_reuse_restricted() or
PasswordHistory.is_password_reset_frequency_restricted() or
PasswordHistory.is_staff_forced_password_reset_enabled() or
PasswordHistory.is_student_forced_password_reset_enabled()):
return
self.user = user
self.password = user.password
self.save()
@classmethod
def is_student_password_reuse_restricted(cls):
"""
Returns whether the configuration which limits password reuse has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE', 0
)
return min_diff_pw > 0
@classmethod
def is_staff_password_reuse_restricted(cls):
"""
Returns whether the configuration which limits password reuse has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE', 0
)
return min_diff_pw > 0
@classmethod
def is_password_reset_frequency_restricted(cls):
"""
Returns whether the configuration which limits the password reset frequency has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'
)
return min_days_between_reset
@classmethod
def is_staff_forced_password_reset_enabled(cls):
"""
Returns whether the configuration which forces password resets to occur has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS'
)
return min_days_between_reset
@classmethod
def is_student_forced_password_reset_enabled(cls):
"""
Returns whether the configuration which forces password resets to occur has been turned on
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
min_days_pw_reset = settings.ADVANCED_SECURITY_CONFIG.get(
'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS'
)
return min_days_pw_reset
@classmethod
def should_user_reset_password_now(cls, user):
"""
Returns whether a password has 'expired' and should be reset. Note there are two different
expiry policies for staff and students
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return False
days_before_password_reset = None
if user.is_staff:
if cls.is_staff_forced_password_reset_enabled():
days_before_password_reset = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS']
elif cls.is_student_forced_password_reset_enabled():
days_before_password_reset = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS']
if days_before_password_reset:
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
time_last_reset = None
if history:
# first element should be the last time we reset password
time_last_reset = history[0].time_set
else:
# no history, then let's take the date the user joined
time_last_reset = user.date_joined
now = timezone.now()
delta = now - time_last_reset
return delta.days >= days_before_password_reset
return False
@classmethod
def is_password_reset_too_soon(cls, user):
"""
Verifies that the password is not getting reset too frequently
"""
if not cls.is_password_reset_frequency_restricted():
return False
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
if not history:
return False
now = timezone.now()
delta = now - history[0].time_set
return delta.days < settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
@classmethod
def is_allowable_password_reuse(cls, user, new_password):
"""
Verifies that the password adheres to the reuse policies
"""
if not settings.FEATURES['ADVANCED_SECURITY']:
return True
if user.is_staff and cls.is_staff_password_reuse_restricted():
min_diff_passwords_required = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
elif cls.is_student_password_reuse_restricted():
min_diff_passwords_required = \
settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
else:
min_diff_passwords_required = 0
# just limit the result set to the number of different
# password we need
history = PasswordHistory.objects.filter(user=user).order_by('-time_set')[:min_diff_passwords_required]
for entry in history:
# be sure to re-use the same salt
# NOTE, how the salt is serialized in the password field is dependent on the algorithm
# in pbkdf2_sha256 [LMS] it's the 3rd element, in sha1 [unit tests] it's the 2nd element
hash_elements = entry.password.split('$')
algorithm = hash_elements[0]
if algorithm == 'pbkdf2_sha256':
hashed_password = make_password(new_password, hash_elements[2])
elif algorithm == 'sha1':
hashed_password = make_password(new_password, hash_elements[1])
else:
# This means we got something unexpected. We don't want to throw an exception, but
# log as an error and basically allow any password reuse
AUDIT_LOG.error('''
Unknown password hashing algorithm "{0}" found in existing password
hash, password reuse policy will not be enforced!!!
'''.format(algorithm))
return True
if entry.password == hashed_password:
return False
return True
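    # Hash layout reference (an editorial note; no real digests shown): Django
    # serializes pbkdf2_sha256 passwords as
    #     "pbkdf2_sha256$<iterations>$<salt>$<hash>"
    # so the salt is hash_elements[2], while legacy sha1 entries look like
    #     "sha1$<salt>$<hash>"
    # with the salt at index 1, matching the two branches above.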
class LoginFailures(models.Model):
"""
This model will keep track of failed login attempts
"""
user = models.ForeignKey(User)
failure_count = models.IntegerField(default=0)
lockout_until = models.DateTimeField(null=True)
@classmethod
def is_feature_enabled(cls):
"""
Returns whether the feature flag around this functionality has been set
"""
return settings.FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS']
@classmethod
def is_user_locked_out(cls, user):
"""
        Class method to return whether a given user has his/her account locked out
"""
try:
record = LoginFailures.objects.get(user=user)
if not record.lockout_until:
return False
now = datetime.now(UTC)
until = record.lockout_until
is_locked_out = until and now < until
return is_locked_out
except ObjectDoesNotExist:
return False
@classmethod
def increment_lockout_counter(cls, user):
"""
Ticks the failed attempt counter
"""
record, _ = LoginFailures.objects.get_or_create(user=user)
record.failure_count = record.failure_count + 1
max_failures_allowed = settings.MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED
# did we go over the limit in attempts
if record.failure_count >= max_failures_allowed:
# yes, then store when this account is locked out until
lockout_period_secs = settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS
record.lockout_until = datetime.now(UTC) + timedelta(seconds=lockout_period_secs)
record.save()
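    # Numeric sketch (assumed settings, for illustration only): with
    # MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5 and a lockout period of 900
    # seconds, the fifth consecutive failure sets lockout_until to
    # now + 15 minutes, and is_user_locked_out() returns True until then.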
@classmethod
def clear_lockout_counter(cls, user):
"""
Removes the lockout counters (normally called after a successful login)
"""
try:
entry = LoginFailures.objects.get(user=user)
entry.delete()
except ObjectDoesNotExist:
return
class CourseEnrollmentException(Exception):
pass
class NonExistentCourseError(CourseEnrollmentException):
pass
class EnrollmentClosedError(CourseEnrollmentException):
pass
class CourseFullError(CourseEnrollmentException):
pass
class AlreadyEnrolledError(CourseEnrollmentException):
pass
class CourseEnrollmentManager(models.Manager):
"""
Custom manager for CourseEnrollment with Table-level filter methods.
"""
def num_enrolled_in(self, course_id):
"""
Returns the count of active enrollments in a course.
        'course_id' is the course_id for which to count enrollments
"""
enrollment_number = super(CourseEnrollmentManager, self).get_query_set().filter(
course_id=course_id,
is_active=1
).count()
return enrollment_number
def is_course_full(self, course):
"""
        Returns a boolean value regarding whether a course has already reached its max enrollment
capacity
"""
is_course_full = False
if course.max_student_enrollments_allowed is not None:
is_course_full = self.num_enrolled_in(course.id) >= course.max_student_enrollments_allowed
return is_course_full
def users_enrolled_in(self, course_id):
"""Return a queryset of User for every user enrolled in the course."""
return User.objects.filter(
courseenrollment__course_id=course_id,
courseenrollment__is_active=True
)
def enrollment_counts(self, course_id):
"""
Returns a dictionary that stores the total enrollment count for a course, as well as the
enrollment count for each individual mode.
"""
# Unfortunately, Django's "group by"-style queries look super-awkward
query = use_read_replica_if_available(
super(CourseEnrollmentManager, self).get_query_set().filter(course_id=course_id, is_active=True).values(
'mode').order_by().annotate(Count('mode')))
total = 0
enroll_dict = defaultdict(int)
for item in query:
enroll_dict[item['mode']] = item['mode__count']
total += item['mode__count']
enroll_dict['total'] = total
return enroll_dict
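    # Shape of the result (an editorial example with made-up numbers):
    #     {'honor': 120, 'verified': 30, 'total': 150}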
def enrolled_and_dropped_out_users(self, course_id):
"""Return a queryset of Users in the course."""
return User.objects.filter(
courseenrollment__course_id=course_id
)
class CourseEnrollment(models.Model):
"""
Represents a Student's Enrollment record for a single Course. You should
generally not manipulate CourseEnrollment objects directly, but use the
classmethods provided to enroll, unenroll, or check on the enrollment status
of a given student.
We're starting to consolidate course enrollment logic in this class, but
more should be brought in (such as checking against CourseEnrollmentAllowed,
checking course dates, user permissions, etc.) This logic is currently
scattered across our views.
"""
MODEL_TAGS = ['course_id', 'is_active', 'mode']
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
# If is_active is False, then the student is not considered to be enrolled
# in the course (is_enrolled() will return False)
is_active = models.BooleanField(default=True)
# Represents the modes that are possible. We'll update this later with a
# list of possible values.
mode = models.CharField(default="honor", max_length=100)
objects = CourseEnrollmentManager()
# Maintain a history of requirement status updates for auditing purposes
history = HistoricalRecords()
# cache key format e.g enrollment.<username>.<course_key>.mode = 'honor'
COURSE_ENROLLMENT_CACHE_KEY = u"enrollment.{}.{}.mode"
class Meta(object):
unique_together = (('user', 'course_id'),)
ordering = ('user', 'course_id')
def __init__(self, *args, **kwargs):
super(CourseEnrollment, self).__init__(*args, **kwargs)
# Private variable for storing course_overview to minimize calls to the database.
# When the property .course_overview is accessed for the first time, this variable will be set.
self._course_overview = None
def __unicode__(self):
return (
"[CourseEnrollment] {}: {} ({}); active: ({})"
).format(self.user, self.course_id, self.created, self.is_active)
@classmethod
def get_or_create_enrollment(cls, user, course_key):
"""
Create an enrollment for a user in a class. By default *this enrollment
is not active*. This is useful for when an enrollment needs to go
through some sort of approval process before being activated. If you
don't need this functionality, just call `enroll()` instead.
        Returns a CourseEnrollment object.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
It is expected that this method is called from a method which has already
verified the user authentication and access.
"""
# If we're passing in a newly constructed (i.e. not yet persisted) User,
# save it to the database so that it can have an ID that we can throw
# into our CourseEnrollment object. Otherwise, we'll get an
# IntegrityError for having a null user_id.
assert isinstance(course_key, CourseKey)
if user.id is None:
user.save()
try:
enrollment, created = CourseEnrollment.objects.get_or_create(
user=user,
course_id=course_key,
)
# If we *did* just create a new enrollment, set some defaults
if created:
enrollment.mode = "honor"
enrollment.is_active = False
enrollment.save()
except IntegrityError:
log.info(
(
"An integrity error occurred while getting-or-creating the enrollment"
"for course key %s and student %s. This can occur if two processes try to get-or-create "
"the enrollment at the same time and the database is set to REPEATABLE READ. We will try "
"committing the transaction and retrying."
),
course_key, user
)
transaction.commit()
enrollment = CourseEnrollment.objects.get(
user=user,
course_id=course_key,
)
return enrollment
@classmethod
def get_enrollment(cls, user, course_key):
"""Returns a CoursewareEnrollment object.
Args:
user (User): The user associated with the enrollment.
course_id (CourseKey): The key of the course associated with the enrollment.
Returns:
Course enrollment object or None
"""
try:
return CourseEnrollment.objects.get(
user=user,
course_id=course_key
)
except cls.DoesNotExist:
return None
@classmethod
def is_enrollment_closed(cls, user, course):
"""
Returns a boolean value regarding whether the user has access to enroll in the course. Returns False if the
enrollment has been closed.
"""
# Disable the pylint error here, as per ormsbee. This local import was previously
# in CourseEnrollment.enroll
from courseware.access import has_access # pylint: disable=import-error
return not has_access(user, 'enroll', course)
def update_enrollment(self, mode=None, is_active=None, skip_refund=False):
"""
Updates an enrollment for a user in a class. This includes options
like changing the mode, toggling is_active True/False, etc.
Also emits relevant events for analytics purposes.
This saves immediately.
"""
activation_changed = False
# if is_active is None, then the call to update_enrollment didn't specify
# any value, so just leave is_active as it is
if self.is_active != is_active and is_active is not None:
self.is_active = is_active
activation_changed = True
mode_changed = False
# if mode is None, the call to update_enrollment didn't specify a new
# mode, so leave as-is
if self.mode != mode and mode is not None:
self.mode = mode
mode_changed = True
if activation_changed or mode_changed:
self.save()
if activation_changed:
if self.is_active:
self.emit_event(EVENT_NAME_ENROLLMENT_ACTIVATED)
dog_stats_api.increment(
"common.student.enrollment",
tags=[u"org:{}".format(self.course_id.org),
u"offering:{}".format(self.course_id.offering),
u"mode:{}".format(self.mode)]
)
else:
UNENROLL_DONE.send(sender=None, course_enrollment=self, skip_refund=skip_refund)
self.emit_event(EVENT_NAME_ENROLLMENT_DEACTIVATED)
dog_stats_api.increment(
"common.student.unenrollment",
tags=[u"org:{}".format(self.course_id.org),
u"offering:{}".format(self.course_id.offering),
u"mode:{}".format(self.mode)]
)
if mode_changed:
            # The user's default mode is "honor"; a mode change event will
            # only be emitted when the user's mode changes away from this default.
self.emit_event(EVENT_NAME_ENROLLMENT_MODE_CHANGED)
def emit_event(self, event_name):
"""
Emits an event to explicitly track course enrollment and unenrollment.
"""
try:
context = contexts.course_context_from_course_id(self.course_id)
assert isinstance(self.course_id, CourseKey)
data = {
'user_id': self.user.id,
'course_id': self.course_id.to_deprecated_string(),
'mode': self.mode,
}
with tracker.get_tracker().context(event_name, context):
tracker.emit(event_name, data)
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user_id, event_name, {
'category': 'conversion',
'label': self.course_id.to_deprecated_string(),
'org': self.course_id.org,
'course': self.course_id.course,
'run': self.course_id.run,
'mode': self.mode,
}, context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except: # pylint: disable=bare-except
if event_name and self.course_id:
log.exception(
u'Unable to emit event %s for user %s and course %s',
event_name,
self.user.username, # pylint: disable=no-member
self.course_id,
)
@classmethod
def enroll(cls, user, course_key, mode="honor", check_access=False):
"""
Enroll a user in a course. This saves immediately.
        Returns a CourseEnrollment object.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
        `course_key` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
`mode` is a string specifying what kind of enrollment this is. The
default is 'honor', meaning honor certificate. Other options
include 'professional', 'verified', 'audit',
'no-id-professional' and 'credit'.
See CourseMode in common/djangoapps/course_modes/models.py.
`check_access`: if True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
The default is set to False to avoid breaking legacy code or
code with non-standard flows (ex. beta tester invitations), but
for any standard enrollment flow you probably want this to be True.
Exceptions that can be raised: NonExistentCourseError,
EnrollmentClosedError, CourseFullError, AlreadyEnrolledError. All these
are subclasses of CourseEnrollmentException if you want to catch all of
them in the same way.
It is expected that this method is called from a method which has already
verified the user authentication.
Also emits relevant events for analytics purposes.
"""
# All the server-side checks for whether a user is allowed to enroll.
try:
course = CourseOverview.get_from_id(course_key)
except CourseOverview.DoesNotExist:
# This is here to preserve legacy behavior which allowed enrollment in courses
# announced before the start of content creation.
if check_access:
log.warning(u"User %s failed to enroll in non-existent course %s", user.username, unicode(course_key))
raise NonExistentCourseError
if check_access:
if CourseEnrollment.is_enrollment_closed(user, course):
log.warning(
u"User %s failed to enroll in course %s because enrollment is closed",
user.username,
course_key.to_deprecated_string()
)
raise EnrollmentClosedError
if CourseEnrollment.objects.is_course_full(course):
log.warning(
u"User %s failed to enroll in full course %s",
user.username,
course_key.to_deprecated_string(),
)
raise CourseFullError
if CourseEnrollment.is_enrolled(user, course_key):
log.warning(
u"User %s attempted to enroll in %s, but they were already enrolled",
user.username,
course_key.to_deprecated_string()
)
if check_access:
raise AlreadyEnrolledError
# User is allowed to enroll if they've reached this point.
enrollment = cls.get_or_create_enrollment(user, course_key)
enrollment.update_enrollment(is_active=True, mode=mode)
return enrollment
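    # Typical call site (a hedged sketch; the course id string is an assumed
    # example, and the surrounding view code is omitted):
    #
    #     course_key = SlashSeparatedCourseKey.from_deprecated_string('edX/Demo/2014')
    #     enrollment = CourseEnrollment.enroll(request.user, course_key,
    #                                          mode='honor', check_access=True)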
@classmethod
def enroll_by_email(cls, email, course_id, mode="honor", ignore_errors=True):
"""
Enroll a user in a course given their email. This saves immediately.
Note that enrolling by email is generally done in big batches and the
        error rate is high. For that reason, we suppress User lookup errors by
default.
        Returns a CourseEnrollment object. If the User does not exist and
`ignore_errors` is set to `True`, it will return None.
`email` Email address of the User to add to enroll in the course.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
`mode` is a string specifying what kind of enrollment this is. The
default is "honor", meaning honor certificate. Future options
may include "audit", "verified_id", etc. Please don't use it
until we have these mapped out.
`ignore_errors` is a boolean indicating whether we should suppress
`User.DoesNotExist` errors (returning None) or let it
bubble up.
It is expected that this method is called from a method which has already
verified the user authentication and access.
"""
try:
user = User.objects.get(email=email)
return cls.enroll(user, course_id, mode)
except User.DoesNotExist:
err_msg = u"Tried to enroll email {} into course {}, but user not found"
log.error(err_msg.format(email, course_id))
if ignore_errors:
return None
raise
@classmethod
def unenroll(cls, user, course_id, skip_refund=False):
"""
Remove the user from a given course. If the relevant `CourseEnrollment`
object doesn't exist, we log an error but don't throw an exception.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
`skip_refund` can be set to True to avoid the refund process.
"""
try:
record = CourseEnrollment.objects.get(user=user, course_id=course_id)
record.update_enrollment(is_active=False, skip_refund=skip_refund)
except cls.DoesNotExist:
log.error(
u"Tried to unenroll student %s from %s but they were not enrolled",
user,
course_id
)
@classmethod
def unenroll_by_email(cls, email, course_id):
"""
Unenroll a user from a course given their email. This saves immediately.
User lookup errors are logged but will not throw an exception.
`email` Email address of the User to unenroll from the course.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
"""
try:
user = User.objects.get(email=email)
return cls.unenroll(user, course_id)
except User.DoesNotExist:
log.error(
u"Tried to unenroll email %s from course %s, but user not found",
email,
course_id
)
@classmethod
def is_enrolled(cls, user, course_key):
"""
Returns True if the user is enrolled in the course (the entry must exist
and it must have `is_active=True`). Otherwise, returns False.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
"""
if not user.is_authenticated():
return False
try:
record = CourseEnrollment.objects.get(user=user, course_id=course_key)
return record.is_active
except cls.DoesNotExist:
return False
@classmethod
def is_enrolled_by_partial(cls, user, course_id_partial):
"""
Returns `True` if the user is enrolled in a course that starts with
`course_id_partial`. Otherwise, returns False.
Can be used to determine whether a student is enrolled in a course
whose run name is unknown.
`user` is a Django User object. If it hasn't been saved yet (no `.id`
attribute), this method will automatically save it before
adding an enrollment for it.
`course_id_partial` (CourseKey) is missing the run component
"""
assert isinstance(course_id_partial, CourseKey)
assert not course_id_partial.run # None or empty string
course_key = SlashSeparatedCourseKey(course_id_partial.org, course_id_partial.course, '')
querystring = unicode(course_key.to_deprecated_string())
try:
return CourseEnrollment.objects.filter(
user=user,
course_id__startswith=querystring,
is_active=1
).exists()
except cls.DoesNotExist:
return False
@classmethod
def enrollment_mode_for_user(cls, user, course_id):
"""
Returns the enrollment mode for the given user for the given course
`user` is a Django User object
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
Returns (mode, is_active) where mode is the enrollment mode of the student
and is_active is whether the enrollment is active.
Returns (None, None) if the courseenrollment record does not exist.
"""
try:
record = CourseEnrollment.objects.get(user=user, course_id=course_id)
return (record.mode, record.is_active)
except cls.DoesNotExist:
return (None, None)
@classmethod
def enrollments_for_user(cls, user):
return CourseEnrollment.objects.filter(user=user, is_active=1)
def is_paid_course(self):
"""
        Returns True if the course is paid
"""
paid_course = CourseMode.is_white_label(self.course_id)
if paid_course or CourseMode.is_professional_slug(self.mode):
return True
return False
def activate(self):
"""Makes this `CourseEnrollment` record active. Saves immediately."""
self.update_enrollment(is_active=True)
def deactivate(self):
"""Makes this `CourseEnrollment` record inactive. Saves immediately. An
inactive record means that the student is not enrolled in this course.
"""
self.update_enrollment(is_active=False)
def change_mode(self, mode):
"""Changes this `CourseEnrollment` record's mode to `mode`. Saves immediately."""
self.update_enrollment(mode=mode)
def refundable(self):
"""
For paid/verified certificates, students may receive a refund if they have
a verified certificate and the deadline for refunds has not yet passed.
"""
# In order to support manual refunds past the deadline, set can_refund on this object.
# On unenrolling, the "UNENROLL_DONE" signal calls CertificateItem.refund_cert_callback(),
# which calls this method to determine whether to refund the order.
# This can't be set directly because refunds currently happen as a side-effect of unenrolling.
# (side-effects are bad)
if getattr(self, 'can_refund', None) is not None:
return True
# If the student has already been given a certificate they should not be refunded
if GeneratedCertificate.certificate_for_student(self.user, self.course_id) is not None:
return False
# If it is after the refundable cutoff date they should not be refunded.
refund_cutoff_date = self.refund_cutoff_date()
if refund_cutoff_date and datetime.now(UTC) > refund_cutoff_date:
return False
course_mode = CourseMode.mode_for_course(self.course_id, 'verified')
if course_mode is None:
return False
else:
return True
def refund_cutoff_date(self):
""" Calculate and return the refund window end date. """
try:
attribute = self.attributes.get(namespace='order', name='order_number') # pylint: disable=no-member
except ObjectDoesNotExist:
return None
order_number = attribute.value
order = ecommerce_api_client(self.user).orders(order_number).get()
refund_window_start_date = max(
datetime.strptime(order['date_placed'], ECOMMERCE_DATE_FORMAT),
self.course_overview.start.replace(tzinfo=None)
)
return refund_window_start_date.replace(tzinfo=UTC) + EnrollmentRefundConfiguration.current().refund_window
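    # Timeline sketch (illustrative dates, not real data): if the order was
    # placed 2015-01-01 but the course started 2015-02-01, the window starts
    # at the later date; with a configured refund_window of 14 days, refunds
    # are possible until 2015-02-15 00:00 UTC.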
@property
def username(self):
return self.user.username
@property
def course(self):
# Deprecated. Please use the `course_overview` property instead.
return self.course_overview
@property
def course_overview(self):
"""
Returns a CourseOverview of the course to which this enrollment refers.
Returns None if an error occurred while trying to load the course.
Note:
If the course is re-published within the lifetime of this
CourseEnrollment object, then the value of this property will
become stale.
"""
if not self._course_overview:
try:
self._course_overview = CourseOverview.get_from_id(self.course_id)
except (CourseOverview.DoesNotExist, IOError):
self._course_overview = None
return self._course_overview
def is_verified_enrollment(self):
"""
Check whether the course enrollment mode is verified or not
"""
return CourseMode.is_verified_slug(self.mode)
@classmethod
def is_enrolled_as_verified(cls, user, course_key):
"""
Check whether the course enrollment is for a verified mode.
Arguments:
user (User): The user object.
course_key (CourseKey): The identifier for the course.
Returns: bool
"""
enrollment = cls.get_enrollment(user, course_key)
return (
enrollment is not None and
enrollment.is_active and
enrollment.is_verified_enrollment()
)
@classmethod
def cache_key_name(cls, user_id, course_key):
"""Return cache key name to be used to cache current configuration.
Args:
user_id(int): Id of user.
course_key(unicode): Unicode of course key
Returns:
Unicode cache key
"""
return cls.COURSE_ENROLLMENT_CACHE_KEY.format(user_id, unicode(course_key))
@receiver(models.signals.post_save, sender=CourseEnrollment)
@receiver(models.signals.post_delete, sender=CourseEnrollment)
def invalidate_enrollment_mode_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""Invalidate the cache of CourseEnrollment model. """
cache_key = CourseEnrollment.cache_key_name(
instance.user.id,
unicode(instance.course_id)
)
cache.delete(cache_key)
class ManualEnrollmentAudit(models.Model):
"""
Table for tracking which enrollments were performed through manual enrollment.
"""
enrollment = models.ForeignKey(CourseEnrollment, null=True)
enrolled_by = models.ForeignKey(User, null=True)
enrolled_email = models.CharField(max_length=255, db_index=True)
time_stamp = models.DateTimeField(auto_now_add=True, null=True)
state_transition = models.CharField(max_length=255, choices=TRANSITION_STATES)
reason = models.TextField(null=True)
@classmethod
def create_manual_enrollment_audit(cls, user, email, state_transition, reason, enrollment=None):
"""
Saves the student manual enrollment information
"""
cls.objects.create(
enrolled_by=user,
enrolled_email=email,
state_transition=state_transition,
reason=reason,
enrollment=enrollment
)
@classmethod
def get_manual_enrollment_by_email(cls, email):
"""
If a match exists, returns the most recent entry in the table filtered by email; otherwise returns None.
"""
try:
manual_enrollment = cls.objects.filter(enrolled_email=email).latest('time_stamp')
except cls.DoesNotExist:
manual_enrollment = None
return manual_enrollment
@classmethod
def get_manual_enrollment(cls, enrollment):
"""
If a match exists, returns the most recent entry in the table filtered by enrollment; otherwise returns None.
"""
try:
manual_enrollment = cls.objects.filter(enrollment=enrollment).latest('time_stamp')
except cls.DoesNotExist:
manual_enrollment = None
return manual_enrollment
class CourseEnrollmentAllowed(models.Model):
"""
Table of users (specified by email address strings) who are allowed to enroll in a specified course.
The user may or may not (yet) exist. Enrollment by users listed in this table is allowed
even if the enrollment time window is past.
"""
email = models.CharField(max_length=255, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
auto_enroll = models.BooleanField(default=0)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
class Meta(object):
unique_together = (('email', 'course_id'),)
def __unicode__(self):
return "[CourseEnrollmentAllowed] %s: %s (%s)" % (self.email, self.course_id, self.created)
@classmethod
def may_enroll_and_unenrolled(cls, course_id):
"""
Return QuerySet of students who are allowed to enroll in a course.
Result excludes students who have already enrolled in the
course.
`course_id` identifies the course for which to compute the QuerySet.
"""
enrolled = CourseEnrollment.objects.users_enrolled_in(course_id=course_id).values_list('email', flat=True)
return CourseEnrollmentAllowed.objects.filter(course_id=course_id).exclude(email__in=enrolled)
@total_ordering
class CourseAccessRole(models.Model):
"""
Maps users to org, courses, and roles. Used by student.roles.CourseRole and OrgRole.
To establish a user as having a specific role over all courses in the org, create an entry
without a course_id.
"""
objects = NoneToEmptyManager()
user = models.ForeignKey(User)
# blank org is for global group based roles such as course creator (may be deprecated)
org = models.CharField(max_length=64, db_index=True, blank=True)
# blank course_id implies org wide role
course_id = CourseKeyField(max_length=255, db_index=True, blank=True)
role = models.CharField(max_length=64, db_index=True)
class Meta(object):
unique_together = ('user', 'org', 'course_id', 'role')
@property
def _key(self):
"""
Convenience function to make eq overrides easier and clearer. Arbitrary
decision that role is primary, followed by org, course, and then user.
"""
return (self.role, self.org, self.course_id, self.user_id)
def __eq__(self, other):
"""
Overriding eq because the Django impl relies on the primary key, which requires
a fetch. Sometimes we just want to compare roles without doing another fetch.
"""
return type(self) == type(other) and self._key == other._key # pylint: disable=protected-access
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
"""
Lexicographic sort
"""
return self._key < other._key # pylint: disable=protected-access
def __unicode__(self):
return "[CourseAccessRole] user: {} role: {} org: {} course: {}".format(self.user.username, self.role, self.org, self.course_id)
#### Helper methods for use from python manage.py shell and other classes.
def get_user_by_username_or_email(username_or_email):
"""
Return a User object, looking up by email if username_or_email contains a
'@', otherwise by username.
Raises:
User.DoesNotExist if lookup fails.
"""
if '@' in username_or_email:
return User.objects.get(email=username_or_email)
else:
return User.objects.get(username=username_or_email)
def get_user(email):
user = User.objects.get(email=email)
u_prof = UserProfile.objects.get(user=user)
return user, u_prof
def user_info(email):
user, u_prof = get_user(email)
print "User id", user.id
print "Username", user.username
print "E-mail", user.email
print "Name", u_prof.name
print "Location", u_prof.location
print "Language", u_prof.language
return user, u_prof
def change_email(old_email, new_email):
user = User.objects.get(email=old_email)
user.email = new_email
user.save()
def change_name(email, new_name):
_user, u_prof = get_user(email)
u_prof.name = new_name
u_prof.save()
def user_count():
print "All users", User.objects.all().count()
print "Active users", User.objects.filter(is_active=True).count()
return User.objects.all().count()
def active_user_count():
return User.objects.filter(is_active=True).count()
def create_group(name, description):
utg = UserTestGroup()
utg.name = name
utg.description = description
utg.save()
def add_user_to_group(user, group):
utg = UserTestGroup.objects.get(name=group)
utg.users.add(User.objects.get(username=user))
utg.save()
def remove_user_from_group(user, group):
utg = UserTestGroup.objects.get(name=group)
utg.users.remove(User.objects.get(username=user))
utg.save()
DEFAULT_GROUPS = {
'email_future_courses': 'Receive e-mails about future MITx courses',
'email_helpers': 'Receive e-mails about how to help with MITx',
'mitx_unenroll': 'Fully unenrolled -- no further communications',
'6002x_unenroll': 'Took and dropped 6002x'
}
def add_user_to_default_group(user, group):
try:
utg = UserTestGroup.objects.get(name=group)
except UserTestGroup.DoesNotExist:
utg = UserTestGroup()
utg.name = group
utg.description = DEFAULT_GROUPS[group]
utg.save()
utg.users.add(User.objects.get(username=user))
utg.save()
def create_comments_service_user(user):
if not settings.FEATURES['ENABLE_DISCUSSION_SERVICE']:
# Don't try--it won't work, and it will fill the logs with lots of errors
return
try:
cc_user = cc.User.from_django_user(user)
cc_user.save()
except Exception: # pylint: disable=broad-except
log = logging.getLogger("edx.discussion") # pylint: disable=redefined-outer-name
log.error(
"Could not create comments service user with id {}".format(user.id),
exc_info=True
)
# Define login and logout handlers here in the models file, instead of the views file,
# so that they are more likely to be loaded when a Studio user brings up the Studio admin
# page to login. These are currently the only signals available, so we need to continue
# identifying and logging failures separately (in views).
@receiver(user_logged_in)
def log_successful_login(sender, request, user, **kwargs): # pylint: disable=unused-argument
"""Handler to log when logins have occurred successfully."""
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u"Login success - user.id: {0}".format(user.id))
else:
AUDIT_LOG.info(u"Login success - {0} ({1})".format(user.username, user.email))
@receiver(user_logged_out)
def log_successful_logout(sender, request, user, **kwargs): # pylint: disable=unused-argument
"""Handler to log when logouts have occurred successfully."""
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.info(u"Logout - user.id: {0}".format(request.user.id))
else:
AUDIT_LOG.info(u"Logout - {0}".format(request.user))
@receiver(user_logged_in)
@receiver(user_logged_out)
def enforce_single_login(sender, request, user, signal, **kwargs): # pylint: disable=unused-argument
"""
Sets the current session id in the user profile,
to prevent concurrent logins.
"""
if settings.FEATURES.get('PREVENT_CONCURRENT_LOGINS', False):
if signal == user_logged_in:
key = request.session.session_key
else:
key = None
if user:
user.profile.set_login_session(key)
class DashboardConfiguration(ConfigurationModel):
"""Dashboard Configuration settings.
Includes configuration options for the dashboard, which impact behavior and rendering for the application.
"""
recent_enrollment_time_delta = models.PositiveIntegerField(
default=0,
help_text="The number of seconds in which a new enrollment is considered 'recent'. "
"Used to display notifications."
)
@property
def recent_enrollment_seconds(self):
return self.recent_enrollment_time_delta
class LinkedInAddToProfileConfiguration(ConfigurationModel):
"""
LinkedIn Add to Profile Configuration
This configuration enables the "Add to Profile" LinkedIn
button on the student dashboard. The button appears when
users have a certificate available; when clicked,
users are sent to the LinkedIn site with a pre-filled
form allowing them to add the certificate to their
LinkedIn profile.
"""
MODE_TO_CERT_NAME = {
"honor": _(u"{platform_name} Honor Code Certificate for {course_name}"),
"verified": _(u"{platform_name} Verified Certificate for {course_name}"),
"professional": _(u"{platform_name} Professional Certificate for {course_name}"),
"no-id-professional": _(
u"{platform_name} Professional Certificate for {course_name}"
),
}
company_identifier = models.TextField(
help_text=_(
u"The company identifier for the LinkedIn Add-to-Profile button "
u"e.g 0_0dPSPyS070e0HsE9HNz_13_d11_"
)
)
# Deprecated
dashboard_tracking_code = models.TextField(default="", blank=True)
trk_partner_name = models.CharField(
max_length=10,
default="",
blank=True,
help_text=_(
u"Short identifier for the LinkedIn partner used in the tracking code. "
u"(Example: 'edx') "
u"If no value is provided, tracking codes will not be sent to LinkedIn."
)
)
def add_to_profile_url(self, course_key, course_name, cert_mode, cert_url, source="o", target="dashboard"):
"""Construct the URL for the "add to profile" button.
Arguments:
course_key (CourseKey): The identifier for the course.
course_name (unicode): The display name of the course.
cert_mode (str): The course mode of the user's certificate (e.g. "verified", "honor", "professional")
cert_url (str): The download URL for the certificate.
Keyword Arguments:
source (str): Either "o" (for onsite/UI), "e" (for emails), or "m" (for mobile)
target (str): An identifier for the occurrence of the button.
"""
params = OrderedDict([
('_ed', self.company_identifier),
('pfCertificationName', self._cert_name(course_name, cert_mode).encode('utf-8')),
('pfCertificationUrl', cert_url),
('source', source)
])
tracking_code = self._tracking_code(course_key, cert_mode, target)
if tracking_code is not None:
params['trk'] = tracking_code
return u'http://www.linkedin.com/profile/add?{params}'.format(
params=urlencode(params)
)
def _cert_name(self, course_name, cert_mode):
"""Name of the certification, for display on LinkedIn. """
return self.MODE_TO_CERT_NAME.get(
cert_mode,
_(u"{platform_name} Certificate for {course_name}")
).format(
platform_name=settings.PLATFORM_NAME,
course_name=course_name
)
def _tracking_code(self, course_key, cert_mode, target):
"""Create a tracking code for the button.
Tracking codes are used by LinkedIn to collect
analytics about certifications users are adding
to their profiles.
The tracking code format is:
&trk=[partner name]-[certificate type]-[date]-[target field]
In our case, we're sending:
&trk=edx-{COURSE ID}_{COURSE MODE}-{TARGET}
If no partner code is configured, then this will
return None, indicating that tracking codes are disabled.
Arguments:
course_key (CourseKey): The identifier for the course.
cert_mode (str): The enrollment mode for the course.
target (str): Identifier for where the button is located.
Returns:
unicode or None
"""
return (
u"{partner}-{course_key}_{cert_mode}-{target}".format(
partner=self.trk_partner_name,
course_key=unicode(course_key),
cert_mode=cert_mode,
target=target
)
if self.trk_partner_name else None
)
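# Illustrative example (identifiers hypothetical): with trk_partner_name
# 'edx', a verified certificate for course 'edX/DemoX/2014' and the default
# target 'dashboard', add_to_profile_url() yields a LinkedIn URL whose query
# string carries _ed, pfCertificationName, pfCertificationUrl, source and
# trk=edx-edX/DemoX/2014_verified-dashboard (URL-encoded by urlencode).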
class EntranceExamConfiguration(models.Model):
"""
Represents a Student's entrance exam specific data for a single Course
"""
user = models.ForeignKey(User, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
# if skip_entrance_exam is True, then student can skip entrance exam
# for the course
skip_entrance_exam = models.BooleanField(default=True)
class Meta(object):
unique_together = (('user', 'course_id'), )
def __unicode__(self):
return "[EntranceExamConfiguration] %s: %s (%s) = %s" % (
self.user, self.course_id, self.created, self.skip_entrance_exam
)
@classmethod
def user_can_skip_entrance_exam(cls, user, course_key):
"""
Return True if given user can skip entrance exam for given course otherwise False.
"""
can_skip = False
if is_entrance_exams_enabled():
try:
record = EntranceExamConfiguration.objects.get(user=user, course_id=course_key)
can_skip = record.skip_entrance_exam
except EntranceExamConfiguration.DoesNotExist:
can_skip = False
return can_skip
class LanguageField(models.CharField):
"""Represents a language from the ISO 639-1 language set."""
def __init__(self, *args, **kwargs):
"""Creates a LanguageField.
Accepts all the same kwargs as a CharField, except for max_length and
choices. help_text defaults to a description of the ISO 639-1 set.
"""
kwargs.pop('max_length', None)
kwargs.pop('choices', None)
help_text = kwargs.pop(
'help_text',
_("The ISO 639-1 language code for this language."),
)
super(LanguageField, self).__init__(
max_length=16,
choices=settings.ALL_LANGUAGES,
help_text=help_text,
*args,
**kwargs
)
add_introspection_rules([], [r"^student\.models\.LanguageField"])
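# Minimal usage sketch (field name hypothetical): LanguageField behaves like
# a CharField constrained to settings.ALL_LANGUAGES, e.g. on some model:
#
#   preferred_language = LanguageField(blank=True)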
class LanguageProficiency(models.Model):
"""
Represents a user's language proficiency.
Note that we have not found a way to emit analytics change events by using signals directly on this
model or on UserProfile. Therefore if you are changing LanguageProficiency values, it is important
to go through the accounts API (AccountsView) defined in
/edx-platform/openedx/core/djangoapps/user_api/accounts/views.py or its associated api method
(update_account_settings) so that the events are emitted.
"""
class Meta(object):
unique_together = (('code', 'user_profile'),)
user_profile = models.ForeignKey(UserProfile, db_index=True, related_name='language_proficiencies')
code = models.CharField(
max_length=16,
blank=False,
choices=settings.ALL_LANGUAGES,
help_text=_("The ISO 639-1 language code for this language.")
)
class CourseEnrollmentAttribute(models.Model):
"""
Provide additional information about the user's enrollment.
"""
enrollment = models.ForeignKey(CourseEnrollment, related_name="attributes")
namespace = models.CharField(
max_length=255,
help_text=_("Namespace of enrollment attribute")
)
name = models.CharField(
max_length=255,
help_text=_("Name of the enrollment attribute")
)
value = models.CharField(
max_length=255,
help_text=_("Value of the enrollment attribute")
)
def __unicode__(self):
"""Unicode representation of the attribute. """
return u"{namespace}:{name}, {value}".format(
namespace=self.namespace,
name=self.name,
value=self.value,
)
@classmethod
def add_enrollment_attr(cls, enrollment, data_list):
"""Delete all the enrollment attributes for the given enrollment and
add new attributes.
Args:
enrollment(CourseEnrollment): 'CourseEnrollment' for which attributes are to be added
data_list(list): list of dictionaries containing data to save
"""
cls.objects.filter(enrollment=enrollment).delete()
attributes = [
cls(enrollment=enrollment, namespace=data['namespace'], name=data['name'], value=data['value'])
for data in data_list
]
cls.objects.bulk_create(attributes)
@classmethod
def get_enrollment_attributes(cls, enrollment):
"""Retrieve list of all enrollment attributes.
Args:
enrollment(CourseEnrollment): 'CourseEnrollment' for which the attribute list is to be retrieved
Returns: list
Example:
>>> CourseEnrollmentAttribute.get_enrollment_attributes(CourseEnrollment)
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
"""
return [
{
"namespace": attribute.namespace,
"name": attribute.name,
"value": attribute.value,
}
for attribute in cls.objects.filter(enrollment=enrollment)
]
class EnrollmentRefundConfiguration(ConfigurationModel):
"""
Configuration for course enrollment refunds.
"""
# TODO: Django 1.8 introduces a DurationField
# (https://docs.djangoproject.com/en/1.8/ref/models/fields/#durationfield)
# for storing timedeltas which uses MySQL's bigint for backing
# storage. After we've completed the Django upgrade we should be
# able to replace this field with a DurationField named
# `refund_window` without having to run a migration or change
# other code.
refund_window_microseconds = models.BigIntegerField(
default=1209600000000,
help_text=_(
"The window of time after enrolling during which users can be granted"
" a refund, represented in microseconds. The default is 14 days."
)
)
@property
def refund_window(self):
"""Return the configured refund window as a `datetime.timedelta`."""
return timedelta(microseconds=self.refund_window_microseconds)
@refund_window.setter
def refund_window(self, refund_window):
"""Set the current refund window to the given timedelta."""
self.refund_window_microseconds = int(refund_window.total_seconds() * 1000000)
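# Round-trip sketch (values illustrative): the setter stores the timedelta as
# microseconds, so `config.refund_window = timedelta(days=7)` persists
# 604800000000 in refund_window_microseconds, and reading config.refund_window
# returns timedelta(days=7) again.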
|
sthoma/flask-jumpstart
|
refs/heads/master
|
tests/backend_tests/__init__.py
|
12133432
| |
KaranToor/MA450
|
refs/heads/master
|
google-cloud-sdk/.install/.backup/lib/third_party/prompt_toolkit/eventloop/__init__.py
|
12133432
| |
Mixser/django
|
refs/heads/master
|
django/contrib/staticfiles/templatetags/__init__.py
|
12133432
| |
Daarknes/Gadakeco
|
refs/heads/master
|
src/context/__init__.py
|
12133432
| |
lamby/ansible-modules-core
|
refs/heads/devel
|
system/group.py
|
81
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: group
author: "Stephen Fromm (@sfromm)"
version_added: "0.0.2"
short_description: Add or remove groups
requirements: [ groupadd, groupdel, groupmod ]
description:
- Manage presence of groups on a host.
options:
name:
required: true
description:
- Name of the group to manage.
gid:
required: false
description:
- Optional I(GID) to set for the group.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group should be present or not on the remote host.
system:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If I(yes), indicates that the group created is a system group.
'''
EXAMPLES = '''
# Example group command from Ansible Playbooks
- group: name=somegroup state=present
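# A second, illustrative example combining the documented gid and system
# options (values hypothetical)
- group: name=devgroup gid=1042 state=present system=yes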
'''
import grp
import syslog
import platform
class Group(object):
"""
This is a generic Group manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- group_del()
- group_add()
- group_mod()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
GROUPFILE = '/etc/group'
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Group, args, kwargs)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.gid = module.params['gid']
self.system = module.params['system']
self.syslogging = False
def execute_command(self, cmd):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd)
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('-g')
cmd.append(kwargs[key])
elif key == 'system' and kwargs[key] == True:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('-g')
cmd.append(kwargs[key])
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def group_exists(self):
try:
if grp.getgrnam(self.name):
return True
except KeyError:
return False
def group_info(self):
if not self.group_exists():
return False
try:
info = list(grp.getgrnam(self.name))
except KeyError:
return False
return info
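# Illustrative command construction (values hypothetical): for a group whose
# current gid differs, group_mod(gid='1042') builds and runs roughly
# ['groupmod', '-g', '1042', '<name>'] and returns the (rc, out, err) triple
# from module.run_command().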
# ===========================================
class SunOS(Group):
"""
This is a SunOS Group manipulation class. Solaris doesn't have
the 'system' group concept.
This overrides the following methods from the generic class:-
- group_add()
"""
platform = 'SunOS'
distribution = None
GROUPFILE = '/etc/group'
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('-g')
cmd.append(kwargs[key])
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class AIX(Group):
"""
This is an AIX Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'AIX'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('rmgroup', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('mkgroup', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('id='+kwargs[key])
elif key == 'system' and kwargs[key] == True:
cmd.append('-a')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('chgroup', True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('id='+kwargs[key])
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class FreeBsdGroup(Group):
"""
This is a FreeBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'FreeBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
if self.gid is not None:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
# modify the group if cmd will do anything
if cmd_len != len(cmd):
if self.module.check_mode:
return (0, '', '')
return self.execute_command(cmd)
return (None, '', '')
# ===========================================
class DarwinGroup(Group):
"""
This is a Mac OS X Darwin Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
Group manipulation is done using dseditgroup(1).
"""
platform = 'Darwin'
distribution = None
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'create' ]
if self.gid is not None:
cmd += [ '-i', self.gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_del(self):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'delete' ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_mod(self, gid=None):
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'edit' ]
if gid is not None:
cmd += [ '-i', gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
return (None, '', '')
class OpenBsdGroup(Group):
"""
This is an OpenBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'OpenBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class NetBsdGroup(Group):
"""
This is a NetBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'NetBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append('%d' % int(self.gid))
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
gid=dict(default=None, type='str'),
system=dict(default=False, type='bool'),
),
supports_check_mode=True
)
group = Group(module)
if group.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - platform %s' % group.platform)
if group.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - distribution %s' % group.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = group.name
result['state'] = group.state
if group.state == 'absent':
if group.group_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = group.group_del()
if rc != 0:
module.fail_json(name=group.name, msg=err)
elif group.state == 'present':
if not group.group_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = group.group_add(gid=group.gid, system=group.system)
else:
(rc, out, err) = group.group_mod(gid=group.gid)
if rc is not None and rc != 0:
module.fail_json(name=group.name, msg=err)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if group.group_exists():
info = group.group_info()
result['system'] = group.system
result['gid'] = info[2]
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
ZECTBynmo/gyp-builder
|
refs/heads/master
|
gyp/pylib/gyp/MSVSNew.py
|
225
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation for SCons."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can
explicitly determine the GUID to refer to. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate an MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
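# Deterministic by construction (values illustrative): repeated calls with the
# same inputs return the same GUID, e.g.
#
#   MakeGuid('base') == MakeGuid('base')    # always True
#   MakeGuid('base', seed='other')          # a different GUID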
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution:
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# TODO(rspangler): Should omit this section if there are no folders
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
|
aaboffill/django-allmedia
|
refs/heads/master
|
media/validators.py
|
1
|
# coding=utf-8
import magic
from django.core.exceptions import ValidationError
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
class FileFieldValidator(object):
def __init__(self, mime_types, max_size):
super(FileFieldValidator, self).__init__()
self.mime_types = mime_types
self.max_size = max_size
def __call__(self, value):
try:
mime = magic.from_buffer(value.read(1024), mime=True)
if mime in self.mime_types:
if value.size > self.max_size:
raise ValidationError(_("Please keep file size under %(max_size)s. Current file size %(size)s.") % {
'max_size': filesizeformat(self.max_size),
'size': filesizeformat(value.size)
})
else:
raise ValidationError(_("File type %(mime)s is not supported.") % {'mime': mime})
except AttributeError:
raise ValidationError(_("The value %s could not be validated for file type.") % value)
|
frank10704/DF_GCS_W
|
refs/heads/master
|
MissionPlanner-master/packages/IronPython.StdLib.2.7.5-beta1/content/Lib/lib2to3/pgen2/grammar.py
|
55
|
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d)
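# Round-trip sketch (filename hypothetical): dump() pickles __dict__ with
# protocol 2 and load() restores it, so for a populated grammar g:
#
#   g.dump('grammar.pickle')
#   g2 = Grammar()
#   g2.load('grammar.pickle')
#   # g2 now has the same symbol2number, states, dfas, etc. as g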
def copy(self):
"""
Copy the grammar.
"""
new = self.__class__()
for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
"tokens", "symbol2label"):
setattr(new, dict_attr, getattr(self, dict_attr).copy())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
return new
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print "s2n"
pprint(self.symbol2number)
print "n2s"
pprint(self.number2symbol)
print "states"
pprint(self.states)
print "dfas"
pprint(self.dfas)
print "labels"
pprint(self.labels)
print "start", self.start
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
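# Illustrative lookups: after the loop above, opmap maps operator text to the
# token module's numeric codes, e.g. opmap['('] is token.LPAR and
# opmap['**='] is token.DOUBLESTAREQUAL.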
|
loretoparisi/nupic
|
refs/heads/master
|
examples/opf/experiments/missing_record/simple_0/description.py
|
32
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/simple_0.csv'),
'windowSize': 25,
'modelParams': {
'sensorParams': {
'verbosity': 0,
'encoders': {
'timestamp_timeOfDay': None,
'timestamp_dayOfWeek': None,
'field2': None,
}
},
'clParams': {
'clVerbosity': 0,
}
}
}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
|
jwren/intellij-community
|
refs/heads/master
|
python/helpers/py3only/docutils/parsers/__init__.py
|
44
|
# $Id: __init__.py 7646 2013-04-17 14:17:37Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils parser modules.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import Component
if sys.version_info < (2,5):
from docutils._compat import __import__
class Parser(Component):
component_type = 'parser'
config_section = 'parsers'
def parse(self, inputstring, document):
"""Override to parse `inputstring` into document tree `document`."""
raise NotImplementedError('subclass must override this method')
def setup_parse(self, inputstring, document):
"""Initial parse setup. Call at start of `self.parse()`."""
self.inputstring = inputstring
self.document = document
document.reporter.attach_observer(document.note_parse_message)
def finish_parse(self):
"""Finalize parse details. Call at end of `self.parse()`."""
self.document.reporter.detach_observer(
self.document.note_parse_message)
_parser_aliases = {
'restructuredtext': 'rst',
'rest': 'rst',
'restx': 'rst',
'rtxt': 'rst',}
def get_parser_class(parser_name):
"""Return the Parser class from the `parser_name` module."""
parser_name = parser_name.lower()
if parser_name in _parser_aliases:
parser_name = _parser_aliases[parser_name]
try:
module = __import__(parser_name, globals(), locals(), level=1)
except ImportError:
module = __import__(parser_name, globals(), locals(), level=0)
return module.Parser
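# Usage sketch: aliases resolve before the import, so both calls below return
# the reStructuredText parser class:
#
#   get_parser_class('rst')
#   get_parser_class('restructuredtext')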
|
achanda/flocker
|
refs/heads/master
|
flocker/common/functional/test_ipc.py
|
15
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Functional tests for IPC.
"""
from twisted.internet.threads import deferToThread
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase
from .. import ProcessNode
from ..test.test_ipc import make_inode_tests
from ...testtools.ssh import create_ssh_server
def make_prefixless_processnode(test_case):
"""
Create a ``ProcessNode`` that just runs the given command with no
prefix.
:return: ``ProcessNode`` instance.
"""
return ProcessNode(initial_command_arguments=[])
class ProcessINodeTests(make_inode_tests(make_prefixless_processnode)):
"""``INode`` tests for ``ProcessNode``."""
class ProcessNodeTests(TestCase):
"""Tests for ``ProcessNode``."""
def test_run_runs_command(self):
"""
``ProcessNode.run`` runs a command that is a combination of the
initial arguments and the ones given to ``run()``.
"""
node = ProcessNode(initial_command_arguments=[b"sh"])
temp_file = self.mktemp()
with node.run([b"-c", b"echo hello > " + temp_file]):
pass
self.assertEqual(FilePath(temp_file).getContent(), b"hello\n")
def test_run_stdin(self):
"""
``ProcessNode.run()`` context manager returns the subprocess' stdin.
"""
node = ProcessNode(initial_command_arguments=[b"sh", b"-c"])
temp_file = self.mktemp()
with node.run([b"cat > " + temp_file]) as stdin:
stdin.write(b"hello ")
stdin.write(b"world")
self.assertEqual(FilePath(temp_file).getContent(), b"hello world")
def test_run_bad_exit(self):
"""
``run()`` raises ``IOError`` if subprocess has non-zero exit code.
"""
node = ProcessNode(initial_command_arguments=[])
nonexistent = self.mktemp()
try:
with node.run([b"ls", nonexistent]):
pass
except IOError:
pass
else:
self.fail("No IOError")
def test_get_output_runs_command(self):
"""
``ProcessNode.get_output()`` runs a command that is the combination of
the initial arguments and the ones given to ``get_output()``.
"""
node = ProcessNode(initial_command_arguments=[b"sh"])
temp_file = self.mktemp()
node.get_output([b"-c", b"echo hello > " + temp_file])
self.assertEqual(FilePath(temp_file).getContent(), b"hello\n")
def test_get_output_result(self):
"""
``get_output()`` returns the output of the command.
"""
node = ProcessNode(initial_command_arguments=[])
result = node.get_output([b"echo", b"-n", b"hello"])
self.assertEqual(result, b"hello")
def test_get_output_bad_exit(self):
"""
``get_output()`` raises ``IOError`` if subprocess has non-zero exit
code.
"""
node = ProcessNode(initial_command_arguments=[])
nonexistent = self.mktemp()
self.assertRaises(IOError, node.get_output, [b"ls", nonexistent])
def make_sshnode(test_case):
"""
Create a ``ProcessNode`` that can SSH into the local machine.
:param TestCase test_case: The test case to use.
:return: A ``ProcessNode`` instance.
"""
server = create_ssh_server(FilePath(test_case.mktemp()))
test_case.addCleanup(server.restore)
return ProcessNode.using_ssh(
host=unicode(server.ip).encode("ascii"), port=server.port,
username=b"root", private_key=server.key_path)
class SSHProcessNodeTests(TestCase):
"""Tests for ``ProcessNode.with_ssh``."""
def test_runs_command(self):
"""
``run()`` on a SSH ``ProcessNode`` runs the command on the machine
being ssh'd into.
"""
node = make_sshnode(self)
temp_file = FilePath(self.mktemp())
def go():
with node.run([b"python", b"-c",
b"file('%s', 'w').write(b'hello')"
% (temp_file.path,)]):
pass
return temp_file.getContent()
d = deferToThread(go)
def got_data(data):
self.assertEqual(data, b"hello")
d.addCallback(got_data)
return d
def test_run_stdin(self):
"""
``run()`` on a SSH ``ProcessNode`` writes to the remote command's
stdin.
"""
node = make_sshnode(self)
temp_file = FilePath(self.mktemp())
def go():
with node.run([b"python", b"-c",
b"import sys; "
b"file('%s', 'wb').write(sys.stdin.read())"
% (temp_file.path,)]) as stdin:
stdin.write(b"hello ")
stdin.write(b"there")
return temp_file.getContent()
d = deferToThread(go)
def got_data(data):
self.assertEqual(data, b"hello there")
d.addCallback(got_data)
return d
def test_get_output(self):
"""
``get_output()`` returns the command's output.
"""
node = make_sshnode(self)
temp_file = FilePath(self.mktemp())
temp_file.setContent(b"hello!")
def go():
return node.get_output([b"python", b"-c",
b"import sys; "
b"sys.stdout.write(file('%s').read())"
% (temp_file.path,)])
d = deferToThread(go)
def got_data(data):
self.assertEqual(data, b"hello!")
d.addCallback(got_data)
return d
class MutatingProcessNode(ProcessNode):
"""Mutate the command being run in order to make tests work.
Come up with something better in
https://clusterhq.atlassian.net/browse/FLOC-125
"""
def __init__(self, to_service):
"""
:param to_service: The VolumeService to which a push is being done.
"""
self.to_service = to_service
ProcessNode.__init__(self, initial_command_arguments=[])
def _mutate(self, remote_command):
"""
Add the pool and mountpoint arguments, which aren't necessary in real
code.
:param remote_command: Original command arguments.
:return: Modified command arguments.
"""
return remote_command[:1] + [
b"--pool", self.to_service.pool._name,
b"--mountpoint", self.to_service.pool._mount_root.path
] + remote_command[1:]
def run(self, remote_command):
return ProcessNode.run(self, self._mutate(remote_command))
def get_output(self, remote_command):
return ProcessNode.get_output(self, self._mutate(remote_command))
|
Amunak/AtomicBot
|
refs/heads/master
|
src/atomicbot/cli.py
|
1
|
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -matomicbot` python will execute
``__main__.py`` as a script. That means there won't be any
``atomicbot.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``atomicbot.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import sys
def main(argv=sys.argv):
"""
Args:
argv (list): List of arguments
Returns:
int: A return code
Does stuff.
"""
print(argv)
return 0
|
ecotux/objectDetection
|
refs/heads/master
|
02preprocessing1.py
|
1
|
#
# Optional: save blurred images
#
import re
import os
import cv2
def preprocess1(data):
img = cv2.GaussianBlur(data, (5,5), 0)
img = cv2.bilateralFilter(img,9,75,75)
img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
return img
inputDir = 'crossData/'
outputDir = 'PREcrossData/'
for nameFile in os.listdir(inputDir):
match1=re.search(r"object(\d+)",nameFile)
if match1:
print nameFile
src = cv2.imread(inputDir+nameFile)
data = preprocess1(src)
cv2.imwrite(outputDir+nameFile, data)
|
bianchimro/django-cms-fragments
|
refs/heads/master
|
cms_fragments/admin.py
|
1
|
from django.contrib import admin
from cms.admin.placeholderadmin import PlaceholderAdmin
from models import *
admin.site.register(Fragment)
class FragmentMembershipAdminInline(admin.StackedInline):
model = FragmentMembership
extra = 0
class FragmentCollectionAdmin(admin.ModelAdmin):
model = FragmentCollection
inlines = [FragmentMembershipAdminInline]
admin.site.register(FragmentCollection, FragmentCollectionAdmin)
class FragmentBlockMembershipInline(admin.StackedInline):
model = FragmentBlockMembership
class FragmentRegionAdmin(admin.ModelAdmin):
model = FragmentRegion
inlines = [FragmentBlockMembershipInline]
admin.site.register(FragmentBlock, PlaceholderAdmin)
admin.site.register(FragmentRegion, FragmentRegionAdmin)
|
ogenstad/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/softlayer/sl_vm.py
|
52
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sl_vm
short_description: create or cancel a virtual instance in SoftLayer
description:
- Creates or cancels SoftLayer instances.
- When created, optionally waits for it to be 'running'.
version_added: "2.1"
options:
instance_id:
description:
      - Instance Id of the virtual instance on which the action is to be performed.
hostname:
description:
- Hostname to be provided to a virtual instance.
domain:
description:
- Domain name to be provided to a virtual instance.
datacenter:
description:
- Datacenter for the virtual instance to be deployed.
tags:
description:
- Tag or list of tags to be provided to a virtual instance.
hourly:
description:
      - Flag to determine if the instance should be billed hourly.
type: bool
default: 'yes'
private:
description:
- Flag to determine if the instance should be private only.
type: bool
default: 'no'
dedicated:
description:
- Flag to determine if the instance should be deployed in dedicated space.
type: bool
default: 'no'
local_disk:
description:
- Flag to determine if local disk should be used for the new instance.
type: bool
default: 'yes'
  cpus:
    description:
      - Count of CPUs to be assigned to the new virtual instance.
    required: true
  memory:
    description:
      - Amount of memory to be assigned to the new virtual instance.
    required: true
  disks:
    description:
      - List of disk sizes to be assigned to the new virtual instance.
    required: true
    default: [ 25 ]
  os_code:
    description:
      - OS Code to be used for the new virtual instance.
  image_id:
    description:
      - Image Template to be used for the new virtual instance.
  nic_speed:
    description:
      - NIC Speed to be assigned to the new virtual instance.
    default: 10
  public_vlan:
    description:
      - VLAN by its Id to be assigned to the public NIC.
  private_vlan:
    description:
      - VLAN by its Id to be assigned to the private NIC.
  ssh_keys:
    description:
      - List of ssh keys by their Id to be assigned to the virtual instance.
  post_uri:
    description:
      - URL of a post-provisioning script to be loaded and executed on the virtual instance.
state:
description:
- Create, or cancel a virtual instance.
- Specify C(present) for create, C(absent) to cancel.
choices: [ absent, present ]
default: present
wait:
description:
- Flag used to wait for active status before returning.
type: bool
default: 'yes'
wait_time:
description:
- Time in seconds before wait returns.
default: 600
requirements:
- python >= 2.6
- softlayer >= 4.1.1
author:
- Matt Colton (@mcltn)
'''
EXAMPLES = '''
- name: Build instance
hosts: localhost
gather_facts: no
tasks:
- name: Build instance request
sl_vm:
hostname: instance-1
domain: anydomain.com
datacenter: dal09
tags: ansible-module-test
hourly: yes
private: no
dedicated: no
local_disk: yes
cpus: 1
memory: 1024
disks: [25]
os_code: UBUNTU_LATEST
wait: no
- name: Build additional instances
hosts: localhost
gather_facts: no
tasks:
- name: Build instances request
sl_vm:
hostname: "{{ item.hostname }}"
domain: "{{ item.domain }}"
datacenter: "{{ item.datacenter }}"
tags: "{{ item.tags }}"
hourly: "{{ item.hourly }}"
private: "{{ item.private }}"
dedicated: "{{ item.dedicated }}"
local_disk: "{{ item.local_disk }}"
cpus: "{{ item.cpus }}"
memory: "{{ item.memory }}"
disks: "{{ item.disks }}"
os_code: "{{ item.os_code }}"
ssh_keys: "{{ item.ssh_keys }}"
wait: "{{ item.wait }}"
with_items:
- hostname: instance-2
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: yes
private: no
dedicated: no
local_disk: yes
cpus: 1
memory: 1024
disks:
- 25
- 100
os_code: UBUNTU_LATEST
ssh_keys: []
wait: True
- hostname: instance-3
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: yes
private: no
dedicated: no
local_disk: yes
cpus: 1
memory: 1024
disks:
- 25
- 100
os_code: UBUNTU_LATEST
ssh_keys: []
wait: yes
- name: Cancel instances
hosts: localhost
gather_facts: no
tasks:
- name: Cancel by tag
sl_vm:
state: absent
tags: ansible-module-test
'''
# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
RETURN = '''# '''
import json
import time
try:
import SoftLayer
from SoftLayer import VSManager
HAS_SL = True
vsManager = VSManager(SoftLayer.create_client_from_env())
except ImportError:
HAS_SL = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
# TODO: get this info from API
STATES = ['present', 'absent']
DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'fra02', 'hkg02', 'hou02', 'lon02', 'mel01', 'mex01', 'mil01', 'mon01',
'osl01', 'par01', 'sjc01', 'sjc03', 'sao01', 'sea01', 'sng01', 'syd01', 'tok02', 'tor01', 'wdc01', 'wdc04']
CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
INITIALDISK_SIZES = [25, 100]
LOCALDISK_SIZES = [25, 100, 150, 200, 300]
SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
NIC_SPEEDS = [10, 100, 1000]
def create_virtual_instance(module):
instances = vsManager.list_instances(
hostname=module.params.get('hostname'),
domain=module.params.get('domain'),
datacenter=module.params.get('datacenter')
)
if instances:
return False, None
# Check if OS or Image Template is provided (Can't be both, defaults to OS)
if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
module.params['image_id'] = ''
elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
module.params['os_code'] = ''
module.params['disks'] = [] # Blank out disks since it will use the template
else:
return False, None
tags = module.params.get('tags')
if isinstance(tags, list):
tags = ','.join(map(str, module.params.get('tags')))
instance = vsManager.create_instance(
hostname=module.params.get('hostname'),
domain=module.params.get('domain'),
cpus=module.params.get('cpus'),
memory=module.params.get('memory'),
hourly=module.params.get('hourly'),
datacenter=module.params.get('datacenter'),
os_code=module.params.get('os_code'),
image_id=module.params.get('image_id'),
local_disk=module.params.get('local_disk'),
disks=module.params.get('disks'),
ssh_keys=module.params.get('ssh_keys'),
nic_speed=module.params.get('nic_speed'),
private=module.params.get('private'),
public_vlan=module.params.get('public_vlan'),
private_vlan=module.params.get('private_vlan'),
dedicated=module.params.get('dedicated'),
post_uri=module.params.get('post_uri'),
tags=tags,
)
if instance is not None and instance['id'] > 0:
return True, instance
else:
return False, None
def wait_for_instance(module, id):
instance = None
completed = False
wait_timeout = time.time() + module.params.get('wait_time')
while not completed and wait_timeout > time.time():
try:
completed = vsManager.wait_for_ready(id, 10, 2)
if completed:
instance = vsManager.get_instance(id)
        except Exception:
            completed = False
return completed, instance
def cancel_instance(module):
canceled = True
if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
tags = module.params.get('tags')
if isinstance(tags, string_types):
tags = [module.params.get('tags')]
instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
for instance in instances:
try:
vsManager.cancel_instance(instance['id'])
            except Exception:
                canceled = False
elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
        try:
            vsManager.cancel_instance(module.params.get('instance_id'))
        except Exception:
            canceled = False
else:
return False, None
return canceled, None
def main():
module = AnsibleModule(
argument_spec=dict(
instance_id=dict(type='str'),
hostname=dict(type='str'),
domain=dict(type='str'),
datacenter=dict(type='str', choices=DATACENTERS),
tags=dict(type='str'),
hourly=dict(type='bool', default=True),
private=dict(type='bool', default=False),
dedicated=dict(type='bool', default=False),
local_disk=dict(type='bool', default=True),
cpus=dict(type='int', choices=CPU_SIZES),
memory=dict(type='int', choices=MEMORY_SIZES),
disks=dict(type='list', default=[25]),
os_code=dict(type='str'),
image_id=dict(type='str'),
nic_speed=dict(type='int', choices=NIC_SPEEDS),
public_vlan=dict(type='str'),
private_vlan=dict(type='str'),
ssh_keys=dict(type='list', default=[]),
post_uri=dict(type='str'),
state=dict(type='str', default='present', choices=STATES),
wait=dict(type='bool', default=True),
wait_time=dict(type='int', default=600),
)
)
if not HAS_SL:
module.fail_json(msg='softlayer python library required for this module')
if module.params.get('state') == 'absent':
(changed, instance) = cancel_instance(module)
elif module.params.get('state') == 'present':
(changed, instance) = create_virtual_instance(module)
if module.params.get('wait') is True and instance:
(changed, instance) = wait_for_instance(module, instance['id'])
module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
if __name__ == '__main__':
main()
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/operations/_security_rules_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations(object):
"""SecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
"""Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SecurityRule"]
"""Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:param security_rule_parameters: Parameters supplied to the create or update network security
rule operation.
:type security_rule_parameters: ~azure.mgmt.network.v2020_05_01.models.SecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.SecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
security_rule_parameters=security_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityRuleListResult"]
"""Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
|
himanisingla16/activitystreams-travis
|
refs/heads/master
|
components/lib/kafka-pipeline/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
|
1355
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as Eclipse CDT and Code::Blocks. Qt Creator
will convert the CMakeLists.txt to a Code::Blocks cbp for the editor to read,
but build using CMake. As a result the Qt Creator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
  if any(rel_path.startswith(var) for var in FULL_PATH_VARS):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
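# For example (a sketch): NormjoinPathForceCMakeSource('chrome', '../v8/src')
# returns '${CMAKE_CURRENT_LIST_DIR}/v8/src'.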
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
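# For example (a sketch): NormjoinPath('chrome', 'gen/../src') returns
# 'chrome/src', while NormjoinPath('chrome', '${obj}/gen') returns
# '${obj}/gen' unchanged.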
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
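# For example (a sketch): CMakeStringEscape('a;b "c"') returns 'a\;b \"c\"'.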
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')', '"'
Invalid for make: ':'
Invalid for unknown reasons but cause failures: '.'
"""
return a.translate(string.maketrans(' /():."', '_______'))
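# For example (a sketch): StringToCMakeTargetName('chrome/chrome.gyp:chrome')
# returns 'chrome_chrome_gyp_chrome'.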
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
    if any(rule_source.startswith(var) for var in FULL_PATH_VARS):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
    rules: the Gyp 'rules' dict for this target.
    extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
    copies: the Gyp 'copies' dict for this target.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy(object):
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
  this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
    self.cmake_target_base_names_conflicting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
        self.cmake_target_base_names_conflicting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
    if base_name in self.cmake_target_base_names_conflicting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
    print('Target %s has unknown target type %s, skipping.' %
          (target_name, target_type))
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
# A full separation is done here because of flag handing (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
    src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src)
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
      WriteVariable(output, linkable_sources_name, '')
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
print ('ERROR: What output file should be generated?',
'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
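# For illustration (hypothetical target names): a linkable target 'foo' with
# circular static deps 'bar' and 'baz' ends up with a link line like:
#   target_link_libraries(foo
#   -Wl,--start-group
#    bar
#    baz
#   -Wl,--end-group
#   )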
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# Ensure all external dependencies are complete before internal dependencies;
# extra_deps currently only depend on their own deps, so they would otherwise run early.
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' ')
output.write(lib)
output.write('\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier; cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
ar = None
cc = None
cxx = None
make_global_settings = data[gyp_file].get('make_global_settings', [])
build_to_top = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_top, value)
if key == 'CC':
cc = os.path.join(build_to_top, value)
if key == 'CXX':
cxx = os.path.join(build_to_top, value)
ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
if ar:
SetVariable(output, 'CMAKE_AR', ar)
if cc:
SetVariable(output, 'CMAKE_C_COMPILER', cc)
if cxx:
SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
if cc:
SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
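# For illustration: with these properties set, compiling foo.c produces an
# object named foo.o (gyp's convention) rather than CMake's default foo.c.o,
# which matters to targets that extract .o files from archives by name.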
output.write('\n')
# Force ninja to use rsp files. Otherwise link and ar lines can get too long,
# resulting in 'Argument list too long' errors.
output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
# CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier; cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print 'Generating [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print 'Building [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
|
aeliot/openthread
|
refs/heads/master
|
tools/harness-automation/cases/commissioner_9_2_3.py
|
16
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Commissioner_9_2_3(HarnessCase):
role = HarnessCase.ROLE_COMMISSIONER
case = '9 2 3'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
srgblnch/Rijndael
|
refs/heads/master
|
gRijndael/ThirdLevel.py
|
1
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Sergi Blanch-Torne"
__email__ = "srgblnchtrn@protonmail.ch"
__copyright__ = "Copyright 2013 Sergi Blanch-Torne"
__license__ = "GPLv3+"
__status__ = "development"
'''As a descendant-level design, this file holds the class definitions for
the levels above the basic Rijndael operations.
'''
from .Logger import Logger as _Logger
binlen = lambda x: len(bin(x))-2  # number of binary digits in x, e.g. binlen(5) == 3
class Word:
def __init__(self, nRows, wordSize):
self.__nRows = nRows
self.__wordSize = wordSize
def toList(self, superWord):
'''Split a number into a set of integers with wordSize bits each
Input: <integer> superWord
Output: <integer array> wordsArray
descent methods: []
auxiliary methods: []
'''
wordsArray = []
mask = int('0b'+'1'*self.__wordSize, 2)
for i in range(self.__nRows):
wordsArray.append((superWord >> self.__wordSize*i) & mask)
return wordsArray
def fromList(self, wordsArray):
'''Concatenate a set of integers (with wordSize bits each) into one
integer of size wordSize*len(wordsArray)
Input: <integer array> wordsArray
Output: <integer> superWord
descent methods: []
auxiliary methods: []
'''
superWord = 0
for j in range(self.__nRows):
superWord += wordsArray[j] << self.__wordSize*(self.__nRows-j-1)
return superWord
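# Illustrative usage (hypothetical values), assuming nRows=4 and wordSize=8:
#   Word(4, 8).toList(0x0A0B0C0D)                 # -> [0x0D, 0x0C, 0x0B, 0x0A]
#   Word(4, 8).fromList([0x0A, 0x0B, 0x0C, 0x0D]) # -> 0x0A0B0C0D
# Note the asymmetry: toList yields the least-significant word first, while
# fromList expects the most-significant word first.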
class Long:
def __init__(self, wordSize):
self.__wordSize = wordSize
def toArray(self, input, length):
'''Auxiliary method to unpack an integer into an array of smaller
integers, each of which is wordSize bits wide.
Input: <integer>
Output: <integer array>
'''
if input > int('0b'+('1'*length), 2):
raise Exception("(long2array)", "Too big input for %d length"
% (length))
o = []
# cut the input into blocks of the word size
mask = (int('0b'+('1'*self.__wordSize), 2) << (length-self.__wordSize))
nBlocks = int(length/self.__wordSize)
for i in range(nBlocks):
e = (input & mask) >> (((nBlocks)-i-1)*self.__wordSize)
o.append(int(e))
mask >>= self.__wordSize
return o
def fromArray(self, input, length):
'''Auxiliary method to pack an array of integers (with #wordSize bits)
into one integer.
Input: <integer array>
Output: <integer>
descent methods: []
auxiliary methods: []
'''
o = 0
nBlocks = int(length/self.__wordSize)
for i in range(nBlocks):
o |= (input[i] << (((nBlocks)-i-1)*self.__wordSize))
return o
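# Illustrative usage (hypothetical values), assuming wordSize=8:
#   Long(8).toArray(0x0A0B, 16)          # -> [0x0A, 0x0B] (MSB first)
#   Long(8).fromArray([0x0A, 0x0B], 16)  # -> 0x0A0B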
def shift(l, n):
# Binary doesn't need a class ----
'''cyclic rotation of the list 'l' by 'n' elements.
Positive n means rotate left, negative n means rotate right.
Input: <list> l, <integer> n
Output: <list> the rotated list
'''
return l[n:]+l[:n]
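# Illustrative usage:
#   shift([1, 2, 3, 4], 1)   # -> [2, 3, 4, 1] (rotate left)
#   shift([1, 2, 3, 4], -1)  # -> [4, 1, 2, 3] (rotate right)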
class State(_Logger):
def __init__(self, nRows, nColumns, *args, **kwargs):
super(State, self).__init__(*args, **kwargs)
self.__nRows = nRows
self.__nColumns = nColumns
def fromArray(self, input):
'''Given a one dimensional array, convert it to a r*c array following:
s[r,c] = in[r+rc] for 0<=r<nRows and 0<=c<nColumns
Input: <integer array> 1d
Output: <integer arrays> 2d
'''
# FIXME: what happens if the size of input is not r*c? ----
# if it exceeds, the extra elements are appended (rows grow past nColumns);
# if not enough, rows are left short or None
state = [None] * self.__nRows
for i in range(len(input)):
row = i % self.__nRows
if row == i:
state[row] = [input[i]]
else:
state[row].append(input[i])
for i in range(self.__nRows):
self._debug_stream("state[%d]" % (i), state[i])
self._debug_stream("makeArray", state)
return state
def toArray(self, state):
'''From a r*c array, returns a one dimensional array following:
out[r+rc] = s[r,c] for 0<=r<nRows and 0<=c<nColumns
Input: <integer arrays> 2d
Output: <integer array> 1d
'''
output = []
for j in range(self.__nColumns):
for i in range(self.__nRows):
output.append(state[i][j])
self._debug_stream("unmakeArray", output)
return output
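# Illustrative usage (hypothetical values), assuming a State instance s built
# with nRows=2 and nColumns=2:
#   s.fromArray([1, 2, 3, 4])    # -> [[1, 3], [2, 4]] (filled column by column)
#   s.toArray([[1, 3], [2, 4]])  # -> [1, 2, 3, 4]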
|
tkurnosova/selenium
|
refs/heads/master
|
py/selenium/webdriver/chrome/webdriver.py
|
30
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from .remote_connection import ChromeRemoteConnection
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
"""
Controls the ChromeDriver and allows you to drive the browser.
You will need to download the ChromeDriver executable from
http://chromedriver.storage.googleapis.com/index.html
"""
def __init__(self, executable_path="chromedriver", port=0,
chrome_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates new instance of chrome driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- chrome_options: this takes an instance of ChromeOptions
"""
if chrome_options is None:
# desired_capabilities stays as passed in
if desired_capabilities is None:
desired_capabilities = self.create_options().to_capabilities()
else:
if desired_capabilities is None:
desired_capabilities = chrome_options.to_capabilities()
else:
desired_capabilities.update(chrome_options.to_capabilities())
self.service = Service(executable_path, port=port,
service_args=service_args, log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(self,
command_executor=ChromeRemoteConnection(
remote_server_addr=self.service.service_url),
desired_capabilities=desired_capabilities)
except:
self.quit()
raise
self._is_remote = False
def launch_app(self, id):
"""Launches Chrome app specified by id."""
return self.execute("launchApp", {'id': id})
def quit(self):
"""
Closes the browser and shuts down the ChromeDriver executable
that is started when starting the ChromeDriver
"""
try:
RemoteWebDriver.quit(self)
except:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
def create_options(self):
return Options()
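# Illustrative usage (hypothetical path; a minimal sketch):
#   from selenium.webdriver.chrome.options import Options
#   opts = Options()
#   opts.add_argument('--headless')
#   driver = WebDriver(executable_path='/usr/local/bin/chromedriver',
#                      chrome_options=opts)
#   driver.get('http://example.com')
#   driver.quit()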
|
mindprince/test-infra
|
refs/heads/master
|
metrics/bigquery.py
|
7
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs bigquery metrics and uploads the result to GCS."""
import argparse
import calendar
import glob
import json
import os
import pipes
import re
import subprocess
import sys
import time
import traceback
import influxdb
import requests
import yaml
def check(cmd, **kwargs):
"""Logs and runs the command, raising on errors."""
print >>sys.stderr, 'Run:', ' '.join(pipes.quote(c) for c in cmd),
if hasattr(kwargs.get('stdout'), 'name'):
print >>sys.stderr, ' > %s' % kwargs['stdout'].name
else:
print
# If 'stdin' keyword arg is a string run command and communicate string to stdin
if 'stdin' in kwargs and isinstance(kwargs['stdin'], str):
in_string = kwargs['stdin']
kwargs['stdin'] = subprocess.PIPE
proc = subprocess.Popen(cmd, **kwargs)
proc.communicate(input=in_string)
return
subprocess.check_call(cmd, **kwargs)
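# Illustrative usage (hypothetical command): check(['echo', 'hi']) logs the
# command and raises on a non-zero exit; passing a string stdin, e.g.
# check(['bq', 'show'], stdin='\n'), pipes that string to the child process.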
def validate_metric_name(name):
"""Raise ValueError if name is non-trivial."""
# Regex '$' symbol matches an optional terminating new line
# so we have to check that the name
# doesn't have one if the regex matches.
if not re.match(r'^[\w-]+$', name) or name[-1] == '\n':
raise ValueError(name)
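# Illustrative behaviour (hypothetical names):
#   validate_metric_name('build-stats')    # passes
#   validate_metric_name('bad name')       # raises ValueError (space)
#   validate_metric_name('build-stats\n')  # raises ValueError (trailing newline)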
def do_jq(jq_filter, data_filename, out_filename, jq_bin='jq'):
"""Executes jq on a file and outputs the results to a file."""
with open(out_filename, 'w') as out_file:
check([jq_bin, jq_filter, data_filename], stdout=out_file)
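# Illustrative usage (hypothetical file names): the call below is equivalent
# to the shell pipeline `jq '.[0]' data.json > out.json`.
#   do_jq('.[0]', 'data.json', 'out.json')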
class BigQuerier(object):
def __init__(self, project, bucket_path, backfill_days, influx_client):
if not project:
raise ValueError('project', project)
self.project = project
if not bucket_path:
print >>sys.stderr, 'Not uploading results, no bucket specified.'
self.prefix = bucket_path
self.influx = influx_client
self.backfill_days = backfill_days
def do_query(self, query, out_filename):
"""Executes a bigquery query, outputting the results to a file."""
cmd = [
'bq', 'query', '--format=prettyjson',
'--project_id=%s' % self.project,
'-n100000', # Results may have more than 100 rows
query,
]
with open(out_filename, 'w') as out_file:
check(cmd, stdout=out_file)
print # bq doesn't output a trailing newline
def jq_upload(self, config, data_filename):
"""Filters a data file with jq and uploads the results to GCS."""
filtered = 'daily-%s.json' % time.strftime('%Y-%m-%d')
latest = '%s-latest.json' % config['metric']
do_jq(config['jqfilter'], data_filename, filtered)
self.copy(filtered, os.path.join(config['metric'], filtered))
self.copy(filtered, latest)
def influx_upload(self, config, data_filename):
"""Uses jq to extract InfluxDB time series points then uploads to DB."""
points = '%s-data-points.json' % config['metric']
jq_point = config.get('measurements', {}).get('jq', None)
if not jq_point:
return
do_jq(jq_point, data_filename, points)
with open(points) as points_file:
try:
points = json.load(points_file)
except ValueError:
print >>sys.stderr, "No influxdb points to upload.\n"
return
if not self.influx:
print >>sys.stderr, (
'Skipping influxdb upload of metric %s, no db configured.\n'
% config['metric']
)
return
points = [ints_to_floats(point) for point in points]
self.influx.write_points(points, time_precision='s', batch_size=100)
def run_metric(self, config):
"""Runs query and filters results, uploading data to GCS."""
raw = 'raw-%s.json' % time.strftime('%Y-%m-%d')
self.update_query(config)
self.do_query(config['query'], raw)
self.copy(raw, os.path.join(config['metric'], raw))
consumer_error = False
for consumer in [self.jq_upload, self.influx_upload]:
try:
consumer(config, raw)
except (
ValueError,
KeyError,
IOError,
requests.exceptions.ConnectionError,
influxdb.client.InfluxDBClientError,
influxdb.client.InfluxDBServerError,
):
print >>sys.stderr, traceback.format_exc()
consumer_error = True
if consumer_error:
raise ValueError('Error(s) were thrown by query result consumers.')
def copy(self, src, dest):
"""Use gsutil to copy src to <bucket_path>/dest with minimal caching."""
if not self.prefix:
return # no destination
dest = os.path.join(self.prefix, dest)
check(['gsutil', '-h', 'Cache-Control:max-age=60', 'cp', src, dest])
def update_query(self, config):
"""Modifies config['query'] based on the metric configuration."""
# Currently the only modification that is supported is injecting the
# timestamp of the most recent influxdb data for a given metric.
# (For backfilling)
measure = config.get('measurements', {}).get('backfill')
if not measure:
return
if self.influx:
# To get the last data point timestamp we must also fetch a field.
# So first find a field that we can query if the metric exists.
points = self.influx.query('show field keys from %s limit 1' % measure)
points = list(points.get_points())
field = points and points[0].get('fieldKey')
last_time = None
if field:
results = self.influx.query(
'select last(%s), time from %s limit 1' % (field, measure)
)
last_time = next(results.get_points(), {}).get('time')
if last_time:
# format time properly
last_time = time.strptime(last_time, '%Y-%m-%dT%H:%M:%SZ')
last_time = calendar.timegm(last_time)
if not last_time:
last_time = int(time.time() - (60*60*24*self.backfill_days))
else:
# InfluxDB is not enabled, so skip the backfill lookup and use the default window
last_time = int(time.time() - (60*60*24)*self.backfill_days)
# replace tag with formatted time
config['query'] = config['query'].replace('<LAST_DATA_TIME>', str(last_time))
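# Illustrative usage (hypothetical config): a metric configured with
#   {'query': 'SELECT ... WHERE started > <LAST_DATA_TIME>',
#    'measurements': {'backfill': 'build_times'}}
# has the <LAST_DATA_TIME> token replaced by an epoch timestamp, either the
# last influxdb data point or now minus backfill_days.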
def all_configs(search='**.yaml'):
"""Returns config files in the metrics dir."""
return glob.glob(os.path.join(
os.path.dirname(__file__), 'configs', search))
def make_influx_client():
"""Make an InfluxDB client from config at path $VELODROME_INFLUXDB_CONFIG"""
if 'VELODROME_INFLUXDB_CONFIG' not in os.environ:
return None
with open(os.environ['VELODROME_INFLUXDB_CONFIG']) as config_file:
config = json.load(config_file)
def check_config(field):
if field not in config:
raise ValueError('DB client config needs field \'%s\'' % field)
check_config('host')
check_config('port')
check_config('user')
check_config('password')
return influxdb.InfluxDBClient(
host=config['host'],
port=config['port'],
username=config['user'],
password=config['password'],
database='metrics',
)
def ints_to_floats(point):
for key, val in point.iteritems():
if key == 'time':
continue
if isinstance(val, int):
point[key] = float(val)
elif isinstance(val, dict):
point[key] = ints_to_floats(val)
return point
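# Illustrative usage (hypothetical point): non-'time' integers are converted,
# recursing into nested dicts:
#   ints_to_floats({'time': 1234, 'count': 3, 'tags': {'n': 2}})
#   # -> {'time': 1234, 'count': 3.0, 'tags': {'n': 2.0}}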
def main(configs, project, bucket_path, backfill_days):
"""Loads metric config files and runs each metric."""
queryer = BigQuerier(project, bucket_path, backfill_days, make_influx_client())
# the 'bq show' command is called as a hack to dodge the config prompts that bq presents
# the first time it is run. A newline is passed to stdin to skip the prompt for default project
# when the service account in use has access to multiple projects.
check(['bq', 'show'], stdin='\n')
errs = []
for path in configs or all_configs():
try:
with open(path) as config_raw:
config = yaml.safe_load(config_raw)
if not config:
raise ValueError('invalid yaml: %s.' % path)
config['metric'] = config['metric'].strip()
validate_metric_name(config['metric'])
queryer.run_metric(config)
except (
ValueError,
KeyError,
IOError,
subprocess.CalledProcessError,
):
print >>sys.stderr, traceback.format_exc()
errs.append(path)
if errs:
print 'Failed %d configs: %s' % (len(errs), ', '.join(errs))
sys.exit(1)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--config', action='append', help='YAML file describing a metric.')
PARSER.add_argument(
'--project',
default='k8s-gubernator',
help='Charge the specified account for bigquery usage.')
PARSER.add_argument(
'--bucket',
help='Upload results to the specified gcs bucket.')
PARSER.add_argument(
'--backfill-days',
default=30,
type=int,
help='Number of days to backfill influxdb data.')
ARGS = PARSER.parse_args()
main(ARGS.config, ARGS.project, ARGS.bucket, ARGS.backfill_days)
|
pschmitt/home-assistant
|
refs/heads/dev
|
tests/components/openalpr_local/test_image_processing.py
|
13
|
"""The tests for the openalpr local platform."""
import homeassistant.components.image_processing as ip
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.core import callback
from homeassistant.setup import setup_component
from tests.async_mock import MagicMock, PropertyMock, patch
from tests.common import assert_setup_component, get_test_home_assistant, load_fixture
from tests.components.image_processing import common
def mock_async_subprocess():
"""Get a Popen mock back."""
async_popen = MagicMock()
async def communicate(input=None):
"""Communicate mock."""
fixture = bytes(load_fixture("alpr_stdout.txt"), "utf-8")
return (fixture, None)
async_popen.communicate = communicate
return async_popen
class TestOpenAlprLocalSetup:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_platform(self):
"""Set up platform with one entity."""
config = {
ip.DOMAIN: {
"platform": "openalpr_local",
"source": {"entity_id": "camera.demo_camera"},
"region": "eu",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
assert self.hass.states.get("image_processing.openalpr_demo_camera")
def test_setup_platform_name(self):
"""Set up platform with one entity and set name."""
config = {
ip.DOMAIN: {
"platform": "openalpr_local",
"source": {"entity_id": "camera.demo_camera", "name": "test local"},
"region": "eu",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
assert self.hass.states.get("image_processing.test_local")
def test_setup_platform_without_region(self):
"""Set up platform with one entity without region."""
config = {
ip.DOMAIN: {
"platform": "openalpr_local",
"source": {"entity_id": "camera.demo_camera"},
},
"camera": {"platform": "demo"},
}
with assert_setup_component(0, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
class TestOpenAlprLocal:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
config = {
ip.DOMAIN: {
"platform": "openalpr_local",
"source": {"entity_id": "camera.demo_camera", "name": "test local"},
"region": "eu",
},
"camera": {"platform": "demo"},
}
with patch(
"homeassistant.components.openalpr_local.image_processing."
"OpenAlprLocalEntity.should_poll",
new_callable=PropertyMock(return_value=False),
):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
state = self.hass.states.get("camera.demo_camera")
self.url = f"{self.hass.config.internal_url}{state.attributes.get(ATTR_ENTITY_PICTURE)}"
self.alpr_events = []
@callback
def mock_alpr_event(event):
"""Mock event."""
self.alpr_events.append(event)
self.hass.bus.listen("image_processing.found_plate", mock_alpr_event)
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
@patch("asyncio.create_subprocess_exec", return_value=mock_async_subprocess())
def test_openalpr_process_image(self, popen_mock, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
aioclient_mock.get(self.url, content=b"image")
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
state = self.hass.states.get("image_processing.test_local")
assert popen_mock.called
assert len(self.alpr_events) == 5
assert state.attributes.get("vehicles") == 1
assert state.state == "PE3R2X"
event_data = [
event.data
for event in self.alpr_events
if event.data.get("plate") == "PE3R2X"
]
assert len(event_data) == 1
assert event_data[0]["plate"] == "PE3R2X"
assert event_data[0]["confidence"] == float(98.9371)
assert event_data[0]["entity_id"] == "image_processing.test_local"
|
SGCreations/Flask
|
refs/heads/master
|
Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/setuptools/command/test.py
|
285
|
from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import *
from pkg_resources import _namespace_packages
from unittest import TestLoader, main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
if module.__name__!='setuptools.tests.doctest': # ugh
tests.append(TestLoader.loadTestsFromModule(self,module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file!='__init__.py':
submodule = module.__name__+'.'+file[:-3]
else:
if resource_exists(
module.__name__, file+'/__init__.py'
):
submodule = module.__name__+'.'+file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests)!=1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=','m', "Run 'test_suite' in specified module"),
('test-suite=','s',
"Test suite to run (e.g. 'some_module.test_suite')"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module+".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0,'--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution,'test_loader',None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
def with_project_on_sys_path(self, func):
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
import unittest
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
loader_ep = EntryPoint.parse("x="+self.test_loader)
loader_class = loader_ep.load(require=False)
cks = loader_class()
unittest.main(
None, None, [unittest.__file__]+self.test_args,
testLoader = cks
)
|
alanconway/dispatch
|
refs/heads/master
|
python/qpid_dispatch_internal/router/engine.py
|
3
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from data import MessageHELLO, MessageRA, MessageLSU, MessageMAU, MessageMAR, MessageLSR
from hello import HelloProtocol
from link import LinkStateEngine
from path import PathEngine
from mobile import MobileAddressEngine
from node import NodeTracker
from message import Message
from traceback import format_exc, extract_stack
import time
##
## Import the Dispatch adapters from the environment. If they are not found
## (i.e. we are in a test bench, etc.), load the stub versions.
##
from ..dispatch import IoAdapter, LogAdapter, LOG_TRACE, LOG_INFO, LOG_ERROR, LOG_STACK_LIMIT
from ..dispatch import TREATMENT_MULTICAST_FLOOD, TREATMENT_MULTICAST_ONCE
class RouterEngine:
"""
"""
def __init__(self, router_adapter, router_id, area, max_routers, config_override={}):
"""
Initialize an instance of a router for a domain.
"""
##
## Record important information about this router instance
##
self.domain = "domain"
self.router_adapter = router_adapter
self._config = None # Not yet loaded
self._log_hello = LogAdapter("ROUTER_HELLO")
self._log_ls = LogAdapter("ROUTER_LS")
self._log_ma = LogAdapter("ROUTER_MA")
self._log_general = LogAdapter("ROUTER")
self.io_adapter = [IoAdapter(self.receive, "qdrouter", 'L', '0', TREATMENT_MULTICAST_FLOOD),
IoAdapter(self.receive, "qdrouter.ma", 'L', '0', TREATMENT_MULTICAST_ONCE),
IoAdapter(self.receive, "qdrouter", 'T', '0', TREATMENT_MULTICAST_FLOOD),
IoAdapter(self.receive, "qdrouter.ma", 'T', '0', TREATMENT_MULTICAST_ONCE),
IoAdapter(self.receive, "qdhello", 'L', '0', TREATMENT_MULTICAST_FLOOD)]
self.max_routers = max_routers
self.id = router_id
self.instance = long(time.time())
self.area = area
self.log(LOG_INFO, "Router Engine Instantiated: id=%s instance=%d max_routers=%d" %
(self.id, self.instance, self.max_routers))
##
## Launch the sub-module engines
##
self.node_tracker = NodeTracker(self, self.max_routers)
self.hello_protocol = HelloProtocol(self, self.node_tracker)
self.link_state_engine = LinkStateEngine(self)
self.path_engine = PathEngine(self)
self.mobile_address_engine = MobileAddressEngine(self, self.node_tracker)
##========================================================================================
## Adapter Entry Points - invoked from the adapter
##========================================================================================
def getId(self):
"""
Return the router's ID
"""
return self.id
@property
def config(self):
if not self._config:
try: self._config = self.router_adapter.get_agent().find_entity_by_type('router')[0]
except IndexError: raise ValueError("No router configuration found")
return self._config
def addressAdded(self, addr):
"""
"""
try:
if addr[0] in 'MCD':
self.mobile_address_engine.add_local_address(addr)
except Exception:
self.log_ma(LOG_ERROR, "Exception in new-address processing\n%s" % format_exc(LOG_STACK_LIMIT))
def addressRemoved(self, addr):
"""
"""
try:
if addr[0] in 'MCD':
self.mobile_address_engine.del_local_address(addr)
except Exception:
self.log_ma(LOG_ERROR, "Exception in del-address processing\n%s" % format_exc(LOG_STACK_LIMIT))
def linkLost(self, link_id):
"""
"""
self.node_tracker.link_lost(link_id)
def handleTimerTick(self):
"""
"""
try:
now = time.time()
self.hello_protocol.tick(now)
self.link_state_engine.tick(now)
self.node_tracker.tick(now)
except Exception:
self.log(LOG_ERROR, "Exception in timer processing\n%s" % format_exc(LOG_STACK_LIMIT))
def handleControlMessage(self, opcode, body, link_id, cost):
"""
"""
try:
now = time.time()
if opcode == 'HELLO':
msg = MessageHELLO(body)
self.log_hello(LOG_TRACE, "RCVD: %r" % msg)
self.hello_protocol.handle_hello(msg, now, link_id, cost)
elif opcode == 'RA':
msg = MessageRA(body)
self.log_ls(LOG_TRACE, "RCVD: %r" % msg)
self.link_state_engine.handle_ra(msg, now)
elif opcode == 'LSU':
msg = MessageLSU(body)
self.log_ls(LOG_TRACE, "RCVD: %r" % msg)
self.link_state_engine.handle_lsu(msg, now)
elif opcode == 'LSR':
msg = MessageLSR(body)
self.log_ls(LOG_TRACE, "RCVD: %r" % msg)
self.link_state_engine.handle_lsr(msg, now)
elif opcode == 'MAU':
msg = MessageMAU(body)
self.log_ma(LOG_TRACE, "RCVD: %r" % msg)
self.mobile_address_engine.handle_mau(msg, now)
elif opcode == 'MAR':
msg = MessageMAR(body)
self.log_ma(LOG_TRACE, "RCVD: %r" % msg)
self.mobile_address_engine.handle_mar(msg, now)
except Exception:
self.log(LOG_ERROR, "Control message error: opcode=%s body=%r\n%s" % (opcode, body, format_exc(LOG_STACK_LIMIT)))
def receive(self, message, link_id, cost):
"""
This is the IoAdapter message-receive handler
"""
try:
self.handleControlMessage(message.properties['opcode'], message.body, link_id, cost)
except Exception:
self.log(LOG_ERROR, "Exception in raw message processing: properties=%r body=%r\n%s" %
(message.properties, message.body, format_exc(LOG_STACK_LIMIT)))
def getRouterData(self, kind):
"""
"""
if kind == 'help':
return { 'help' : "Get list of supported values for kind",
'link-state' : "This router's link state",
'link-state-set' : "The set of link states from known routers",
'next-hops' : "Next hops to each known router"
}
if kind == 'link-state' : return self.neighbor_engine.link_state.to_dict()
if kind == 'link-state-set' :
copy = {}
for _id,_ls in self.link_state_engine.collection.items():
copy[_id] = _ls.to_dict()
return copy
return {'notice':'Use kind="help" to get a list of possibilities'}
##========================================================================================
## Adapter Calls - outbound calls to Dispatch
##========================================================================================
def log(self, level, text):
"""
Emit a log message to the host's event log
"""
info = extract_stack(limit=2)[0] # Caller frame info
self._log_general.log(level, text, info[0], info[1])
def log_hello(self, level, text):
"""
Emit a log message to the host's event log
"""
info = extract_stack(limit=2)[0] # Caller frame info
self._log_hello.log(level, text, info[0], info[1])
def log_ls(self, level, text):
"""
Emit a log message to the host's event log
"""
info = extract_stack(limit=2)[0] # Caller frame info
self._log_ls.log(level, text, info[0], info[1])
def log_ma(self, level, text):
"""
Emit a log message to the host's event log
"""
info = extract_stack(limit=2)[0] # Caller frame info
self._log_ma.log(level, text, info[0], info[1])
def send(self, dest, msg):
"""
Send a control message to another router.
"""
app_props = {'opcode' : msg.get_opcode() }
self.io_adapter[0].send(Message(address=dest, properties=app_props, body=msg.to_dict()), True, True)
def node_updated(self, addr, reachable, neighbor):
"""
"""
self.router_adapter(addr, reachable, neighbor)
|
stevehof/CouchPotatoServer
|
refs/heads/master
|
libs/guessit/transfo/guess_bonus_features.py
|
150
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import found_property
import logging
log = logging.getLogger(__name__)
def process(mtree):
def previous_group(g):
for leaf in mtree.unidentified_leaves()[::-1]:
if leaf.node_idx < g.node_idx:
return leaf
def next_group(g):
for leaf in mtree.unidentified_leaves():
if leaf.node_idx > g.node_idx:
return leaf
def same_group(g1, g2):
return g1.node_idx[:2] == g2.node_idx[:2]
bonus = [ node for node in mtree.leaves() if 'bonusNumber' in node.guess ]
if bonus:
bonusTitle = next_group(bonus[0])
if same_group(bonusTitle, bonus[0]):
found_property(bonusTitle, 'bonusTitle', 0.8)
filmNumber = [ node for node in mtree.leaves()
if 'filmNumber' in node.guess ]
if filmNumber:
filmSeries = previous_group(filmNumber[0])
found_property(filmSeries, 'filmSeries', 0.9)
title = next_group(filmNumber[0])
found_property(title, 'title', 0.9)
season = [ node for node in mtree.leaves() if 'season' in node.guess ]
if season and 'bonusNumber' in mtree.info:
series = previous_group(season[0])
if same_group(series, season[0]):
found_property(series, 'series', 0.9)
|
kaleidos/django-kaggregate
|
refs/heads/master
|
kaggregate/__init__.py
|
1
|
# -*- coding: utf-8 -*-
from .base import BaseAggregator, BaseModelAggregator
from .decorators import as_map_reduce, as_django_aggregator
from .backends import get_aggregate_value, flush_current_storage
from . import register
__all__ = ['BaseAggregator', 'BaseModelAggregator', 'as_map_reduce', 'register',
'as_django_aggregator', 'get_aggregate_value', 'flush_current_storage',]
|
FWennerdahl/GoSublime
|
refs/heads/master
|
gstest.py
|
12
|
from gosubl import gs
from gosubl import mg9
import os
import re
import sublime
import sublime_plugin
DOMAIN = 'GsTest'
TEST_PAT = re.compile(r'^((Test|Example|Benchmark)\w*)')
class GsTestCommand(sublime_plugin.WindowCommand):
def is_enabled(self):
return gs.is_go_source_view(self.window.active_view())
def run(self):
def f(res, err):
if err:
gs.notify(DOMAIN, err)
return
mats = {}
args = {}
decls = res.get('file_decls', [])
decls.extend(res.get('pkg_decls', []))
for d in decls:
name = d['name']
prefix, _ = match_prefix_name(name)
kind = d['kind'].lstrip('+- ')
if prefix and kind == 'func' and d['repr'] == '':
mats[prefix] = True
args[name] = name
names = sorted(args.keys())
ents = ['Run all tests and examples']
for k in ['Test', 'Benchmark', 'Example']:
if mats.get(k):
s = 'Run %ss Only' % k
ents.append(s)
if k == 'Benchmark':
args[s] = ['-test.run=none', '-test.bench="%s.*"' % k]
else:
args[s] = ['-test.run="%s.*"' % k]
for k in names:
ents.append(k)
if k.startswith('Benchmark'):
args[k] = ['-test.run=none', '-test.bench="^%s$"' % k]
else:
args[k] = ['-test.run="^%s$"' % k]
def cb(i, win):
if i >= 0:
a = args.get(ents[i], [])
win.active_view().run_command('gs9o_open', {'run': gs.lst('go', 'test', a)})
gs.show_quick_panel(ents, cb)
win, view = gs.win_view(None, self.window)
if view is None:
return
vfn = gs.view_fn(view)
src = gs.view_src(view)
pkg_dir = ''
if view.file_name():
pkg_dir = os.path.dirname(view.file_name())
mg9.declarations(vfn, src, pkg_dir, f)
def match_prefix_name(s):
m = TEST_PAT.match(s)
return (m.group(2), m.group(1)) if m else ('', '')
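# Illustrative behaviour (hypothetical names):
#   match_prefix_name('TestFoo')       # -> ('Test', 'TestFoo')
#   match_prefix_name('BenchmarkBar')  # -> ('Benchmark', 'BenchmarkBar')
#   match_prefix_name('helper')        # -> ('', '')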
def handle_action(view, action):
fn = view.file_name()
prefix, name = match_prefix_name(view.substr(view.word(gs.sel(view))))
ok = prefix and fn and fn.endswith('_test.go')
if ok:
if action == 'right-click':
pat = '^%s.*' % prefix
else:
pat = '^%s$' % name
if prefix == 'Benchmark':
cmd = ['go', 'test', '-test.run=none', '-test.bench="%s"' % pat]
else:
cmd = ['go', 'test', '-test.run="%s"' % pat]
view.run_command('gs9o_open', {'run': cmd})
return ok
|
blazewicz/micropython
|
refs/heads/master
|
tests/inlineasm/asmbitops.py
|
99
|
@micropython.asm_thumb
def clz(r0):
clz(r0, r0)
print(clz(0xf0))
print(clz(0x8000))
@micropython.asm_thumb
def rbit(r0):
rbit(r0, r0)
print(hex(rbit(0xf0)))
print(hex(rbit(0x8000)))
|
nathanaevitas/odoo
|
refs/heads/master
|
openerp/addons/hr_contract/__init__.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_contract
import base_action_rule
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kalaidin/luigi
|
refs/heads/master
|
test/contrib/_gcs_test.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This is an integration test for the GCS-luigi binding.
This test requires credentials that can access GCS & access to a bucket below.
Follow the directions in the gcloud tools to set up local credentials.
"""
import googleapiclient.errors
import oauth2client
import os
import tempfile
import unittest
from luigi.contrib import gcs
from target_test import FileSystemTargetTestMixin
# In order to run this test, you should set these to your GCS project/bucket.
# Unfortunately there's no mock, so these tests run against the real service.
PROJECT_ID = os.environ.get('GCS_TEST_PROJECT_ID', 'your_project_id_here')
BUCKET_NAME = os.environ.get('GCS_TEST_BUCKET', 'your_test_bucket_here')
CREDENTIALS = oauth2client.client.GoogleCredentials.get_application_default()
ATTEMPTED_BUCKET_CREATE = False
def bucket_url(suffix):
return 'gs://{}/{}'.format(BUCKET_NAME, suffix)
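# Illustrative: with BUCKET_NAME 'my-bucket' (hypothetical),
#   bucket_url('exists_test')  # -> 'gs://my-bucket/exists_test'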
class _GCSBaseTestCase(unittest.TestCase):
def setUp(self):
self.client = gcs.GCSClient(CREDENTIALS)
global ATTEMPTED_BUCKET_CREATE
if not ATTEMPTED_BUCKET_CREATE:
try:
self.client.client.buckets().insert(
project=PROJECT_ID, body={'name': BUCKET_NAME}).execute()
except googleapiclient.errors.HttpError as ex:
if ex.resp.status != 409: # bucket already exists
raise
ATTEMPTED_BUCKET_CREATE = True
for item in self.client.listdir(bucket_url('')):
self.client.remove(item)
class GCSClientTest(_GCSBaseTestCase):
def test_not_exists(self):
self.assertFalse(self.client.exists(bucket_url('does_not_exist')))
self.assertFalse(self.client.isdir(bucket_url('does_not_exist')))
def test_exists(self):
self.client.put_string('hello', bucket_url('exists_test'))
self.assertTrue(self.client.exists(bucket_url('exists_test')))
self.assertFalse(self.client.isdir(bucket_url('exists_test')))
def test_mkdir(self):
self.client.mkdir(bucket_url('exists_dir_test'))
self.assertTrue(self.client.exists(bucket_url('exists_dir_test')))
self.assertTrue(self.client.isdir(bucket_url('exists_dir_test')))
def test_mkdir_by_upload(self):
self.client.put_string('hello', bucket_url('test_dir_recursive/yep/file'))
self.assertTrue(self.client.exists(bucket_url('test_dir_recursive')))
self.assertTrue(self.client.isdir(bucket_url('test_dir_recursive')))
def test_download(self):
self.client.put_string('hello', bucket_url('test_download'))
fp = self.client.download(bucket_url('test_download'))
self.assertEquals(b'hello', fp.read())
def test_rename(self):
self.client.put_string('hello', bucket_url('test_rename_1'))
self.client.rename(bucket_url('test_rename_1'), bucket_url('test_rename_2'))
self.assertFalse(self.client.exists(bucket_url('test_rename_1')))
self.assertTrue(self.client.exists(bucket_url('test_rename_2')))
def test_rename_recursive(self):
self.client.mkdir(bucket_url('test_rename_recursive'))
self.client.put_string('hello', bucket_url('test_rename_recursive/1'))
self.client.put_string('hello', bucket_url('test_rename_recursive/2'))
self.client.rename(bucket_url('test_rename_recursive'), bucket_url('test_rename_recursive_dest'))
self.assertFalse(self.client.exists(bucket_url('test_rename_recursive')))
self.assertFalse(self.client.exists(bucket_url('test_rename_recursive/1')))
self.assertTrue(self.client.exists(bucket_url('test_rename_recursive_dest')))
self.assertTrue(self.client.exists(bucket_url('test_rename_recursive_dest/1')))
def test_remove(self):
self.client.put_string('hello', bucket_url('test_remove'))
self.client.remove(bucket_url('test_remove'))
self.assertFalse(self.client.exists(bucket_url('test_remove')))
def test_remove_recursive(self):
self.client.mkdir(bucket_url('test_remove_recursive'))
self.client.put_string('hello', bucket_url('test_remove_recursive/1'))
self.client.put_string('hello', bucket_url('test_remove_recursive/2'))
self.client.remove(bucket_url('test_remove_recursive'))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive')))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive/1')))
self.assertFalse(self.client.exists(bucket_url('test_remove_recursive/2')))
def test_listdir(self):
self.client.put_string('hello', bucket_url('test_listdir/1'))
self.client.put_string('hello', bucket_url('test_listdir/2'))
self.assertEqual([bucket_url('test_listdir/1'), bucket_url('test_listdir/2')],
list(self.client.listdir(bucket_url('test_listdir/'))))
self.assertEqual([bucket_url('test_listdir/1'), bucket_url('test_listdir/2')],
list(self.client.listdir(bucket_url('test_listdir'))))
def test_put_file(self):
with tempfile.NamedTemporaryFile() as fp:
fp.write(b'hi')
fp.flush()
self.client.put(fp.name, bucket_url('test_put_file'))
self.assertTrue(self.client.exists(bucket_url('test_put_file')))
self.assertEqual(b'hi', self.client.download(bucket_url('test_put_file')).read())
class GCSTargetTest(_GCSBaseTestCase, FileSystemTargetTestMixin):
def create_target(self, format=None):
return gcs.GCSTarget(bucket_url(self.id()), format=format, client=self.client)
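# Assumed entry point (a sketch, not in the original file): with
# GCS_TEST_PROJECT_ID and GCS_TEST_BUCKET exported, the suite can be run
# directly rather than through a test runner.
if __name__ == '__main__':
    unittest.main()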
|
TDAbboud/micropython
|
refs/heads/master
|
tests/basics/string_join.py
|
45
|
print(','.join(()))
print(','.join(('a',)))
print(','.join(('a', 'b')))
print(','.join([]))
print(','.join(['a']))
print(','.join(['a', 'b']))
print(''.join(''))
print(''.join('abc'))
print(','.join('abc'))
print(','.join('abc' for i in range(5)))
print(b','.join([b'abc', b'123']))
try:
''.join(None)
except TypeError:
print("TypeError")
try:
print(b','.join(['abc', b'123']))
except TypeError:
print("TypeError")
try:
print(','.join([b'abc', b'123']))
except TypeError:
print("TypeError")
# joined by the compiler
print("a" "b")
print("a" '''b''')
print("a" # inline comment
"b")
print("a" \
"b")
# the following should not be joined by the compiler
x = 'a'
'b'
print(x)
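# Note (illustrative, not in the original test): adjacent string literals are
# concatenated at compile time, so every print() in the "joined by the
# compiler" section outputs "ab"; the last two statements are separate
# expressions, which is why x is still 'a'.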
|
nlandais/ansible-modules-core
|
refs/heads/devel
|
inventory/group_by.py
|
161
|
# -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: group_by
short_description: Create Ansible groups based on facts
description:
- Use facts to create ad-hoc groups that can be used later in a playbook.
version_added: "0.9"
options:
key:
description:
- The variables whose values will be used as group names.
required: true
author: "Jeroen Hoekx (@jhoekx)"
notes:
- Spaces in group names are converted to dashes '-'.
'''
EXAMPLES = '''
# Create groups based on the machine architecture
- group_by: key=machine_{{ ansible_machine }}
# Create groups like 'kvm-host'
- group_by: key=virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
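# (Illustrative addition) Create groups keyed on the distribution fact, e.g. 'os_Ubuntu'
- group_by: key=os_{{ ansible_distribution }}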
'''
|
sklam/numba
|
refs/heads/master
|
numba/tests/test_api.py
|
7
|
import warnings
import numba
from numba import jit, njit
from numba.tests.support import TestCase
import unittest
class TestNumbaModule(TestCase):
"""
Test the APIs exposed by the top-level `numba` module.
"""
def check_member(self, name):
self.assertTrue(hasattr(numba, name), name)
self.assertIn(name, numba.__all__)
def test_numba_module(self):
# jit
self.check_member("jit")
self.check_member("vectorize")
self.check_member("guvectorize")
self.check_member("njit")
# errors
self.check_member("NumbaError")
self.check_member("TypingError")
# types
self.check_member("int32")
# misc
numba.__version__ # not in __all__
class TestJitDecorator(TestCase):
"""
Test the jit and njit decorators
"""
def test_jit_nopython_forceobj(self):
with self.assertRaises(ValueError) as cm:
jit(nopython=True, forceobj=True)
self.assertIn(
"Only one of 'nopython' or 'forceobj' can be True.",
str(cm.exception)
)
def py_func(x):
return x
jit_func = jit(nopython=True)(py_func)
jit_func(1)
# Use the length of nopython_signatures to determine
# which mode the function was compiled in
self.assertEqual(len(jit_func.nopython_signatures), 1)
jit_func = jit(forceobj=True)(py_func)
jit_func(1)
self.assertEqual(len(jit_func.nopython_signatures), 0)
def test_njit_nopython_forceobj(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', RuntimeWarning)
njit(forceobj=True)
self.assertEqual(len(w), 1)
self.assertIn(
'forceobj is set for njit and is ignored', str(w[0].message)
)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', RuntimeWarning)
njit(nopython=True)
self.assertEqual(len(w), 1)
self.assertIn(
'nopython is set for njit and is ignored', str(w[0].message)
)
def py_func(x):
return x
jit_func = njit(nopython=True)(py_func)
jit_func(1)
self.assertEqual(len(jit_func.nopython_signatures), 1)
jit_func = njit(forceobj=True)(py_func)
jit_func(1)
# Since forceobj is ignored this has to compile in nopython mode
self.assertEqual(len(jit_func.nopython_signatures), 1)
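# A minimal usage sketch (illustrative, mirroring what these tests exercise):
#
#     @njit
#     def add(x, y):
#         return x + y
#
#     add(1, 2)                      # first call compiles in nopython mode
#     len(add.nopython_signatures)   # -> 1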
if __name__ == '__main__':
unittest.main()
|
Awesomeomics/webserver
|
refs/heads/master
|
env/lib/python2.7/site-packages/jinja2/sandbox.py
|
637
|
# -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, function_type, method_type, \
traceback_type, code_type, frame_type, generator_type, PY2
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
# On Python 3 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
if not PY2:
UNSAFE_FUNCTION_ATTRIBUTES = set()
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_sequence_types += (UserList,)  # UserList is a sequence, not a set
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
rng = range(*args)
if len(rng) > MAX_RANGE:
raise OverflowError('range too big, maximum size for range is %d' %
MAX_RANGE)
return rng
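# Illustrative behavior: safe_range(5) acts like range(5), while
# safe_range(MAX_RANGE + 1) raises OverflowError before any iteration.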
def unsafe(f):
"""Marks a function or method as unsafe.
::
@unsafe
def delete(self):
pass
"""
f.unsafe_callable = True
return f
def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(lambda: None, "func_code")
True
>>> is_internal_attribute((lambda x:x).func_code, 'co_code')
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, function_type):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, method_type):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == 'mro':
return True
elif isinstance(obj, (code_type, traceback_type, frame_type)):
return True
elif isinstance(obj, generator_type):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
return attr.startswith('__')
def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code a :exc:`SecurityError` is
raised. However, other exceptions may also occur during rendering, so
the caller has to ensure that all exceptions are caught.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table = {
'+': operator.pos,
'-': operator.neg
}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops = frozenset()
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
The following unary operators are interceptable: ``+`` and ``-``
Intercepted calls are always slower than the native operator call,
so make sure only to intercept the ones you are interested in.
.. versionadded:: 2.6
"""
return False
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self.globals['range'] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. By default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith('_') or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
considered safe unless the `unsafe_callable` attribute exists and is
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
return not (getattr(obj, 'unsafe_callable', False) or
getattr(obj, 'alters_data', False))
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context, operator, arg):
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
def getitem(self, obj, argument):
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
return self.undefined('access to attribute %r of %r '
'object is unsafe.' % (
attribute,
obj.__class__.__name__
), name=attribute, obj=obj, exc=SecurityError)
def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj, attr, value):
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
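# Illustrative demo (a sketch, not part of the original module): a mutating
# call on a builtin list inside a template surfaces as a SecurityError.
if __name__ == '__main__':
    env = ImmutableSandboxedEnvironment()
    try:
        env.from_string('{{ [1, 2].append(3) }}').render()
    except SecurityError as exc:
        print('blocked: %s' % exc)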
|
p0psicles/SickRage
|
refs/heads/master
|
lib/feedparser/namespaces/_base.py
|
43
|
# Support for the Atom, RSS, RDF, and CDF feed formats
# Copyright 2010-2015 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
import copy
from ..datetimes import registerDateHandler, _parse_date
from ..urls import _makeSafeAbsoluteURI
from ..util import FeedParserDict
class Namespace(object):
"""Support for the Atom, RSS, RDF, and CDF feed formats.
The feed formats all share common elements, some of which have conflicting
interpretations. For simplicity, all of the base feed format support is
collected here.
"""
supported_namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
}
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
# If we're here then this is an RSS feed.
# If we don't have a version or have a version that starts with something
# other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith('rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_language(self, attrsD):
self.push('language', 1)
def _end_language(self):
self.lang = self.pop('language')
def _start_webmaster(self, attrsD):
self.push('publisher', 1)
def _end_webmaster(self):
self.pop('publisher')
self._sync_author_detail('publisher')
def _start_published(self, attrsD):
self.push('published', 1)
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_keywords = _start_category
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_keywords = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
if attrsD['rel'] == 'self':
attrsD.setdefault('type', 'application/atom+xml')
else:
attrsD.setdefault('type', 'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', list(attrsD.items()))
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = 'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD['url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
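# Illustrative note (not in the original file): Namespace is mixed into the
# main feedparser handler class, whose SAX machinery dispatches events by
# element name, e.g. a <title> element invokes _start_title(attrs) and,
# once the element closes, _end_title().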
|
moai/mongrel2
|
refs/heads/master
|
examples/zcov/zcov/GCovGroup.py
|
96
|
#!/usr/bin/python
from __future__ import division
from pprint import pformat
import cPickle
import os
import warnings
from zcov import GCovParser
class GCovGroup:
@staticmethod
def fromfile(path):
f = open(path, 'rb')
try:
res = cPickle.load(f)
header,version = res[0],res[1]
except:
raise ValueError,'invalid zcov input'
if header != 'zcov-data':
raise ValueError,'input is not in zcov format'
elif version != 1:
raise ValueError,'unrecognized zcov version'
return res[2]
def tofile(self, path):
f = open(path,'wb')
cPickle.dump(('zcov-data',1,self),f,-1)
f.close()
def __init__(self):
self.entryMap = {}
def addEntry(self, path, entry):
record = self.entryMap.get(path)
if record is None:
self.entryMap[path] = entry
else:
self.entryMap[path] = self.mergeData(record,entry)
def addGCDA(self, data):
for path,entry in data.entries:
self.addEntry(path, entry)
def merge(self, b):
for path,entry in b.entryMap.items():
self.addEntry(path, entry)
def mergeData(self, a, b):
keys = self.mergeKeys(a.keys, b.keys)
lines = self.mergeLines(a.lines, b.lines)
calls = self.mergeCalls(a.calls, b.calls)
branches = self.mergeBranches(a.branches, b.branches)
functions = self.mergeFunctions(a.functions, b.functions)
return GCovParser.GCovFileData(keys, lines, calls, branches, functions)
def mergeKeys(self, aKeys, bKeys):
if set(aKeys) != set(bKeys):
raise ValueError, 'Keys differ: %s, %s' % (pformat(aKeys),
                                           pformat(bKeys))
keys = {}
for key,aValue in aKeys.items():
bValue = bKeys[key]
if key=='Source':
if aValue != bValue:
raise ValueError,'Key ("%s") differs: %s %s'%(key,
aValue,
bValue)
value = aValue
elif key in ('Runs','Programs'):
value = str(int(aValue) + int(bValue))
elif key in ('Data','Graph'):
value = aValue+','+bValue
else:
raise ValueError,'Unrecognized key: "%s"'%(key,)
keys[key] = value
return keys
def mergeLines(self, aLines, bLines):
if len(aLines) != len(bLines):
raise ValueError,'Entry mismatch (number of lines)'
lines = [None]*len(aLines)
for i,(a,b) in enumerate(zip(aLines,bLines)):
if a is None or b is None:
# Executability can change across tests (conditional
# code), take the non-None one if it exists.
lines[i] = (a,b)[a is None]
else:
lines[i] = a + b
return lines
def mergeLineList(self, aList, bList, merge):
if not aList:
    for bItem in bList:
        yield bItem
    return
elif not bList:
    for aItem in aList:
        yield aItem
    return
aIter,bIter = iter(aList),iter(bList)
aItem,bItem = aIter.next(),bIter.next()
while 1:
if aItem[0]==bItem[0]:
yield merge(aItem,bItem)
try:
aItem = aIter.next()
except StopIteration:
for bItem in bIter:
yield bItem
break
try:
bItem = bIter.next()
except StopIteration:
for aItem in aIter:
yield aItem
break
elif aItem[0]<bItem[0]:
yield aItem
try:
aItem = aIter.next()
except StopIteration:
yield bItem
for bItem in bIter:
yield bItem
break
else:
yield bItem
try:
bItem = bIter.next()
except StopIteration:
yield aItem
for aItem in aIter:
yield aItem
break
def mergeCalls(self, aCalls, bCalls):
def merge(a,b):
if a[1] != b[1]:
warnings.warn('Call mismatch (numbers differ)')
# raise ValueError,'Call mismatch (numbers differ)'
count = a[3]+b[3]
code = GCovParser.GCovFileData.CallNotExecuted
if GCovParser.GCovFileData.CallReturned in (a[2],b[2]):
code = GCovParser.GCovFileData.CallReturned
return (a[0],a[1],code,count)
return list(self.mergeLineList(aCalls,bCalls,merge))
def mergeBranches(self, aBranches, bBranches):
def merge(a,b):
# XXX This is really wrong
if a[1] != b[1]:
warnings.warn('Branch mismatch (numbers differ)')
# raise ValueError,'Branch mismatch (numbers differ)'
count = a[3]+b[3]
code = GCovParser.GCovFileData.BranchNotTaken
if GCovParser.GCovFileData.BranchTaken in (a[2],b[2]):
code = GCovParser.GCovFileData.BranchTaken
return (a[0],a[1],code,count)
return list(self.mergeLineList(aBranches,bBranches,merge))
def mergeFunctions(self, aFunctions, bFunctions):
def merge(a,b):
if a[0] != b[0]:
warnings.warn('Function mismatch (names differ)')
# raise ValueError,'Function mismatch (names differ)'
return (a[0],a[1]+b[1])
return list(self.mergeLineList(aFunctions,bFunctions,merge))
###
def main():
from optparse import OptionParser
op = OptionParser("usage: %prog [options] files")
opts,args = op.parse_args()
group = GCovGroup()
for f in args:
res = GCovParser.parseGCDA(f)
group.addGCDA(res)
print '%d total files'%(len(group.entryMap),)
if __name__=='__main__':
main()
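# Usage sketch (illustrative; file names are placeholders):
#   python GCovGroup.py build/foo.gcda build/bar.gcda
# merges the parsed coverage data and prints the total file count.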
|
super1337/Super1337-CTF
|
refs/heads/master
|
superleetctf/wsgi.py
|
1
|
"""
WSGI config for superleetctf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superleetctf.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
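# Note (illustrative): DjangoWhiteNoise wraps the WSGI callable so the app
# process serves its own collected static files, avoiding the need for a
# separate static-file server in simple deployments.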
|
schaffino/impyla
|
refs/heads/master
|
bin/register-impala-udfs.py
|
6
|
#! /usr/bin/env python
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import llvm.core as lc
from hdfs.client import InsecureClient
import impala.dbapi
def log(msg):
sys.stderr.write("%s\n" % msg)
sys.stderr.flush()
llvm2impala = {
'struct.impala_udf::BooleanVal': 'BOOLEAN',
'struct.impala_udf::TinyIntVal': 'TINYINT',
'struct.impala_udf::SmallIntVal': 'SMALLINT',
'struct.impala_udf::IntVal': 'INT',
'struct.impala_udf::BigIntVal': 'BIGINT',
'struct.impala_udf::FloatVal': 'FLOAT',
'struct.impala_udf::DoubleVal': 'DOUBLE',
'struct.impala_udf::StringVal': 'STRING',
'struct.impala_udf::TimestampVal': 'TIMESTAMP'
}
parser = argparse.ArgumentParser(description="Register clang-compiled UDFs "
"with Impala")
parser.add_argument('-i', '--llvm-path', required=True,
help="Local path to LLVM module")
parser.add_argument('-o', '--hdfs-path', required=True,
help="Path in HDFS to store LLVM module, including the "
"final file name")
parser.add_argument('-n', '--name', required=True, action='append',
help="Specify the name of the C++ UDF; must be matched "
"with a --return-type")
parser.add_argument('-t', '--return-type', required=True, action='append',
help="Specify a return type for the corresponding "
"function; use Impala types, e.g., STRING or INT")
parser.add_argument('-j', '--impala-host', required=False, default='localhost',
help="Impala daemon hostname")
parser.add_argument('-q', '--impala-port', required=False, default=21050,
help="Port for Impala daemon")
parser.add_argument('-k', '--nn-host', required=False, default='localhost',
help="Namenode hostname")
parser.add_argument('-p', '--webhdfs-port', required=False, default=50070,
type=int, help="Port for WebHDFS interface")
parser.add_argument('-u', '--user', required=False,
help="User name to connect to HDFS with")
parser.add_argument('-f', '--force', action='store_true',
help="Overwrite LLVM on HDFS if it already exists")
parser.add_argument('-d', '--db', required=False,
help="Specify which database to add the functions to")
args = parser.parse_args()
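# Example invocation (illustrative; every path, host, and name below is a
# placeholder):
#   ./register-impala-udfs.py -i udfs.ll -o /user/hive/udfs/udfs.ll \
#       -n MyUdf -t INT -j impala-host -k namenode-host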
# do some input validation
if len(args.name) != len(args.return_type):
raise ValueError("Must supply a return type or each specified "
"function name.")
if not args.hdfs_path.endswith('.ll'):
raise ValueError("The HDFS file name must end with .ll")
# load the LLVM IR
with open(args.llvm_path, 'rb') as ip:
bc = ip.read()
ll = lc.Module.from_bitcode(bc)
log("Loaded the LLVM IR file %s" % args.llvm_path)
# load symbols and types for each function in the LLVM module
functions = []
for function in ll.functions:
try:
symbol = function.name
log("Loading types for function %s" % symbol)
# skip the first argument, which is FunctionContext*
arg_types = tuple([llvm2impala[arg.pointee.name]
for arg in function.type.pointee.args[1:]])
functions.append((symbol, arg_types))
except (AttributeError, KeyError):
# this process could fail for non-UDF helper functions...just ignore
# them, because we're not going to be registering them anyway
log("Had trouble with function %s; moving on..." % symbol)
pass
# transfer the LLVM module to HDFS
url = 'http://{nn_host}:{webhdfs_port}'.format(
nn_host=args.nn_host, webhdfs_port=args.webhdfs_port)
hdfs_client = InsecureClient(url, user=args.user)
hdfs_client.write(args.hdfs_path, bc, overwrite=args.force)
log("Transferred LLVM IR to HDFS at %s" % args.hdfs_path)
# register the functions with impala
conn = impala.dbapi.connect(host=args.impala_host, port=args.impala_port)
cursor = conn.cursor(user=args.user)
log("Connected to impalad: %s" % args.impala_host)
if args.db:
cursor.execute('USE %s' % args.db)
cursor.execute("SHOW FUNCTIONS")
registered_functions = cursor.fetchall()
for (udf_name, return_type) in zip(args.name, args.return_type):
log("Registering function %s" % udf_name)
# find matching LLVM symbols to the current UDF name
matches = [pair for pair in functions if udf_name in pair[0]]
if len(matches) == 0:
log("Couldn't find a symbol matching %s; skipping..." % udf_name)
continue
if len(matches) > 1:
log("Found multiple symbols matching %s; skipping..." % udf_name)
continue
(symbol, arg_types) = matches[0]
impala_name = '%s(%s)' % (udf_name, ','.join(arg_types))
if args.force and impala_name in registered_functions:
log("Overwriting function %s" % impala_name)
cursor.execute("DROP FUNCTION %s" % impala_name)
register_query = (
"CREATE FUNCTION %s RETURNS %s LOCATION '%s' SYMBOL='%s'" % (
impala_name, return_type, args.hdfs_path, symbol))
log(register_query)
cursor.execute(register_query)
log("Successfully registered %s" % impala_name)
|