repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
vishdha/erpnext | refs/heads/develop | erpnext/controllers/status_updater.py | 22 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, comma_or, nowdate, getdate
from frappe import _
from frappe.model.document import Document
def validate_status(status, options):
	"""Raise a frappe validation error unless *status* is one of *options*."""
	if status in options:
		return
	frappe.throw(_("Status must be one of {0}").format(comma_or(options)))
# Mapping of DocType name -> ordered list of [status, condition] rules,
# consumed by StatusUpdater.set_status(). The rules are evaluated in
# REVERSE order (later entries win). A condition may be:
#   * None               -> unconditional fallback status
#   * "eval:<expr>"      -> expression run through frappe.safe_eval with
#                           a dict snapshot of the document as `self`
#   * "<method_name>"    -> name of a boolean method on the document
status_map = {
	"Lead": [
		["Lost Quotation", "has_lost_quotation"],
		["Opportunity", "has_opportunity"],
		["Quotation", "has_quotation"],
		["Converted", "has_customer"],
	],
	"Opportunity": [
		["Lost", "eval:self.status=='Lost'"],
		["Lost", "has_lost_quotation"],
		["Quotation", "has_active_quotation"],
		["Converted", "has_ordered_quotation"],
		["Closed", "eval:self.status=='Closed'"]
	],
	"Quotation": [
		["Draft", None],
		["Submitted", "eval:self.docstatus==1"],
		["Lost", "eval:self.status=='Lost'"],
		["Ordered", "has_sales_order"],
		["Cancelled", "eval:self.docstatus==2"],
	],
	"Sales Order": [
		["Draft", None],
		# per_delivered / per_billed are maintained by update_qty()
		["To Deliver and Bill", "eval:self.per_delivered < 100 and self.per_billed < 100 and self.docstatus == 1"],
		["To Bill", "eval:self.per_delivered == 100 and self.per_billed < 100 and self.docstatus == 1"],
		["To Deliver", "eval:self.per_delivered < 100 and self.per_billed == 100 and self.docstatus == 1"],
		["Completed", "eval:self.per_delivered == 100 and self.per_billed == 100 and self.docstatus == 1"],
		# Maintenance orders are complete once fully billed (nothing to deliver)
		["Completed", "eval:self.order_type == 'Maintenance' and self.per_billed == 100 and self.docstatus == 1"],
		["Cancelled", "eval:self.docstatus==2"],
		["Closed", "eval:self.status=='Closed'"],
	],
	"Sales Invoice": [
		["Draft", None],
		["Submitted", "eval:self.docstatus==1"],
		["Return", "eval:self.is_return==1 and self.docstatus==1"],
		["Paid", "eval:self.outstanding_amount<=0 and self.docstatus==1 and self.is_return==0"],
		["Credit Note Issued", "eval:self.outstanding_amount < 0 and self.docstatus==1 and self.is_return==0 and get_value('Sales Invoice', {'is_return': 1, 'return_against': self.name, 'docstatus': 1})"],
		["Unpaid", "eval:self.outstanding_amount > 0 and getdate(self.due_date) >= getdate(nowdate()) and self.docstatus==1"],
		["Overdue", "eval:self.outstanding_amount > 0 and getdate(self.due_date) < getdate(nowdate()) and self.docstatus==1"],
		["Cancelled", "eval:self.docstatus==2"],
	],
	"Purchase Invoice": [
		["Draft", None],
		["Submitted", "eval:self.docstatus==1"],
		["Return", "eval:self.is_return==1 and self.docstatus==1"],
		["Paid", "eval:self.outstanding_amount<=0 and self.docstatus==1 and self.is_return==0"],
		["Debit Note Issued", "eval:self.outstanding_amount < 0 and self.docstatus==1 and self.is_return==0 and get_value('Purchase Invoice', {'is_return': 1, 'return_against': self.name, 'docstatus': 1})"],
		["Unpaid", "eval:self.outstanding_amount > 0 and getdate(self.due_date) >= getdate(nowdate()) and self.docstatus==1"],
		["Overdue", "eval:self.outstanding_amount > 0 and getdate(self.due_date) < getdate(nowdate()) and self.docstatus==1"],
		["Cancelled", "eval:self.docstatus==2"],
	],
	"Purchase Order": [
		["Draft", None],
		["To Receive and Bill", "eval:self.per_received < 100 and self.per_billed < 100 and self.docstatus == 1"],
		["To Bill", "eval:self.per_received == 100 and self.per_billed < 100 and self.docstatus == 1"],
		["To Receive", "eval:self.per_received < 100 and self.per_billed == 100 and self.docstatus == 1"],
		["Completed", "eval:self.per_received == 100 and self.per_billed == 100 and self.docstatus == 1"],
		["Delivered", "eval:self.status=='Delivered'"],
		["Cancelled", "eval:self.docstatus==2"],
		["Closed", "eval:self.status=='Closed'"],
	],
	"Delivery Note": [
		["Draft", None],
		["To Bill", "eval:self.per_billed < 100 and self.docstatus == 1"],
		["Completed", "eval:self.per_billed == 100 and self.docstatus == 1"],
		["Cancelled", "eval:self.docstatus==2"],
		["Closed", "eval:self.status=='Closed'"],
	],
	"Purchase Receipt": [
		["Draft", None],
		["To Bill", "eval:self.per_billed < 100 and self.docstatus == 1"],
		["Completed", "eval:self.per_billed == 100 and self.docstatus == 1"],
		["Cancelled", "eval:self.docstatus==2"],
		["Closed", "eval:self.status=='Closed'"],
	],
	"Material Request": [
		["Draft", None],
		["Stopped", "eval:self.status == 'Stopped'"],
		["Cancelled", "eval:self.docstatus == 2"],
		["Pending", "eval:self.status != 'Stopped' and self.per_ordered == 0 and self.docstatus == 1"],
		["Partially Ordered", "eval:self.status != 'Stopped' and self.per_ordered < 100 and self.per_ordered > 0 and self.docstatus == 1"],
		# Fully-ordered requests get a type-specific terminal status
		["Ordered", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Purchase'"],
		["Transferred", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Material Transfer'"],
		["Issued", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Material Issue'"]
	]
}
class StatusUpdater(Document):
	"""
	Updates the status of the calling records
	Delivery Note: Update Delivered Qty, Update Percent and Validate over delivery
	Sales Invoice: Update Billed Amt, Update Percent and Validate over billing
	Installation Note: Update Installed Qty, Update Percent Qty and Validate over installation
	"""
	def update_prevdoc_status(self):
		"""Update quantities on referenced documents, then validate the totals."""
		self.update_qty()
		self.validate_qty()

	def set_status(self, update=False, status=None, update_modified=True):
		"""Derive ``self.status`` from the rules in ``status_map``.

		:param update: if True, persist the derived status via ``db_set``
		:param status: explicit status to persist first (only when ``update``)
		:param update_modified: forwarded to ``db_set`` when persisting
		"""
		if self.is_new():
			# amended documents always restart as Draft
			if self.get('amended_from'):
				self.status = 'Draft'
			return

		if self.doctype in status_map:
			_status = self.status

			if status and update:
				self.db_set("status", status)

			# evaluate the rules in reverse so later (more specific) entries win
			sl = status_map[self.doctype][:]
			sl.reverse()
			for s in sl:
				if not s[1]:
					# no condition: unconditional fallback status
					self.status = s[0]
					break
				elif s[1].startswith("eval:"):
					# "eval:" condition: sandboxed expression over a dict
					# snapshot of this document plus date/db helpers
					if frappe.safe_eval(s[1][5:], None, { "self": self.as_dict(), "getdate": getdate,
							"nowdate": nowdate, "get_value": frappe.db.get_value }):
						self.status = s[0]
						break
				elif getattr(self, s[1])():
					# plain string: name of a boolean method on the document
					self.status = s[0]
					break

			# record a timeline comment for visible status transitions
			if self.status != _status and self.status not in ("Cancelled", "Partially Ordered",
					"Ordered", "Issued", "Transferred"):
				self.add_comment("Label", _(self.status))

			if update:
				self.db_set('status', self.status, update_modified = update_modified)

	def validate_qty(self):
		"""Validates qty at row level"""
		self.tolerance = {}
		self.global_tolerance = None

		for args in self.status_updater:
			if "target_ref_field" not in args:
				# if target_ref_field is not specified, the programmer does not want to validate qty / amount
				continue

			# get unique transactions to update
			for d in self.get_all_children():
				if d.doctype == args['source_dt'] and d.get(args["join_field"]):
					args['name'] = d.get(args['join_field'])

					# get all qty where qty > target_field
					item = frappe.db.sql("""select item_code, `{target_ref_field}`,
						`{target_field}`, parenttype, parent from `tab{target_dt}`
						where `{target_ref_field}` < `{target_field}`
						and name=%s and docstatus=1""".format(**args),
						args['name'], as_dict=1)
					if item:
						item = item[0]
						item['idx'] = d.idx
						# humanize the field name for the error message
						item['target_ref_field'] = args['target_ref_field'].replace('_', ' ')

						if args.get('no_tolerance'):
							item['reduce_by'] = item[args['target_field']] - item[args['target_ref_field']]
							# allow tiny floating-point overshoot
							if item['reduce_by'] > .01:
								self.limits_crossed_error(args, item)

						elif item[args['target_ref_field']]:
							self.check_overflow_with_tolerance(item, args)

	def check_overflow_with_tolerance(self, item, args):
		"""
		Checks if there is overflow considering a relaxation tolerance
		"""
		# check if overflow is within tolerance
		tolerance, self.tolerance, self.global_tolerance = get_tolerance_for(item['item_code'],
			self.tolerance, self.global_tolerance)

		overflow_percent = ((item[args['target_field']] - item[args['target_ref_field']]) /
			item[args['target_ref_field']]) * 100

		if overflow_percent - tolerance > 0.01:
			item['max_allowed'] = flt(item[args['target_ref_field']] * (100+tolerance)/100)
			item['reduce_by'] = item[args['target_field']] - item['max_allowed']

			self.limits_crossed_error(args, item)

	def limits_crossed_error(self, args, item):
		'''Raise exception for limits crossed'''
		frappe.throw(_('This document is over limit by {0} {1} for item {4}. Are you making another {3} against the same {2}?')
			.format(
				frappe.bold(_(item["target_ref_field"].title())),
				frappe.bold(item["reduce_by"]),
				frappe.bold(_(args.get('target_dt'))),
				frappe.bold(_(self.doctype)),
				frappe.bold(item.get('item_code'))
			) + '<br><br>' +
			_('To allow over-billing or over-ordering, update "Allowance" in Stock Settings or the Item.'),
			title = _('Limit Crossed'))

	def update_qty(self, update_modified=True):
		"""Updates qty or amount at row level

		:param update_modified: If true, updates `modified` and `modified_by` for target parent doc
		"""
		for args in self.status_updater:
			# condition to include current record (if submit or no if cancel)
			# NOTE(review): '\"' is the same string as '"', so this replace is
			# a no-op and does not actually escape quotes — confirm intent.
			if self.docstatus == 1:
				args['cond'] = ' or parent="%s"' % self.name.replace('"', '\"')
			else:
				args['cond'] = ' and parent!="%s"' % self.name.replace('"', '\"')

			self._update_children(args, update_modified)

			if "percent_join_field" in args:
				self._update_percent_field_in_targets(args, update_modified)

	def _update_children(self, args, update_modified):
		"""Update quantities or amount in child table"""
		for d in self.get_all_children():
			if d.doctype != args['source_dt']:
				continue

			self._update_modified(args, update_modified)

			# updates qty in the child table
			args['detail_id'] = d.get(args['join_field'])

			args['second_source_condition'] = ""
			if args.get('second_source_dt') and args.get('second_source_field') \
					and args.get('second_join_field'):
				if not args.get("second_source_extra_cond"):
					args["second_source_extra_cond"] = ""
				# optionally add quantities contributed by a second source doctype
				args['second_source_condition'] = """ + ifnull((select sum(%(second_source_field)s)
					from `tab%(second_source_dt)s`
					where `%(second_join_field)s`="%(detail_id)s"
					and (`tab%(second_source_dt)s`.docstatus=1) %(second_source_extra_cond)s), 0) """ % args

			if args['detail_id']:
				if not args.get("extra_cond"): args["extra_cond"] = ""
				# NOTE(review): values are interpolated directly into the SQL
				# string; args come from in-code status_updater definitions,
				# but this would be unsafe with user-controlled input.
				frappe.db.sql("""update `tab%(target_dt)s`
					set %(target_field)s = (
						(select ifnull(sum(%(source_field)s), 0)
							from `tab%(source_dt)s` where `%(join_field)s`="%(detail_id)s"
							and (docstatus=1 %(cond)s) %(extra_cond)s)
						%(second_source_condition)s
					)
					%(update_modified)s
					where name='%(detail_id)s'""" % args)

	def _update_percent_field_in_targets(self, args, update_modified=True):
		"""Update percent field in parent transaction"""
		# one update per distinct parent referenced by the child rows
		distinct_transactions = set([d.get(args['percent_join_field'])
			for d in self.get_all_children(args['source_dt'])])

		for name in distinct_transactions:
			if name:
				args['name'] = name
				self._update_percent_field(args, update_modified)

	def _update_percent_field(self, args, update_modified=True):
		"""Update percent field in parent transaction"""
		self._update_modified(args, update_modified)

		if args.get('target_parent_field'):
			# percent completed = delivered/billed qty capped at the reference
			# qty, summed over all rows, as a fraction of the reference total
			frappe.db.sql("""update `tab%(target_parent_dt)s`
				set %(target_parent_field)s = round(
					ifnull((select
						ifnull(sum(if(%(target_ref_field)s > %(target_field)s, abs(%(target_field)s), abs(%(target_ref_field)s))), 0)
						/ sum(abs(%(target_ref_field)s)) * 100
					from `tab%(target_dt)s` where parent="%(name)s"), 0), 2)
				%(update_modified)s
				where name='%(name)s'""" % args)

			# update field
			if args.get('status_field'):
				frappe.db.sql("""update `tab%(target_parent_dt)s`
					set %(status_field)s = if(%(target_parent_field)s<0.001,
						'Not %(keyword)s', if(%(target_parent_field)s>=99.99,
						'Fully %(keyword)s', 'Partly %(keyword)s'))
					where name='%(name)s'""" % args)

			if update_modified:
				# re-derive the parent's status and notify listeners
				target = frappe.get_doc(args["target_parent_dt"], args["name"])
				target.set_status(update=True)
				target.notify_update()

	def _update_modified(self, args, update_modified):
		# builds the optional "modified/modified_by" SQL fragment
		args['update_modified'] = ''
		if update_modified:
			args['update_modified'] = ', modified = now(), modified_by = "{0}"'\
				.format(frappe.db.escape(frappe.session.user))

	def update_billing_status_for_zero_amount_refdoc(self, ref_dt):
		"""Recompute per_billed for referenced docs whose net total is zero.

		Percent-billed cannot be derived from amounts when the reference
		document's total is 0, so those are handled by qty instead.
		"""
		ref_fieldname = ref_dt.lower().replace(" ", "_")
		zero_amount_refdoc = []
		all_zero_amount_refdoc = frappe.db.sql_list("""select name from `tab%s`
			where docstatus=1 and base_net_total = 0""" % ref_dt)

		for item in self.get("items"):
			if item.get(ref_fieldname) \
				and item.get(ref_fieldname) in all_zero_amount_refdoc \
				and item.get(ref_fieldname) not in zero_amount_refdoc:
					zero_amount_refdoc.append(item.get(ref_fieldname))

		if zero_amount_refdoc:
			self.update_billing_status(zero_amount_refdoc, ref_dt, ref_fieldname)

	def update_billing_status(self, zero_amount_refdoc, ref_dt, ref_fieldname):
		"""Set per_billed on each reference doc from billed qty vs ordered qty."""
		for ref_dn in zero_amount_refdoc:
			ref_doc_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0) from `tab%s Item`
				where parent=%s""" % (ref_dt, '%s'), (ref_dn))[0][0])

			billed_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0)
				from `tab%s Item` where %s=%s and docstatus=1""" %
				(self.doctype, ref_fieldname, '%s'), (ref_dn))[0][0])

			# cap at 100%: never report more billed than ordered
			per_billed = ((ref_doc_qty if billed_qty > ref_doc_qty else billed_qty)\
				/ ref_doc_qty)*100

			ref_doc = frappe.get_doc(ref_dt, ref_dn)
			ref_doc.db_set("per_billed", per_billed)
			ref_doc.set_status(update=True)
def get_tolerance_for(item_code, item_tolerance=None, global_tolerance=None):
	"""
	Returns the tolerance for the item, if not set, returns global tolerance

	:param item_code: Item whose over-delivery/over-billing tolerance is needed
	:param item_tolerance: per-item cache dict; updated in place and returned
	:param global_tolerance: cached Stock Settings tolerance, fetched lazily
	:return: (tolerance, item_tolerance, global_tolerance)
	"""
	# FIX: the previous signature used a mutable default argument ({}), which
	# is shared across all calls and silently acted as a process-wide cache.
	if item_tolerance is None:
		item_tolerance = {}

	if item_tolerance.get(item_code):
		return item_tolerance[item_code], item_tolerance, global_tolerance

	tolerance = flt(frappe.db.get_value('Item', item_code, 'tolerance') or 0)

	if not tolerance:
		if global_tolerance is None:
			global_tolerance = flt(frappe.db.get_value('Stock Settings', None, 'tolerance'))
		tolerance = global_tolerance

	item_tolerance[item_code] = tolerance
	return tolerance, item_tolerance, global_tolerance
|
TheWardoctor/Wardoctors-repo | refs/heads/master | script.module.exodus/lib/resources/lib/sources/en/to_be_fixed/needsfixing/playbox.py | 5 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,random
import hashlib, string, json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import directstream
from resources.lib.modules import pyaes as pyaes
class source:
    """Exodus scraper for playboxhd.com, driven by its mobile JSON API."""

    def __init__(self):
        self.priority = 0
        self.language = ['en']
        self.domains = ['playboxhd.com']
        self.base_link = 'http://playboxhd.com'
        # API endpoints mimic the Android app (os/v/k query parameters)
        self.search_link = '/api/box?type=search&os=Android&v=291.0&k=0&keyword=%s'
        self.sources_link = '/api/box?type=detail&id=%s&os=Android&v=291.0&k=0&al=key'
        self.stream_link = '/api/box?type=stream&id=%s&os=Android&v=291.0'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return a urlencoded identifier for a movie, or None on failure."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return None

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return a urlencoded identifier for a TV show, or None on failure."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend a tvshow() identifier with episode details; None on failure."""
        try:
            if url == None: return

            # decode, merge in episode info, re-encode
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['season'], url['episode'], url['premiered'] = title, season, episode, premiered
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, locDict):
        """Resolve an identifier from movie()/episode() into playable streams.

        Returns a list of dicts with source/quality/language/url/direct/
        debridonly keys; empty list on any failure.
        """
        sources = []
        try:
            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            # search the API for the title; 'extended' output keeps the
            # response headers (result[4]) for follow-up requests
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.request(query, mobile=True, timeout=20, output='extended')

            r = json.loads(result[0])
            r = r['data']['films']

            # accept the requested year plus/minus one
            years = [str(data['year']), str(int(data['year']) + 1), str(int(data['year']) - 1)]

            if 'episode' in data:
                # match show by normalized title and publish year, then pick
                # the chapter whose title matches sXeY (zeros stripped)
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
                r = [(i,re.sub('[^0-9]', '', str(i['publishDate']))) for i in r ]
                r = [i[0] for i in r if any(x in i[1] for x in years)][0]

                result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']), mobile=True, headers=result[4], output='extended')
                r = json.loads(result[0])
                r = [i for i in r['data']['chapters'] if i['title'].replace('0','').lower() == 's%se%s' %(data['season'],data['episode'])][0]
            else:
                # movies: match by title and year, use the first chapter
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
                r = [i for i in r if any(x in i['publishDate'] for x in years)][0]

                result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']), mobile=True, headers=result[4], output='extended')
                r = json.loads(result[0])
                r = r['data']['chapters'][0]

            # fetch the encrypted stream URLs for the chosen chapter
            result = client.request(urlparse.urljoin(self.base_link, self.stream_link % r['id']), mobile=True,
                headers=result[4], output='extended')

            r = json.loads(result[0])
            r = [(i['quality'], i['server'], self._decrypt(i['stream'])) for i in r['data']]

            sources = []

            for i in r:
                try:
                    valid, hoster = source_utils.is_host_valid(i[2], hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(i[2], hoster)
                    for x in urls:
                        # gvideo streams carry their own quality/url per variant
                        q = x['quality'] if host == 'gvideo' else source_utils.label_to_quality(i[0])
                        u = x['url'] if host == 'gvideo' else i[2]
                        sources.append({'source': host, 'quality': q, 'language': 'en', 'url': u, 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except Exception as e:
            return sources

    def resolve(self, url):
        """Resolve a stream URL; google links need a googlepass redirect."""
        if 'google' in url:
            return directstream.googlepass(url)
        else:
            return url

    def _decrypt(self,url):
        """Decrypt an API 'stream' value: base64 -> AES-CBC with a fixed key.

        The key is itself base64 ('qwertyuiopasdfghjklzxc1234567890')
        and the IV is sixteen zero bytes.
        """
        import base64
        decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(base64.urlsafe_b64decode('cXdlcnR5dWlvcGFzZGZnaGprbHp4YzEyMzQ1Njc4OTA='), '\0' * 16))
        url = base64.decodestring(url)
        url = decrypter.feed(url) + decrypter.feed()
        return url
|
z-jason/anki | refs/heads/master | aqt/forms/preview.py | 1 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/preview.ui'
#
# Created: Sun Mar 30 10:19:29 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shim: on PyQt4 builds that expose QString, use its C++
# UTF-8 conversion; otherwise (API v2 / Python 3) strings are already str.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # QString is unavailable; the plain string is fine as-is.
        return s

# Translation helper: pass the UnicodeUTF8 encoding argument only on PyQt
# versions whose QApplication.translate still accepts it.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Generated UI for the card preview dialog (do not edit by hand).

    Builds two stacked group boxes — front preview over back preview —
    each exposing an empty QVBoxLayout for the caller to fill.
    """

    def setupUi(self, Form):
        """Create and lay out the widgets on *Form*."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(335, 282)
        self.verticalLayout_3 = QtGui.QVBoxLayout(Form)
        self.verticalLayout_3.setMargin(0)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        # "Front Preview" group with an empty layout for the caller's widget
        self.groupBox = QtGui.QGroupBox(Form)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.frontPrevBox = QtGui.QVBoxLayout(self.groupBox)
        self.frontPrevBox.setMargin(0)
        self.frontPrevBox.setObjectName(_fromUtf8("frontPrevBox"))
        self.verticalLayout_3.addWidget(self.groupBox)
        # "Back Preview" group, same structure
        self.groupBox_2 = QtGui.QGroupBox(Form)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.backPrevBox = QtGui.QVBoxLayout(self.groupBox_2)
        self.backPrevBox.setMargin(0)
        self.backPrevBox.setObjectName(_fromUtf8("backPrevBox"))
        self.verticalLayout_3.addWidget(self.groupBox_2)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set the translated window title and group box captions."""
        Form.setWindowTitle(_("Form"))
        self.groupBox.setTitle(_("Front Preview"))
        self.groupBox_2.setTitle(_("Back Preview"))
|
hynnet/openwrt-mt7620 | refs/heads/master | staging_dir/host/lib/python2.7/xml/dom/minicompat.py | 209 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]

import xml.dom

# Determine the string types for this interpreter: Python 2 has both
# str and unicode, Python 3 only str.
try:
    unicode
except NameError:
    # Python 3: a single string type.
    StringTypes = type(''),
else:
    # Python 2: str and unicode both count as strings.
    StringTypes = type(''), type(unicode(''))
class NodeList(list):
    """Mutable DOM NodeList implemented directly on the built-in list."""

    __slots__ = ()

    def item(self, index):
        # DOM semantics: an out-of-range index yields None, never an error.
        if index < 0 or index >= len(self):
            return None
        return self[index]

    def _get_length(self):
        return len(self)

    def _set_length(self, value):
        # 'length' is read-only per the DOM specification.
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")

    def __getstate__(self):
        # Pickle as a plain list of the contained nodes.
        return list(self)

    def __setstate__(self, state):
        # Restore by replacing our contents wholesale.
        self[:] = state
class EmptyNodeList(tuple):
    """Immutable, permanently empty DOM NodeList."""

    __slots__ = ()

    def __add__(self, other):
        # Concatenation promotes the result to a mutable NodeList.
        combined = NodeList()
        combined.extend(other)
        return combined

    def __radd__(self, other):
        combined = NodeList()
        combined.extend(other)
        return combined

    def item(self, index):
        # There are no nodes, so every index is out of range.
        return None

    def _get_length(self):
        return 0

    def _set_length(self, value):
        # 'length' is read-only per the DOM specification.
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
    """Install a read-only property *name* on *klass*.

    The getter is the class's existing ``_get_<name>`` method; any attempt
    to assign through the property raises xml.dom.NoModificationAllowedErr.
    The class must not define a ``_set_<name>`` method.
    """
    get = getattr(klass, ("_get_" + name))
    # FIX: ``.im_func`` exists only on Python 2 unbound methods and raised
    # AttributeError on Python 3. Unwrap it when present, otherwise use the
    # plain function directly — identical behavior on Python 2.
    get = getattr(get, "im_func", get)

    def set(self, value, name=name):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute " + repr(name))
    assert not hasattr(klass, "_set_" + name), \
        "expected not to find _set_" + name
    prop = property(get, set, doc=doc)
    setattr(klass, name, prop)
|
ChinaMassClouds/copenstack-server | refs/heads/master | openstack/src/nova-2014.2/nova/virt/ironic/driver.py | 1 | # coding=utf-8
#
# Copyright 2014 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver wrapping the Ironic API, such that Nova may provision
bare metal resources.
"""
import logging as py_logging
import time
from oslo.config import cfg
import six
from nova.compute import arch
from nova.compute import hvtype
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.ironic import client_wrapper
from nova.virt.ironic import ironic_states
from nova.virt.ironic import patcher
# ironicclient is imported lazily in IronicDriver.__init__ so that merely
# importing this module does not require the client library.
ironic = None

LOG = logging.getLogger(__name__)

# Configuration options for the [ironic] group (API endpoint, keystone
# credentials, retry behavior, and client log-level override).
opts = [
    cfg.IntOpt('api_version',
               default=1,
               help='Version of Ironic API service endpoint.'),
    cfg.StrOpt('api_endpoint',
               help='URL for Ironic API endpoint.'),
    cfg.StrOpt('admin_username',
               help='Ironic keystone admin name'),
    cfg.StrOpt('admin_password',
               help='Ironic keystone admin password.'),
    cfg.StrOpt('admin_auth_token',
               help='Ironic keystone auth token.'),
    cfg.StrOpt('admin_url',
               help='Keystone public API endpoint.'),
    cfg.StrOpt('client_log_level',
               help='Log level override for ironicclient. Set this in '
                    'order to override the global "default_log_levels", '
                    '"verbose", and "debug" settings.'),
    cfg.StrOpt('admin_tenant_name',
               help='Ironic keystone tenant name.'),
    cfg.IntOpt('api_max_retries',
               default=60,
               help=('How many retries when a request does conflict.')),
    cfg.IntOpt('api_retry_interval',
               default=2,
               help=('How often to retry in seconds when a request '
                     'does conflict')),
]

ironic_group = cfg.OptGroup(name='ironic',
                            title='Ironic Options')

CONF = cfg.CONF
CONF.register_group(ironic_group)
CONF.register_opts(opts, ironic_group)

# Translation from Ironic power states to the Nova power-state constants.
_POWER_STATE_MAP = {
    ironic_states.POWER_ON: power_state.RUNNING,
    ironic_states.NOSTATE: power_state.NOSTATE,
    ironic_states.POWER_OFF: power_state.SHUTDOWN,
}
def map_power_state(state):
    """Translate an Ironic power state into the Nova equivalent."""
    if state in _POWER_STATE_MAP:
        return _POWER_STATE_MAP[state]
    # Unrecognized states are logged and reported as NOSTATE.
    LOG.warning(_LW("Power state %s not found."), state)
    return power_state.NOSTATE
def _validate_instance_and_node(icli, instance):
    """Return the Ironic node backing *instance*.

    Asks the Ironic service for the node associated with the instance's
    UUID; raises InstanceNotFound when no such association exists.
    """
    uuid = instance['uuid']
    # TODO(mrda): Bug ID 1365228 icli should be renamed ironicclient
    # throughout
    try:
        return icli.call("node.get_by_instance_uuid", uuid)
    except ironic.exc.NotFound:
        raise exception.InstanceNotFound(instance_id=uuid)
def _get_nodes_supported_instances(cpu_arch=None):
    """Return the (arch, hypervisor type, vm mode) tuples a node supports.

    An unknown or undefined cpu_arch yields no supported instances.
    """
    if cpu_arch:
        return [(cpu_arch, hvtype.BAREMETAL, vm_mode.HVM)]
    return []
def _log_ironic_polling(what, node, instance):
    """Emit a debug line describing why we are still polling *node*.

    Logs the node's current and target power/provision states while
    waiting for it to reach the condition described by *what*.
    """
    def _quoted(state):
        # Render None as-is so the log distinguishes "no state" from a
        # state string; quote any concrete state value.
        return None if state is None else '"%s"' % state

    # NOTE: the previous version repeated the quoting expression four times
    # and shadowed the imported ``power_state`` module with a local variable.
    LOG.debug('Still waiting for ironic node %(node)s to %(what)s: '
              'power_state=%(power_state)s, '
              'target_power_state=%(tgt_power_state)s, '
              'provision_state=%(prov_state)s, '
              'target_provision_state=%(tgt_prov_state)s',
              dict(what=what,
                   node=node.uuid,
                   power_state=_quoted(node.power_state),
                   tgt_power_state=_quoted(node.target_power_state),
                   prov_state=_quoted(node.provision_state),
                   tgt_prov_state=_quoted(node.target_provision_state)),
              instance=instance)
class IronicDriver(virt_driver.ComputeDriver):
"""Hypervisor driver for Ironic - bare metal provisioning."""
capabilities = {"has_imagecache": False,
"supports_recreate": False}
def __init__(self, virtapi, read_only=False):
    """Initialize the driver: lazy-import ironicclient, load the firewall
    driver, set up the node cache, and apply the client log-level override.
    """
    super(IronicDriver, self).__init__(virtapi)
    # Import ironicclient on first instantiation only, and patch in the
    # exc/client submodules for library versions that don't expose them.
    global ironic
    if ironic is None:
        ironic = importutils.import_module('ironicclient')
        # NOTE(deva): work around a lack of symbols in the current version.
        if not hasattr(ironic, 'exc'):
            ironic.exc = importutils.import_module('ironicclient.exc')
        if not hasattr(ironic, 'client'):
            ironic.client = importutils.import_module(
                'ironicclient.client')

    self.firewall_driver = firewall.load_driver(
        default='nova.virt.firewall.NoopFirewallDriver')
    # Cache of Ironic node objects keyed by uuid, with a refresh timestamp.
    self.node_cache = {}
    self.node_cache_time = 0

    # TODO(mrda): Bug ID 1365230 Logging configurability needs
    # to be addressed
    icli_log_level = CONF.ironic.client_log_level
    if icli_log_level:
        # Override the ironicclient logger level from config.
        level = py_logging.getLevelName(icli_log_level)
        logger = py_logging.getLogger('ironicclient')
        logger.setLevel(level)
def _node_resources_unavailable(self, node_obj):
    """Return True when the node's resources must not be offered to Nova.

    A node is unavailable while it is in maintenance mode or while its
    power state is ERROR or NOSTATE.
    """
    unavailable_power_states = (ironic_states.ERROR, ironic_states.NOSTATE)
    return (node_obj.maintenance or
            node_obj.power_state in unavailable_power_states)
def _node_resource(self, node):
    """Helper method to create resource dict from node stats.

    Translates an Ironic node's properties into the resource dictionary
    the Nova resource tracker expects. A node with an instance, or one
    that is unavailable, reports its resources as fully used / zero.
    """
    vcpus = int(node.properties.get('cpus', 0))
    memory_mb = int(node.properties.get('memory_mb', 0))
    local_gb = int(node.properties.get('local_gb', 0))
    raw_cpu_arch = node.properties.get('cpu_arch', None)
    try:
        cpu_arch = arch.canonicalize(raw_cpu_arch)
    except exception.InvalidArchitectureName:
        cpu_arch = None
    if not cpu_arch:
        LOG.warn(_LW("cpu_arch not defined for node '%s'"), node.uuid)

    nodes_extra_specs = {}

    # NOTE(deva): In Havana and Icehouse, the flavor was required to link
    # to an arch-specific deploy kernel and ramdisk pair, and so the flavor
    # also had to have extra_specs['cpu_arch'], which was matched against
    # the ironic node.properties['cpu_arch'].
    # With Juno, the deploy image(s) may be referenced directly by the
    # node.driver_info, and a flavor no longer needs to contain any of
    # these three extra specs, though the cpu_arch may still be used
    # in a heterogeneous environment, if so desired.
    # NOTE(dprince): we use the raw cpu_arch here because extra_specs
    # filters aren't canonicalized
    nodes_extra_specs['cpu_arch'] = raw_cpu_arch

    # NOTE(gilliard): To assist with more precise scheduling, if the
    # node.properties contains a key 'capabilities', we expect the value
    # to be of the form "k1:v1,k2:v2,etc.." which we add directly as
    # key/value pairs into the node_extra_specs to be used by the
    # ComputeCapabilitiesFilter
    capabilities = node.properties.get('capabilities')
    if capabilities:
        for capability in str(capabilities).split(','):
            parts = capability.split(':')
            if len(parts) == 2 and parts[0] and parts[1]:
                nodes_extra_specs[parts[0]] = parts[1]
            else:
                LOG.warn(_LW("Ignoring malformed capability '%s'. "
                             "Format should be 'key:val'."), capability)

    vcpus_used = 0
    memory_mb_used = 0
    local_gb_used = 0

    if node.instance_uuid:
        # Node has an instance, report all resource as unavailable
        vcpus_used = vcpus
        memory_mb_used = memory_mb
        local_gb_used = local_gb
    elif self._node_resources_unavailable(node):
        # The node's current state is such that it should not present any
        # of its resources to Nova
        vcpus = 0
        memory_mb = 0
        local_gb = 0

    # Ironic nodes map one-to-one onto hypervisor entries, so the node
    # uuid doubles as the hypervisor hostname.
    dic = {
        'node': str(node.uuid),
        'hypervisor_hostname': str(node.uuid),
        'hypervisor_type': self._get_hypervisor_type(),
        'hypervisor_version': self._get_hypervisor_version(),
        'cpu_info': 'baremetal cpu',
        'vcpus': vcpus,
        'vcpus_used': vcpus_used,
        'local_gb': local_gb,
        'local_gb_used': local_gb_used,
        'disk_total': local_gb,
        'disk_used': local_gb_used,
        'disk_available': local_gb - local_gb_used,
        'memory_mb': memory_mb,
        'memory_mb_used': memory_mb_used,
        'host_memory_total': memory_mb,
        'host_memory_free': memory_mb - memory_mb_used,
        'supported_instances': jsonutils.dumps(
            _get_nodes_supported_instances(cpu_arch)),
        'stats': jsonutils.dumps(nodes_extra_specs),
        'host': CONF.host,
    }
    # Extra specs are exposed both inside 'stats' and at the top level.
    dic.update(nodes_extra_specs)
    return dic
def _start_firewall(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def _stop_firewall(self, instance, network_info):
self.firewall_driver.unfilter_instance(instance, network_info)
    def _add_driver_fields(self, node, instance, image_meta, flavor,
                           preserve_ephemeral=None):
        """Push deploy-time parameters onto the Ironic node.

        Builds a JSON patch with the image/flavor details needed for the
        deploy and associates the node with the instance being provisioned.

        :param node: the Ironic node object to update.
        :param instance: the instance being provisioned.
        :param image_meta: image metadata used to build the deploy patch.
        :param flavor: the instance's flavor object.
        :param preserve_ephemeral: whether to keep the ephemeral partition
            on redeploy; passed through to the patcher (default None).
        :raises: InstanceDeployFailure if Ironic rejects the node update.
        """
        icli = client_wrapper.IronicClientWrapper()
        patch = patcher.create(node).get_deploy_patch(instance,
                                                      image_meta,
                                                      flavor,
                                                      preserve_ephemeral)
        # Associate the node with an instance
        patch.append({'path': '/instance_uuid', 'op': 'add',
                      'value': instance['uuid']})
        try:
            icli.call('node.update', node.uuid, patch)
        except ironic.exc.BadRequest:
            msg = (_("Failed to add deploy parameters on node %(node)s "
                     "when provisioning the instance %(instance)s")
                   % {'node': node.uuid, 'instance': instance['uuid']})
            LOG.error(msg)
            raise exception.InstanceDeployFailure(msg)
    def _cleanup_deploy(self, context, node, instance, network_info):
        """Undo the deploy preparation done on a node.

        Removes the deploy-time driver fields, disassociates the node from
        the instance, then unplugs VIFs and tears down firewall rules.

        :param context: the security context.
        :param node: the Ironic node being cleaned up.
        :param instance: the instance that was being deployed.
        :param network_info: instance network information.
        :raises: InstanceTerminationFailure if Ironic rejects the cleanup
            patch.
        """
        icli = client_wrapper.IronicClientWrapper()
        # TODO(mrda): It would be better to use instance.get_flavor() here
        # but right now that doesn't include extra_specs which are required
        # NOTE(pmurray): Flavor may have been deleted, hence the elevated
        # read_deleted="yes" context for the lookup.
        ctxt = context.elevated(read_deleted="yes")
        flavor = objects.Flavor.get_by_id(ctxt,
                                          instance['instance_type_id'])
        patch = patcher.create(node).get_cleanup_patch(instance, network_info,
                                                       flavor)
        # Unassociate the node
        patch.append({'op': 'remove', 'path': '/instance_uuid'})
        try:
            icli.call('node.update', node.uuid, patch)
        except ironic.exc.BadRequest:
            LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
                          "when unprovisioning the instance %(instance)s"),
                      {'node': node.uuid, 'instance': instance['uuid']})
            reason = (_("Fail to clean up node %s parameters") % node.uuid)
            raise exception.InstanceTerminationFailure(reason=reason)
        self._unplug_vifs(node, instance, network_info)
        self._stop_firewall(instance, network_info)
    def _wait_for_active(self, icli, instance):
        """Wait for the node to be marked as ACTIVE in Ironic.

        Intended to run inside a FixedIntervalLoopingCall: each invocation
        checks the node once and raises LoopingCallDone when the deploy is
        finished.

        :param icli: an Ironic client wrapper.
        :param instance: the instance being deployed.
        :raises: LoopingCallDone when the node becomes ACTIVE.
        :raises: InstanceNotFound if Ironic deleted (or is deleting) the
            node underneath us.
        :raises: InstanceDeployFailure if the deploy failed in Ironic.
        """
        node = _validate_instance_and_node(icli, instance)
        if node.provision_state == ironic_states.ACTIVE:
            # job is done
            LOG.debug("Ironic node %(node)s is now ACTIVE",
                      dict(node=node.uuid), instance=instance)
            raise loopingcall.LoopingCallDone()
        if node.target_provision_state == ironic_states.DELETED:
            # ironic is trying to delete it now
            raise exception.InstanceNotFound(instance_id=instance['uuid'])
        if node.provision_state == ironic_states.NOSTATE:
            # ironic already deleted it
            raise exception.InstanceNotFound(instance_id=instance['uuid'])
        if node.provision_state == ironic_states.DEPLOYFAIL:
            # ironic failed to deploy
            msg = (_("Failed to provision instance %(inst)s: %(reason)s")
                   % {'inst': instance['uuid'], 'reason': node.last_error})
            raise exception.InstanceDeployFailure(msg)
        # Still in progress; leave a periodic debug breadcrumb and poll again.
        _log_ironic_polling('become ACTIVE', node, instance)
def _wait_for_power_state(self, icli, instance, message):
"""Wait for the node to complete a power state change."""
node = _validate_instance_and_node(icli, instance)
if node.target_power_state == ironic_states.NOSTATE:
raise loopingcall.LoopingCallDone()
_log_ironic_polling(message, node, instance)
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function.
:param host: the hostname of the compute host.
"""
return
def _get_hypervisor_type(self):
"""Get hypervisor type."""
return 'ironic'
def _get_hypervisor_version(self):
"""Returns the version of the Ironic API service endpoint."""
return CONF.ironic.api_version
def instance_exists(self, instance):
"""Checks the existence of an instance.
Checks the existence of an instance. This is an override of the
base method for efficiency.
:param instance: The instance object.
:returns: True if the instance exists. False if not.
"""
icli = client_wrapper.IronicClientWrapper()
try:
_validate_instance_and_node(icli, instance)
return True
except exception.InstanceNotFound:
return False
def list_instances(self):
"""Return the names of all the instances provisioned.
:returns: a list of instance names.
"""
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call("node.list", associated=True)
context = nova_context.get_admin_context()
return [objects.Instance.get_by_uuid(context,
i.instance_uuid).name
for i in node_list]
def list_instance_uuids(self):
"""Return the UUIDs of all the instances provisioned.
:returns: a list of instance UUIDs.
"""
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call("node.list", associated=True)
return list(n.instance_uuid for n in node_list)
def node_is_available(self, nodename):
"""Confirms a Nova hypervisor node exists in the Ironic inventory.
:param nodename: The UUID of the node.
:returns: True if the node exists, False if not.
"""
# NOTE(comstud): We can cheat and use caching here. This method
# just needs to return True for nodes that exist. It doesn't
# matter if the data is stale. Sure, it's possible that removing
# node from Ironic will cause this method to return True until
# the next call to 'get_available_nodes', but there shouldn't
# be much harm. There's already somewhat of a race.
if not self.node_cache:
# Empty cache, try to populate it.
self._refresh_cache()
if nodename in self.node_cache:
return True
# NOTE(comstud): Fallback and check Ironic. This case should be
# rare.
icli = client_wrapper.IronicClientWrapper()
try:
icli.call("node.get", nodename)
return True
except ironic.exc.NotFound:
return False
def _refresh_cache(self):
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call('node.list', detail=True)
node_cache = {}
for node in node_list:
node_cache[node.uuid] = node
self.node_cache = node_cache
self.node_cache_time = time.time()
def get_available_nodes(self, refresh=False):
"""Returns the UUIDs of all nodes in the Ironic inventory.
:param refresh: Boolean value; If True run update first. Ignored by
this driver.
:returns: a list of UUIDs
"""
# NOTE(jroll) we refresh the cache every time this is called
# because it needs to happen in the resource tracker
# periodic task. This task doesn't pass refresh=True,
# unfortunately.
self._refresh_cache()
node_uuids = list(self.node_cache.keys())
LOG.debug("Returning %(num_nodes)s available node(s)",
dict(num_nodes=len(node_uuids)))
return node_uuids
    def get_available_resource(self, nodename):
        """Retrieve resource information.

        This method is called when nova-compute launches, and
        as part of a periodic task that records the results in the DB.

        :param nodename: the UUID of the node.
        :returns: a dictionary describing resources.
        """
        # NOTE(comstud): We can cheat and use caching here. This method is
        # only called from a periodic task and right after the above
        # get_available_nodes() call is called.
        if not self.node_cache:
            # Well, it's also called from init_host(), so if we have empty
            # cache, let's try to populate it.
            self._refresh_cache()
        cache_age = time.time() - self.node_cache_time
        if nodename in self.node_cache:
            LOG.debug("Using cache for node %(node)s, age: %(age)s",
                      {'node': nodename, 'age': cache_age})
            node = self.node_cache[nodename]
        else:
            # Cache miss: fall back to a live lookup so a newly added node
            # still reports its resources.
            LOG.debug("Node %(node)s not found in cache, age: %(age)s",
                      {'node': nodename, 'age': cache_age})
            icli = client_wrapper.IronicClientWrapper()
            node = icli.call("node.get", nodename)
        return self._node_resource(node)
    def get_info(self, instance):
        """Get the current state and resource usage for this instance.

        If the instance is not found this method returns (a dictionary
        with) NOSTATE and all resources == 0.

        :param instance: the instance object.
        :returns: a dictionary containing:

            :state: the running state. One of :mod:`nova.compute.power_state`.
            :max_mem: (int) the maximum memory in KBytes allowed.
            :mem: (int) the memory in KBytes used by the domain.
            :num_cpu: (int) the number of CPUs.
            :cpu_time: (int) the CPU time used in nanoseconds. Always 0 for
                this driver.
        """
        icli = client_wrapper.IronicClientWrapper()
        try:
            node = _validate_instance_and_node(icli, instance)
        except exception.InstanceNotFound:
            # Instance is gone: report NOSTATE with zeroed resources.
            return {'state': map_power_state(ironic_states.NOSTATE),
                    'max_mem': 0,
                    'mem': 0,
                    'num_cpu': 0,
                    'cpu_time': 0
                    }
        # Node properties store memory in MiB; this API reports KiB.
        memory_kib = int(node.properties.get('memory_mb', 0)) * 1024
        if memory_kib == 0:
            LOG.warn(_LW("Warning, memory usage is 0 for "
                         "%(instance)s on baremetal node %(node)s."),
                     {'instance': instance['uuid'],
                      'node': instance['node']})
        num_cpu = node.properties.get('cpus', 0)
        if num_cpu == 0:
            LOG.warn(_LW("Warning, number of cpus is 0 for "
                         "%(instance)s on baremetal node %(node)s."),
                     {'instance': instance['uuid'],
                      'node': instance['node']})
        return {'state': map_power_state(node.power_state),
                'max_mem': memory_kib,
                'mem': memory_kib,
                'num_cpu': num_cpu,
                'cpu_time': 0
                }
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?
:param instance: the instance object.
:returns: Boolean value. If True deallocate networks on reschedule.
"""
return True
def macs_for_instance(self, instance):
"""List the MAC addresses of an instance.
List of MAC addresses for the node which this instance is
associated with.
:param instance: the instance object.
:return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
icli = client_wrapper.IronicClientWrapper()
try:
node = icli.call("node.get", instance['node'])
except ironic.exc.NotFound:
return None
ports = icli.call("node.list_ports", node.uuid)
return set([p.address for p in ports])
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Deploy an instance.
:param context: The security context.
:param instance: The instance object.
:param image_meta: Image dict returned by nova.image.glance
that defines the image from which to boot this instance.
:param injected_files: User files to inject into instance. Ignored
by this driver.
:param admin_password: Administrator password to set in
instance. Ignored by this driver.
:param network_info: Instance network information.
:param block_device_info: Instance block device
information. Ignored by this driver.
"""
# The compute manager is meant to know the node uuid, so missing uuid
# is a significant issue. It may mean we've been passed the wrong data.
node_uuid = instance.get('node')
if not node_uuid:
raise ironic.exc.BadRequest(
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance['uuid'])
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", node_uuid)
flavor = objects.Flavor.get_by_id(context,
instance['instance_type_id'])
self._add_driver_fields(node, instance, image_meta, flavor)
# NOTE(Shrews): The default ephemeral device needs to be set for
# services (like cloud-init) that depend on it being returned by the
# metadata server. Addresses bug https://launchpad.net/bugs/1324286.
if flavor['ephemeral_gb']:
instance.default_ephemeral_device = '/dev/sda1'
instance.save()
# validate we are ready to do the deploy
validate_chk = icli.call("node.validate", node_uuid)
if not validate_chk.deploy or not validate_chk.power:
# something is wrong. undo what we have done
self._cleanup_deploy(context, node, instance, network_info)
raise exception.ValidationError(_(
"Ironic node: %(id)s failed to validate."
" (deploy: %(deploy)s, power: %(power)s)")
% {'id': node.uuid,
'deploy': validate_chk.deploy,
'power': validate_chk.power})
# prepare for the deploy
try:
self._plug_vifs(node, instance, network_info)
self._start_firewall(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error preparing deploy for instance "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance['uuid'],
'node': node_uuid})
self._cleanup_deploy(context, node, instance, network_info)
# trigger the node deploy
try:
icli.call("node.set_provision_state", node_uuid,
ironic_states.ACTIVE)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_LE("Failed to request Ironic to provision instance "
"%(inst)s: %(reason)s"),
{'inst': instance['uuid'],
'reason': six.text_type(e)})
LOG.error(msg)
self._cleanup_deploy(context, node, instance, network_info)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
icli, instance)
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error deploying instance %(instance)s on "
"baremetal node %(node)s."),
{'instance': instance['uuid'],
'node': node_uuid})
self.destroy(context, instance, network_info)
def _unprovision(self, icli, instance, node):
"""This method is called from destroy() to unprovision
already provisioned node after required checks.
"""
try:
icli.call("node.set_provision_state", node.uuid, "deleted")
except Exception as e:
# if the node is already in a deprovisioned state, continue
# This should be fixed in Ironic.
# TODO(deva): This exception should be added to
# python-ironicclient and matched directly,
# rather than via __name__.
if getattr(e, '__name__', None) != 'InstanceDeployFailure':
raise
# using a dict because this is modified in the local method
data = {'tries': 0}
def _wait_for_provision_state():
node = _validate_instance_and_node(icli, instance)
if not node.provision_state:
LOG.debug("Ironic node %(node)s is now unprovisioned",
dict(node=node.uuid), instance=instance)
raise loopingcall.LoopingCallDone()
if data['tries'] >= CONF.ironic.api_max_retries:
msg = (_("Error destroying the instance on node %(node)s. "
"Provision state still '%(state)s'.")
% {'state': node.provision_state,
'node': node.uuid})
LOG.error(msg)
raise exception.NovaException(msg)
else:
data['tries'] += 1
_log_ironic_polling('unprovision', node, instance)
# wait for the state transition to finish
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
    def destroy(self, context, instance, network_info,
                block_device_info=None, destroy_disks=True, migrate_data=None):
        """Destroy the specified instance, if it can be found.

        :param context: The security context.
        :param instance: The instance object.
        :param network_info: Instance network information.
        :param block_device_info: Instance block device
            information. Ignored by this driver.
        :param destroy_disks: Indicates if disks should be
            destroyed. Ignored by this driver.
        :param migrate_data: implementation specific params.
            Ignored by this driver.
        """
        icli = client_wrapper.IronicClientWrapper()
        try:
            node = _validate_instance_and_node(icli, instance)
        except exception.InstanceNotFound:
            LOG.warning(_LW("Destroy called on non-existing instance %s."),
                        instance['uuid'])
            # NOTE(deva): if nova.compute.ComputeManager._delete_instance()
            #             is called on a non-existing instance, the only way
            #             to delete it is to return from this method
            #             without raising any exceptions.
            return
        # Only nodes in a deployed/failed/waiting state need an explicit
        # unprovision step; other states just get the local cleanup below.
        if node.provision_state in (ironic_states.ACTIVE,
                                    ironic_states.DEPLOYFAIL,
                                    ironic_states.ERROR,
                                    ironic_states.DEPLOYWAIT):
            self._unprovision(icli, instance, node)
        self._cleanup_deploy(context, node, instance, network_info)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot the specified instance.

        NOTE: Ironic does not support soft-off, so this method
        always performs a hard-reboot.
        NOTE: Unlike the libvirt driver, this method does not delete
        and recreate the instance; it preserves local state.

        :param context: The security context.
        :param instance: The instance object.
        :param network_info: Instance network information. Ignored by
            this driver.
        :param reboot_type: Either a HARD or SOFT reboot. Ignored by
            this driver.
        :param block_device_info: Info pertaining to attached volumes.
            Ignored by this driver.
        :param bad_volumes_callback: Function to handle any bad volumes
            encountered. Ignored by this driver.
        """
        icli = client_wrapper.IronicClientWrapper()
        node = _validate_instance_and_node(icli, instance)
        icli.call("node.set_power_state", node.uuid, 'reboot')
        # Block until Ironic reports the power-state change has completed.
        timer = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_power_state,
            icli, instance, 'reboot')
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance.

        NOTE: Ironic does not support soft-off, so this method ignores
        timeout and retry_interval parameters.
        NOTE: Unlike the libvirt driver, this method does not delete
        and recreate the instance; it preserves local state.

        :param instance: The instance object.
        :param timeout: time to wait for node to shutdown. Ignored by
            this driver.
        :param retry_interval: How often to signal node while waiting
            for it to shutdown. Ignored by this driver.
        """
        icli = client_wrapper.IronicClientWrapper()
        node = _validate_instance_and_node(icli, instance)
        icli.call("node.set_power_state", node.uuid, 'off')
        # Block until Ironic reports the power-state change has completed.
        timer = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_power_state,
            icli, instance, 'power off')
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance.

        NOTE: Unlike the libvirt driver, this method does not delete
        and recreate the instance; it preserves local state.

        :param context: The security context.
        :param instance: The instance object.
        :param network_info: Instance network information. Ignored by
            this driver.
        :param block_device_info: Instance block device
            information. Ignored by this driver.
        """
        icli = client_wrapper.IronicClientWrapper()
        node = _validate_instance_and_node(icli, instance)
        icli.call("node.set_power_state", node.uuid, 'on')
        # Block until Ironic reports the power-state change has completed.
        timer = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_power_state,
            icli, instance, 'power on')
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
def get_host_stats(self, refresh=False):
"""Return the currently known stats for all Ironic nodes.
:param refresh: Boolean value; If True run update first. Ignored by
this driver.
:returns: a list of dictionaries; each dictionary contains the
stats for a node.
"""
caps = []
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call("node.list")
for node in node_list:
data = self._node_resource(node)
caps.append(data)
return caps
def refresh_security_group_rules(self, security_group_id):
"""Refresh security group rules from data store.
Invoked when security group rules are updated.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""Refresh security group members from data store.
Invoked when instances are added/removed to a security group.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_provider_fw_rules(self):
"""Triggers a firewall update based on database changes."""
self.firewall_driver.refresh_provider_fw_rules()
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules from data store.
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
group gains or loses a rule.
:param instance: The instance object.
"""
self.firewall_driver.refresh_instance_security_rules(instance)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Set up filtering rules.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.unfilter_instance(instance, network_info)
def _plug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance['uuid'],
'network_info': network_info_str})
# start by ensuring the ports are clear
self._unplug_vifs(node, instance, network_info)
icli = client_wrapper.IronicClientWrapper()
ports = icli.call("node.list_ports", node.uuid)
if len(network_info) > len(ports):
raise exception.NovaException(_(
"Ironic node: %(id)s virtual to physical interface count"
" missmatch"
" (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
% {'id': node.uuid,
'vif_count': len(network_info),
'pif_count': len(ports)})
if len(network_info) > 0:
# not needed if no vif are defined
for vif, pif in zip(network_info, ports):
# attach what neutron needs directly to the port
port_id = unicode(vif['id'])
patch = [{'op': 'add',
'path': '/extra/vif_port_id',
'value': port_id}]
icli.call("port.update", pif.uuid, patch)
def _unplug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance['uuid'],
'network_info': network_info_str})
if network_info and len(network_info) > 0:
icli = client_wrapper.IronicClientWrapper()
ports = icli.call("node.list_ports", node.uuid)
# not needed if no vif are defined
for vif, pif in zip(network_info, ports):
# we can not attach a dict directly
patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}]
try:
icli.call("port.update", pif.uuid, patch)
except ironic.exc.BadRequest:
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", instance['node'])
self._plug_vifs(node, instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", instance['node'])
self._unplug_vifs(node, instance, network_info)
    def rebuild(self, context, instance, image_meta, injected_files,
                admin_password, bdms, detach_block_devices,
                attach_block_devices, network_info=None,
                recreate=False, block_device_info=None,
                preserve_ephemeral=False):
        """Rebuild/redeploy an instance.

        This version of rebuild() allows for supporting the option to
        preserve the ephemeral partition. We cannot call spawn() from
        here because it will attempt to set the instance_uuid value
        again, which is not allowed by the Ironic API. It also requires
        the instance to not have an 'active' provision state, but we
        cannot safely change that. Given that, we implement only the
        portions of spawn() we need within rebuild().

        :param context: The security context.
        :param instance: The instance object.
        :param image_meta: Image object returned by nova.image.glance
            that defines the image from which to boot this instance. Ignored
            by this driver.
        :param injected_files: User files to inject into instance. Ignored
            by this driver.
        :param admin_password: Administrator password to set in
            instance. Ignored by this driver.
        :param bdms: block-device-mappings to use for rebuild. Ignored
            by this driver.
        :param detach_block_devices: function to detach block devices. See
            nova.compute.manager.ComputeManager:_rebuild_default_impl for
            usage. Ignored by this driver.
        :param attach_block_devices: function to attach block devices. See
            nova.compute.manager.ComputeManager:_rebuild_default_impl for
            usage. Ignored by this driver.
        :param network_info: Instance network information. Ignored by
            this driver.
        :param recreate: Boolean value; if True the instance is
            recreated on a new hypervisor - all the cleanup of old state is
            skipped. Ignored by this driver.
        :param block_device_info: Instance block device
            information. Ignored by this driver.
        :param preserve_ephemeral: Boolean value; if True the ephemeral
            must be preserved on rebuild.
        :raises: InstanceDeployFailure if Ironic refuses the rebuild request.
        """
        instance.task_state = task_states.REBUILD_SPAWNING
        instance.save(expected_task_state=[task_states.REBUILDING])
        node_uuid = instance.node
        icli = client_wrapper.IronicClientWrapper()
        node = icli.call("node.get", node_uuid)
        flavor = objects.Flavor.get_by_id(context,
                                          instance['instance_type_id'])
        # Re-push deploy parameters, honouring preserve_ephemeral.
        self._add_driver_fields(node, instance, image_meta, flavor,
                                preserve_ephemeral)
        # Trigger the node rebuild/redeploy.
        try:
            icli.call("node.set_provision_state",
                      node_uuid, ironic_states.REBUILD)
        except (exception.NovaException,         # Retry failed
                ironic.exc.InternalServerError,  # Validations
                ironic.exc.BadRequest) as e:     # Maintenance
            msg = (_("Failed to request Ironic to rebuild instance "
                     "%(inst)s: %(reason)s") % {'inst': instance['uuid'],
                                                'reason': six.text_type(e)})
            raise exception.InstanceDeployFailure(msg)
        # Although the target provision state is REBUILD, it will actually go
        # to ACTIVE once the redeploy is finished.
        timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                     icli, instance)
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
|
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/django_countries/fields.py | 2 | from __future__ import unicode_literals
try:
from urllib import parse as urlparse
except ImportError:
import urlparse # Python 2
from django import forms
from django.db.models.fields import CharField, BLANK_CHOICE_DASH
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.functional import lazy
from django_countries import countries, ioc_data, widgets
from django_countries.conf import settings
@python_2_unicode_compatible
class Country(object):
    """Value object wrapping an ISO 3166-1 country code.

    String conversion, equality and hashing are all based on the
    (possibly empty) code, so a ``Country`` compares equal to the plain
    code string.
    """

    def __init__(self, code, flag_url=None):
        self.code = code
        self.flag_url = flag_url

    def __str__(self):
        return force_text(self.code or '')

    def __eq__(self, other):
        # Compare as text so Country('NZ') == 'NZ' holds; None acts as ''.
        return force_text(self) == force_text(other or '')

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Matches __eq__ semantics: equal objects hash identically.
        return hash(force_text(self))

    def __repr__(self):
        cls_name = self.__class__.__name__
        if self.flag_url is None:
            return "{0}(code={1})".format(cls_name, repr(self.code))
        return "{0}(code={1}, flag_url={2})".format(
            cls_name, repr(self.code), repr(self.flag_url))

    def __bool__(self):
        return bool(self.code)

    __nonzero__ = __bool__  # Python 2 compatibility.

    def __len__(self):
        return len(force_text(self))

    @property
    def name(self):
        """Country name for this code, per the countries registry."""
        return countries.name(self.code)

    @property
    def alpha3(self):
        """ISO 3166-1 alpha-3 code for this country."""
        return countries.alpha3(self.code)

    @property
    def numeric(self):
        """ISO 3166-1 numeric code for this country."""
        return countries.numeric(self.code)

    @property
    def numeric_padded(self):
        """ISO 3166-1 numeric code in padded form."""
        return countries.numeric(self.code, padded=True)

    @property
    def flag(self):
        """Static URL of the flag image, or '' when there is no code."""
        if not self.code:
            return ''
        url_template = self.flag_url
        if url_template is None:
            url_template = settings.COUNTRIES_FLAG_URL
        url = url_template.format(
            code_upper=self.code, code=self.code.lower())
        return urlparse.urljoin(settings.STATIC_URL, url)

    @staticmethod
    def country_from_ioc(ioc_code, flag_url=''):
        """Build a Country from an IOC code, or None if the code is unknown."""
        iso_code = ioc_data.IOC_TO_ISO.get(ioc_code, '')
        if iso_code == '':
            return None
        return Country(iso_code, flag_url=flag_url)

    @property
    def ioc_code(self):
        """IOC (Olympic) code for this country, or '' if none is mapped."""
        return ioc_data.ISO_TO_IOC.get(self.code, '')
class CountryDescriptor(object):
    """
    A descriptor for country fields on a model instance. Returns a Country when
    accessed so you can do things like::

        >>> from people import Person
        >>> person = Person.object.get(name='Chris')
        >>> person.country.name
        'New Zealand'
        >>> person.country.flag
        '/static/flags/nz.gif'
    """

    def __init__(self, field):
        self.field = field

    def __get__(self, instance=None, owner=None):
        # Class-level access has no stored value to wrap.
        if instance is None:
            raise AttributeError(
                "The '%s' attribute can only be accessed from %s instances."
                % (self.field.name, owner.__name__))
        stored_code = instance.__dict__[self.field.name]
        return Country(
            code=stored_code,
            flag_url=self.field.countries_flag_url,
        )

    def __set__(self, instance, value):
        # Normalize to text; None is stored unchanged.
        if value is not None:
            value = force_text(value)
        instance.__dict__[self.field.name] = value
class LazyTypedChoiceField(widgets.LazyChoicesMixin, forms.TypedChoiceField):
    """
    A form TypedChoiceField that respects choices being a lazy object.
    """
    # Lazy-aware widget so the choices are only evaluated at render time.
    widget = widgets.LazySelect
    def _set_choices(self, value):
        """
        Also update the widget's choices.
        """
        super(LazyTypedChoiceField, self)._set_choices(value)
        # Keep the widget in sync, since it holds its own choices reference.
        self.widget.choices = value
class CountryField(CharField):
    """
    A country field for Django models that provides all ISO 3166-1 countries as
    choices.
    """
    descriptor_class = CountryDescriptor

    def __init__(self, *args, **kwargs):
        # Allow a custom Countries class via kwargs; otherwise share the
        # module-level default instance.
        countries_class = kwargs.pop('countries', None)
        self.countries = countries_class() if countries_class else countries
        self.countries_flag_url = kwargs.pop('countries_flag_url', None)
        self.blank_label = kwargs.pop('blank_label', None)
        kwargs.update({
            'max_length': 2,
            'choices': self.countries,
        })
        # NOTE(review): super(CharField, ...) deliberately skips
        # CharField.__init__ here (same pattern as pre_save/formfield
        # below) -- confirm intent before "fixing" to CountryField.
        super(CharField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return "CharField"

    def contribute_to_class(self, cls, name):
        super(CountryField, self).contribute_to_class(cls, name)
        # Install the descriptor so attribute access yields Country objects.
        setattr(cls, self.name, self.descriptor_class(self))

    def get_prep_lookup(self, lookup_type, value):
        # Accept Country instances (anything with a .code) in lookups.
        if hasattr(value, 'code'):
            value = value.code
        return super(CountryField, self).get_prep_lookup(lookup_type, value)

    def pre_save(self, *args, **kwargs):
        "Returns field's value just before saving."
        value = super(CharField, self).pre_save(*args, **kwargs)
        return self.get_prep_value(value)

    def get_prep_value(self, value):
        "Returns field's value prepared for saving into a database."
        # Convert the Country to unicode for database insertion.
        if value is None or getattr(value, 'code', '') is None:
            return None
        return force_text(value)

    def deconstruct(self):
        """
        Remove choices from deconstructed field, as this is the country list
        and not user editable.

        Not including the ``blank_label`` property, as this isn't database
        related.
        """
        name, path, args, kwargs = super(CountryField, self).deconstruct()
        kwargs.pop('choices')
        if self.countries is not countries:
            # Include the countries class if it's not the default countries
            # instance.
            kwargs['countries'] = self.countries.__class__
        return name, path, args, kwargs

    def get_choices(
            self, include_blank=True, blank_choice=None, *args, **kwargs):
        if blank_choice is None:
            if self.blank_label is None:
                blank_choice = BLANK_CHOICE_DASH
            else:
                blank_choice = [('', self.blank_label)]
        return super(CountryField, self).get_choices(
            include_blank=include_blank, blank_choice=blank_choice, *args,
            **kwargs)
    # Evaluate the (large) choices list lazily.
    get_choices = lazy(get_choices, list)

    def formfield(self, **kwargs):
        argname = 'choices_form_class'
        if argname not in kwargs:
            kwargs[argname] = LazyTypedChoiceField
        field = super(CharField, self).formfield(**kwargs)
        if not isinstance(field, LazyTypedChoiceField):
            # Older Django ignores choices_form_class; build the form field
            # manually in that case.
            field = self.legacy_formfield(**kwargs)
        return field

    def legacy_formfield(self, **kwargs):
        """
        Legacy method to fix Django LTS not allowing a custom choices form
        class.
        """
        from django.utils.text import capfirst
        defaults = {'required': not self.blank,
                    'label': capfirst(self.verbose_name),
                    'help_text': self.help_text}
        if self.has_default():
            if callable(self.default):
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        include_blank = (self.blank or
                         not (self.has_default() or 'initial' in kwargs))
        defaults['choices'] = self.get_choices(include_blank=include_blank)
        defaults['coerce'] = self.to_python
        if self.null:
            defaults['empty_value'] = None
        form_class = LazyTypedChoiceField
        # Many of the subclass-specific formfield arguments (min_value,
        # max_value) don't apply for choice fields, so be sure to only pass
        # the values that TypedChoiceField will understand.
        # BUG FIX: iterate over a snapshot of the keys. Deleting entries
        # while iterating kwargs.keys() raises RuntimeError on Python 3,
        # where keys() is a live view (this module supports py2 and py3).
        for k in list(kwargs):
            if k not in ('coerce', 'empty_value', 'choices', 'required',
                         'widget', 'label', 'initial', 'help_text',
                         'error_messages', 'show_hidden_initial'):
                del kwargs[k]
        defaults.update(kwargs)
        return form_class(**defaults)
# If south is installed, ensure that CountryField will be introspected just
# like a normal CharField.
try:  # pragma: no cover
    from south.modelsinspector import add_introspection_rules
    # Raw string: '\.' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, slated to become an error).
    add_introspection_rules([], [r'^django_countries\.fields\.CountryField'])
except ImportError:
    pass
|
vicvinc/dota2ark | refs/heads/master | serverend/dev/scrapynews/scrapynews/items.py | 2 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapynewsItem(scrapy.Item):
    """Scraped news entry: link, title, summary text and payload size."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    url = scrapy.Field()
    name = scrapy.Field()
    description = scrapy.Field()
    size = scrapy.Field()
    # (removed a redundant trailing `pass`: the class body is non-empty)
|
sacsant/avocado-misc-tests | refs/heads/master | io/net/virt-net/veth_dlpar.py | 4 | #!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Author: Bismruti Bidhibrata Pattjoshi<bbidhibr@in.ibm.com>
"""
Veth DLPAR operations
"""
import time
from avocado import Test
from avocado.utils import process
from avocado.utils.ssh import Session
from avocado.utils.network.hosts import LocalHost
from avocado.utils.network.interfaces import NetworkInterface
class VethdlparTest(Test):
    '''
    DLPAR veth script does veth device add,remove.
    Update the details in yaml file.
    '''

    def setUp(self):
        '''
        Gather necessary test inputs: the local interface/IP, the peer IP,
        and VIOS SSH credentials; then discover the slot, virtual adapter
        and SEA device that back the interface under test.
        '''
        self.interface = self.params.get('interface', default=None)
        self.ipaddr = self.params.get("host_ip", default="")
        self.netmask = self.params.get("netmask", default="")
        self.peer_ip = self.params.get('peer_ip', default=None)
        self.num_of_dlpar = int(self.params.get("num_of_dlpar", default='1'))
        self.vios_ip = self.params.get('vios_ip', '*', default=None)
        self.vios_user = self.params.get('vios_username', '*', default=None)
        self.vios_pwd = self.params.get('vios_pwd', '*', default=None)
        self.session = Session(self.vios_ip, user=self.vios_user,
                               password=self.vios_pwd)
        self.session.connect()
        local = LocalHost()
        self.networkinterface = NetworkInterface(self.interface, local)
        try:
            self.networkinterface.add_ipaddr(self.ipaddr, self.netmask)
            self.networkinterface.save(self.ipaddr, self.netmask)
        except Exception:
            # Address may already be configured; still persist the config.
            self.networkinterface.save(self.ipaddr, self.netmask)
        self.networkinterface.bring_up()
        # Initialise discovery results so a missed match below surfaces as
        # a graceful cancel instead of an AttributeError on first use.
        self.slot = None
        self.iface = None
        self.sea = None
        cmd = "lscfg -l %s" % self.interface
        for line in process.system_output(cmd, shell=True).decode("utf-8") \
                .splitlines():
            if self.interface in line:
                self.slot = line.split()[-1].split('-')[-2]
        if not self.slot:
            self.cancel("failed to get slot for %s" % self.interface)
        cmd = "ioscli lsmap -all -net"
        output = self.session.cmd(cmd)
        for line in output.stdout_text.splitlines():
            if self.slot in line:
                self.iface = line.split()[0]
        if not self.iface:
            self.cancel("failed to get virtual adapter for slot %s"
                        % self.slot)
        cmd = "ioscli lsmap -vadapter %s -net" % self.iface
        output = self.session.cmd(cmd)
        for line in output.stdout_text.splitlines():
            if "SEA" in line:
                self.sea = line.split()[-1]
        if not self.sea:
            self.cancel("failed to get SEA")
        self.log.info(self.sea)
        if self.networkinterface.ping_check(self.peer_ip, count=5) is not None:
            self.cancel("peer connection is failed")

    def veth_dlpar_remove(self):
        '''
        veth dlpar remove operation: take the SEA device offline on the
        VIOS via rmdev.
        '''
        cmd = "rmdev -l %s" % self.sea
        cmd_l = "echo \"%s\" | ioscli oem_setup_env" % cmd
        output = self.session.cmd(cmd_l)
        self.log.info(output.stdout_text)
        if output.exit_status != 0:
            self.fail("failed dlpar remove operation")

    def veth_dlpar_add(self):
        '''
        veth dlpar add operation: bring the SEA device back online on the
        VIOS via mkdev.
        '''
        cmd = "mkdev -l %s" % self.sea
        cmd_l = "echo \"%s\" | ioscli oem_setup_env" % cmd
        output = self.session.cmd(cmd_l)
        self.log.info(output.stdout_text)
        if output.exit_status != 0:
            self.fail("Failed dlpar add operation")

    def test_dlpar(self):
        '''
        veth dlapr remove and add operation, repeated num_of_dlpar times;
        each cycle must leave the peer reachable again.
        '''
        for _ in range(self.num_of_dlpar):
            self.veth_dlpar_remove()
            time.sleep(30)
            self.veth_dlpar_add()
            if self.networkinterface.ping_check(self.peer_ip,
                                                count=5) is not None:
                self.fail("ping failed after add operation")

    def tearDown(self):
        # Undo the IP configuration applied in setUp and close the VIOS
        # session.
        self.networkinterface.remove_ipaddr(self.ipaddr, self.netmask)
        self.networkinterface.restore_from_backup()
        self.session.quit()
|
faust64/ansible | refs/heads/devel | lib/ansible/modules/cloud/rackspace/rax_clb_nodes.py | 3 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_clb_nodes
short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
description:
- Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
version_added: "1.4"
options:
address:
required: false
description:
- IP address or domain name of the node
condition:
required: false
choices:
- enabled
- disabled
- draining
description:
- Condition for the node, which determines its role within the load
balancer
load_balancer_id:
required: true
description:
- Load balancer id
node_id:
required: false
description:
- Node id
port:
required: false
description:
- Port number of the load balanced service on the node
state:
required: false
default: "present"
choices:
- present
- absent
description:
- Indicate desired state of the node
type:
required: false
choices:
- primary
- secondary
description:
- Type of node
wait:
required: false
default: "no"
choices:
- "yes"
- "no"
description:
- Wait for the load balancer to become active before returning
wait_timeout:
required: false
default: 30
description:
- How long to wait before giving up and returning an error
weight:
required: false
description:
- Weight of node
author: "Lukasz Kawczynski (@neuroid)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
# Add a new node to the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
address: 10.2.2.3
port: 80
condition: enabled
type: primary
wait: yes
credentials: /path/to/credentials
# Drain connections from a node
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
condition: draining
wait: yes
credentials: /path/to/credentials
# Remove a node from the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
state: absent
wait: yes
credentials: /path/to/credentials
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def _activate_virtualenv(path):
path = os.path.expanduser(path)
activate_this = os.path.join(path, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
def _get_node(lb, node_id=None, address=None, port=None):
"""Return a matching node"""
for node in getattr(lb, 'nodes', []):
match_list = []
if node_id is not None:
match_list.append(getattr(node, 'id', None) == node_id)
if address is not None:
match_list.append(getattr(node, 'address', None) == address)
if port is not None:
match_list.append(getattr(node, 'port', None) == port)
if match_list and all(match_list):
return node
return None
def main():
    """Add, update or remove a node on a Rackspace Cloud Load Balancer.

    Reads the module parameters, resolves the target node (by id or by
    address+port), then converges it towards the requested state and
    optionally waits for the load balancer to return to ACTIVE.
    """
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            address=dict(),
            condition=dict(choices=['enabled', 'disabled', 'draining']),
            load_balancer_id=dict(required=True, type='int'),
            node_id=dict(type='int'),
            port=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            type=dict(choices=['primary', 'secondary']),
            virtualenv=dict(),
            wait=dict(default=False, type='bool'),
            wait_timeout=dict(default=30, type='int'),
            weight=dict(type='int'),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    address = module.params['address']
    # The Rackspace API expects upper-case condition/type values.
    condition = (module.params['condition'] and
                 module.params['condition'].upper())
    load_balancer_id = module.params['load_balancer_id']
    node_id = module.params['node_id']
    port = module.params['port']
    state = module.params['state']
    typ = module.params['type'] and module.params['type'].upper()
    virtualenv = module.params['virtualenv']
    wait = module.params['wait']
    wait_timeout = module.params['wait_timeout'] or 1
    weight = module.params['weight']

    if virtualenv:
        try:
            _activate_virtualenv(virtualenv)
        except IOError as e:
            module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
                virtualenv, e))

    setup_rax_module(module, pyrax)

    if not pyrax.cloud_loadbalancers:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    try:
        lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
    except pyrax.exc.PyraxException as e:
        module.fail_json(msg='%s' % e.message)

    node = _get_node(lb, node_id, address, port)

    result = rax_clb_node_to_dict(node)

    if state == 'absent':
        if not node:  # Removing a non-existent node
            module.exit_json(changed=False, state=state)
        try:
            lb.delete_node(node)
            result = {}
        except pyrax.exc.NotFound:
            module.exit_json(changed=False, state=state)
        except pyrax.exc.PyraxException as e:
            module.fail_json(msg='%s' % e.message)
    else:  # present
        if not node:
            if node_id:  # Updating a non-existent node
                msg = 'Node %d not found' % node_id
                if lb.nodes:
                    msg += (' (available nodes: %s)' %
                            ', '.join([str(x.id) for x in lb.nodes]))
                module.fail_json(msg=msg)
            else:  # Creating a new node
                try:
                    node = pyrax.cloudloadbalancers.Node(
                        address=address, port=port, condition=condition,
                        weight=weight, type=typ)
                    resp, body = lb.add_nodes([node])
                    result.update(body['nodes'][0])
                except pyrax.exc.PyraxException as e:
                    module.fail_json(msg='%s' % e.message)
        else:  # Updating an existing node
            mutable = {
                'condition': condition,
                'type': typ,
                'weight': weight,
            }

            # Drop attributes that are unset or already match the node.
            # Iterate over a snapshot: popping from the dict while looping
            # over mutable.items() directly raises RuntimeError on Python 3.
            for name, value in list(mutable.items()):
                if value is None or value == getattr(node, name):
                    mutable.pop(name)

            if not mutable:
                module.exit_json(changed=False, state=state, node=result)

            try:
                # The diff has to be set explicitly to update node's weight and
                # type; this should probably be fixed in pyrax
                lb.update_node(node, diff=mutable)
                result.update(mutable)
            except pyrax.exc.PyraxException as e:
                module.fail_json(msg='%s' % e.message)

    if wait:
        pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
                               attempts=wait_timeout)
        if lb.status != 'ACTIVE':
            module.fail_json(
                msg='Load balancer not active after %ds (current status: %s)' %
                    (wait_timeout, lb.status.lower()))

    kwargs = {'node': result} if result else {}
    module.exit_json(changed=True, state=state, **kwargs)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
# Entry point: run the module only when executed directly by Ansible,
# not when imported.
if __name__ == '__main__':
    main()
|
allotria/intellij-community | refs/heads/master | python/testData/completion/mockPatchObject2/a.after.py | 15 | from unittest import mock
# NOTE(review): looks like the expected-output fixture of an IDE completion
# test (a.after.py); the call result is deliberately discarded -- verify
# before "fixing".
mock.patch.object()
yask123/django | refs/heads/master | tests/file_storage/tests.py | 199 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
import warnings
from datetime import datetime, timedelta
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.utils import six
from django.utils._os import upath
from django.utils.six.moves.urllib.request import urlopen
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
    # Tests for get_storage_class(): resolving a dotted path to a storage
    # backend class, and the ImportError raised for bad paths.

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        self.assertEqual(
            get_storage_class('django.core.files.storage.FileSystemStorage'),
            FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import don't exist.
        """
        # The "'?" accounts for quoting differences between Python versions.
        with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
            get_storage_class('storage.NonExistingStorage')

    def test_get_nonexisting_storage_class(self):
        """
        get_storage_class raises an error if the requested class don't exist.
        """
        self.assertRaises(ImportError, get_storage_class,
                          'django.core.files.storage.NonExistingStorage')

    def test_get_nonexisting_storage_module(self):
        """
        get_storage_class raises an error if the requested module don't exist.
        """
        # Error message may or may not be the fully qualified path.
        with six.assertRaisesRegex(self, ImportError,
                "No module named '?(django.core.files.)?non_existing_storage'?"):
            get_storage_class(
                'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageDeconstructionTests(unittest.TestCase):
    """FileSystemStorage.deconstruct() for migration serialization."""

    def test_deconstruction(self):
        # The default storage deconstructs to its dotted path, no
        # positional args, and just the location kwarg.
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, tuple())
        self.assertEqual(kwargs, {'location': temp_storage_location})

        # Extra constructor kwargs must round-trip through deconstruct().
        original_kwargs = {
            'location': temp_storage_location,
            'base_url': 'http://myfiles.example.com/'
        }
        storage = FileSystemStorage(**original_kwargs)
        path, args, kwargs = storage.deconstruct()
        self.assertEqual(kwargs, original_kwargs)
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
Makes sure an exception is raised if the location is empty
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file',
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(six.StringIO('1'), '', 'test',
'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
# should encode special chars except ~!*()'
# like encodeURIComponent() JavaScript function do
self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
"""/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""),
"""/test_media_url/a/b/c.file""")
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir,
base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files),
{'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file',
ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file',
ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
self.assertRaises(OSError,
self.storage.save, 'error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
    """
    File storage should be robust against file removal race conditions.
    """
    real_remove = os.remove

    # Monkey-patch os.remove, to simulate a normal call, a raced call,
    # and an error.
    def fake_remove(path):
        if path == os.path.join(self.temp_dir, 'normal.file'):
            real_remove(path)
        elif path == os.path.join(self.temp_dir, 'raced.file'):
            real_remove(path)
            raise OSError(errno.ENOENT, 'simulated ENOENT')
        elif path == os.path.join(self.temp_dir, 'error.file'):
            raise OSError(errno.EACCES, 'simulated EACCES')
        else:
            self.fail('unexpected argument %r' % path)

    try:
        os.remove = fake_remove

        self.storage.save('normal.file', ContentFile('delete normally'))
        self.storage.delete('normal.file')
        self.assertFalse(self.storage.exists('normal.file'))

        self.storage.save('raced.file', ContentFile('delete with race'))
        self.storage.delete('raced.file')
        # Bug fix: this previously re-checked 'normal.file'; the raced
        # file is the one whose removal this branch must verify.
        self.assertFalse(self.storage.exists('raced.file'))

        # Check that OSErrors aside from ENOENT are still raised.
        self.storage.save('error.file', ContentFile('delete with error'))
        self.assertRaises(OSError, self.storage.delete, 'error.file')
    finally:
        os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() is raising an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name, max_length=None):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        pieces = name.split('.')
        stem, extensions = pieces[0], pieces[1:]
        counter = 2
        while self.exists(name):
            # e.g. "file.txt" -> "file.2.txt" -> "file.3.txt" ...
            name = '.'.join([stem, str(counter)] + extensions)
            counter += 1
        return name
class CustomStorageTests(FileStorageTests):
    # Re-runs the whole FileStorageTests suite against CustomStorage.
    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        # A second save of the same name gets a ".2" suffix (Trac style),
        # rather than the default underscore+random suffix.
        first = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(first, 'custom_storage')
        second = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(second, 'custom_storage.2')
        self.storage.delete(first)
        self.storage.delete(second)
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
# Given the max_length is limited, when multiple files get uploaded
# under the same name, then the filename get truncated in order to fit
# in _(7 random chars). When most of the max_length is taken by
# dirname + extension and there are not enough characters in the
# filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
six.assertRegex(self, names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
self.assertRaisesMessage(
SuspiciousFileOperation, 'Storage can not find an available filename',
objs[1].limited_length.save, *(filename, ContentFile('Same Content'))
)
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_old_style_storage(self):
# Testing backward-compatibility with old-style storage backends that
# don't take ``max_length`` parameter in ``get_available_name()``
# and save(). A deprecation warning should be raised.
obj = Storage()
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
obj.old_style.save('deprecated_storage_test.txt', ContentFile('Same Content'))
self.assertEqual(len(warns), 2)
self.assertEqual(
str(warns[0].message),
'Backwards compatibility for storage backends without support for '
'the `max_length` argument in Storage.save() will be removed in '
'Django 1.10.'
)
self.assertEqual(
str(warns[1].message),
'Backwards compatibility for storage backends without support for '
'the `max_length` argument in Storage.get_available_name() will '
'be removed in Django 1.10.'
)
self.assertEqual(obj.old_style.name, 'tests/deprecated_storage_test.txt')
self.assertEqual(obj.old_style.read(), b'Same Content')
obj.old_style.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "./django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = six.StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.

class SlowFile(ContentFile):
    """A ContentFile whose chunks() stalls, widening the save() race window."""

    def chunks(self):
        # Sleep so that two concurrent save() calls overlap deterministically.
        time.sleep(1)
        # BUG FIX: the original called super(ContentFile, self), which starts
        # the MRO lookup *after* ContentFile and thus skips ContentFile's own
        # behavior entirely. super() must name this class, not its parent.
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
    """Regression test for a race condition on file saving (#4948)."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        # Second writer that will race against the main thread's save_file().
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def save_file(self, name):
        # SlowFile sleeps inside chunks(), keeping both saves in flight at once.
        name = self.storage.save(name, SlowFile(b"Data"))

    def test_race_condition(self):
        self.thread.start()
        self.save_file('conflict')
        self.thread.join()
        files = sorted(os.listdir(self.storage_dir))
        # Both saves must survive: one keeps the requested name, the other is
        # renamed with a generated suffix instead of clobbering the first.
        self.assertEqual(files[0], 'conflict')
        six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """File and directory mode bits applied by FileSystemStorage uploads."""

    def setUp(self):
        # Force a known umask so the default-permission expectations are stable
        # regardless of the environment running the tests.
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        # An explicit FILE_UPLOAD_PERMISSIONS is applied verbatim.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        # With no explicit setting, the mode falls back to 0o666 masked by umask.
        self.storage = FileSystemStorage(self.storage_dir)
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        self.assertEqual(mode, 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        # Intermediate directories get FILE_UPLOAD_DIRECTORY_PERMISSIONS.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        # With no explicit setting, directory mode is 0o777 masked by umask.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
    """How duplicate-name mangling interacts with dots in directory/file names."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_directory_with_dot(self):
        """Regression test for #9610.

        If the directory name contains a dot and the file name doesn't, make
        sure we still mangle the file name instead of the directory name.
        """
        for content in ("1", "2"):
            self.storage.save('dotted.path/test', ContentFile(content))
        entries = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        # The directory name stays untouched; the second file is suffixed.
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(entries[0], 'test')
        six.assertRegex(self, entries[1], 'test_%s' % FILE_SUFFIX_REGEX)

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an extension,
        and the underscore should get added to the end.
        """
        for content in ("1", "2"):
            self.storage.save('dotted.path/.test', ContentFile(content))
        entries = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(entries[0], '.test')
        six.assertRegex(self, entries[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
    """ContentFile objects save cleanly through FileSystemStorage."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_content_saving(self):
        """ContentFile saves correctly whether built from bytes or text."""
        for name, payload in (('bytes.txt', b"content"), ('unicode.txt', "español")):
            self.storage.save(name, ContentFile(payload))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
    """
    Test file-like objects (#15644).
    """
    available_apps = []

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_urllib2_urlopen(self):
        """
        Test the File storage API with a file like object coming from urllib2.urlopen()
        """
        # Save the live server's response through the storage API.
        file_like_object = urlopen(self.live_server_url + '/')
        f = File(file_like_object)
        stored_filename = self.storage.save("remote_file.html", f)

        # Fetch the page a second time (the first response was consumed by
        # save()) and check the stored copy matches it byte-for-byte.
        remote_file = urlopen(self.live_server_url + '/')
        with self.storage.open(stored_filename) as stored_file:
            self.assertEqual(stored_file.read(), remote_file.read())
|
edxnercel/edx-platform | refs/heads/master | lms/djangoapps/instructor_task/models.py | 80 | """
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py schemamigration instructor_task --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/instructor_task/migrations/
ASSUMPTIONS: modules have unique IDs, even across different module_types
"""
from cStringIO import StringIO
from gzip import GzipFile
from uuid import uuid4
import csv
import json
import hashlib
import os.path
import urllib
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models, transaction
from xmodule_django.models import CourseKeyField
# define custom states used by InstructorTask
# QUEUING is the initial task_state assigned by InstructorTask.create() before
# celery picks the task up. PROGRESS presumably marks a running task that is
# reporting intermediate status -- it is not assigned anywhere in this module,
# so confirm its usage against the task runner code.
QUEUING = 'QUEUING'
PROGRESS = 'PROGRESS'
class InstructorTask(models.Model):
    """
    Stores information about background tasks that have been submitted to
    perform work by an instructor (or course staff).
    Examples include grading and rescoring.

    `task_type` identifies the kind of task being performed, e.g. rescoring.
    `course_id` uses the course run's unique id to identify the course.
    `task_key` stores relevant input arguments encoded into key value for testing to see
       if the task is already running (together with task_type and course_id).
    `task_input` stores input arguments as JSON-serialized dict, for reporting purposes.
        Examples include url of problem being rescored, id of student if only one student being rescored.
    `task_id` stores the id used by celery for the background task.
    `task_state` stores the last known state of the celery task
    `task_output` stores the output of the celery task.
        Format is a JSON-serialized dict.  Content varies by task_type and task_state.
    `requester` stores id of user who submitted the task
    `created` stores date that entry was first created
    `updated` stores date that entry was last modified
    """
    task_type = models.CharField(max_length=50, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    task_key = models.CharField(max_length=255, db_index=True)
    # Must hold the JSON-serialized input; create() enforces the 255 limit.
    task_input = models.CharField(max_length=255)
    task_id = models.CharField(max_length=255, db_index=True)  # max_length from celery_taskmeta
    task_state = models.CharField(max_length=50, null=True, db_index=True)  # max_length from celery_taskmeta
    # The create_output_for_* helpers below guarantee output fits this column.
    task_output = models.CharField(max_length=1024, null=True)
    requester = models.ForeignKey(User, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True)
    updated = models.DateTimeField(auto_now=True)
    subtasks = models.TextField(blank=True)  # JSON dictionary

    def __repr__(self):
        return 'InstructorTask<%r>' % ({
            'task_type': self.task_type,
            'course_id': self.course_id,
            'task_input': self.task_input,
            'task_id': self.task_id,
            'task_state': self.task_state,
            'task_output': self.task_output,
        },)

    def __unicode__(self):
        return unicode(repr(self))

    @classmethod
    def create(cls, course_id, task_type, task_key, task_input, requester):
        """
        Create an instance of InstructorTask.

        The InstructorTask.save_now method makes sure the InstructorTask entry is committed.
        When called from any view that is wrapped by TransactionMiddleware,
        and thus in a "commit-on-success" transaction, an autocommit buried within here
        will cause any pending transaction to be committed by a successful
        save here.  Any future database operations will take place in a
        separate transaction.

        Raises ValueError if the JSON-serialized `task_input` exceeds the
        255-character limit of the task_input column.
        """
        # create the task_id here, and pass it into celery:
        task_id = str(uuid4())

        json_task_input = json.dumps(task_input)

        # check length of task_input, and return an exception if it's too long:
        if len(json_task_input) > 255:
            fmt = 'Task input longer than 255: "{input}" for "{task}" of "{course}"'
            msg = fmt.format(input=json_task_input, task=task_type, course=course_id)
            raise ValueError(msg)

        # create the task, then save it:
        instructor_task = cls(
            course_id=course_id,
            task_type=task_type,
            task_id=task_id,
            task_key=task_key,
            task_input=json_task_input,
            task_state=QUEUING,
            requester=requester
        )
        instructor_task.save_now()

        return instructor_task

    @transaction.autocommit
    def save_now(self):
        """
        Writes InstructorTask immediately, ensuring the transaction is committed.

        Autocommit annotation makes sure the database entry is committed.
        When called from any view that is wrapped by TransactionMiddleware,
        and thus in a "commit-on-success" transaction, this autocommit here
        will cause any pending transaction to be committed by a successful
        save here.  Any future database operations will take place in a
        separate transaction.
        """
        self.save()

    @staticmethod
    def create_output_for_success(returned_result):
        """
        Converts successful result to output format.

        Raises a ValueError exception if the output is too long.
        """
        # In future, there should be a check here that the resulting JSON
        # will fit in the column.  In the meantime, just return an exception.
        json_output = json.dumps(returned_result)
        if len(json_output) > 1023:
            raise ValueError("Length of task output is too long: {0}".format(json_output))
        return json_output

    @staticmethod
    def create_output_for_failure(exception, traceback_string):
        """
        Converts failed result information to output format.

        Traceback information is truncated or not included if it would result in an output string
        that would not fit in the database.  If the output is still too long, then the
        exception message is also truncated.

        Truncation is indicated by adding "..." to the end of the value.
        """
        tag = '...'
        # `exception.message` is Python-2-only; this module targets Python 2.
        task_progress = {'exception': type(exception).__name__, 'message': unicode(exception.message)}
        if traceback_string is not None:
            # truncate any traceback that goes into the InstructorTask model:
            task_progress['traceback'] = traceback_string
        json_output = json.dumps(task_progress)
        # if the resulting output is too long, then first shorten the
        # traceback, and then the message, until it fits.
        # `too_long` counts the number of characters over the 1023 budget.
        too_long = len(json_output) - 1023
        if too_long > 0:
            if traceback_string is not None:
                if too_long >= len(traceback_string) - len(tag):
                    # remove the traceback entry entirely (so no key or value)
                    del task_progress['traceback']
                    too_long -= (len(traceback_string) + len('traceback'))
                else:
                    # truncate the traceback:
                    task_progress['traceback'] = traceback_string[:-(too_long + len(tag))] + tag
                    too_long = 0
            if too_long > 0:
                # we need to shorten the message:
                task_progress['message'] = task_progress['message'][:-(too_long + len(tag))] + tag
            # re-serialize after mutation so the stored JSON reflects the cuts.
            json_output = json.dumps(task_progress)
        return json_output

    @staticmethod
    def create_output_for_revoked():
        """Creates standard message to store in output format for revoked tasks."""
        return json.dumps({'message': 'Task revoked before running'})
class ReportStore(object):
    """
    Simple abstraction layer that can fetch and store CSV files for reports
    download. Should probably refactor later to create a ReportFile object that
    can simply be appended to for the sake of memory efficiency, rather than
    passing in the whole dataset. Doing that for now just because it's simpler.
    """
    @classmethod
    def from_config(cls, config_name):
        """
        Return one of the ReportStore subclasses depending on django
        configuration. Look at subclasses for expected configuration.
        """
        backend = getattr(settings, config_name).get("STORAGE_TYPE").lower()
        if backend == "s3":
            return S3ReportStore.from_config(config_name)
        if backend == "localfs":
            return LocalFSReportStore.from_config(config_name)

    def _get_utf8_encoded_rows(self, rows):
        """
        Given a list of `rows` containing unicode strings, return a
        new list of rows with those strings encoded as utf-8 for CSV
        compatibility.
        """
        for raw_row in rows:
            yield [unicode(cell).encode('utf-8') for cell in raw_row]
class S3ReportStore(ReportStore):
    """
    Reports store backed by S3. The directory structure we use to store things
    is::

        `{bucket}/{root_path}/{sha1 hash of course_id}/filename`

    We might later use subdirectories or metadata to do more intelligent
    grouping and querying, but right now it simply depends on its own
    conventions on where files are stored to know what to display. Clients using
    this class can name the final file whatever they want.
    """
    def __init__(self, bucket_name, root_path):
        # Connect eagerly: a bad bucket name or bad credentials fail here,
        # not on first store()/links_for() call.
        self.root_path = root_path
        conn = S3Connection(
            settings.AWS_ACCESS_KEY_ID,
            settings.AWS_SECRET_ACCESS_KEY
        )
        self.bucket = conn.get_bucket(bucket_name)

    @classmethod
    def from_config(cls, config_name):
        """
        The expected configuration for an `S3ReportStore` is to have a
        `GRADES_DOWNLOAD` dict in settings with the following fields::

            STORAGE_TYPE : "s3"
            BUCKET : Your bucket name, e.g. "reports-bucket"
            ROOT_PATH : The path you want to store all course files under. Do not
                        use a leading or trailing slash. e.g. "staging" or
                        "staging/2013", not "/staging", or "/staging/"

        Since S3 access relies on boto, you must also define `AWS_ACCESS_KEY_ID`
        and `AWS_SECRET_ACCESS_KEY` in settings.
        """
        return cls(
            getattr(settings, config_name).get("BUCKET"),
            getattr(settings, config_name).get("ROOT_PATH")
        )

    def key_for(self, course_id, filename):
        """Return the S3 key we would use to store and retrieve the data for the
        given filename."""
        # The key path embeds a sha1 of the course id (deprecated string form),
        # matching the `{bucket}/{root_path}/{sha1}/filename` layout above.
        hashed_course_id = hashlib.sha1(course_id.to_deprecated_string())

        key = Key(self.bucket)
        key.key = "{}/{}/{}".format(
            self.root_path,
            hashed_course_id.hexdigest(),
            filename
        )

        return key

    def store(self, course_id, filename, buff, config=None):
        """
        Store the contents of `buff` in a directory determined by hashing
        `course_id`, and name the file `filename`. `buff` is typically a
        `StringIO`, but can be anything that implements `.getvalue()`.

        This method assumes that the contents of `buff` are gzip-encoded (it
        will add the appropriate headers to S3 to make the decompression
        transparent via the browser). Filenames should end in whatever
        suffix makes sense for the original file, so `.txt` instead of `.gz`

        `config` may override 'content_type' (default 'text/csv') and
        'content_encoding' (default 'gzip').
        """
        key = self.key_for(course_id, filename)

        _config = config if config else {}

        content_type = _config.get('content_type', 'text/csv')
        content_encoding = _config.get('content_encoding', 'gzip')

        data = buff.getvalue()
        key.size = len(data)
        key.content_encoding = content_encoding
        key.content_type = content_type

        # Just setting the content encoding and type above should work
        # according to the docs, but when experimenting, this was necessary for
        # it to actually take.
        key.set_contents_from_string(
            data,
            headers={
                "Content-Encoding": content_encoding,
                "Content-Length": len(data),
                "Content-Type": content_type,
            }
        )

    def store_rows(self, course_id, filename, rows):
        """
        Given a `course_id`, `filename`, and `rows` (each row is an iterable of
        strings), create a buffer that is a gzip'd csv file, and then `store()`
        that buffer.

        Even though we store it in gzip format, browsers will transparently
        download and decompress it. Filenames should end in `.csv`, not `.gz`.
        """
        output_buffer = StringIO()
        # Explicit close() (not just flush) so GzipFile writes its trailer
        # before we hand the buffer to store().
        gzip_file = GzipFile(fileobj=output_buffer, mode="wb")
        csvwriter = csv.writer(gzip_file)
        csvwriter.writerows(self._get_utf8_encoded_rows(rows))
        gzip_file.close()

        self.store(course_id, filename, output_buffer)

    def links_for(self, course_id):
        """
        For a given `course_id`, return a list of `(filename, url)` tuples. `url`
        can be plugged straight into an href
        """
        course_dir = self.key_for(course_id, '')
        # Signed URLs expire after 300 seconds; newest files are listed first.
        return [
            (key.key.split("/")[-1], key.generate_url(expires_in=300))
            for key in sorted(self.bucket.list(prefix=course_dir.key), reverse=True, key=lambda k: k.last_modified)
        ]
class LocalFSReportStore(ReportStore):
    """
    LocalFS implementation of a ReportStore. This is meant for debugging
    purposes and is *absolutely not for production use*. Use S3ReportStore for
    that. We use this in tests and for local development. When it generates
    links, it will make file:/// style links. That means you actually have to
    copy them and open them in a separate browser window, for security reasons.
    This lets us do the cheap thing locally for debugging without having to open
    up a separate URL that would only be used to send files in dev.
    """
    def __init__(self, root_path):
        """
        Initialize with root_path where we're going to store our files. We
        will build a directory structure under this for each course.
        """
        self.root_path = root_path
        if not os.path.exists(root_path):
            os.makedirs(root_path)

    @classmethod
    def from_config(cls, config_name):
        """
        Generate an instance of this object from Django settings. It assumes
        that there is a dict in settings named GRADES_DOWNLOAD and that it has
        a ROOT_PATH that maps to an absolute file path that the web app has
        write permissions to. `LocalFSReportStore` will create any intermediate
        directories as needed. Example::

            STORAGE_TYPE : "localfs"
            ROOT_PATH : /tmp/edx/report-downloads/
        """
        return cls(getattr(settings, config_name).get("ROOT_PATH"))

    def path_to(self, course_id, filename):
        """Return the full path to a given file for a given course."""
        return os.path.join(self.root_path, urllib.quote(course_id.to_deprecated_string(), safe=''), filename)

    def store(self, course_id, filename, buff, config=None):  # pylint: disable=unused-argument
        """
        Given the `course_id` and `filename`, store the contents of `buff` in
        that file. Overwrite anything that was there previously. `buff` is
        assumed to be a StringIO object (or anything that can flush its contents
        to string using `.getvalue()`).
        """
        full_path = self.path_to(course_id, filename)
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            # Use makedirs (not mkdir) so filenames containing subdirectories
            # work, matching the documented "create any intermediate
            # directories as needed" contract of this class.
            os.makedirs(directory)
        with open(full_path, "wb") as f:
            f.write(buff.getvalue())

    def store_rows(self, course_id, filename, rows):
        """
        Given a course_id, filename, and rows (each row is an iterable of strings),
        write this data out.
        """
        output_buffer = StringIO()
        csvwriter = csv.writer(output_buffer)
        csvwriter.writerows(self._get_utf8_encoded_rows(rows))
        self.store(course_id, filename, output_buffer)

    def links_for(self, course_id):
        """
        For a given `course_id`, return a list of `(filename, url)` tuples. `url`
        can be plugged straight into an href. Note that `LocalFSReportStore`
        will generate `file://` type URLs, so you'll need to copy the URL and
        open it in a new browser window. Again, this class is only meant for
        local development.
        """
        course_dir = self.path_to(course_id, '')
        if not os.path.exists(course_dir):
            return []
        files = [(filename, os.path.join(course_dir, filename)) for filename in os.listdir(course_dir)]
        # Newest first. Single-argument lambda instead of the original
        # tuple-parameter lambda, which is Python-2-only syntax (PEP 3113).
        files.sort(key=lambda entry: os.path.getmtime(entry[1]), reverse=True)
        return [
            (filename, ("file://" + urllib.quote(full_path)))
            for filename, full_path in files
        ]
|
StrellaGroup/erpnext | refs/heads/develop | erpnext/accounts/doctype/pos_field/pos_field.py | 5 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class POSField(Document):
    """Controller for the "POS Field" doctype.

    No behavior is added beyond the frappe Document base class; the doctype
    is defined declaratively elsewhere.
    """
    pass
|
vitaly-krugl/nupic | refs/heads/master | examples/opf/experiments/anomaly/spatial/simple/description.py | 10 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
import os

from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI

from nupic.frameworks.opf.exp_description_helpers import (
  updateConfigFromSubConfig,
  applyValueGettersToContainer,
  DeferredDictLookup)

from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
                                            InferenceElement)
from nupic.support import aggregationDivide

from nupic.frameworks.opf.opf_task_driver import (
                                        IterationPhaseSpecLearnOnly,
                                        IterationPhaseSpecInferOnly,
                                        IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
# Base model configuration. Sub-experiments may override entries via
# updateConfigFromSubConfig() below; values here are the defaults.
config = {
    # Type of model that the rest of these parameters apply to.
    'model': "HTMPrediction",

    # Version that specifies the format of the config.
    'version': 1,

    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
                                     ('numericFieldNameB', 'sum'),
                                     ('categoryFieldNameC', 'first')],
                         'hours': 0},

    'predictAheadTime': None,

    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'NontemporalAnomaly',

        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,

            # Example:
            #     dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #     ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                'field0': dict(fieldname='field0', n=100, name='field0', type='SDRCategoryEncoder', w=21),
                'p': dict(fieldname='p', n=100, name='p', type='SDRCategoryEncoder', w=21),
            },

            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            #
            # (value generated from SENSOR_AUTO_RESET)
            'sensorAutoReset' : None,
        },

        'spEnable': True,

        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,

            'globalInhibition': 1,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            'inputWidth': 0,

            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,

            'seed': 1956,

            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2
            'potentialPct': 0.5,

            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10. Cells whose activity
            # level before inhibition falls below minDutyCycleBeforeInh
            # will have their own internal synPermConnectedCell
            # threshold set below this default value.
            # (This concept applies to both SP and TM and so 'cells'
            # is correct here as opposed to 'columns')
            'synPermConnected': 0.1,

            'synPermActiveInc': 0.1,

            'synPermInactiveDec': 0.01,
        },

        # Controls whether TM is enabled or disabled;
        # TM is necessary for making temporal predictions, such as predicting
        # the next inputs. Without TM, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tmEnable' : True,

        'tmParams': {
            # TM diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
            'verbosity': 0,

            # Number of cell columns in the cortical region (same number for
            # SP and TM)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,

            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,

            'inputWidth': 2048,

            'seed': 1960,

            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',

            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,

            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,

            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TM
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,

            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,

            # Permanence Increment
            'permanenceInc': 0.1,

            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,

            'globalDecay': 0.0,

            'maxAge': 0,

            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 12,

            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 16,

            'outputType': 'normal',

            # "Pay Attention Mode" length. This tells the TM how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 1,
        },

        'clParams': {
            # Classifier implementation selection.
            'implementation': 'py',

            'regionName' : 'SDRClassifierRegion',

            # Classifier diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            'verbosity' : 0,

            # This controls how fast the classifier learns/forgets. Higher values
            # make it adapt faster and forget older patterns faster.
            'alpha': 0.001,

            # This is set after the call to updateConfigFromSubConfig and is
            # computed from the aggregationInfo and predictAheadTime.
            'steps': '1',
        },

        'trainSPNetOnlyIfRequested': False,
    },
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)

# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
    # Number of aggregation periods that cover predictAheadTime, rounded to
    # the nearest whole step; must be at least one step ahead.
    predictionSteps = int(round(aggregationDivide(
        config['predictAheadTime'], config['aggregationInfo'])))
    assert (predictionSteps >= 1)
    config['modelParams']['clParams']['steps'] = str(predictionSteps)

# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
# Experiment control specification: one "Anomaly" task driven over the local
# data.csv with combined learning and inference.
control = dict(
  environment='opfExperiment',

  tasks = [
    {
      # Task label; this label string may be used for diagnostic logging and for
      # constructing filenames or directory pathnames for task-specific files, etc.
      'taskLabel' : "Anomaly",

      # Input stream specification per py/nupic/cluster/database/StreamDef.json.
      #
      'dataset' : {
        'info': 'test_NoProviders',
        'version': 1,

        'streams': [
          {
            'columns': ['*'],
            'info': 'my simple dataset',
            # data.csv is resolved relative to this description.py file.
            'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
          }
        ],

        # TODO: Aggregation is not supported yet by run_opf_experiment.py
        #'aggregation' : config['aggregationInfo']
      },

      # Iteration count: maximum number of iterations.  Each iteration corresponds
      # to one record from the (possibly aggregated) dataset.  The task is
      # terminated when either number of iterations reaches iterationCount or
      # all records in the (possibly aggregated) database have been processed,
      # whichever occurs first.
      #
      # iterationCount of -1 = iterate over the entire dataset
      'iterationCount' : -1,

      # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
      'taskControl' : {

        # Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
        # instances.
        'iterationCycle' : [
          #IterationPhaseSpecLearnOnly(1000),
          IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
          #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
        ],

        'metrics' : [
        ],

        # Logged Metrics: A sequence of regular expressions that specify which of
        # the metrics from the Inference Specifications section MUST be logged for
        # every prediction. The regex's correspond to the automatically generated
        # metric labels. This is similar to the way the optimization metric is
        # specified in permutations.py.
        'loggedMetrics': ['.*nupicScore.*'],

        # Callbacks for experimentation/research (optional)
        'callbacks' : {
          # Callbacks to be called at the beginning of a task, before model iterations.
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          # 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
          # 'setup' : [htmPredictionModelControlDisableTPLearningCb],
          'setup' : [],

          # Callbacks to be called after every learning/inference iteration
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'postIter' : [],

          # Callbacks to be called when the experiment task is finished
          # Signature: callback(<reference to OPFExperiment>); returns nothing
          'finish' : []
        }
      } # End of taskControl
    }, # End of task
  ]
)

# Bundle the model configuration and control specification into the object the
# OPF experiment runner consumes.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
|
nQuantums/gantest | refs/heads/master | train.py | 1 | """学習実行処理.
"""
import argparse
import numpy as np
import cv2
import dnn
import dsconf
import selector
def main():
    """Parse command-line options, then train the selected model on the
    configured dataset, saving the learned parameters at the end."""
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Training of specified model.')
    parser.add_argument("modelName", help='Model name. Currently CycleGAN only.')
    parser.add_argument("--epoch", "-e", type=int, default=10, help='Number of epochs.')
    parser.add_argument("--batch_size", "-b", type=int, default=2, help='Batch size.')
    parser.add_argument("--gpu", "-g", type=int, default=0, help='GPU device number. Use numpy if specified -1.')
    args = parser.parse_args()
    batch_size = args.batch_size
    # Initialize the runtime environment (GPU selection etc.)
    dnn.Init(args.gpu)
    # Look up the root object for the requested model name
    mr = selector.SelectByName(args.modelName)
    # Load the training dataset
    print("Loading datasets...")
    ds = mr.GetDsLoader().Load(dsconf.GetDir())
    print("Done.")
    # Create the training model
    m = mr.CreateModel(args.modelName, dsconf.InChs, dsconf.OutChs, batch_size)
    # Restore previously trained parameters, if any exist
    print("Loading trained data...")
    m.load()
    print("Done.")
    # CPU-side staging buffers sized for one batch; filling these first and
    # transferring once via ToGpu avoids wasteful per-sample transfers.
    xcpu = np.zeros((batch_size, dsconf.InChs, dsconf.MaxImageSize[0], dsconf.MaxImageSize[1]), dtype=dnn.dtype)
    tcpu = np.zeros((batch_size, dsconf.OutChs, dsconf.MaxImageSize[0], dsconf.MaxImageSize[1]), dtype=dnn.dtype)
    # Training loop
    requestQuit = False
    iterCount = 0
    for epoch in range(args.epoch):
        if requestQuit:
            break
        print("Epoch", epoch)
        for b in range(len(ds) // batch_size):
            # Assemble one batch from consecutive dataset entries
            for i in range(batch_size):
                index = b * batch_size + i
                d = ds[index]
                xcpu[i, :, :, :], tcpu[i, :, :, :] = d.get()
            # Run one training step; the two boolean flags enable some
            # every-10-iterations behavior inside m.train — TODO confirm
            # their exact meaning (likely logging/preview output).
            m.train(dnn.ToGpu(xcpu), dnn.ToGpu(tcpu), iterCount % 10 == 0, iterCount % 10 == 0)
            iterCount += 1
            # Pump the OpenCV event loop so the window stays responsive and
            # ESC (keycode 27) can abort training between iterations.
            k = cv2.waitKey(1)
            if k == 27:
                requestQuit = True
                break
    # Persist the trained parameters
    print("Saving trained data...")
    m.save()
    print("Done.")
if __name__ == "__main__":
main()
|
p4datasystems/CarnotKEdist | refs/heads/master | dist/Lib/sysconfig.py | 11 | """Provide access to Python's configuration information.
"""
import sys
import os
from os.path import pardir, realpath
_INSTALL_SCHEMES = {
'posix_prefix': {
'stdlib': '{base}/lib/python{py_version_short}',
'platstdlib': '{platbase}/lib/python{py_version_short}',
'purelib': '{base}/lib/python{py_version_short}/site-packages',
'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
'include': '{base}/include/python{py_version_short}',
'platinclude': '{platbase}/include/python{py_version_short}',
'scripts': '{base}/bin',
'data': '{base}',
},
'posix_home': {
'stdlib': '{base}/lib/python',
'platstdlib': '{base}/lib/python',
'purelib': '{base}/lib/python',
'platlib': '{base}/lib/python',
'include': '{base}/include/python',
'platinclude': '{base}/include/python',
'scripts': '{base}/bin',
'data' : '{base}',
},
'nt': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2_home': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'nt_user': {
'stdlib': '{userbase}/Python{py_version_nodot}',
'platstdlib': '{userbase}/Python{py_version_nodot}',
'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
'include': '{userbase}/Python{py_version_nodot}/Include',
'scripts': '{userbase}/Scripts',
'data' : '{userbase}',
},
'posix_user': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'osx_framework_user': {
'stdlib': '{userbase}/lib/python',
'platstdlib': '{userbase}/lib/python',
'purelib': '{userbase}/lib/python/site-packages',
'platlib': '{userbase}/lib/python/site-packages',
'include': '{userbase}/include',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'java': {
'stdlib': '{base}/lib/jython',
'platstdlib': '{base}/lib/jython',
'purelib': '{base}/lib/jython',
'platlib': '{base}/lib/jython',
'include': '{base}/include/jython',
'platinclude': '{base}/include/jython',
'scripts': '{base}/bin',
'data' : '{base}',
},
'java_user': {
'stdlib': '{userbase}/lib/jython{py_version_short}',
'platstdlib': '{userbase}/lib/jython{py_version_short}',
'purelib': '{userbase}/lib/jython{py_version_short}/site-packages',
'platlib': '{userbase}/lib/jython{py_version_short}/site-packages',
'include': '{userbase}/include/jython{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
}
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
'scripts', 'data')
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix) if sys.prefix is not None else None
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) if sys.exec_prefix is not None else None
_CONFIG_VARS = None
_USER_BASE = None
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
    """Return True when running from a source checkout/build tree,
    detected by the presence of a Modules/Setup* file."""
    return any(
        os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn))
        for fn in ("Setup.dist", "Setup.local"))
_PYTHON_BUILD = is_python_build()
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_INSTALL_SCHEMES[scheme]['include'] = '{projectbase}/Include'
_INSTALL_SCHEMES[scheme]['platinclude'] = '{srcdir}'
def _subst_vars(s, local_vars):
try:
return s.format(**local_vars)
except KeyError:
try:
return s.format(**os.environ)
except KeyError, var:
raise AttributeError('{%s}' % var)
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
    """Expand every path template of install scheme *scheme*, using *vars*
    augmented (without overwriting) by the global configuration variables.
    Returns a new dict mapping scheme keys to normalized paths."""
    res = {}
    if vars is None:
        vars = {}
    # Caller-supplied vars take precedence over get_config_vars() values.
    _extend_dict(vars, get_config_vars())
    for key, value in _INSTALL_SCHEMES[scheme].items():
        if os.name in ('posix', 'nt', 'java'):
            try:
                value = os.path.expanduser(value)
            except ImportError:
                pass  # ignore missing pwd if no native posix for Jython
        res[key] = os.path.normpath(_subst_vars(value, vars))
    return res
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
    """Return the per-user base directory (PEP 370 "user base"),
    honoring the PYTHONUSERBASE environment override."""
    env_base = os.environ.get("PYTHONUSERBASE", None)
    def joinuser(*args):
        # Join then expand '~' so the result is an absolute user path.
        return os.path.expanduser(os.path.join(*args))

    # what about 'os2emx', 'riscos' ?
    # NOTE(review): os._name is a Jython-specific attribute; on CPython this
    # expression would raise AttributeError when os.name != "nt" — this file
    # appears to target Jython, where os._name exists. Verify before reuse.
    if os.name == "nt" or os._name == "nt":
        base = os.environ.get("APPDATA") or "~"
        return env_base if env_base else joinuser(base, "Python")

    if sys.platform == "darwin":
        framework = get_config_var("PYTHONFRAMEWORK")
        if framework:
            return env_base if env_base else \
                   joinuser("~", "Library", framework, "%d.%d"
                            % (sys.version_info[:2]))

    if env_base:
        return env_base
    try:
        return joinuser("~", ".local")
    except:
        return None # SecurityManager prevents this for Jython
def _parse_makefile(filename, vars=None):
    """Parse a Makefile-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    import re
    # Regexes needed for parsing Makefile (and similar syntaxes,
    # like old-style Setup files).
    _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")

    if vars is None:
        vars = {}
    # done: fully resolved values; notdone: values still containing $(VAR)
    # or ${VAR} references that need interpolation.
    done = {}
    notdone = {}

    with open(filename) as f:
        lines = f.readlines()

    # First pass: split assignments into resolved vs. pending.
    for line in lines:
        if line.startswith('#') or line.strip() == '':
            continue
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')

            if "$" in tmpv:
                notdone[n] = v
            else:
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v

    # do variable interpolation here
    # NOTE(review): iterating .keys() while deleting entries relies on
    # Python 2's list-returning keys(); under Python 3 this loop would
    # raise RuntimeError. This file targets a Python 2 (Jython) stdlib.
    while notdone:
        for name in notdone.keys():
            value = notdone[name]
            # Resolve one $(VAR)/%{VAR} reference per iteration of the
            # outer while loop; remaining references are picked up later.
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]
                else:
                    done[n] = item = ""
                if found:
                    after = value[m.end():]
                    value = value[:m.start()] + item + after
                    if "$" in after:
                        notdone[name] = value
                    else:
                        try: value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        del notdone[name]
            else:
                # bogus variable reference; just drop it since we can't deal
                del notdone[name]

    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()

    # save the results in the global dictionary
    vars.update(done)
    return vars
def _get_makefile_filename():
    """Absolute path of the Makefile describing this Python build:
    the project root in a source build, <platstdlib>/config otherwise."""
    directory = (_PROJECT_BASE if _PYTHON_BUILD
                 else os.path.join(get_path('platstdlib'), "config"))
    return os.path.join(directory, "Makefile")
def _init_posix(vars):
    """Initialize the module as appropriate for POSIX systems.

    Populates *vars* from the installed Makefile and pyconfig.h.
    Raises IOError with a descriptive message when either file
    cannot be read.
    """
    # load the installed Makefile:
    makefile = _get_makefile_filename()
    try:
        _parse_makefile(makefile, vars)
    # Fixed: `except IOError, e` is Python-2-only comma syntax;
    # `as` works on Python 2.6+ and 3.x alike.
    except IOError as e:
        msg = "invalid Python installation: unable to open %s" % makefile
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # load the installed pyconfig.h:
    config_h = get_config_h_filename()
    try:
        with open(config_h) as f:
            parse_config_h(f, vars)
    except IOError as e:
        msg = "invalid Python installation: unable to open %s" % config_h
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # On AIX, there are wrong paths to the linker scripts in the Makefile
    # -- these paths are relative to the Python source, but when installed
    # the scripts are in another directory.
    if _PYTHON_BUILD:
        vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
    """Initialize the module as appropriate for NT."""
    # Basic install directories plus platform-fixed build constants.
    vars.update(
        LIBDEST=get_path('stdlib'),
        BINLIBDEST=get_path('platstdlib'),
        INCLUDEPY=get_path('include'),
        SO='.pyd',
        EXE='.exe',
        VERSION=_PY_VERSION_SHORT_NO_DOT,
        BINDIR=os.path.dirname(_safe_realpath(sys.executable)),
    )
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
import re
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
def get_config_h_filename():
    """Returns the path of pyconfig.h."""
    if not _PYTHON_BUILD:
        # Installed layout: header lives in the platform include dir.
        inc_dir = get_path('platinclude')
    elif os.name == "nt":
        # Windows source build keeps pyconfig.h under PC/.
        inc_dir = os.path.join(_PROJECT_BASE, "PC")
    else:
        inc_dir = _PROJECT_BASE
    return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
    """Returns a tuple containing the schemes names, sorted alphabetically."""
    # Fixed: the previous `keys()` + list.sort() relied on Python 2's
    # list-returning dict.keys(); sorted() copies the keys and works
    # identically on Python 2 and 3.
    return tuple(sorted(_INSTALL_SCHEMES))
def get_path_names():
    """Returns a tuple containing the paths names."""
    # _SCHEME_KEYS is an immutable tuple, so handing it out directly is safe.
    return _SCHEME_KEYS
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
    """Returns a mapping containing an install scheme.

    ``scheme`` is the install scheme name. If not provided, it will
    return the default scheme for the current platform.
    ``vars`` optionally supplies extra substitution variables; ``expand``
    selects between expanded paths (default) and the raw templates.
    """
    # NOTE: the default for ``scheme`` is evaluated once at import time;
    # this mirrors the stdlib's own implementation.
    if expand:
        return _expand_vars(scheme, vars)
    else:
        return _INSTALL_SCHEMES[scheme]
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
    """Returns a path corresponding to the scheme.

    ``name`` is one of the keys returned by get_path_names();
    ``scheme`` is the install scheme name.  Raises KeyError for an
    unknown path name.
    """
    return get_paths(scheme, vars, expand)[name]
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.

    On Unix, this means every variable defined in Python's installed Makefile;
    On Windows and Mac OS it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.

    The dictionary is computed once and cached in the module-global
    _CONFIG_VARS; callers receive (and may mutate) that shared dict.
    """
    import re
    global _CONFIG_VARS
    if _CONFIG_VARS is None:
        _CONFIG_VARS = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # Distutils.
        _CONFIG_VARS['prefix'] = _PREFIX
        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
        _CONFIG_VARS['py_version'] = _PY_VERSION
        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
        _CONFIG_VARS['base'] = _PREFIX
        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
        _CONFIG_VARS['projectbase'] = _PROJECT_BASE

        if os.name in ('nt', 'os2'):
            _init_non_posix(_CONFIG_VARS)
        if os.name == 'posix':
            _init_posix(_CONFIG_VARS)

        # Setting 'userbase' is done below the call to the
        # init function to enable using 'get_config_var' in
        # the init-function.
        _CONFIG_VARS['userbase'] = _getuserbase()

        if 'srcdir' not in _CONFIG_VARS:
            _CONFIG_VARS['srcdir'] = _PROJECT_BASE

        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory. However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if _PYTHON_BUILD and os.name == "posix":
            base = _PROJECT_BASE
            try:
                cwd = os.getcwd()
            except OSError:
                cwd = None
            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
                base != cwd):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)

        if sys.platform == 'darwin':
            kernel_version = os.uname()[2] # Kernel version (8.4.3)
            major_version = int(kernel_version.split('.')[0])

            if major_version < 8:
                # On Mac OS X before 10.4, check if -arch and -isysroot
                # are in CFLAGS or LDFLAGS and remove them if they are.
                # This is needed when building extensions on a 10.3 system
                # using a universal build of python.
                for key in ('LDFLAGS', 'BASECFLAGS',
                        # a number of derived variables. These need to be
                        # patched up as well.
                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                    flags = _CONFIG_VARS[key]
                    flags = re.sub('-arch\s+\w+\s', ' ', flags)
                    flags = re.sub('-isysroot [^ \t]*', ' ', flags)
                    _CONFIG_VARS[key] = flags
            else:
                # Allow the user to override the architecture flags using
                # an environment variable.
                # NOTE: This name was introduced by Apple in OSX 10.5 and
                # is used by several scripting languages distributed with
                # that OS release.
                if 'ARCHFLAGS' in os.environ:
                    arch = os.environ['ARCHFLAGS']
                    for key in ('LDFLAGS', 'BASECFLAGS',
                            # a number of derived variables. These need to be
                            # patched up as well.
                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                        flags = _CONFIG_VARS[key]
                        flags = re.sub('-arch\s+\w+\s', ' ', flags)
                        flags = flags + ' ' + arch
                        _CONFIG_VARS[key] = flags

                # If we're on OSX 10.5 or later and the user tries to
                # compiles an extension using an SDK that is not present
                # on the current machine it is better to not use an SDK
                # than to fail.
                #
                # The major usecase for this is users using a Python.org
                # binary installer  on OSX 10.6: that installer uses
                # the 10.4u SDK, but that SDK is not installed by default
                # when you install Xcode.
                #
                CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search('-isysroot\s+(\S+)', CFLAGS)
                if m is not None:
                    sdk = m.group(1)
                    if not os.path.exists(sdk):
                        for key in ('LDFLAGS', 'BASECFLAGS',
                                # a number of derived variables. These need to be
                                # patched up as well.
                                'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                            flags = _CONFIG_VARS[key]
                            flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
                            _CONFIG_VARS[key] = flags

    if args:
        # Return values in the same order as the requested names;
        # unknown names yield None.
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS
def get_config_var(name):
    """Return the value of a single variable using the dictionary returned by
    'get_config_vars()'.

    Equivalent to get_config_vars().get(name); returns None for
    unknown names.
    """
    all_vars = get_config_vars()
    return all_vars.get(name)
def get_platform():
    """Return a string that identifies the current platform.

    This is used mainly to distinguish platform-specific build directories and
    platform-specific built distributions.  Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    import re
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform

    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform

    # Try to distinguish various flavours of Unix
    osname, host, release, version, machine = os.uname()

    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')

    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return  "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5": # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
            # We can't use "platform.architecture()[0]" because a
            # bootstrap problem. We use a dict to get an error
            # if some suspicious happens.
            # NOTE: sys.maxint is Python-2-only (removed in Python 3).
            bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
            machine += ".%s" % bitness[sys.maxint]
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix":              # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        # Keep only the leading numeric dotted portion of the release.
        rel_re = re.compile (r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        cfgvars = get_config_vars()
        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')

        if 1:
            # Always calculate the release of the running machine,
            # needed to determine if we can build fat binaries or not.

            macrelease = macver
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                try:
                    m = re.search(
                            r'<key>ProductUserVisibleVersion</key>\s*' +
                            r'<string>(.*?)</string>', f.read())
                    if m is not None:
                        macrelease = '.'.join(m.group(1).split('.')[:2])
                    # else: fall back to the default behaviour
                finally:
                    f.close()

        if not macver:
            macver = macrelease

        if macver:
            release = macver
            osname = "macosx"
            if (macrelease + '.') >= '10.4.' and \
                   '-arch' in get_config_vars().get('CFLAGS', '').strip():
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have machine-type
                # 'universal' instead of 'fat'.
                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')

                archs = re.findall('-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))
                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                       "Don't know machine value for archs=%r"%(archs,))

            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxint >= 2**32:
                    machine = 'x86_64'

            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                # See 'i386' case
                if sys.maxint >= 2**32:
                    machine = 'ppc64'
                else:
                    machine = 'ppc'

    return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
    """Return the short Python version string, e.g. '2.7'."""
    return _PY_VERSION_SHORT
|
codasus/django-blogages | refs/heads/master | blogages/django/contrib/gis/db/backends/postgis/creation.py | 308 | from django.conf import settings
from django.db.backends.postgresql.creation import DatabaseCreation
class PostGISCreation(DatabaseCreation):
    # PostGIS spatial indexes are GiST indexes over the geometry
    # operator class.
    geom_index_type = 'GIST'
    geom_index_opts = 'GIST_GEOMETRY_OPS'

    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField

        # Start with whatever the regular PostgreSQL backend generates.
        output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)

        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table

            if f.geography:
                # Geography columns are created normally.
                pass
            else:
                # Geometry columns are created by `AddGeometryColumn`
                # stored procedure.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('AddGeometryColumn') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ', ' +
                              style.SQL_FIELD(str(f.srid)) + ', ' +
                              style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                              style.SQL_KEYWORD(str(f.dim)) + ');')

                if not f.null:
                    # Add a NOT NULL constraint to the field
                    output.append(style.SQL_KEYWORD('ALTER TABLE ') +
                                  style.SQL_TABLE(qn(db_table)) +
                                  style.SQL_KEYWORD(' ALTER ') +
                                  style.SQL_FIELD(qn(f.column)) +
                                  style.SQL_KEYWORD(' SET NOT NULL') + ';')

            if f.spatial_index:
                # Spatial indexes created the same way for both Geometry and
                # Geography columns.
                # PostGIS geometry columns need the operator-class option;
                # geography columns do not.
                if f.geography:
                    index_opts = ''
                else:
                    index_opts = ' ' + style.SQL_KEYWORD(self.geom_index_opts)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) +
                              style.SQL_KEYWORD(' USING ') +
                              style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
                              style.SQL_FIELD(qn(f.column)) + index_opts + ' );')
        return output

    def sql_table_creation_suffix(self):
        # Create test databases from the PostGIS-enabled template database.
        qn = self.connection.ops.quote_name
        return ' TEMPLATE %s' % qn(getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis'))
|
Jionglun/-w16b_test | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/linecache.py | 785 | """Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
import tokenize
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
    """Return line *lineno* (1-based) of *filename*, or '' when the
    line number is out of range."""
    lines = getlines(filename, module_globals)
    if not 1 <= lineno <= len(lines):
        return ''
    return lines[lineno - 1]
# The cache
cache = {} # The cache
def clearcache():
    """Clear the cache entirely."""
    global cache
    # Rebind rather than mutate, so any outstanding references to the old
    # dict are left untouched.
    cache = {}
def getlines(filename, module_globals=None):
    """Get the lines for a file from the cache.
    Update the cache if it doesn't contain an entry for this file already."""
    try:
        # Cache entries are (size, mtime, lines, fullname) tuples.
        return cache[filename][2]
    except KeyError:
        return updatecache(filename, module_globals)
def checkcache(filename=None):
    """Discard cache entries that are out of date.
    (This is not checked upon each call!)

    With no argument, checks every cached file; with a filename,
    checks only that entry (silently ignoring unknown names)."""

    if filename is None:
        # Snapshot the keys: entries may be deleted while iterating.
        filenames = list(cache.keys())
    else:
        if filename in cache:
            filenames = [filename]
        else:
            return

    for filename in filenames:
        size, mtime, lines, fullname = cache[filename]
        if mtime is None:
            continue   # no-op for files loaded via a __loader__
        try:
            stat = os.stat(fullname)
        except os.error:
            # File vanished: the cached lines are stale.
            del cache[filename]
            continue
        if size != stat.st_size or mtime != stat.st_mtime:
            del cache[filename]
def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.
    If something's wrong, print a message, discard the cache entry,
    and return an empty list."""

    if filename in cache:
        del cache[filename]
    # Pseudo-filenames like '<stdin>' never correspond to real files.
    if not filename or (filename.startswith('<') and filename.endswith('>')):
        return []

    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename

        # Try for a __loader__, if available
        if module_globals and '__loader__' in module_globals:
            name = module_globals.get('__name__')
            loader = module_globals['__loader__']
            get_source = getattr(loader, 'get_source', None)

            if name and get_source:
                try:
                    data = get_source(name)
                except (ImportError, IOError):
                    pass
                else:
                    if data is None:
                        # No luck, the PEP302 loader cannot find the source
                        # for this module.
                        return []
                    # mtime of None marks loader-sourced entries so
                    # checkcache() skips them.
                    cache[filename] = (
                        len(data), None,
                        [line+'\n' for line in data.splitlines()], fullname
                    )
                    return cache[filename][2]

        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        if os.path.isabs(filename):
            return []

        for dirname in sys.path:
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                # Not sufficiently string-like to do anything useful with.
                continue
            try:
                stat = os.stat(fullname)
                break
            except os.error:
                pass
        else:
            # Exhausted sys.path without finding the file.
            return []
    try:
        # tokenize.open honors any PEP 263 coding cookie in the file.
        with tokenize.open(fullname) as fp:
            lines = fp.readlines()
    except IOError:
        return []
    # Normalize: ensure the last line is newline-terminated.
    if lines and not lines[-1].endswith('\n'):
        lines[-1] += '\n'
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
|
fengyqf/shadowsocks | refs/heads/master | shadowsocks/__init__.py | 1084 | #!/usr/bin/python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
|
evidation-health/bokeh | refs/heads/master | bokeh/charts/builder/boxplot_builder.py | 2 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the BoxPlot class which lets you build your BoxPlot plots just passing
the arguments to the Chart class and calling the proper functions.
It also add a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from .._builder import create_and_build
from ...models import Range1d
from ...properties import Bool, String
from .bar_builder import BarBuilder
from ..glyphs import BoxGlyph
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def BoxPlot(data, label=None, values=None, color=None, stack=None, group=None, agg="sum", xscale="categorical",
            yscale="linear",
            xgrid=False, ygrid=True, continuous_range=None, **kw):
    """Build a box plot chart from *data* by delegating to
    :class:`BoxPlotBuilder` via ``create_and_build``.

    ``continuous_range``, when given, must be a ``Range1d`` and becomes
    the chart's y range; all other keyword options are forwarded to the
    builder unchanged.
    """
    if continuous_range and not isinstance(continuous_range, Range1d):
        raise ValueError(
            "continuous_range must be an instance of bokeh.models.ranges.Range1d"
        )

    # The continuous_range is the y_range (until we implement HBar charts)
    kw.update(dict(
        label=label,
        values=values,
        color=color,
        stack=stack,
        group=group,
        agg=agg,
        xscale=xscale,
        yscale=yscale,
        xgrid=xgrid,
        ygrid=ygrid,
        y_range=continuous_range,
    ))
    return create_and_build(BoxPlotBuilder, data, **kw)
class BoxPlotBuilder(BarBuilder):
    """Builder in charge of assembling a box plot from tabular input.

    It provides a way to ingest the data, make the proper calculations
    and push the references into a source object.  It additionally makes
    calculations for the ranges, and finally adds the needed glyphs
    (rects, lines and markers) taking the references from the source.
    """

    # TODO: (bev) should be an enumeration
    marker = String(help="""
    The marker type to use (e.g., ``circle``) if outliers=True.
    """)

    outliers = Bool(help="""
    Whether to display markers for any outliers.
    """)

    # Glyph class used to render each box; the inherited BarBuilder
    # machinery drives data ingestion and rendering.
    glyph = BoxGlyph
# def _process_data(self):
# """Take the BoxPlot data from the input **value.
#
# It calculates the chart properties accordingly. Then build a dict
# containing references to all the calculated points to be used by
# the quad, segments and markers glyphs inside the ``_yield_renderers`` method.
#
# Args:
# cat (list): categories as a list of strings.
# marker (int or string, optional): if outliers=True, the marker type to use
# e.g., ``circle``.
# outliers (bool, optional): Whether to plot outliers.
# values (dict or pd obj): the values to be plotted as bars.
# """
# self._data_segment = dict()
# self._attr_segment = []
# self._data_rect = dict()
# self._attr_rect = []
# self._data_scatter = dict()
# self._attr_scatter = []
# self._data_legend = dict()
#
# if isinstance(self._values, pd.DataFrame):
# self._groups = self._values.columns
# else:
# self._groups = list(self._values.keys())
#
# # add group to the self._data_segment dict
# self._data_segment["groups"] = self._groups
#
# # add group and witdh to the self._data_rect dict
# self._data_rect["groups"] = self._groups
# self._data_rect["width"] = [0.8] * len(self._groups)
# self._data_scatter does not need references to groups now,
# they will be added later.
# # add group to the self._data_legend dict
# self._data_legend["groups"] = self._groups
#
# # all the list we are going to use to save calculated values
# q0_points = []
# q2_points = []
# iqr_centers = []
# iqr_lengths = []
# lower_points = []
# upper_points = []
# upper_center_boxes = []
# upper_height_boxes = []
# lower_center_boxes = []
# lower_height_boxes = []
# out_x, out_y, out_color = ([], [], [])
# colors = cycle_colors(self._groups, self.palette)
#
# for i, (level, values) in enumerate(self._values.items()):
# # Compute quantiles, center points, heights, IQR, etc.
# # quantiles
# q = np.percentile(values, [25, 50, 75])
# q0_points.append(q[0])
# q2_points.append(q[2])
#
# # IQR related stuff...
# iqr_centers.append((q[2] + q[0]) / 2)
# iqr = q[2] - q[0]
# iqr_lengths.append(iqr)
# lower = q[0] - 1.5 * iqr
# upper = q[2] + 1.5 * iqr
# lower_points.append(lower)
# upper_points.append(upper)
#
# # rect center points and heights
# upper_center_boxes.append((q[2] + q[1]) / 2)
# upper_height_boxes.append(q[2] - q[1])
# lower_center_boxes.append((q[1] + q[0]) / 2)
# lower_height_boxes.append(q[1] - q[0])
#
# # Store indices of outliers as list
# outliers = np.where(
# (values > upper) | (values < lower)
# )[0]
# for out in outliers:
# o = values[out]
# out_x.append(level)
# out_y.append(o)
# out_color.append(colors[i])
#
# # Store
# self.set_and_get(self._data_scatter, self._attr_scatter, "out_x", out_x)
# self.set_and_get(self._data_scatter, self._attr_scatter, "out_y", out_y)
# self.set_and_get(self._data_scatter, self._attr_scatter, "colors", out_color)
#
# self.set_and_get(self._data_segment, self._attr_segment, "q0", q0_points)
# self.set_and_get(self._data_segment, self._attr_segment, "lower", lower_points)
# self.set_and_get(self._data_segment, self._attr_segment, "q2", q2_points)
# self.set_and_get(self._data_segment, self._attr_segment, "upper", upper_points)
#
# self.set_and_get(self._data_rect, self._attr_rect, "iqr_centers", iqr_centers)
# self.set_and_get(self._data_rect, self._attr_rect, "iqr_lengths", iqr_lengths)
# self.set_and_get(self._data_rect, self._attr_rect, "upper_center_boxes", upper_center_boxes)
# self.set_and_get(self._data_rect, self._attr_rect, "upper_height_boxes", upper_height_boxes)
# self.set_and_get(self._data_rect, self._attr_rect, "lower_center_boxes", lower_center_boxes)
# self.set_and_get(self._data_rect, self._attr_rect, "lower_height_boxes", lower_height_boxes)
# self.set_and_get(self._data_rect, self._attr_rect, "colors", colors)
#def _set_ranges(self):
#"Push the BoxPlot data into the ColumnDataSource and calculate the proper ranges."
# self._source_segment = ColumnDataSource(self._data_segment)
# self._source_scatter = ColumnDataSource(self._data_scatter)
# self._source_rect = ColumnDataSource(self._data_rect)
# self._source_legend = ColumnDataSource(self._data_legend)
# self.x_range = FactorRange(factors=self._source_segment.data["groups"])
#
# start_y = min(self._data_segment[self._attr_segment[1]])
# end_y = max(self._data_segment[self._attr_segment[3]])
#
# ## Expand min/max to encompass outliers
# if self.outliers and self._data_scatter[self._attr_scatter[1]]:
# start_out_y = min(self._data_scatter[self._attr_scatter[1]])
# end_out_y = max(self._data_scatter[self._attr_scatter[1]])
# # it could be no outliers in some sides...
# start_y = min(start_y, start_out_y)
# end_y = max(end_y, end_out_y)
# self.y_range = Range1d(start=start_y - 0.1 * (end_y - start_y),
# end=end_y + 0.1 * (end_y - start_y))
#pass
# def _yield_renderers(self):
# """Use the several glyphs to display the Boxplot.
#
# It uses the selected marker glyph to display the points, segments to
# display the iqr and rects to display the boxes, taking as reference
# points the data loaded at the ColumnDataSurce.
# """
# ats = self._attr_segment
#
# glyph = Segment(
# x0="groups", y0=ats[1], x1="groups", y1=ats[0],
# line_color="black", line_width=2
# )
# yield GlyphRenderer(data_source=self._source_segment, glyph=glyph)
#
# glyph = Segment(
# x0="groups", y0=ats[2], x1="groups", y1=ats[3],
# line_color="black", line_width=2
# )
# yield GlyphRenderer(data_source=self._source_segment, glyph=glyph)
#
# atr = self._attr_rect
#
# glyph = Rect(
# x="groups", y=atr[0], width="width", height=atr[1],
# line_color="black", line_width=2, fill_color=None,
# )
# yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
#
# glyph = Rect(
# x="groups", y=atr[2], width="width", height=atr[3],
# line_color="black", fill_color=atr[6],
# )
# yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
#
# glyph = Rect(
# x="groups", y=atr[4], width="width", height=atr[5],
# line_color="black", fill_color=atr[6],
# )
# yield GlyphRenderer(data_source=self._source_rect, glyph=glyph)
#
# if self.outliers:
# yield make_scatter(self._source_scatter, self._attr_scatter[0],
# self._attr_scatter[1], self.marker,
# self._attr_scatter[2])
# Some helper methods
# def set_and_get(self, data, attr, val, content):
# """Set a new attr and then get it to fill the self._data dict.
#
# Keep track of the attributes created.
#
# Args:
# data (dict): where to store the new attribute content
# attr (list): where to store the new attribute names
# val (string): name of the new attribute
# content (obj): content of the new attribute
# """
# self._set_and_get(data, "", attr, val, content)
|
mailfish/helena | refs/heads/master | gallery/urls.py | 1 | from django.conf.urls import patterns, url
from .views import PhotoListView
urlpatterns = patterns('',
url(r'^(?P<slug>[\w-]+)/$', PhotoListView.as_view(), name='image'),
) |
Asimmetric/influxdb-python | refs/heads/master | influxdb/dataframe_client.py | 2 | # -*- coding: utf-8 -*-
"""
DataFrame client for InfluxDB
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['DataFrameClient']

try:
    # Pandas is an optional dependency: probe for it, then discard the
    # module object since only its availability matters here.
    import pandas
    del pandas
except ImportError as err:
    from .client import InfluxDBClient

    class DataFrameClient(InfluxDBClient):
        # Stub substituted when pandas is missing: importing this module
        # still succeeds, and the ImportError is deferred until a caller
        # actually tries to construct a DataFrameClient.
        err = err

        def __init__(self, *a, **kw):
            raise ImportError("DataFrameClient requires Pandas "
                              "which couldn't be imported: %s" % self.err)
else:
    # Pandas is available: expose the real implementation.
    from ._dataframe_client import DataFrameClient
|
hdiomede/status-page | refs/heads/master | run.py | 13 | from app import app
app.run(debug=True) |
Comunitea/CMNT_00098_2017_JIM_addons | refs/heads/master | base_partner_sequence/tests/__init__.py | 5 | # -*- coding: utf-8 -*-
# Copyright 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright 2013 initOS GmbH & Co. KG (<http://www.initos.com>).
# Copyright 2016 Tecnativa - Vicent Cubells
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_base_partner_sequence
|
mantidproject/mantid | refs/heads/master | Framework/PythonInterface/plugins/algorithms/MatchSpectra.py | 3 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import AlgorithmFactory, MatrixWorkspaceProperty, PythonAlgorithm
from mantid.kernel import Direction, FloatArrayProperty
from mantid.simpleapi import ConvertToMatrixWorkspace
import numpy as np
class MatchSpectra(PythonAlgorithm):
    """Scale/offset each spectrum to best match a reference spectrum.

    For every spectrum a least-squares ``scale`` and ``offset`` are computed
    (ordinary least squares / Gauss-Markov) so that ``scale * Y + offset``
    most closely matches the reference spectrum over the overlapping x-range
    where both spectra have non-zero uncertainties.
    """

    def category(self):
        return 'Diffraction\\Reduction'

    #def seeAlso(self):
    #    return ['']

    def name(self):
        return "MatchSpectra"

    def summary(self):
        return "Calculate factors to most closely match all spectra to reference spectrum"

    def PyInit(self):
        """Declare input/output properties of the algorithm."""
        self.declareProperty(MatrixWorkspaceProperty('InputWorkspace', '',
                                                     Direction.Input),
                             doc='Workspace to match the spectra between')
        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspace', '',
                                                     Direction.Output),
                             doc='Workspace with the spectra matched')
        self.declareProperty('ReferenceSpectrum', 1,
                             doc='Spectrum to match other spectra to')
        self.declareProperty('CalculateOffset', True,
                             doc='Calculate vertical shift')
        self.declareProperty('CalculateScale', True,
                             doc='Calculate scale factor')
        self.declareProperty(FloatArrayProperty('Offset', values=[],
                                                direction=Direction.Output),
                             'Additive factor from matching')
        # fixed typo in the user-facing doc: "Multiplicitive" -> "Multiplicative"
        self.declareProperty(FloatArrayProperty('Scale', values=[],
                                                direction=Direction.Output),
                             'Multiplicative factor from matching')
        self.declareProperty(FloatArrayProperty('ChiSq', values=[],
                                                direction=Direction.Output),
                             'Unweighted ChiSq between the spectrum and the reference. '
                             'NaN means that the spectrum was not matched')

    def __getReferenceWsIndex(self):
        """Return the workspace index whose spectrum number equals ReferenceSpectrum.

        Raises RuntimeError when the spectrum number is not present.
        """
        refSpectrum = self.getProperty('ReferenceSpectrum').value
        inputWS = self.getProperty('InputWorkspace').value

        for wkspIndex in range(inputWS.getNumberHistograms()):
            if inputWS.getSpectrum(wkspIndex).getSpectrumNo() == refSpectrum:
                return wkspIndex
        raise RuntimeError('Failed to find spectrum {} in workspace "{}"'.format(refSpectrum, inputWS))

    def __createOutputWS(self):
        '''Convert to a Workspace2D despite what the algorithm is named'''
        outputWS = ConvertToMatrixWorkspace(InputWorkspace=self.getPropertyValue('InputWorkspace'),
                                            OutputWorkspace=self.getPropertyValue("OutputWorkspace"))
        return outputWS

    def __generateIndices(self, spectrumNum, reference, testing, binBoundaries):
        '''Generates the indices for slicing by comparing x-axes

        Returns a 3-tuple ``(hasOverlap, (refLower, refUpper), (tstLower, tstUpper))``.

        A note about implementation: If numpy.searchsorted fails to find the
        value, it returns the last index of the array.
        '''
        BAD_RESULT = (False, (0, 0), (0, 0))

        # find the lower bounds
        refLower = 0
        tstLower = 0
        if reference[0] == testing[0]:
            pass  # values are already set
        elif reference[0] < testing[0]:
            refLower = np.searchsorted(reference, testing[0])
            if refLower == reference.size:
                # fixed typo: "Falied" -> "Failed"
                msg = 'Failed to find {} in reference spectrum x-axis (spectrum={})'.format(testing[0], spectrumNum)
                self.log().notice(msg)
                return BAD_RESULT
        else:
            tstLower = np.searchsorted(testing, reference[0])
            if tstLower == testing.size:
                msg = 'Failed to find {} in the x-axis of the spectrum being matched (spectrum={})'.format(reference[0],
                                                                                                           spectrumNum)
                self.log().notice(msg)
                return BAD_RESULT

        # find the upper bounds
        refUpper = reference.size - 1
        tstUpper = testing.size - 1
        if binBoundaries:
            # with bin boundaries there is one more x than y value
            refUpper -= 1
            tstUpper -= 1
        if reference[refUpper] == testing[tstUpper]:
            pass  # values are already set
        elif reference[refUpper] < testing[tstUpper]:
            tstUpper = np.searchsorted(testing, reference[refUpper])
            if reference[refUpper] != testing[tstUpper]:
                msg = 'Failed to find {} in the x-axis of the spectrum being matched (spectrum={})'.format(reference[-1],
                                                                                                           spectrumNum)
                self.log().notice(msg)
                return BAD_RESULT
        else:
            refUpper = np.searchsorted(reference, testing[tstUpper])
            if reference[refUpper] != testing[tstUpper]:
                msg = 'Failed to find {} in reference spectrum x-axis (spectrum={})'.format(testing[-1], spectrumNum)
                self.log().notice(msg)
                return BAD_RESULT

        if (reference[refLower:refUpper]).size != (testing[tstLower:tstUpper]).size:
            # BUGFIX: the original logged an undefined local `msg` here,
            # which raised UnboundLocalError instead of reporting the problem.
            msg = 'Overlap region sizes do not match between reference and spectrum {}'.format(spectrumNum)
            self.log().notice(msg)
            return BAD_RESULT

        return (True, (refLower, refUpper), (tstLower, tstUpper))

    def __residual(self, X, Y1, Y2):
        """Return the bin-width-weighted mean squared difference of Y1 and Y2."""
        deltaX = np.diff(X)
        deltaX = np.append(deltaX, deltaX[-1])  # add the last value to the end

        return (np.square(Y1 - Y2) * deltaX).sum() / deltaX.sum()

    def PyExec(self):
        referenceWkspIndex = self.__getReferenceWsIndex()
        outputWS = self.__createOutputWS()

        # determine what to calculate
        doScale = self.getProperty('CalculateScale').value
        doOffset = self.getProperty('CalculateOffset').value

        referenceX = outputWS.readX(referenceWkspIndex)
        referenceY = outputWS.readY(referenceWkspIndex)
        referenceE = outputWS.readE(referenceWkspIndex)
        if not np.any(referenceE > 0.):
            raise RuntimeError('None of the uncertainties in the reference spectrum '
                               'is greater than zero. No data would be used.')

        resultOffset = []
        resultScale = []
        resultResidual = []

        # this is just gauss-markov theorem (ordinary least squares)
        for wkspIndex in range(outputWS.getNumberHistograms()):
            spectrumNum = outputWS.getSpectrum(wkspIndex).getSpectrumNo()
            if wkspIndex == referenceWkspIndex:
                # the reference is unchanged by definition
                resultOffset.append(0.)
                resultScale.append(1.)
                resultResidual.append(0.)
                self.log().information('spectrum {} is the reference'.format(spectrumNum))
                continue

            X = outputWS.readX(wkspIndex)
            Y = outputWS.readY(wkspIndex)
            E = outputWS.readE(wkspIndex)
            if not np.any(E > 0.):
                # BUGFIX: the original message wrongly said "reference spectrum";
                # this warning is about the spectrum being matched.
                self.log().warning('None of the uncertainties in spectrum {} is greater than zero'.format(spectrumNum))
                resultOffset.append(0.)
                resultScale.append(1.)
                resultResidual.append(np.nan)
                continue

            hasOverlap, refIndices, tstIndices = self.__generateIndices(spectrumNum, referenceX, X, X.size == Y.size + 1)
            if not hasOverlap:
                resultOffset.append(0.)
                resultScale.append(1.)
                resultResidual.append(np.nan)
                continue

            # only use points where both spectra have non-zero uncertainty
            mask = (E[tstIndices[0]:tstIndices[1]] > 0.) * (referenceE[refIndices[0]:refIndices[1]] > 0.)
            if not np.any(mask):
                resultOffset.append(0.)
                resultScale.append(1.)
                resultResidual.append(np.nan)
                self.log().warning('The overlap region of spectrum {} has no uncertainties greater than zero'.format(spectrumNum))
                continue
            totalBins = mask.sum()  # number of bins being used

            # only calculate the terms that are needed
            if doOffset:
                sumRef = referenceY[refIndices[0]:refIndices[1]][mask].sum()
                sumSpec = Y[tstIndices[0]:tstIndices[1]][mask].sum()
            if doScale:
                sumSpecSq = (Y[tstIndices[0]:tstIndices[1]][mask] * Y[tstIndices[0]:tstIndices[1]][mask]).sum()
                sumRefSpec = (Y[tstIndices[0]:tstIndices[1]][mask] * referenceY[refIndices[0]:refIndices[1]][mask]).sum()

            # defaults are to do nothing
            scale = 1.
            offset = 0.
            if doScale and doOffset:  # use both
                # Cramer's rule for 2x2 matrix
                denominator = totalBins * sumSpecSq - sumSpec * sumSpec
                scale = (totalBins * sumRefSpec - sumRef * sumSpec) / denominator
                offset = (sumRef * sumSpecSq - sumSpec * sumRefSpec) / denominator
            elif doScale and not doOffset:  # only scale
                scale = sumRefSpec / sumSpecSq
            elif doOffset and not doScale:  # only shift
                offset = (sumRef - sumSpec) / totalBins

            # calculate the residual of the fit - must be done before updating values
            residual = self.__residual(X[tstIndices[0]:tstIndices[1]][mask],
                                       Y[tstIndices[0]:tstIndices[1]][mask] * scale + offset,
                                       referenceY[refIndices[0]:refIndices[1]][mask])
            resultResidual.append(residual)
            msg = 'spectrum {} chisq '.format(spectrumNum) \
                + 'before={} '.format(self.__residual(X[tstIndices[0]:tstIndices[1]][mask],
                                                      Y[tstIndices[0]:tstIndices[1]][mask],
                                                      referenceY[refIndices[0]:refIndices[1]][mask])) \
                + 'after={}'.format(residual)
            self.log().information(msg)

            # update the values in the output workspace
            Ynew = np.copy(Y)
            Ynew[E > 0.] = Ynew[E > 0.] * scale + offset
            outputWS.setY(wkspIndex, Ynew)
            outputWS.setE(wkspIndex, E * scale)  # background doesn't matter because there isn't uncertainty

            resultOffset.append(offset)
            resultScale.append(scale)

        # set output properties
        self.setProperty('OutputWorkspace', outputWS)
        self.setProperty('Offset', resultOffset)
        self.setProperty('Scale', resultScale)
        self.setProperty('ChiSq', resultResidual)
# Register algorithm with Mantid.
AlgorithmFactory.subscribe(MatchSpectra)
|
bplancher/odoo | refs/heads/9.0 | addons/stock_calendar/__openerp__.py | 18 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Calendars on Orderpoints',
'version': '1.0',
'summary': 'Calendars ',
'description': """
The stock_calendar module handles minimum stock rules (=orderpoints / reordering rules) differently by
the possibility to take into account the purchase and delivery calendars.
Normally, the scheduler will go through all orderpoints and will create a procurement with a quantity taking
into account the current stock and all future stock moves. For companies working with fresh products, this is
a problem, because if you order the products needed over 2 weeks now and they arrive tomorrow, then
these products won't be fresh anymore in two weeks.
To solve this, we added a delivery calendar to the orderpoint. The future stock moves (they represent the needs)
taken into account will be limited to those until the second delivery according to the calendar.
So if I am delivered every week on Tuesday and on Friday, when I order on Monday, I will be delivered on Tuesday
with all what is needed until Friday.
This however is not good enough as you want to create a purchase order only before the date of the delivery as the
future needs might change. (otherwise you could have ordered too much already) For this, we added a
purchase calendar and the orderpoint will only be triggered when the scheduler is run within the time specified
by the purchase calendar. (a last execution date will also check if it has not already been triggered within this time)
However, sometimes we have double orders: suppose we need to order twice on Friday: a purchase order for Monday
and a purchase order for Tuesday. Then we need to have two orders at the same time.
To handle this, we put a procurement group on the calendar line and for the purchase calendar line we need to do,
we will check the corresponding delivery line. On the procurement group, we can tell to propagate itself to the purchase
and this way it is possible to have an order for Monday and one for Tuesday.
With normal orderpoints, the dates put on the purchase order are based on the delays in the system for the product/company.
This does not correspond to what is done with the calendars, so the purchase/delivery dates will be set according to the calendars also.
The calendars we use are on weekly basis. It is possible however to have a start date and end date for e.g. the Tuesday delivery.
It is also possible to put exceptions for days when there is none.
""",
'website': 'https://www.odoo.com/page/warehouse',
'images': [],
'depends': ['purchase', 'resource'],
'category': 'Warehouse',
'sequence': 16,
'demo': [
],
'data': [
'stock_calendar_view.xml'
],
'test': [
'test/orderpoint_calendar.yml'
],
'installable': True,
'application': False,
'auto_install': False,
'qweb': [],
}
|
ennoborg/gramps | refs/heads/master | gramps/plugins/lib/maps/geography.py | 1 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011-2016 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
import os
import re
import time
from gi.repository import GLib
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import EventType, Place, PlaceRef, PlaceName
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.gui.views.navigationview import NavigationView
from gramps.gen.utils.libformatting import FormattingHelper
from gramps.gen.errors import WindowActiveError
from gramps.gen.const import HOME_DIR
from gramps.gen.config import config
from gramps.gui.editors import EditPlace, EditEvent, EditFamily, EditPerson
from gramps.gui.selectors.selectplace import SelectPlace
import gi
gi.require_version('OsmGpsMap', '1.0')
from gi.repository import OsmGpsMap as osmgpsmap
from . import constants
from .osmgps import OsmGps
from .selectionlayer import SelectionLayer
from .placeselection import PlaceSelection
from .cairoprint import CairoPrintSave
from .libkml import Kml
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("maps.geography")
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
GEOGRAPHY_PATH = os.path.join(HOME_DIR, "maps")
#-------------------------------------------------------------------------
#
# Functions and variables
#
#-------------------------------------------------------------------------
PLACE_REGEXP = re.compile('<span background="green">(.*)</span>')
PLACE_STRING = '<span background="green">%s</span>'
# pylint: disable=unused-argument
# pylint: disable=unused-variable
# pylint: disable=no-member
# pylint: disable=maybe-no-member
def _get_sign(value):
"""
return 1 if we have a negative number, 0 in other case
"""
if value < 0.0:
return 1
else:
return 0
#-------------------------------------------------------------------------
#
# GeoGraphyView
#
#-------------------------------------------------------------------------
class GeoGraphyView(OsmGps, NavigationView):
"""
View for pedigree tree.
Displays the ancestors of a selected individual.
"""
#settings in the config file
CONFIGSETTINGS = (
('geography.path', GEOGRAPHY_PATH),
('geography.zoom', 10),
('geography.zoom_when_center', 12),
('geography.show_cross', True),
('geography.lock', False),
('geography.center-lat', 0.0),
('geography.center-lon', 0.0),
('geography.map_service', constants.OPENSTREETMAP),
('geography.max_places', 5000),
('geography.use-keypad', True),
)
def __init__(self, title, pdata, dbstate, uistate,
             bm_type, nav_group):
    """Initialise the geography view: state, config, icons and defaults."""
    NavigationView.__init__(self, title, pdata, dbstate, uistate,
                            bm_type, nav_group)
    OsmGps.__init__(self, uistate)
    self.dbstate = dbstate
    # refresh/clear the view whenever the database changes or goes away
    self.dbstate.connect('database-changed', self.change_db)
    self.dbstate.connect('no-database', self.clear_view)
    self.default_text = "Enter location here!"
    # restore the last saved map position/zoom from the config file
    self.centerlon = config.get("geography.center-lon")
    self.centerlat = config.get("geography.center-lat")
    self.zoom = config.get("geography.zoom")
    self.lock = config.get("geography.lock")
    if config.get('geography.path') == "":
        config.set('geography.path', GEOGRAPHY_PATH)
    self.format_helper = FormattingHelper(self.dbstate)
    # NOTE(review): this overwrites the center coordinates just read from
    # config above — looks suspicious, confirm whether it is intentional.
    self.centerlat = self.centerlon = 0.0
    self.cross_map = None
    self.current_map = None
    self.without = 0
    self.place_list = []
    self.places_found = []
    self.select_fct = None
    self.geo_mainmap = None
    # load the marker icons from the current GTK icon theme
    theme = Gtk.IconTheme.get_default()
    self.geo_mainmap = theme.load_surface('gramps-geo-mainmap', 48, 1,
                                          None, 0)
    self.geo_altmap = theme.load_surface('gramps-geo-altmap', 48, 1,
                                         None, 0)
    if (config.get('geography.map_service') in
            (constants.OPENSTREETMAP,
             constants.MAPS_FOR_FREE,
             constants.OPENCYCLEMAP,
             constants.OSM_PUBLIC_TRANSPORT,
             )):
        default_image = self.geo_mainmap
    else:
        default_image = self.geo_altmap
    # per-event-type marker icons (birth, death, marriage)
    self.geo_othermap = {}
    for ident in (EventType.BIRTH,
                  EventType.DEATH,
                  EventType.MARRIAGE):
        icon = constants.ICONS.get(int(ident))
        self.geo_othermap[ident] = theme.load_surface(icon, 48, 1, None, 0)
    # bounding box / year range accumulators used when placing markers
    self.maxyear = 0
    self.minyear = 9999
    self.maxlat = 0.0
    self.minlat = 0.0
    self.maxlon = 0.0
    self.minlon = 0.0
    self.longt = 0.0
    self.latit = 0.0
    self.itemoption = None
    self.menu = None
    self.mark = None
    self.path_entry = None
    self.changemap = None
    self.clearmap = None
    self.nbplaces = 0
def add_bookmark(self, menu):
    """Bookmark the currently selected handle, warning when none is selected."""
    selected = self.selected_handles()
    if not selected:
        from gramps.gui.dialog import WarningDialog
        WarningDialog(
            _("Could Not Set a Bookmark"),
            _("A bookmark could not be set because "
              "no one was selected."),
            parent=self.uistate.window)
        return
    self.bookmarks.add(selected[0])
def add_bookmark_from_popup(self, menu, handle):
    """Bookmark *handle* chosen from the popup menu and refresh the list."""
    if not handle:
        from gramps.gui.dialog import WarningDialog
        WarningDialog(
            _("Could Not Set a Bookmark"),
            _("A bookmark could not be set because "
              "no one was selected."),
            parent=self.uistate.window)
        return
    self.uistate.set_active(handle, self.navigation_type())
    self.bookmarks.add(handle)
    self.bookmarks.redraw()
def change_page(self):
    """Called when the page changes: reset selection state and refocus the map."""
    NavigationView.change_page(self)
    self.uistate.clear_filter_results()
    self.end_selection = None
    if self.osm:
        # give keyboard focus back to the map and restore the crosshair setting
        self.osm.grab_focus()
        self.set_crosshair(config.get("geography.show_cross"))
def do_size_request(self, requisition):
    """Report the preferred widget size (400x300) via *requisition*."""
    requisition.width, requisition.height = 400, 300
def do_get_preferred_width(self):
    """Return (minimum, natural) width — GTK3 width-for-height sizing."""
    requisition = Gtk.Requisition()
    self.do_size_request(requisition)
    return requisition.width, requisition.width
def do_get_preferred_height(self):
    """Return (minimum, natural) height — GTK3 width-for-height sizing."""
    requisition = Gtk.Requisition()
    self.do_size_request(requisition)
    return requisition.height, requisition.height
def on_delete(self):
    """Persist the view configuration when the view is destroyed."""
    NavigationView.on_delete(self)
    self._config.save()
def clear_view(self):
    """Remove every marker, GPS point, track and message from the map."""
    self.place_list = []
    self.remove_all_markers()
    self.remove_all_gps()
    self.remove_all_tracks()
    self.message_layer.clear_messages()
def change_db(self, dbse):
    """
    Callback associated with DbState.  Whenever the database changes,
    this task is called: redraw the bookmarks, rebuild the tree and give
    focus back to the map.  There is no need to store the database, since
    the value is obtained from self.state.db when needed.
    """
    if self.active:
        self.bookmarks.redraw()
    self.build_tree()
    if self.osm:
        self.osm.grab_focus()
        self.set_crosshair(config.get("geography.show_cross"))
def can_configure(self):
    """
    This view is configurable; see :class:`~gui.views.pageview.PageView`.

    :return: bool
    """
    return True
def define_actions(self):
    """
    Required define_actions function for PageView.  Builds the action
    group information required.
    As this function is overridden in some plugins, the print action is
    added through a separate method so subclasses can reuse it.
    """
    NavigationView.define_actions(self)
    self.define_print_actions()
def define_print_actions(self):
    """
    Associate the print button to the PrintView action (Ctrl+P).
    """
    self._add_action('PrintView', 'document-print', _("_Print..."),
                     accel="<PRIMARY>P",
                     tip=_("Print or save the Map"),
                     callback=self.printview)
def config_connect(self):
    """
    Overridden from :class:`~gui.views.pageview.PageView`.
    Called after the ini file is initialized; used to monitor changes to
    the shared map path and centering-zoom settings.
    """
    self._config.connect("geography.path",
                         self.set_path)
    self._config.connect("geography.zoom_when_center",
                         self.set_zoom_when_center)
def set_path(self, client, cnxn_id, entry, data):
    """
    All geography views must share the same tile-cache path, so propagate
    the new value to the global config.
    """
    config.set("geography.path", entry)
def set_zoom_when_center(self, client, cnxn_id, entry, data):
    """
    All geography views must share the same zoom_when_center value, so
    propagate the new value (coerced to int) to the global config.
    """
    config.set("geography.zoom_when_center", int(entry))
#-------------------------------------------------------------------------
#
# Map Menu
#
#-------------------------------------------------------------------------
def build_nav_menu(self, obj, event, lat, lon):
    """
    Builds the right-click context menu for actions on the map:
    crosshair/lock toggles, place creation and linking, centering,
    plugin-specific entries, map-provider switching and tile-cache
    maintenance.  Returns 1 to mark the event as handled.
    """
    self.menu = Gtk.Menu()
    menu = self.menu
    menu.set_title(_('Map Menu'))

    # toggle label depends on the current crosshair state
    if config.get("geography.show_cross"):
        title = _('Remove cross hair')
    else:
        title = _('Add cross hair')
    add_item = Gtk.MenuItem(label=title)
    add_item.connect("activate", self.config_crosshair, event, lat, lon)
    add_item.show()
    menu.append(add_item)

    # toggle label depends on the current lock state
    if config.get("geography.lock"):
        title = _('Unlock zoom and position')
    else:
        title = _('Lock zoom and position')
    add_item = Gtk.MenuItem(label=title)
    add_item.connect("activate", self.config_zoom_and_position,
                     event, lat, lon)
    add_item.show()
    menu.append(add_item)

    add_item = Gtk.MenuItem(label=_("Add place"))
    add_item.connect("activate", self.add_place, event, lat, lon)
    add_item.show()
    menu.append(add_item)

    add_item = Gtk.MenuItem(label=_("Link place"))
    add_item.connect("activate", self.link_place, event, lat, lon)
    add_item.show()
    menu.append(add_item)

    add_item = Gtk.MenuItem(label=_("Add place from kml"))
    add_item.connect("activate", self.add_place_from_kml, event, lat, lon)
    add_item.show()
    menu.append(add_item)

    add_item = Gtk.MenuItem(label=_("Center here"))
    add_item.connect("activate", self.set_center, event, lat, lon)
    add_item.show()
    menu.append(add_item)

    # Add specific module menu
    self.add_specific_menu(menu, event, lat, lon)
    # Add a separator line
    add_item = Gtk.MenuItem()
    add_item.show()
    menu.append(add_item)

    map_name = constants.MAP_TITLE[config.get("geography.map_service")]
    title = _("Replace '%(map)s' by =>") % {
        'map': map_name
    }
    add_item = Gtk.MenuItem(label=title)
    add_item.show()
    menu.append(add_item)

    self.changemap = Gtk.Menu()
    changemap = self.changemap
    changemap.set_title(title)
    changemap.show()
    add_item.set_submenu(changemap)
    # show in the map menu all available providers
    for my_map in constants.MAP_TYPE:
        changemapitem = Gtk.MenuItem(label=constants.MAP_TITLE[my_map])
        changemapitem.show()
        changemapitem.connect("activate", self.change_map, my_map)
        changemap.append(changemapitem)

    reload_text = _("Reload all visible tiles for '%(map)s'.") % {
        'map': map_name
    }
    self.reloadtiles = Gtk.MenuItem(label=reload_text)
    reloadtiles = self.reloadtiles
    reloadtiles.connect("activate", self.reload_visible_tiles)
    reloadtiles.show()
    menu.append(reloadtiles)

    clear_text = _("Clear the '%(map)s' tiles cache.") % {
        'map': map_name
    }
    self.clearmap = Gtk.MenuItem(label=clear_text)
    clearmap = self.clearmap
    clearmap.connect("activate", self.clear_map,
                     constants.TILES_PATH[config.get(
                         "geography.map_service")])
    clearmap.show()
    menu.append(clearmap)

    menu.show()
    menu.popup(None, None, None,
               None, event.button, event.time)
    return 1
def reload_visible_tiles(self, menu):
    """
    Force a reload of every tile currently displayed for the active map.

    `menu` is the Gtk menu item that triggered the action (unused).
    """
    self.reload_tiles()
def clear_map(self, menu, the_map):
    """
    Wipe the on-disk tiles cache for the given map.

    `the_map` is the cache subdirectory name of the map service.
    """
    import shutil
    tiles_dir = os.sep.join((config.get('geography.path'), the_map))
    # ignore_errors: nothing to do if the cache directory does not exist
    shutil.rmtree(tiles_dir, ignore_errors=True)
def add_specific_menu(self, menu, event, lat, lon):
    """
    Add specific entry to the navigation menu.
    Must be done in the associated menu.
    """
    # abstract hook: each concrete geography view appends its own items
    raise NotImplementedError
def set_center(self, menu, event, lat, lon):
    """
    Center the map on (lat, lon) at the configured centering zoom,
    then persist the new position.
    """
    zoom = config.get("geography.zoom_when_center")
    self.osm.set_center_and_zoom(lat, lon, zoom)
    self.save_center(lat, lon)
#-------------------------------------------------------------------------
#
# Markers management
#
#-------------------------------------------------------------------------
def is_there_a_marker_here(self, event, lat, lon):
    """
    Is there a marker at this position ?

    Collect every marker whose coordinates match (lat, lon) within a
    zoom-dependent tolerance; if any match, show the bubble message.
    """
    found = False
    mark_selected = []
    self.uistate.set_busy_cursor(True)
    zoom = config.get("geography.zoom")
    # as we are not precise with our hand, reduce the precision
    # depending on the zoom. Both tables depend only on the zoom, so
    # look them up once instead of rebuilding them for every marker.
    precision = {
        1 : '%3.0f', 2 : '%3.1f', 3 : '%3.1f', 4 : '%3.1f',
        5 : '%3.2f', 6 : '%3.2f', 7 : '%3.2f', 8 : '%3.3f',
        9 : '%3.3f', 10 : '%3.3f', 11 : '%3.3f', 12 : '%3.3f',
        13 : '%3.3f', 14 : '%3.4f', 15 : '%3.4f', 16 : '%3.4f',
        17 : '%3.4f', 18 : '%3.4f'
        }.get(zoom, '%3.1f')
    shift = {
        1 : 5.0, 2 : 5.0, 3 : 3.0,
        4 : 1.0, 5 : 0.5, 6 : 0.3, 7 : 0.15,
        8 : 0.06, 9 : 0.03, 10 : 0.015,
        11 : 0.005, 12 : 0.003, 13 : 0.001,
        14 : 0.0005, 15 : 0.0003, 16 : 0.0001,
        17 : 0.0001, 18 : 0.0001
        }.get(zoom, 5.0)
    # the clicked position, rounded to the tolerance grid (loop-invariant)
    latp = precision % lat
    lonp = precision % lon
    for mark in self.sort:
        mlatp = precision % float(mark[3])
        mlonp = precision % float(mark[4])
        _LOG.debug(" compare latitude : %s with %s (precision = %s)"
                   " place='%s'", float(mark[3]), lat, precision, mark[0])
        _LOG.debug("compare longitude : %s with %s (precision = %s)"
                   " zoom=%d", float(mark[4]), lon, precision, zoom)
        latok = (float(latp) - shift) <= float(mlatp) <= (float(latp) + shift)
        lonok = (float(lonp) - shift) <= float(mlonp) <= (float(lonp) + shift)
        if latok and lonok:
            mark_selected.append(mark)
            found = True
    if found:
        self.bubble_message(event, lat, lon, mark_selected)
    self.uistate.set_busy_cursor(False)
def bubble_message(self, event, lat, lon, mark):
    """
    Display the bubble message. depends on the view.
    """
    # abstract hook: concrete geography views build their own popup
    raise NotImplementedError
def add_selection_layer(self):
    """
    Create a fresh selection layer, attach it to the map and return it.
    """
    layer = SelectionLayer()
    self.osm.layer_add(layer)
    return layer
def remove_layer(self, layer):
    """
    Remove the specified layer
    """
    # thin wrapper around the osm widget API
    self.osm.remove_layer(layer)
def add_marker(self, menu, event, lat, lon, event_type, differtype,
               count, color=None):
    """
    Add a new marker

    Choose the icon matching `event_type` (or the map's default icon)
    and hand it to the marker layer together with the marker count.
    """
    osm_services = (constants.OPENSTREETMAP,
                    constants.OPENSTREETMAP_RENDERER)
    if config.get('geography.map_service') in osm_services:
        default_image = self.geo_mainmap
    else:
        default_image = self.geo_altmap
    if event_type is not None:
        value = self.geo_othermap.get(int(event_type), default_image)
    else:
        value = default_image
    if differtype:
        # several different event types share this spot: use default icon
        value = default_image
    self.marker_layer.add_marker((float(lat), float(lon)), value,
                                 count, color=color)
def remove_all_gps(self):
    """
    Remove all gps points on the map
    """
    # delegate to the osm widget
    self.osm.gps_clear()
def remove_all_tracks(self):
    """
    Remove all tracks on the map
    """
    # delegate to the osm widget
    self.osm.track_remove_all()
def remove_all_markers(self):
    """
    Remove all markers on the map
    """
    # delegate to the marker layer
    self.marker_layer.clear_markers()
def _present_in_places_list(self, index, string):
    """
    Tell whether any entry of place_list carries `string` at `index`.
    """
    return any(entry[index] == string for entry in self.place_list)
def _append_to_places_list(self, place, evttype, name, lat,
                           longit, descr, year, icontype,
                           gramps_id, place_id, event_id, family_id,
                           color=None):
    """
    Create a list of places with coordinates.

    Each accepted entry is appended to self.place_list as
    [place, name, evttype, lat, longit, descr, year, icontype,
     gramps_id, place_id, event_id, family_id, color], and the running
    year span and marker bounding box are widened accordingly.
    """
    # only record each place once
    found = any(p[0] == place for p in self.places_found)
    if not found and (self.nbplaces <
                      self._config.get("geography.max_places")):
        # We only show the first "geography.max_places".
        # over 3000 or 4000 places, the geography become unusable.
        # In this case, filter the places ...
        self.nbplaces += 1
        self.places_found.append([place, lat, longit])
        self.place_list.append([place, name, evttype, lat,
                                longit, descr, year, icontype,
                                gramps_id, place_id, event_id, family_id,
                                color])
        self.nbmarkers += 1
        tfa = float(lat)
        tfb = float(longit)
        if year is not None:
            tfc = int(year)
            if tfc != 0:
                # widen the year span covered by the shown events
                if tfc < self.minyear:
                    self.minyear = tfc
                if tfc > self.maxyear:
                    self.maxyear = tfc
        # nudge away from exactly 0.0, which maps treat as "no location"
        tfa += 0.00000001 if tfa >= 0 else -0.00000001
        tfb += 0.00000001 if tfb >= 0 else -0.00000001
        # grow the bounding box of all markers (0.0 means "unset")
        if self.minlat == 0.0 or tfa < self.minlat:
            self.minlat = tfa
        if self.maxlat == 0.0 or tfa > self.maxlat:
            self.maxlat = tfa
        if self.minlon == 0.0 or tfb < self.minlon:
            self.minlon = tfb
        if self.maxlon == 0.0 or tfb > self.maxlon:
            self.maxlon = tfb
def _append_to_places_without_coord(self, gid, place):
    """
    Create a list of places without coordinates.

    Record the (gid, place) pair once and keep a running count of
    coordinate-less places in self.without.
    """
    # `not in` instead of `not x in y` (PEP 8 idiom); same behavior
    if [gid, place] not in self.place_without_coordinates:
        self.place_without_coordinates.append([gid, place])
        self.without += 1
def _create_markers(self):
    """
    Create all markers for the specified person.

    self.sort holds sorted place entries; consecutive entries with the
    same (lat, lon) are merged into one marker carrying a count.
    """
    if self.marker_layer is None:
        return
    self.remove_all_markers()
    self.remove_all_gps()
    self.remove_all_tracks()
    if (self.current_map is not None and
            self.current_map != config.get("geography.map_service")):
        self.change_map(self.osm, config.get("geography.map_service"))
    last = ""
    current = ""
    differtype = False
    #savetype = None
    lat = 0.0
    lon = 0.0
    icon = None
    count = 0
    self.uistate.set_busy_cursor(True)
    _LOG.debug("%s", time.strftime("start create_marker : "
               "%a %d %b %Y %H:%M:%S", time.gmtime()))
    # entry layout (see _append_to_places_list): [3]=lat, [4]=lon,
    # [7]=icon type, [12]=color
    for mark in self.sort:
        current = ([mark[3], mark[4]])
        if last == "":
            # first entry: start accumulating a run
            last = current
            lat = mark[3]
            lon = mark[4]
            icon = mark[7]
            colour = mark[12]
            differtype = False
            count = 1
            continue
        if last != current:
            # coordinates changed: flush the accumulated marker
            self.add_marker(None, None, lat, lon, icon, differtype,
                            count, color=colour)
            differtype = False
            count = 1
            last = current
            lat = mark[3]
            lon = mark[4]
            icon = mark[7]
            colour = mark[12]
        else: # This marker already exists. add info.
            count += 1
            if icon != mark[7]:
                differtype = True
    # flush the last accumulated run; (0.0, 0.0) means "no location"
    if lat != 0.0 and lon != 0.0:
        self.add_marker(None, None, lat, lon, icon, differtype,
                        count, color=mark[12])
    self._set_center_and_zoom()
    _LOG.debug("%s", time.strftime(" stop create_marker : "
               "%a %d %b %Y %H:%M:%S", time.gmtime()))
    self.uistate.set_busy_cursor(False)
def _visible_marker(self, lat, lon):
    """
    Is this marker in the visible area ?
    """
    bbox = self.osm.get_bbox()
    # shift everything by +10.0 so the comparisons below are performed
    # on consistently offset values (same trick as the original code)
    lat_s = lat + 10.0
    lon_s = lon + 10.0
    corner_a = bbox[0]
    corner_b = bbox[1]
    lat1_s = corner_a.rlat + 10.0
    lon1_s = corner_a.rlon + 10.0
    lat2_s = corner_b.rlat + 10.0
    lon2_s = corner_b.rlon + 10.0
    return (lat1_s > lat_s > lat2_s) and (lon1_s < lon_s < lon2_s)
def _autozoom_in(self, lvl, p1lat, p1lon, p2lat, p2lon):
    """
    We zoom in until at least one marker missing.
    """
    # both selection corners still visible and zoom below the cap:
    # zoom one step and re-check after the map redrew (50 ms timer)
    if ((self._visible_marker(p1lat, p1lon)
         and self._visible_marker(p2lat, p2lon))
            and lvl < 18):
        lvl += 1
        self.osm.set_zoom(lvl)
        GLib.timeout_add(int(50), self._autozoom_in, lvl,
                         p1lat, p1lon, p2lat, p2lon)
    else:
        # a corner fell out of view (or cap reached): zoom back out
        GLib.timeout_add(int(50), self._autozoom_out, lvl,
                         p1lat, p1lon, p2lat, p2lon)
def _autozoom_out(self, lvl, p1lat, p1lon, p2lat, p2lon):
    """
    We zoom out until all markers visible.
    """
    # keep stepping out on a 50 ms timer until both corners are visible
    # (or the minimum zoom is reached)
    if (not (self._visible_marker(p1lat, p1lon)
             and self._visible_marker(p2lat, p2lon))
            and lvl > 1):
        lvl -= 1
        self.osm.set_zoom(lvl)
        GLib.timeout_add(int(50), self._autozoom_out, lvl,
                         p1lat, p1lon, p2lat, p2lon)
    else:
        # done: drop the temporary selection layer if one is active
        layer = self.get_selection_layer()
        if layer:
            self.osm.layer_remove(layer)
def _autozoom(self):
    """
    Try to put all markers on the map. we start at current zoom.
    If all markers are present, continue to zoom.
    If some markers are missing : return to the zoom - 1
    We must use function called by timeout to force map updates.
    """
    level_start = self.osm.props.zoom
    p1lat, p1lon = self.begin_selection.get_degrees()
    p2lat, p2lon = self.end_selection.get_degrees()
    # midpoint of the selection rectangle
    lat = p1lat + (p2lat - p1lat) / 2
    lon = p1lon + (p2lon - p1lon) / 2
    # We center the map on the center of the region
    self.osm.set_center(lat, lon)
    self.save_center(lat, lon)
    # continue with the raw (radian) corner coordinates for visibility tests
    p1lat = self.begin_selection.rlat
    p1lon = self.begin_selection.rlon
    p2lat = self.end_selection.rlat
    p2lon = self.end_selection.rlon
    # We zoom in until at least one marker missing.
    GLib.timeout_add(int(50), self._autozoom_in, level_start,
                     p1lat, p1lon, p2lat, p2lon)
def _set_center_and_zoom(self):
    """
    Calculate the zoom.
    The best should be an auto zoom to have all markers on the screen.
    need some works here.
    we start at zoom 1 until zoom y ( for this a preference )
    If all markers are present, continue to zoom.
    If some markers are missing : return to the zoom - 1
    The following is too complex. In some case, all markers are not present.
    """
    # Select the center of the map and the zoom
    signminlon = _get_sign(self.minlon)
    signminlat = _get_sign(self.minlat)
    signmaxlon = _get_sign(self.maxlon)
    signmaxlat = _get_sign(self.maxlat)
    current = osmgpsmap.MapPoint.new_degrees(self.minlat, self.minlon)
    self.end_selection = current
    current = osmgpsmap.MapPoint.new_degrees(self.maxlat, self.maxlon)
    self.begin_selection = current
    latit = longt = 0.0
    # The original code ran the computation below once per entry of
    # self.sort, although the body never used the loop variable (and it
    # also computed two never-used span values). Running it once when at
    # least one marker exists is equivalent and O(1).
    if self.sort:
        if signminlat == signmaxlat:
            if signminlat == 1:
                latit = self.minlat+self.centerlat
            else:
                latit = self.maxlat-self.centerlat
        elif self.maxlat > self.centerlat:
            latit = self.maxlat-self.centerlat
        else:
            latit = self.minlat+self.centerlat
        if signminlon == signmaxlon:
            if signminlon == 1:
                longt = self.minlon+self.centerlon
            else:
                longt = self.maxlon-self.centerlon
        elif self.maxlon > self.centerlon:
            longt = self.maxlon-self.centerlon
        else:
            longt = self.minlon+self.centerlon
    # all maps: 0.0 for longitude and latitude means no location.
    if latit == longt == 0.0:
        latit = longt = 0.00000001
    self.latit = latit
    self.longt = longt
    if config.get("geography.lock"):
        self.osm.set_center_and_zoom(config.get("geography.center-lat"),
                                     config.get("geography.center-lon"),
                                     config.get("geography.zoom"))
    else:
        self._autozoom()
    self.save_center(self.latit, self.longt)
    config.set("geography.zoom", self.osm.props.zoom)
    self.end_selection = None
def _get_father_and_mother_name(self, event):
    """
    Return the father and mother name of a family event
    """
    dbstate = self.dbstate
    # all families that reference this event (via backlinks)
    family_list = [
        dbstate.db.get_family_from_handle(ref_handle)
        for (ref_type, ref_handle) in
        dbstate.db.find_backlink_handles(event.handle)
        if ref_type == 'Family'
    ]
    fnam = mnam = _("Unknown")
    if family_list:
        # NOTE(review): when the event belongs to several families, the
        # names from the *last* family iterated win -- confirm intended
        for family in family_list:
            father = mother = None
            handle = family.get_father_handle()
            if handle:
                father = dbstate.db.get_person_from_handle(handle)
            handle = family.get_mother_handle()
            if handle:
                mother = dbstate.db.get_person_from_handle(handle)
            fnam = _nd.display(father) if father else _("Unknown")
            mnam = _nd.display(mother) if mother else _("Unknown")
    return (fnam, mnam)
#-------------------------------------------------------------------------
#
# KML functionalities
#
#-------------------------------------------------------------------------
def load_kml_files(self, obj):
    """
    obj can be an event, a person or a place

    Scan the object's media and feed every existing .kml file to the
    kml layer.
    """
    media_list = obj.get_media_list()
    if not media_list:
        return
    for media_ref in media_list:
        handle = media_ref.get_reference_handle()
        media_obj = self.dbstate.db.get_media_from_handle(handle)
        path = media_obj.get_path()
        extension = os.path.splitext(path)[1]
        if extension == ".kml" and os.path.isfile(path):
            self.kml_layer.add_kml(path)
#-------------------------------------------------------------------------
#
# Printing functionalities
#
#-------------------------------------------------------------------------
def printview(self, obj):
    """
    Print or save the view that is currently shown
    """
    # the print path needs Gtk >= 3.11
    if Gtk.MAJOR_VERSION == 3 and Gtk.MINOR_VERSION < 11:
        from gramps.gui.dialog import WarningDialog
        WarningDialog(
            _("You can't use the print functionality"),
            _("Your Gtk version is too old."),
            parent=self.uistate.window)
        return
    # render the map widget at its current on-screen size
    req = self.osm.get_allocation()
    widthpx = req.width
    heightpx = req.height
    prt = CairoPrintSave(widthpx, heightpx, self.osm.do_draw, self.osm)
    prt.run()
#-------------------------------------------------------------------------
#
# Specific functionalities
#
#-------------------------------------------------------------------------
def center_here(self, menu, event, lat, lon, mark):
    """
    Center the map at the marker position

    `mark[3]`/`mark[4]` hold the marker's latitude and longitude.
    """
    mark_lat = float(mark[3])
    mark_lon = float(mark[4])
    self.set_center(menu, event, mark_lat, mark_lon)
def add_place_bubble_message(self, event, lat, lon, marks,
                             menu, message, mark):
    """
    Create the place menu of a marker
    """
    # leading separator
    add_item = Gtk.MenuItem()
    add_item.show()
    menu.append(add_item)
    # header entry carrying the submenu
    add_item = Gtk.MenuItem(label=message)
    add_item.show()
    menu.append(add_item)
    self.itemoption = Gtk.Menu()
    itemoption = self.itemoption
    itemoption.set_title(message)
    itemoption.show()
    add_item.set_submenu(itemoption)
    modify = Gtk.MenuItem(label=_("Edit Place"))
    modify.show()
    modify.connect("activate", self.edit_place, event, lat, lon, mark)
    itemoption.append(modify)
    center = Gtk.MenuItem(label=_("Center on this place"))
    center.show()
    center.connect("activate", self.center_here, event, lat, lon, mark)
    itemoption.append(center)
    # trailing separator
    add_item = Gtk.MenuItem()
    add_item.show()
    menu.append(add_item)
def edit_place(self, menu, event, lat, lon, mark):
    """
    Edit the selected place at the marker position

    Remember the marker (its gramps id at index 9 is used by the
    __edit_place callback) and open the place selection dialog.
    """
    self.mark = mark
    place = self.dbstate.db.get_place_from_gramps_id(self.mark[9])
    parent_list = place.get_placeref_list()
    parent = parent_list[0].ref if parent_list else None
    self.select_fct = PlaceSelection(self.uistate, self.dbstate, self.osm,
                                     self.selection_layer, self.place_list,
                                     lat, lon, self.__edit_place, parent)
def edit_person(self, menu, event, lat, lon, mark):
    """
    Edit the selected person at the marker position
    """
    # mark[8] holds the person's gramps id
    _LOG.debug("edit_person : %s", mark[8])
    person = self.dbstate.db.get_person_from_gramps_id(mark[8])
    try:
        EditPerson(self.dbstate, self.uistate, [], person)
    except WindowActiveError:
        # an editor for this person is already open
        pass
def edit_family(self, menu, event, lat, lon, mark):
    """
    Edit the selected family at the marker position
    """
    # mark[11] holds the family's gramps id
    _LOG.debug("edit_family : %s", mark[11])
    family = self.dbstate.db.get_family_from_gramps_id(mark[11])
    try:
        EditFamily(self.dbstate, self.uistate, [], family)
    except WindowActiveError:
        # an editor for this family is already open
        pass
def edit_event(self, menu, event, lat, lon, mark):
    """
    Edit the selected event at the marker position
    """
    # mark[10] holds the event's gramps id
    _LOG.debug("edit_event : %s", mark[10])
    # note: rebinds the `event` parameter (the Gtk event) to the db event
    event = self.dbstate.db.get_event_from_gramps_id(mark[10])
    try:
        EditEvent(self.dbstate, self.uistate, [], event)
    except WindowActiveError:
        # an editor for this event is already open
        pass
def add_place(self, menu, event, lat, lon):
    """
    Add a new place using longitude and latitude of location centered
    on the map
    """
    # PlaceSelection calls back into self.__add_place once confirmed
    self.select_fct = PlaceSelection(self.uistate, self.dbstate, self.osm,
                                     self.selection_layer, self.place_list,
                                     lat, lon, self.__add_place)
def add_place_from_kml(self, menu, event, lat, lon):
    """
    Add new place(s) from a kml file
    1 - ask for a kml file ?
    2 - Read the kml file.
    3 - create the place(s) with name and title found in the kml marker.
    """
    # Ask for the kml file
    filtering = Gtk.FileFilter()
    filtering.add_pattern("*.kml")
    kml = Gtk.FileChooserDialog(
        _("Select a kml file used to add places"),
        action=Gtk.FileChooserAction.OPEN,
        parent=self.uistate.window,
        buttons=(_('_Cancel'), Gtk.ResponseType.CANCEL,
                 _('_Apply'), Gtk.ResponseType.OK))
    mpath = HOME_DIR
    kml.set_current_folder(os.path.dirname(mpath))
    kml.set_filter(filtering)
    status = kml.run()
    if status == Gtk.ResponseType.OK:
        val = kml.get_filename()
        if val:
            kmlfile = Kml(val)
            points = kmlfile.add_points()
            for place in points:
                (name, coords) = place
                # use the last coordinate pair of this kml marker
                latlong = coords.pop()
                (lat, lon) = latlong
                place_name = PlaceName()
                place_name.set_value(name)
                new_place = Place()
                new_place.set_name(place_name)
                new_place.set_title(name)
                new_place.set_latitude(str(lat))
                new_place.set_longitude(str(lon))
                try:
                    EditPlace(self.dbstate, self.uistate, [], new_place)
                except WindowActiveError:
                    # an editor is already open; skip this place
                    pass
    kml.destroy()
def link_place(self, menu, event, lat, lon):
    """
    Link an existing place using longitude and latitude of location centered
    on the map
    If we have a place history, we must show all places to avoid an empty
    place selection in the PlaceSelection.
    """
    if self.uistate.get_active('Place'):
        self._createmap(None)
    selector = SelectPlace(self.dbstate, self.uistate, [])
    place = selector.run()
    if place:
        parent_list = place.get_placeref_list()
        parent = parent_list[0].ref if parent_list else None
        places_handle = self.dbstate.db.iter_place_handles()
        # count how many places share the selected place's title
        nb_places = 0
        gids = ""
        place_title = _pd.display(self.dbstate.db, place)
        for place_hdl in places_handle:
            plce = self.dbstate.db.get_place_from_handle(place_hdl)
            plce_title = _pd.display(self.dbstate.db, plce)
            if plce_title == place_title:
                nb_places += 1
                if gids == "":
                    gids = plce.gramps_id
                else:
                    gids = gids + ", " + plce.gramps_id
        if nb_places > 1:
            # ambiguous title: refuse to link and tell the user
            from gramps.gui.dialog import WarningDialog
            WarningDialog(
                _('You have at least two places with the same title.'),
                # fix: "eiher" -> "either" (translation catalogs need the
                # corresponding msgid update)
                _("The title of the places is:\n%(title)s\n"
                  "The following places are similar: %(gid)s\n"
                  "You should either rename the places or merge them.\n\n"
                  "%(bold_start)s"
                  "I can't proceed with your request"
                  "%(bold_end)s.\n") % {
                      'bold_start' : '<b>',
                      'bold_end' : '</b>',
                      'title': '<b>' + place_title + '</b>',
                      'gid': gids},
                parent=self.uistate.window
            )
        else:
            # only the gramps id (index 9) is used by __edit_place
            self.mark = [None, None, None, None, None, None, None,
                         None, None, place.gramps_id, None, None]
            self.select_fct = PlaceSelection(self.uistate,
                                             self.dbstate,
                                             self.osm,
                                             self.selection_layer,
                                             self.place_list,
                                             lat,
                                             lon,
                                             self.__edit_place,
                                             parent)
def __add_place(self, parent, plat, plon):
    """
    Add a new place using longitude and latitude of location centered
    on the map
    """
    self.select_fct.close()
    new_place = Place()
    new_place.set_latitude(str(plat))
    new_place.set_longitude(str(plon))
    if parent:
        if isinstance(parent, Place):
            placeref = PlaceRef()
            placeref.ref = parent
            new_place.add_placeref(placeref)
        else:
            # parent given as a name: look up the matching place
            # NOTE(review): if no place name matches, `found` ends up as
            # the last iterated place (or None on an empty db, which
            # would crash below) -- confirm this fallback is intended
            found = None
            for place in self.dbstate.db.iter_places():
                found = place
                if place.name.get_value() == parent:
                    break
            placeref = PlaceRef()
            placeref.ref = found.get_handle()
            new_place.add_placeref(placeref)
    try:
        EditPlace(self.dbstate, self.uistate, [], new_place)
        self.add_marker(None, None, plat, plon, None, True, 0)
    except WindowActiveError:
        pass
def __edit_place(self, parent, plat, plon):
    """
    Edit the selected place at the marker position
    """
    # callback from PlaceSelection; self.mark[9] holds the gramps id of
    # the place chosen earlier
    self.select_fct.close()
    place = self.dbstate.db.get_place_from_gramps_id(self.mark[9])
    place.set_latitude(str(plat))
    place.set_longitude(str(plon))
    try:
        EditPlace(self.dbstate, self.uistate, [], place)
    except WindowActiveError:
        pass
def __link_place(self, parent, plat, plon):
    """
    Link an existing place using longitude and latitude of location centered
    on the map
    """
    selector = SelectPlace(self.dbstate, self.uistate, [])
    place = selector.run()
    if place:
        self.select_fct.close()
        place.set_latitude(str(plat))
        place.set_longitude(str(plon))
        if parent:
            placeref = PlaceRef()
            placeref.ref = parent
            place.add_placeref(placeref)
        try:
            EditPlace(self.dbstate, self.uistate, [], place)
            self.add_marker(None, None, plat, plon, None, True, 0)
        except WindowActiveError:
            pass
#-------------------------------------------------------------------------
#
# Geography preferences
#
#-------------------------------------------------------------------------
def _get_configure_page_funcs(self):
    """
    The function which is used to create the configuration window.
    """
    # one tab per builder: the map options first, then view specifics
    return [self.map_options, self.specific_options]
def config_zoom_and_position(self, client, cnxn_id, entry, data):
    """
    Do we need to lock the zoom and position ?

    Toggles the geography.lock setting; on unlock the best center and
    zoom for the current markers is recomputed.
    """
    was_locked = config.get("geography.lock")
    config.set("geography.lock", not was_locked)
    if was_locked:
        self._set_center_and_zoom()
    self.lock = config.get("geography.lock")
def config_crosshair(self, client, cnxn_id, entry, data):
    """
    We asked to change the crosshair.

    Toggle the geography.show_cross setting and apply it to the map.
    """
    show = not config.get("geography.show_cross")
    config.set("geography.show_cross", show)
    self.set_crosshair(config.get("geography.show_cross"))
def specific_options(self, configdialog):
    """
    Add specific entry to the preference menu.
    Must be done in the associated view.
    """
    # default implementation: an empty grid with a placeholder label
    grid = Gtk.Grid()
    grid.set_border_width(12)
    grid.set_column_spacing(6)
    grid.set_row_spacing(6)
    configdialog.add_text(grid, _('Nothing for this view.'), 0)
    return _('Specific parameters'), grid
def map_options(self, configdialog):
    """
    Function that builds the widget in the configuration dialog
    for the map options.
    """
    # mirror the global geography settings into the view-local config
    self._config.set('geography.path', config.get('geography.path'))
    self._config.set('geography.zoom_when_center',
                     config.get('geography.zoom_when_center'))
    # NOTE(review): this sets 'geography.max_places' to its own current
    # value (self._config on both sides) -- possibly meant to read from
    # the global `config` like the two lines above; confirm
    self._config.set('geography.max_places',
                     self._config.get('geography.max_places'))
    grid = Gtk.Grid()
    grid.set_border_width(12)
    grid.set_column_spacing(6)
    grid.set_row_spacing(6)
    self.path_entry = Gtk.Entry()
    configdialog.add_path_box(grid,
            _('Where to save the tiles for offline mode.'),
            0, self.path_entry, config.get('geography.path'),
            self.set_tilepath, self.select_tilepath)
    configdialog.add_text(grid,
            _('If you have no more space in your file system. '
              'You can remove all tiles placed in the above path.\n'
              'Be careful! If you have no internet, you\'ll get no map.'),
            2, line_wrap=False)
    configdialog.add_slider(grid,
            _('Zoom used when centering'),
            3, 'geography.zoom_when_center',
            (2, 16))
    configdialog.add_slider(grid,
            _('The maximum number of places to show'),
            4, 'geography.max_places',
            (1000, 10000))
    configdialog.add_checkbox(grid,
            _('Use keypad for shortcuts :\n'
              'Either we choose the + and - from the keypad if we '
              'select this,\n'
              'or we use the characters from the keyboard.'),
            5, 'geography.use-keypad',
            extra_callback=self.update_shortcuts)
    return _('The map'), grid
def set_tilepath(self, *obj):
    """
    Save the tile path in the config section.

    Blank (or whitespace-only) input falls back to the default
    GEOGRAPHY_PATH; otherwise the text is stored exactly as typed.
    """
    raw_path = self.path_entry.get_text()
    if raw_path.strip():
        config.set('geography.path', raw_path)
    else:
        config.set('geography.path', GEOGRAPHY_PATH)
def select_tilepath(self, *obj):
    """
    Call a file chooser selection box to select the tile path.
    """
    selected_dir = Gtk.FileChooserDialog(
        _("Select tile cache directory for offline mode"),
        action=Gtk.FileChooserAction.SELECT_FOLDER,
        parent=self.uistate.window,
        buttons=(_('_Cancel'),
                 Gtk.ResponseType.CANCEL,
                 _('_Apply'),
                 Gtk.ResponseType.OK))
    mpath = config.get('geography.path')
    if not mpath:
        mpath = HOME_DIR
    # start browsing from the parent of the configured path
    selected_dir.set_current_folder(os.path.dirname(mpath))
    status = selected_dir.run()
    if status == Gtk.ResponseType.OK:
        val = selected_dir.get_filename()
        if val:
            self.path_entry.set_text(val)
    selected_dir.destroy()
|
django-nonrel/django | refs/heads/nonrel-1.6 | tests/utils_tests/test_itercompat.py | 569 | from django.test import TestCase
from .models import Category, Thing
class TestIsIterator(TestCase):
    def test_regression(self):
        """This failed on Django 1.5/Py2.6 because category has a next method."""
        # filtering on a model instance must not treat it as an iterator,
        # even though the model defines a `next` attribute
        category = Category.objects.create(name='category')
        Thing.objects.create(category=category)
        Thing.objects.filter(category=category)
|
mivanov/editkit | refs/heads/master | editkit/pages/forms.py | 1 | import mimetypes
from django import forms
from versionutils.merging.forms import MergeMixin
from versionutils.versioning.forms import CommentMixin
from pages.models import Page, PageFile, slugify
from pages.widgets import WikiEditor
from versionutils.merging import merge_html
class PageForm(MergeMixin, CommentMixin, forms.ModelForm):
    """Edit form for a wiki Page, with three-way merge on save conflicts."""

    # shown to the user when a concurrent edit could not be merged cleanly
    conflict_error = (
        "Warning: someone else saved this page before you. "
        "Please resolve edit conflicts and save again."
    )

    class Meta:
        model = Page
        fields = ('content',)
        widgets = {'content': WikiEditor()}

    def merge(self, yours, theirs, ancestor):
        """Three-way merge of page content; raises ValidationError on conflict."""
        # ancestor may be None
        ancestor_content = ''
        if ancestor:
            ancestor_content = ancestor['content']
        (merged_content, conflict) = merge_html(
            yours['content'], theirs['content'], ancestor_content
        )
        if conflict:
            # put the (conflict-marked) merge result back into the form so
            # the user can resolve it manually
            self.data = self.data.copy()
            self.data['content'] = merged_content
            raise forms.ValidationError(self.conflict_error)
        else:
            yours['content'] = merged_content
        return yours

    def clean_name(self):
        """Reject a name whose slug collides with another existing page."""
        # NOTE(review): 'name' is not in Meta.fields above -- this clean
        # method only runs if a subclass/form adds a `name` field; confirm
        name = self.cleaned_data['name']
        try:
            page = Page.objects.get(slug__exact=slugify(name))
            if self.instance != page:
                raise forms.ValidationError(
                    'A page with this name already exists'
                )
        except Page.DoesNotExist:
            pass
        return name
class PageFileForm(CommentMixin, forms.ModelForm):
    """Upload form for a PageFile attachment."""

    def clean(self):
        """When replacing an existing file, require the same mime type."""
        self.cleaned_data = super(PageFileForm, self).clean()
        if self.instance.name:
            # guess the type from the uploaded filename's extension
            filename = self.cleaned_data['file'].name
            (mime_type, enc) = mimetypes.guess_type(filename)
            if mime_type != self.instance.mime_type:
                raise forms.ValidationError(
                    'The new file should be of the same type')
        return self.cleaned_data

    class Meta:
        model = PageFile
        fields = ('file',)
|
fergy/kernel_lge_e0 | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex operation codes and flags (values match the kernel futex ABI)
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# mask stripping the modifier flags to recover the base futex command
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
# nanoseconds per second
NSECS_PER_SEC = 1000000000
def avg(total, n):
    """Return the mean of a running `total` over `n` samples."""
    # note: integer division under Python 2 when both args are ints
    return total / n
def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) pair into a single nanosecond timestamp."""
    return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
    """Whole-second part of a nanosecond timestamp."""
    # '/' truncates on Python 2 ints; yields a float on Python 3
    return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    """Sub-second remainder (in nanoseconds) of a nanosecond timestamp."""
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """
    Format a nanosecond timestamp as 'seconds.nanoseconds'.

    Fix: the original assignment ended with a stray trailing comma and so
    returned a 1-tuple instead of the string (it also shadowed the builtin
    `str`). Callers of the form '"%s" % nsecs_str(x)' behave identically
    with the plain string returned here.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """
    Fold `value` into the (min, max, avg, count) tuple stored at dict[key].

    The `avg` field is a smoothed running average ((old + new) / 2), not a
    true mean -- behavior kept from the original. `dict.has_key(key)` was
    replaced by `key not in dict`, which works on both Python 2 and 3, and
    the locals no longer shadow the builtins min/max/avg.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    """Clear the terminal via ANSI escapes (cursor home + erase display)."""
    print("\x1b[H\x1b[2J")
# Optional syscall-name support via the audit python bindings. When the
# package (or the machine mapping) is unavailable, warn once and let
# syscall_name() fall back to numeric ids.
audit_package_warned = False

try:
    import audit
    # map the `uname` machine string to the matching audit machine constant
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is not present in every audit release
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except Exception:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except Exception:
    if not audit_package_warned:
        audit_package_warned = True
        # function-call form: valid syntax on both Python 2 and Python 3
        # (the original py2-only `print` statement broke under py3)
        print("Install the audit-libs-python package to get syscall names")
def syscall_name(id):
    """
    Return the symbolic name for syscall number `id`, or the number as a
    string when the audit bindings (or the machine id) are unavailable.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # `audit`/`machine_id` may be undefined when the audit package is
        # missing; any lookup failure degrades to the raw number.
        # (narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit)
        return str(id)
def strerror(nr):
    """
    Return the errno symbol (e.g. 'ENOENT') for `nr`, which may be negative.
    """
    try:
        return errno.errorcode[abs(nr)]
    except (KeyError, TypeError):
        # unknown errno value, or a non-integer that abs() rejected
        # (narrowed from a bare `except:`)
        return "Unknown %d errno" % nr
|
Parisson/cartridge | refs/heads/master | cartridge/shop/migrations/0007_auto_20150921_2323.py | 7 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cartridge.shop.fields
class Migration(migrations.Migration):
    # auto-generated schema migration: make product/variation SKUs
    # nullable/blank, and make SKU uniqueness per-site instead of global

    dependencies = [
        ('shop', '0006_auto_20150916_0459'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='sku',
            field=cartridge.shop.fields.SKUField(max_length=20, null=True, verbose_name='SKU', blank=True),
        ),
        migrations.AlterField(
            model_name='productvariation',
            name='sku',
            field=cartridge.shop.fields.SKUField(max_length=20, null=True, verbose_name='SKU', blank=True),
        ),
        # enforce uniqueness of (sku, site) pairs on Product
        migrations.AlterUniqueTogether(
            name='product',
            unique_together=set([('sku', 'site')]),
        ),
    ]
|
oditorium/django-tag | refs/heads/master | tag/models/tag.py | 1 | """
a generic tag model for Django
Copyright (c) Stefan LOESCH, oditorium 2016. All rights reserved.
Licensed under the Mozilla Public License, v. 2.0 <https://mozilla.org/MPL/2.0/>
"""
__version__ = "1.5"
__version_dt__ = "2016-05-13"
__copyright__ = "Stefan LOESCH, oditorium 2016"
__license__ = "MPL v2.0"
from django.db import models
from django.core.signing import Signer, BadSignature
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
import json
#from itertools import chain
#####################################################################################################
## TAG BASE
class TagBase(object):
"""
base class for a hierarchical tag object
"""
@property
def tag(self):
"""the actual tag string, including (in case of a hierarchical tag) the hierarchy separators"""
raise NotImplementedError()
@property
def parent(self):
"""the parent of the current tag (returns the object, not the tag )"""
raise NotImplementedError()
@property
def direct_children_g(self):
"""the direct children of the current tag (returns generator of objects, not tag strings)"""
raise NotImplementedError()
@property
def is_leaf(self):
"""
a tag is a leaf iff it has no children
"""
return len(tuple(self.direct_children_g)) == 0
@property
def direct_children(self):
"""
the direct children of the current tag (returns the objects, not the tag strings)
"""
return { t for t in self.direct_children_g }
@property
def children(self):
"""
the children of the current tag (returns the objects, not the tag strings)
"""
children = self.direct_children
for t in self.direct_children:
children = children.union(t.children)
return children
@property
def family(self):
"""
the children plus the tag itself (returns set of objects, not the tag strings)
"""
return self.children.union({self})
@property
def leaves(self):
"""
all leaf-tags below self, as generator of objects
"""
if self.is_leaf: return (self,)
else: return ( t1 for t2 in self.direct_children_g for t1 in t2.leaves )
#return tuple( t.leaves for t in self.direct_children_g )
@classmethod
def all_leaves(cls, root_tags=None):
"""
generator for all leaves below root_tags (or cls.root_tags() if None)
"""
if root_tags == None: root_tags = cls.root_tags()
return ( t2 for t1 in root_tags for t2 in t1.leaves )
@classmethod
def root_tags(cls):
"""returns a generator of root tags (ie tags with no parent), ordered by id"""
raise NotImplementedError()
def delete(self, *args, **kwargs):
"""
delete that tag (and all below it)
"""
try: super().delete(*args, **kwargs)
except: raise NotImplementedError()
@classmethod
def parent_tagstr(cls, tagstr):
"""
the tag string of the parent tag
"""
try: return tagstr.rsplit(cls.hierarchy_separator, 1)[-2]
except IndexError: return None
@property
def short_tag(self):
"""
the stub tag string of the child tag
"""
return self.tag.rsplit(self.hierarchy_separator, 1)[-1]
@classmethod
def get(cls, tagstr):
"""
gets the tag object corresponding to the tag string (possibly creating it and entire hierarchy)
"""
if tagstr==None: return None
# play nicely with None tagstrings (they just result in a None tag)
if isinstance(tagstr, TagBase): return tagstr
# play nicely with tag strings already converted into tags
tag = cls.get_if_exists(tagstr)
if tag: return tag
# get_if_exists returns the tag corresponding to the tag string iff it exists, None else
# so if we get an object back, this is the tag object and we return it
parent_tagstr = cls.parent_tagstr(tagstr)
parent_tag = cls.get(parent_tagstr)
# parent_tagstr is the string representation of the parent tag
# we recursively call get to retrieve (and create, if need be!) that tag
# in case this is a top-level tag, parent_tagstr() returns None, and get() then also returns None
tag = cls.create_no_checks(tagstr, parent_tag)
# this creates new tag with string representation tagstr, and parent object parent_tag
# in case this is a top-level tag, parent_tag will be None
return tag
@classmethod
def deltag(cls, tagstr):
    """
    deletes the tag object corresponding to the tag string (possibly
    deleting the entire hierarchy below)
    """
    tag = cls.get(tagstr)
    # identity test replaces the old `!= None`
    if tag is not None:
        tag.delete()
@classmethod
def get_if_exists(cls, tagstr):
    """
    gets the tag object corresponding to the tag string if it exists, None else
    """
    # abstract hook: the lookup depends on the concrete storage backend
    raise NotImplementedError()
@classmethod
def create_no_checks(cls, tagstr, parent_tag=None):
    """creates the tag object corresponding to the tag string (must not previously exist, exception else)"""
    # abstract hook: creation depends on the concrete storage backend
    raise NotImplementedError()
@property
def depth(self):
    """
    returns the depth of the current tag in the hierarchy (root=0)

    Tags with no parent object at all report depth 1, matching tags
    whose parent is the depth-0 root.
    """
    parent = self.parent
    # identity test replaces the old `!= None`
    if parent is not None:
        return 1 + parent.depth
    return 1
hierarchy_separator = "::"
# defines the string that separates tags in the hierarchy; for example:
# assume hierarchy_separator == '::', then a::b::c is subtag of a::b is subtag of a

def __repr__(s):
    # an eval()-able-ish representation, eg "Tag.get('a::b')"
    return "{1}.get('{0.tag}')".format(s, s.__class__.__name__)

def __str__(s):
    # the string form is identical to the repr
    return s.__repr__()
#####################################################################################################
## ROOT BASE
class RootTag(TagBase):
    """
    the topmost tag of any hierarchy

    The root is virtual: it has the empty tag string, is its own parent,
    and sits at depth 0.
    """

    @property
    def tag(self):
        # the root's tag string is empty by definition
        return ""

    @property
    def parent(self):
        # the root is its own parent
        return self

    @property
    def depth(self):
        # the root anchors the hierarchy at depth zero
        return 0

    @classmethod
    def get(cls, tagstr):
        # only the empty/None tag string maps to the root
        if tagstr not in ("", None):
            raise NotImplementedError()
        return cls()

    def __repr__(s):
        return "RootTag()"
#####################################################################################################
## TAG
class Tag(TagBase, models.Model):
    """
    the actual class implementing tags

    USAGE
        Tag.hierarchy_separator = '::'
        tag = Tag.get('aaa')
        print (tag.tag)             # 'aaa'
        print (tag.short_tag)       # 'aaa'
        print (tag.depth)           # 1
        print (tag.parent.tag)      # ''
        print (tag.parent.depth)    # 0
        tag = Tag.get('aaa::bbb')
        print (tag.tag)             # 'aaa::bbb'
        print (tag.short_tag)       # 'bbb'
        print (tag.depth)           # 2
        print (tag.parent.tag)      # 'aaa'
        print (tag.parent.depth)    # 1
    """
    _tag = models.CharField(max_length=255, unique=True, blank=True, default="", null=False, db_index=True)
    # that's the actual tag, including (in case of a hierarchical tag) the tag separator
    _parent_tag = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True)
    # the parent of the current tag, if any (CASCADE removes the whole subtree)

    def __eq__(self, other):
        """Tags are equal iff their tag strings are equal."""
        if isinstance(other, self.__class__):
            # compare on the string, not the id: also works for
            # not-yet-saved tags (which have no id)
            return self._tag == other._tag
        return False

    def __hash__(self):
        # consistent with __eq__: hash the tag string directly
        # (the old `str(self._tag).__hash__()` round-trip was redundant)
        return hash(self._tag)

    @property
    def tag(self):
        """the actual tag string, including (in case of a hierarchical tag) the hierarchy separators"""
        return self._tag

    @property
    def parent(self):
        """
        the parent of the current tag (returns the object, not the tag string)
        """
        if not self._parent_tag:
            # top-level tags hang off the virtual root
            return RootTag()
        return self._parent_tag

    @property
    def direct_children_g(self):
        """
        the direct children of the current tag (returns generator of objects, not tag strings)
        """
        return (t for t in self.__class__.objects.filter(_parent_tag=self))

    @classmethod
    def root_tags(cls):
        """
        returns a generator of root tags (ie tags with no parent), ordered by id
        """
        return (t for t in cls.objects.filter(_parent_tag=None).order_by('id'))

    @classmethod
    def get_if_exists(cls, tagstr):
        """
        gets the tag object corresponding to the tag string if it exists, None else
        """
        if tagstr == "":
            return RootTag()
        try:
            return cls.objects.get(_tag=tagstr)
        except cls.DoesNotExist:
            # narrowed from a bare `except:`: only a missing tag yields
            # None; real database errors now propagate instead of being
            # silently swallowed
            return None

    @classmethod
    def create_no_checks(cls, tagstr, parent_tag=None):
        """
        creates the tag object corresponding to the tag string (must not previously exist, exception else)
        """
        if tagstr == "":
            return RootTag()
        newtag = cls(_tag=tagstr, _parent_tag=parent_tag)
        newtag.save()
        return newtag

    def __repr__(s):
        # the second format argument was unused; dropped
        return "TAG('{0.tag}')".format(s)
def TAG(tagstr):
    """convenience method for Tag.get"""
    # thin shorthand so callers can write TAG('a::b') instead of Tag.get('a::b')
    return Tag.get(tagstr)
#####################################################################################################
## TAG MIXIN
#############################################################
## ERROR / SUCCESS
def _error(msg, reference=None, status=None):
    """
    Build the JSON error response for the tag API.

    :param msg: human-readable error message, returned as `errmsg`
    :param reference: frontend reference data, echoed back unchanged
    :param status: HTTP status code; defaults to 404
    """
    # identity test replaces the old `== None`
    if status is None:
        status = 404
    return JsonResponse({'success': False, 'errmsg': msg, 'data': {}, 'reference': reference}, status=status)
def _success(data, reference=None, status=None):
    """
    Build the JSON success response for the tag API.

    :param data: payload returned under the `data` key
    :param reference: frontend reference data, echoed back unchanged
    :param status: HTTP status code; defaults to 200
    """
    # identity test replaces the old `== None`
    if status is None:
        status = 200
    return JsonResponse({'data': data, 'success': True, 'reference': reference}, status=status)
#############################################################
## EXCEPTIONS
class TokenSignatureError(RuntimeError):
    """the token signature is invalid"""

class TokenFormatError(RuntimeError):
    """the token format is invalid"""

class IllegalCommandError(RuntimeError):
    """that command is not valid"""

class ItemDoesNotExistError(RuntimeError):
    """the item does not exist"""

class TagDoesNotExistError(RuntimeError):
    """the tag does not exist"""

class TokenContentError(RuntimeError):
    """the token content is invalid"""

class TokenDefinitionError(RuntimeError):
    """bad parameters when defining a token"""
#############################################################
## TOKEN
class Token():
    """
    allows definition of signed tokens for the tag API

    A token packs (namespace, command[:params], tag_id, item_id) into a
    single signed string; ``create`` builds one, the constructor verifies
    and unpacks one.
    """
    # field separators (must not occur inside the individual fields) and
    # the signing salt
    separators = ":::"
    separator = "::"
    separator2 = ":"
    salt = "token"

    def __init__(s, token):
        """
        Verify the signature of ``token`` and unpack its payload.

        :raises TokenSignatureError: if the signature does not verify
        :raises TokenFormatError: if the payload does not have 4 fields
        """
        try:
            token = Signer(sep=s.separators, salt=s.salt).unsign(token)
        except BadSignature:
            raise TokenSignatureError(token)
        s.token = token.split(s.separator)
        if len(s.token) != 4:
            raise TokenFormatError("Invalid token format [1]")

    @classmethod
    def create(cls, namespace, command, tag_id=None, item_id=None):
        """
        create a token

        PARAMETERS
        - namespace: the token namespace (string, minimum 2 characters)
        - command: the token command (can be a string, or a list of strings if it uses parameters)
        - tag_id: the tag id (if any) this command relates to
        - item_id: the item id (if any) this command relates to
        """
        if len(namespace) < 2:
            raise TokenDefinitionError("namespace minimum 2 characters")
        if not isinstance(command, str):
            # a command with parameters arrives as a list and is joined
            # with the secondary separator, eg ['cmd', 'p1'] -> 'cmd:p1'
            command = cls.separator2.join(command)
        token = cls.separator.join([namespace, command, str(tag_id), str(item_id)])
        return Signer(sep=cls.separators, salt=cls.salt).sign(token)

    @property
    def namespace(s):
        """
        the token namespace
        """
        return s.token[0]

    @property
    def command(s):
        """
        the token command (without parameters)
        """
        return s.token[1].split(s.separator2)[0]

    @property
    def parameters(s):
        """
        the token command parameters (as list)
        """
        return s.token[1].split(s.separator2)[1:]

    @property
    def numparameters(s):
        """
        the number of token parameters
        """
        return len(s.parameters)

    @property
    def tag_id(s):
        """
        the (numeric) tag id, or None
        """
        value = s.token[2]
        # ids go through str() in create(), so None arrives as "None"
        if value == "None":
            return None
        return int(value)

    @property
    def item_id(s):
        """
        the (numeric) item id, or None
        """
        value = s.token[3]
        if value == "None":
            return None
        return int(value)

    def __str__(s):
        # BUGFIX: the placeholder was never filled in — the method
        # returned the literal string "Token({})"
        return "Token({})".format(s.token)
#############################################################
## TAG MIXIN
class TagMixin(models.Model):
    """
    a mixin for Django models, linking them to the Tag model

    NOTES
    - this mixin contains a model field (`_tag_references`); in order for this field to actually
      appear in the database table of the final class the mixin must derive from `models.Model`*
    - for this table to not appear in the database, we need the Meta class with `abstract=True`

    USAGE
        Basic usage is here. See the tests for more detailed examples.

        class MyTaggedClass(TagMixin, models.Model):
            ...

        tc = MyTaggedClass()
        tc.tag_add('mytag1')
        tc.tag_add('mytag1')
        tc.tags # {TAG('mytag1'), TAG('mytag2')}
        tc.tag_remove('mytag1')
        tc.tags # {TAG('mytag2')}
        MyTaggedClass.tagged_as('mytag2') # set(tc)

    *see <http://stackoverflow.com/questions/6014282/django-creating-a-mixin-for-reusable-model-fields>
    """
    _tag_references = models.ManyToManyField(Tag, blank=True)
    # that's the key connection to the tags field

    class Meta:
        abstract = True

    save_if_necessary = True
    # if True, tag_add will save the record if it needs to in order to establish the relationship
    # otherwise tag_add proceeds, and an exception is thrown

    @staticmethod
    def tag(tagstr):
        """
        convenience method to get a tag object from a tagstr
        """
        return Tag.get(tagstr)

    def tag_add(self, tag_or_tagstr):
        """
        adds a tag to a specific record
        """
        # identity test replaces the old `== None`; unsaved records have
        # no primary key yet, which the m2m relation requires
        if self.id is None:
            if self.save_if_necessary:
                self.save()
        self._tag_references.add(Tag.get(tag_or_tagstr))

    def tag_remove(self, tag_or_tagstr):
        """
        removes a tag from a specific record
        """
        self._tag_references.remove(Tag.get(tag_or_tagstr))

    def tag_toggle(self, tag_or_tagstr):
        """
        toggles a tag on a specific record (remove if present, add if absent)
        """
        # previously a NotImplementedError stub; the token API already
        # advertises 'toggle', so the obvious semantics are implemented
        tag = Tag.get(tag_or_tagstr)
        if self.has_tag(tag):
            self.tag_remove(tag)
        else:
            self.tag_add(tag)

    @property
    def tags(self):
        """
        returns all tags from that specific record (as set)
        """
        return set(self.tags_qs)

    @property
    def tags_str(self):
        """
        returns all tags from that specific record (as string)
        """
        return " ".join(t.tag for t in self.tags_qs)

    @property
    def tags_qs(self):
        """
        returns all tags from that specific record (as queryset)
        """
        return self._tag_references.all()

    def has_tag(self, tag_or_tagstr):
        """
        whether this item has that particular tag
        """
        tag = Tag.get(tag_or_tagstr)
        return tag in self.tags_qs

    @classmethod
    def tags_fromqs(cls, self_queryset, as_queryset=False):
        """
        returns all tags that are in relation to self_queryset (return tags as flat list or queryset)

        USAGE
            qs = MyTaggedClass.objects.filter(...)
            tags = MyTaggedClass.tags_fromqs(qs) # ['tag1', 'tag2', ...]
            tags_qs = MyTaggedClass.tags_fromqs(qs, as_queryset=True ) # queryset
        """
        # http://stackoverflow.com/questions/4823601/get-all-related-many-to-many-objects-from-a-django-queryset
        kwargs = {(cls.__name__ + "__in").lower(): self_queryset}
        tag_queryset = Tag.objects.filter(**kwargs).distinct()
        if as_queryset:
            return tag_queryset
        return list(tag_queryset.values_list('_tag', flat=True))

    @classmethod
    def tagged_as(cls, tag_or_tagstr, include_children=True, as_queryset=True):
        """
        returns all records that are tagged with this tag (and possibly its children)

        NOTES
        - if `include_children` is true'ish, all records tagged with this tag or its children
          are returned, otherwise only with this tag
        - if `as_queryset` is true'ish, a queryset is returned that can be acted upon further
          (eg by filtering); otherwise a set is returned
        """
        tag = Tag.get(tag_or_tagstr)
        # NOTE(review): `tag.family` is defined outside this chunk —
        # presumably the tag plus all of its descendants
        tags = tag.family if include_children else [tag]
        qset = cls.objects.filter(_tag_references__in=tags)
        if as_queryset:
            return qset
        return set(qset)

    ########################################
    ## TAG TOKEN

    @classmethod
    def tag_token(cls, command, tag_or_tag_id=None, item_or_item_id=None):
        """
        generic token generation

        command: one of 'add', 'remove', 'toggle'
        tag_or_tag_id, item_or_item_id: tag/item objects or their numeric ids
        """
        if command not in ('add', 'remove', 'toggle'):
            raise IllegalCommandError(command)
        # accept either objects or raw ids; None now passes through
        # unchanged (previously None crashed with AttributeError on `.id`)
        if item_or_item_id is not None and not isinstance(item_or_item_id, int):
            item_or_item_id = item_or_item_id.id
        if tag_or_tag_id is not None and not isinstance(tag_or_tag_id, int):
            tag_or_tag_id = tag_or_tag_id.id
        return Token.create(cls.__name__, command, tag_or_tag_id, item_or_item_id)

    def tag_token_add(s, tag_or_tag_id):
        """
        creates a token to allow adding a tag
        """
        return s.tag_token("add", tag_or_tag_id, s.id)

    def tag_token_remove(s, tag_or_tag_id):
        """
        creates a token to allow removing a tag
        """
        return s.tag_token("remove", tag_or_tag_id, s.id)

    def tag_token_toggle(s, tag_or_tag_id):
        """
        creates a token to allow toggling a tag
        """
        return s.tag_token("toggle", tag_or_tag_id, s.id)

    def tag_token_all(s, tag_or_tag_id):
        """
        return a dict of all tokens for this tag, item
        """
        return {
            'tag': tag_or_tag_id,
            'add': s.tag_token_add(tag_or_tag_id),
            'remove': s.tag_token_remove(tag_or_tag_id),
            'toggle': s.tag_token_toggle(tag_or_tag_id),
        }

    @property
    def tags_token_all(s):
        """
        returns a list of dicts for all tags, and all tokens for each of those tags, for this item

        NOTES
        - all tags being defined as all Tag.all_leaves
        - the dicts are those created by `tag_token_all`
        """
        return [s.tag_token_all(t) for t in Tag.all_leaves()]

    ########################################
    ## TAG TOKEN EXECUTE

    @classmethod
    def tag_token_execute(cls, token, params=None):
        """
        execute a token command

        NOTES
        - `token` is the relevant token
        - `params` are the parameters (currently unused)
        """
        t = Token(token)
        if t.namespace != cls.__name__:
            raise TokenContentError("using {} token for a {} object".format(t.namespace, cls.__name__))
        try:
            item = cls.objects.get(id=t.item_id)
        except Exception:
            # narrowed from a bare `except:` (no longer eats
            # KeyboardInterrupt/SystemExit)
            raise ItemDoesNotExistError(t.item_id)
        try:
            tag = Tag.objects.get(id=t.tag_id)
        except Exception:
            raise TagDoesNotExistError(t.tag_id)
        result = {'item_id': t.item_id, 'tag_id': t.tag_id, 'tag': tag.tag, 'short_tag': tag.short_tag}
        # dispatch on the command: add/remove/toggle, error otherwise
        if t.command == "add":
            item.tag_add(tag)
        elif t.command == "remove":
            item.tag_remove(tag)
        elif t.command == "toggle":
            item.tag_toggle(tag)
        else:
            raise IllegalCommandError(t.command)
        result['item_has_tag'] = item.has_tag(tag)
        return result

    ########################################
    ## TAG AS VIEW

    @classmethod
    def tag_as_view(cls):
        """
        returns a API view function that can be used directly in an `urls.py` file

        NOTE:
        - the view function expects POST for all requests, even those that are only reading data
        - the data has to be transmitted in json, not URL encoded; fields:
            - `token`: the API token that determines the request
            - `parameters`: additional parameters (currently ignored)
            - `reference`: frontend reference data, returned unchanged*
        - the response is json; fields:
            - `success`: true or false
            - `errmsg`: long error message**
            - `reference`: the reference data originally submitted*
            - `data.tag_id`: the ID of the relevant tag**
            - `data.tag`: the full name of the relevant tag**
            - `data.short_tag`: the short tag**
            - `data.item_id`: the ID of the relevant item**
            - `data.item_has_tag`: true or false**

        * allows for the JavaScript to more easily interpret the response
        ** presence depends on the value of `success`

        USAGE
            In the `urls.py` file:
                urlpatterns += [
                    url(r'^api/somemodel$', SomeModel.tag_as_view(), name="api_somemodel_tag"),
                ]
            In the `models.py` file:
                class SomeModel(TagMixin, models.Model):
                    ...
            In the `template.html` file: render `item.tags_token_all` and
            POST the chosen token back (see the repository tests/JS for a
            full example).
        """
        @csrf_exempt
        def view(request):
            if request.method != "POST":
                return _error("request must be POST")
            try:
                data = json.loads(request.body.decode())
            except Exception:
                # BUGFIX: a stray debugging `raise` before this return made
                # the error response unreachable (clients got a 500 instead)
                return _error('could not json-decode request body [{}]'.format(request.body.decode()))
            try:
                token = data['token']
            except (KeyError, TypeError):
                # TypeError: the decoded json was not a dict
                return _error('missing token')
            params = data.get('params')
            reference = data.get('reference')
            try:
                result = cls.tag_token_execute(token, params)
            except TokenSignatureError as e:
                return _error('token signature error [{}]'.format(str(e)), reference)
            except TokenFormatError as e:
                return _error('token format error [{}]'.format(str(e)), reference)
            except ItemDoesNotExistError as e:
                return _error('item does not exist [{}]'.format(str(e)), reference)
            except TagDoesNotExistError as e:
                return _error('tag does not exist [{}]'.format(str(e)), reference)
            except Exception as e:
                return _error('error executing token [{}::{}]'.format(type(e), str(e)), reference)
            return _success(result, reference)
        return view
#####################################################################################################
## _DUMMY
class _Dummy(TagMixin, models.Model):
    """
    a dummy model allowing to test tagging
    """
    # some text that allows to identify the record
    title = models.CharField(max_length=32, unique=True, blank=True, default="", null=False, db_index=True)

    def __repr__(self):
        # eg "_Dummy(title='x')"
        return "{}(title='{}')".format(self.__class__.__name__, self.title)
# THIS CODE SHOULD BE CONVERTED INTO UNIT TESTS
# TODO
#
# from issuetracker.models import Tag
# from issuetracker.models.tag import Token
# Token.create('myns', 'mycmd')
# s=Token.create('myns', 'mycmd',1,100)
# s
# t=Token(s)
# t.namespace
# t.command
# t.tag_id
# t.item_id
#
# s='myns::mycmd::1::100:::b9A_IT7PYroZXeBld1s0mqliyZY'
# t=Token(s)
#
#
# from issuetracker.models import Issue
#
# Issue.tag_token("add", 1, 100)
# Issue.tag_token("remove", 1, 100)
# Issue.tag_token("toggle", 1, 100)
#
# i=Issue.objects.all()[0]
# i.tag_token_add(123)
# i.tag_token_remove(123)
# i.tag_token_toggle(123)
#
# from issuetracker.models import Issue
# from issuetracker.models import Tag
# i=Issue.objects.all()[0]
# s = i.tag_token_add(123)
# s
# Issue.tag_token_execute(s)
#
# tag = Tag.objects.all()[0]
# tag
# s = i.tag_token_add(tag)
# s
# i.tags
# s = i.tag_token_add(tag)
# Issue.tag_token_execute(s)
# i.tags
# s = i.tag_token_remove(tag)
# Issue.tag_token_execute(s)
# i.tags
# s = i.tag_token_toggle(tag)
# Issue.tag_token_execute(s)
# i.tags
#
# i.tag_token_all(tag)
#
# i.tags_token_all
#
# from issuetracker.models import Issue
# from issuetracker.models import Tag
# i=Issue.objects.all()[0]
# t=Tag.objects.all()[0]
# i.has_tag(t)
|
victorbriz/rethinkdb | refs/heads/next | external/v8_3.30.33.16/build/gyp/test/same-source-file-name/gyptest-default.py | 502 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Build a .gyp with two targets that share a common .c source file.
"""
import TestGyp
# Build both targets, then check each executable's output.
test = TestGyp.TestGyp()

test.run_gyp('all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('all.gyp', chdir='relocate/src')

expected_prog1 = """\
Hello from prog1.c
Hello prog1 from func.c
"""

expected_prog2 = """\
Hello from prog2.c
Hello prog2 from func.c
"""

test.run_built_executable('prog1', chdir='relocate/src', stdout=expected_prog1)
test.run_built_executable('prog2', chdir='relocate/src', stdout=expected_prog2)

test.pass_test()
|
chrishas35/django-travis-ci | refs/heads/master | django/contrib/staticfiles/views.py | 98 | """
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import os
import posixpath
import urllib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.views import static
from django.contrib.staticfiles import finders
def serve(request, path, document_root=None, insecure=False, **kwargs):
    """
    Serve static files below a given point in the directory structure or
    from locations inferred from the staticfiles finders.

    To use, put a URL pattern such as::

        (r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve')

    in your URLconf.

    It uses the django.views.static view to serve the found files.
    """
    if not settings.DEBUG and not insecure:
        # refuse to run outside DEBUG unless explicitly overridden
        # (fixed the duplicated "the the" in the message)
        raise ImproperlyConfigured("The staticfiles view can only be used in "
                                   "debug mode or if the --insecure "
                                   "option of 'runserver' is used")
    # unquote the URL path and normalize it, stripping leading slashes so
    # it is always relative
    normalized_path = posixpath.normpath(urllib.unquote(path)).lstrip('/')
    absolute_path = finders.find(normalized_path)
    if not absolute_path:
        if path.endswith('/') or path == '':
            raise Http404("Directory indexes are not allowed here.")
        raise Http404("'%s' could not be found" % path)
    # delegate the actual file response to django.views.static
    document_root, path = os.path.split(absolute_path)
    return static.serve(request, path, document_root=document_root, **kwargs)
|
golgoth31/pybroker | refs/heads/master | __init__.py | 2 | # pybroker
# Copyright (c) 2016 David Sabatie <pybroker@notrenet.com>
#
# This file is part of Pybroker.
#
# Pybroker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pybroker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pybroker. If not, see <http://www.gnu.org/licenses/>.
|
GyrosOfWar/servo | refs/heads/master | tests/wpt/web-platform-tests/referrer-policy/generic/tools/generate.py | 115 | #!/usr/bin/env python
import os, sys, json
from common_paths import *
import spec_validator
import argparse
def expand_test_expansion_pattern(spec_test_expansion, test_expansion_schema):
    """Expand a spec pattern into {artifact: [values]}.

    '*' expands to every value the schema allows for that artifact; a
    list is kept as-is; any scalar becomes a one-element list."""
    expanded = {}
    for artifact, value in spec_test_expansion.items():
        if value == '*':
            expanded[artifact] = test_expansion_schema[artifact]
        elif isinstance(value, list):
            expanded[artifact] = value
        else:
            expanded[artifact] = [value]
    return expanded
def permute_expansion(expansion, selection=None, artifact_index=0):
    """Recursively yield every combination of artifact values in
    `expansion`, walking the artifacts in a fixed order.

    NOTE: the same dict object is yielded (and mutated) on every
    iteration; callers must copy it if they want to keep a permutation.
    """
    artifact_order = ['delivery_method', 'redirection', 'origin',
                      'source_protocol', 'target_protocol', 'subresource',
                      'referrer_url', 'name']
    # BUGFIX: `selection = {}` as a default is a mutable default argument,
    # shared between all top-level calls; create a fresh dict instead
    if selection is None:
        selection = {}
    if artifact_index >= len(artifact_order):
        # all artifacts chosen: emit the completed selection
        yield selection
        return
    artifact_key = artifact_order[artifact_index]
    for artifact_value in expansion[artifact_key]:
        selection[artifact_key] = artifact_value
        for next_selection in permute_expansion(expansion,
                                                selection,
                                                artifact_index + 1):
            yield next_selection
def generate_selection(selection, spec, subresource_path,
                       test_html_template_basename):
    """Render one generated test file for a single artifact selection.

    Fills the HTML/JS templates with the selection plus spec metadata,
    creates the output directory, and (for http-csp delivery) writes a
    companion .headers file next to the test.
    """
    # copy the spec metadata into the selection so a single dict drives
    # all the % template substitutions below
    selection['spec_name'] = spec['name']
    selection['spec_title'] = spec['title']
    selection['spec_description'] = spec['description']
    selection['spec_specification_url'] = spec['specification_url']
    selection['subresource_path'] = subresource_path
    # Oddball: it can be None, so in JS it's null.
    selection['referrer_policy_json'] = json.dumps(spec['referrer_policy'])
    test_filename = test_file_path_pattern % selection
    test_directory = os.path.dirname(test_filename)
    full_path = os.path.join(spec_directory, test_directory)
    test_html_template = get_template(test_html_template_basename)
    test_js_template = get_template("test.js.template")
    disclaimer_template = get_template('disclaimer.template')
    test_description_template = get_template("test_description.template")
    html_template_filename = os.path.join(template_directory,
                                          test_html_template_basename)
    # the disclaimer records which script/template generated the file
    generated_disclaimer = disclaimer_template \
        % {'generating_script_filename': os.path.relpath(__file__,
                                                        test_root_directory),
           'html_template_filename': os.path.relpath(html_template_filename,
                                                     test_root_directory)}
    # Adjust the template for the test invoking JS. Indent it to look nice.
    selection['generated_disclaimer'] = generated_disclaimer.rstrip()
    test_description_template = \
        test_description_template.rstrip().replace("\n", "\n" + " " * 33)
    selection['test_description'] = test_description_template % selection
    # Adjust the template for the test invoking JS. Indent it to look nice.
    indent = "\n" + " " * 6;
    test_js_template = indent + test_js_template.replace("\n", indent);
    selection['test_js'] = test_js_template % selection
    # Directory for the test files.
    try:
        os.makedirs(full_path)
    except:
        # directory may already exist; other errors surface when writing
        pass
    selection['meta_delivery_method'] = ''
    if spec['referrer_policy'] != None:
        # emit the policy via the delivery method this selection tests
        if selection['delivery_method'] == 'meta-referrer':
            selection['meta_delivery_method'] = \
                '<meta name="referrer" content="%(referrer_policy)s">' % spec
        elif selection['delivery_method'] == 'meta-csp':
            selection['meta_delivery_method'] = \
                '<meta http-equiv="Content-Security-Policy" ' + \
                'content="referrer %(referrer_policy)s">' % spec
        elif selection['delivery_method'] == 'http-csp':
            selection['meta_delivery_method'] = \
                "<!-- No meta: CSP delivered via HTTP headers. -->"
            # the policy travels as a response header: write a wptserve
            # .headers side-file next to the generated test
            test_headers_filename = test_filename + ".headers"
            with open(test_headers_filename, "w") as f:
                f.write('Content-Security-Policy: ' + \
                        'referrer %(referrer_policy)s\n' % spec)
                # TODO(kristijanburnik): Limit to WPT origins.
                f.write('Access-Control-Allow-Origin: *\n')
        elif selection['delivery_method'] == 'attr-referrer':
            # attr-referrer is supported by the JS test wrapper.
            pass
        elif selection['delivery_method'] == 'rel-noreferrer':
            # rel=noreferrer is supported by the JS test wrapper.
            pass
        else:
            raise ValueError('Not implemented delivery_method: ' \
                             + selection['delivery_method'])
    # Obey the lint and pretty format.
    if len(selection['meta_delivery_method']) > 0:
        selection['meta_delivery_method'] = "\n    " + \
                                            selection['meta_delivery_method']
    with open(test_filename, 'w') as f:
        f.write(test_html_template % selection)
def generate_test_source_files(spec_json, target):
    """Generate every test file described by spec_json for `target`
    ('release' or 'debug'), skipping selections on the exclusion list.

    NOTE: this file is Python 2 (see the `print` statement below).
    """
    test_expansion_schema = spec_json['test_expansion_schema']
    specification = spec_json['specification']
    # dump the spec as a JS file so the generated tests can load it
    spec_json_js_template = get_template('spec_json.js.template')
    with open(generated_spec_json_filename, 'w') as f:
        f.write(spec_json_js_template
                % {'spec_json': json.dumps(spec_json)})
    # Choose a debug/release template depending on the target.
    html_template = "test.%s.html.template" % target
    # Create list of excluded tests.
    exclusion_dict = {}
    for excluded_pattern in spec_json['excluded_tests']:
        excluded_expansion = \
            expand_test_expansion_pattern(excluded_pattern,
                                          test_expansion_schema)
        for excluded_selection in permute_expansion(excluded_expansion):
            excluded_selection_path = selection_pattern % excluded_selection
            # remember the path so the generation loop below can skip it
            exclusion_dict[excluded_selection_path] = True
    for spec in specification:
        for spec_test_expansion in spec['test_expansion']:
            expansion = expand_test_expansion_pattern(spec_test_expansion,
                                                      test_expansion_schema)
            for selection in permute_expansion(expansion):
                selection_path = selection_pattern % selection
                if not selection_path in exclusion_dict:
                    subresource_path = \
                        spec_json["subresource_path"][selection["subresource"]]
                    generate_selection(selection,
                                       spec,
                                       subresource_path,
                                       html_template)
                else:
                    print 'Excluding selection:', selection_path
def main(target):
    """Validate the spec JSON, then generate all tests for `target`."""
    spec_json = load_spec_json()
    spec_validator.assert_valid_spec_json(spec_json)
    generate_test_source_files(spec_json, target)
if __name__ == '__main__':
    # CLI entry point: pick the release (default) or debug template set
    parser = argparse.ArgumentParser(description='Test suite generator utility')
    parser.add_argument('-t', '--target', type = str,
                        choices = ("release", "debug"), default = "release",
                        help = 'Sets the appropriate template for generating tests')
    # TODO(kristijanburnik): Add option for the spec_json file.
    args = parser.parse_args()
    main(args.target)
|
mollstam/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/contrib/gis/geometry/regex.py | 657 | import re
# Regular expression for recognizing HEXEWKB and WKT. A prophylactic measure
# to prevent potentially malicious input from reaching the underlying C
# library. Not a substitute for good Web security programming practices.

# hex-encoded EWKB: a run of hex digits (case-insensitive)
hex_regex = re.compile(r'^[0-9A-F]+$', re.I)
# WKT, optionally prefixed by "SRID=<n>;"; exposes `srid`, `wkt` and
# `type` named groups
wkt_regex = re.compile(r'^(SRID=(?P<srid>\-?\d+);)?'
                       r'(?P<wkt>'
                       r'(?P<type>POINT|LINESTRING|LINEARRING|POLYGON|MULTIPOINT|'
                       r'MULTILINESTRING|MULTIPOLYGON|GEOMETRYCOLLECTION)'
                       r'[ACEGIMLONPSRUTYZ\d,\.\-\(\) ]+)$',
                       re.I)
# GeoJSON-ish: a braces-delimited object, optionally surrounded by whitespace
json_regex = re.compile(r'^(\s+)?\{.*}(\s+)?$', re.DOTALL)
|
opensvn/python | refs/heads/master | mymovies.py | 1 | #!/usr/bin/env python
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import moviedata
class MainWindow(QMainWindow):
def __init__(self, parent=None):
    """Build the main window: a movie container plus a central table widget."""
    super(MainWindow, self).__init__(parent)
    # the in-memory collection of movies shown by the table
    self.movies = moviedata.MovieContainer()
    self.table = QTableWidget()
    # the table fills the whole window
    self.setCentralWidget(self.table)
def updateTable(self, current=None):
self.table.clear()
self.table.setRowCount(len(self.movies))
self.table.setColumnCount(5)
self.table.setHorizontalHeaderLabels(['Title',
'Year', 'Mins', 'Acquired', 'Notes'])
self.table.setAlternatingRowColors(True)
self.table.setEditTriggers(QTableWidget.NoEditTriggers)
self.table.setSelectionBehavior(QTableWidget.SelectRows)
self.table.setSelectionMode(QTableWidget.SingleSelection)
selected = None |
claudep/pootle | refs/heads/master | pootle/core/debug.py | 3 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import logging
import time
from contextlib import contextmanager
logger = logging.getLogger("POOTLE_DEBUG")
def log_timing(start, timed=None, debug_logger=None):
    """Log, at DEBUG level, the seconds elapsed since `start`, optionally
    naming the operation that was timed."""
    active_logger = debug_logger or logger
    elapsed = time.time() - start
    if timed:
        message = "Timing for %s: %s seconds" % (timed, elapsed)
    else:
        message = "Timing: %s seconds" % elapsed
    active_logger.debug(message)
def log_new_queries(queries, debug_logger=None):
    """Log every DB query recorded after index `queries`: its duration,
    then the SQL on an indented line."""
    from django.db import connection
    active_logger = debug_logger or logger
    for query in connection.queries[queries:]:
        active_logger.debug(query["time"])
        active_logger.debug("\t%s", query["sql"])
@contextmanager
def timings(timed=None, debug_logger=None):
    """Context manager that logs how long its body took to run."""
    started = time.time()
    yield
    log_timing(started, timed, debug_logger or logger)
@contextmanager
def debug_sql(debug_logger=None):
    """Force settings.DEBUG on for the duration of the body, then log the
    queries it executed and restore the previous DEBUG value."""
    from django.conf import settings
    from django.db import connection
    previous_debug = settings.DEBUG
    # query logging only happens with DEBUG enabled
    settings.DEBUG = True
    seen = len(connection.queries)
    try:
        yield
    finally:
        log_new_queries(seen, debug_logger)
        settings.DEBUG = previous_debug
|
emmuchira/kps_erp | refs/heads/develop | erpnext/patches/v4_0/save_default_letterhead.py | 120 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """save default letterhead to set default_letter_head_content"""
    try:
        letterhead = frappe.get_doc("Letter Head", {"is_default": 1})
        letterhead.save()
    except frappe.DoesNotExistError:
        # no default letter head configured: nothing to migrate
        pass
|
tvibliani/odoo | refs/heads/8.0 | addons/payment/models/__init__.py | 389 | # -*- coding: utf-8 -*-
import payment_acquirer
import res_config
|
knittledan/imageResizer | refs/heads/master | PIL/windows/PIL/ImageMath.py | 26 | #
# The Python Imaging Library
# $Id$
#
# a simple math add-on for the Python Imaging Library
#
# History:
# 1999-02-15 fl Original PIL Plus release
# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6
# 2005-09-12 fl Fixed int() and float() for Python 2.4.1
#
# Copyright (c) 1999-2005 by Secret Labs AB
# Copyright (c) 2005 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
from PIL import _imagingmath
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
VERBOSE = 0
def _isconstant(v):
return isinstance(v, int) or isinstance(v, float)
class _Operand:
# wraps an image operand, providing standard operators
def __init__(self, im):
    # store the wrapped PIL image; all operators below delegate to `apply`
    self.im = im
def __fixup(self, im1):
    # convert image to suitable mode
    if isinstance(im1, _Operand):
        # argument was an image.
        if im1.im.mode in ("1", "L"):
            # promote bilevel/greyscale to 32-bit integer for the math ops
            return im1.im.convert("I")
        elif im1.im.mode in ("I", "F"):
            # integer and float images are usable as-is
            return im1.im
        else:
            raise ValueError("unsupported mode: %s" % im1.im.mode)
    else:
        # argument was a constant
        if _isconstant(im1) and self.im.mode in ("1", "L", "I"):
            # numeric constant against an integer-ish image: build an
            # integer constant image of matching size
            return Image.new("I", self.im.size, im1)
        else:
            # otherwise fall back to a float constant image
            return Image.new("F", self.im.size, im1)
def apply(self, op, im1, im2=None, mode=None):
im1 = self.__fixup(im1)
if im2 is None:
# unary operation
out = Image.new(mode or im1.mode, im1.size, None)
im1.load()
try:
op = getattr(_imagingmath, op+"_"+im1.mode)
except AttributeError:
raise TypeError("bad operand type for '%s'" % op)
_imagingmath.unop(op, out.im.id, im1.im.id)
else:
# binary operation
im2 = self.__fixup(im2)
if im1.mode != im2.mode:
# convert both arguments to floating point
if im1.mode != "F":
im1 = im1.convert("F")
if im2.mode != "F":
im2 = im2.convert("F")
if im1.mode != im2.mode:
raise ValueError("mode mismatch")
if im1.size != im2.size:
# crop both arguments to a common size
size = (min(im1.size[0], im2.size[0]),
min(im1.size[1], im2.size[1]))
if im1.size != size:
im1 = im1.crop((0, 0) + size)
if im2.size != size:
im2 = im2.crop((0, 0) + size)
out = Image.new(mode or im1.mode, size, None)
else:
out = Image.new(mode or im1.mode, im1.size, None)
im1.load()
im2.load()
try:
op = getattr(_imagingmath, op+"_"+im1.mode)
except AttributeError:
raise TypeError("bad operand type for '%s'" % op)
_imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id)
return _Operand(out)
# unary operators
def __bool__(self):
# an image is "true" if it contains at least one non-zero pixel
return self.im.getbbox() is not None
if bytes is str:
# Provide __nonzero__ for pre-Py3k
__nonzero__ = __bool__
del __bool__
def __abs__(self):
return self.apply("abs", self)
def __pos__(self):
return self
def __neg__(self):
return self.apply("neg", self)
# binary operators
def __add__(self, other):
return self.apply("add", self, other)
def __radd__(self, other):
return self.apply("add", other, self)
def __sub__(self, other):
return self.apply("sub", self, other)
def __rsub__(self, other):
return self.apply("sub", other, self)
def __mul__(self, other):
return self.apply("mul", self, other)
def __rmul__(self, other):
return self.apply("mul", other, self)
def __truediv__(self, other):
return self.apply("div", self, other)
def __rtruediv__(self, other):
return self.apply("div", other, self)
def __mod__(self, other):
return self.apply("mod", self, other)
def __rmod__(self, other):
return self.apply("mod", other, self)
def __pow__(self, other):
return self.apply("pow", self, other)
def __rpow__(self, other):
return self.apply("pow", other, self)
if bytes is str:
# Provide __div__ and __rdiv__ for pre-Py3k
__div__ = __truediv__
__rdiv__ = __rtruediv__
del __truediv__
del __rtruediv__
# bitwise
def __invert__(self):
return self.apply("invert", self)
def __and__(self, other):
return self.apply("and", self, other)
def __rand__(self, other):
return self.apply("and", other, self)
def __or__(self, other):
return self.apply("or", self, other)
def __ror__(self, other):
return self.apply("or", other, self)
def __xor__(self, other):
return self.apply("xor", self, other)
def __rxor__(self, other):
return self.apply("xor", other, self)
def __lshift__(self, other):
return self.apply("lshift", self, other)
def __rshift__(self, other):
return self.apply("rshift", self, other)
# logical
def __eq__(self, other):
return self.apply("eq", self, other)
def __ne__(self, other):
return self.apply("ne", self, other)
def __lt__(self, other):
return self.apply("lt", self, other)
def __le__(self, other):
return self.apply("le", self, other)
def __gt__(self, other):
return self.apply("gt", self, other)
def __ge__(self, other):
return self.apply("ge", self, other)
# conversions
def imagemath_int(self):
    """Convert the operand to 32-bit integer mode ("I")."""
    converted = self.im.convert("I")
    return _Operand(converted)
def imagemath_float(self):
    """Convert the operand to floating-point mode ("F")."""
    converted = self.im.convert("F")
    return _Operand(converted)
# logical
def imagemath_equal(self, other):
    """Element-wise equality test; the result is an "I"-mode mask image."""
    result = self.apply("eq", self, other, mode="I")
    return result
def imagemath_notequal(self, other):
    """Element-wise inequality test; the result is an "I"-mode mask image."""
    result = self.apply("ne", self, other, mode="I")
    return result
def imagemath_min(self, other):
    """Element-wise minimum of the two operands."""
    result = self.apply("min", self, other)
    return result
def imagemath_max(self, other):
    """Element-wise maximum of the two operands."""
    result = self.apply("max", self, other)
    return result
def imagemath_convert(self, mode):
    """Convert the operand to the given PIL *mode* string."""
    converted = self.im.convert(mode)
    return _Operand(converted)
# Registry of helpers exposed to eval(): every module-level function named
# "imagemath_<name>" becomes available in expressions as "<name>".
ops = {}
for _name, _func in list(globals().items()):
    if _name.startswith("imagemath_"):
        ops[_name[10:]] = _func
def eval(expression, _dict={}, **kw):
    """
    Evaluates an image expression.

    :param expression: A string containing a Python-style expression.
    :param _dict: Values to add to the evaluation context; keyword
                  arguments are merged in on top of it.
    :return: The evaluated expression. This is usually an image object, but can
             also be an integer, a floating point value, or a pixel tuple,
             depending on the expression.
    """
    # build execution namespace
    args = ops.copy()
    args.update(_dict)
    args.update(kw)
    for k, v in list(args.items()):
        if hasattr(v, "im"):
            args[k] = _Operand(v)
    # SECURITY: the expression is executed with Python's eval().  Supply a
    # restricted __builtins__ so an untrusted expression cannot reach
    # __import__/open/etc. (cf. CVE-2022-22817 in Pillow's ImageMath.eval).
    # The documented helpers (int, float, min, max, equal, ...) remain
    # available through the imagemath_* wrappers collected in `ops`;
    # builtin abs is kept so that abs(image) still dispatches to __abs__.
    args["__builtins__"] = {"abs": abs}
    out = builtins.eval(expression, args)
    try:
        # image results are unwrapped back to plain PIL images
        return out.im
    except AttributeError:
        return out
|
cameron581/kernel_msm | refs/heads/glitch-mod | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling: optional [comm] filter and [interval] seconds.
usage = "perf script -s sctop.py [comm] [interval]\n"
for_comm = None
default_interval = 3
interval = default_interval
argc = len(sys.argv)
if argc > 3:
    sys.exit(usage)
elif argc == 3:
    # both a command filter and a refresh interval were given
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif argc == 2:
    # single argument: an integer is an interval, anything else a comm
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
# per-syscall-id counters, reset after every display refresh
syscalls = autodict()
def trace_begin():
    """perf callback: fired once when the trace starts.

    Spawns the display loop on its own thread so that counting syscalls in
    the event callbacks is never blocked by terminal output or sleeps.
    (The trailing `pass` in the original was dead code and has been removed.)
    """
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """perf callback: count one sys_enter event per syscall id."""
    # When filtering on a command name, ignore everybody else's syscalls.
    if for_comm is not None and common_comm != for_comm:
        return
    # autodict returns a fresh sub-dict for unseen ids, so += raises
    # TypeError on first use; seed the counter in that case.
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
ssssam/rdflib | refs/heads/master | examples/resource.py | 8 | """
RDFLib has a :class:`~rdflib.resource.Resource` class, for a resource-centric API.
A resource acts like a URIRef with an associated graph, and allows
quickly adding or querying for triples where this resource is the
subject.
"""
from rdflib import Graph, RDF, RDFS, Literal
from rdflib.namespace import FOAF
if __name__=='__main__':
    # NOTE(review): this example uses Python 2 print statements and the
    # iterator .next() method -- it will not run under Python 3 as-is.
    g = Graph()
    # a Resource is a URIRef bound to a graph
    bob = g.resource('urn:bob')
    bob.set(RDF.type, FOAF.Person) # .set replaces all other values
    bob.set(FOAF.name, Literal("Bob"))
    bill = g.resource('urn:bill')
    bill.add(RDF.type, FOAF.Person) # add adds to existing values
    bill.add(RDF.type, FOAF.Agent)
    bill.set(RDFS.label, Literal("Bill"))
    bill.add(FOAF.knows, bob)
    # Resources returned when querying are 'auto-boxed' as resources:
    print "Bill's friend: ", bill.value(FOAF.knows).value(FOAF.name)
    # slicing ([] syntax) can also be used:
    print "Bill knows: ",
    for friend in bill[FOAF.knows]:
        print friend[FOAF.name].next(), " "
    # or even quicker with paths:
    print "Bill knows: ",
    for friend in bill[FOAF.knows/FOAF.name]:
        print friend
    # setting single properties is also possible:
    bill[RDFS.label]=Literal("William")
    print g.serialize(format='n3')
|
ptrendx/mxnet | refs/heads/master | tests/python/train/test_conv.py | 24 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys
sys.path.insert(0, '../../python')
import mxnet as mx
from mxnet.test_utils import get_mnist_ubyte
import numpy as np
import os, pickle, gzip, argparse
import logging
def get_model(use_gpu):
    """Build a small 2-layer conv net (conv-bn-relu-pool x2, fc, softmax)
    wrapped in a FeedForward model for MNIST.

    :param use_gpu: train on mx.gpu() when True, otherwise mx.cpu().
    """
    # symbol net
    data = mx.symbol.Variable('data')
    conv1= mx.symbol.Convolution(data = data, name='conv1', num_filter=32, kernel=(3,3), stride=(2,2))
    bn1 = mx.symbol.BatchNorm(data = conv1, name="bn1")
    act1 = mx.symbol.Activation(data = bn1, name='relu1', act_type="relu")
    mp1 = mx.symbol.Pooling(data = act1, name = 'mp1', kernel=(2,2), stride=(2,2), pool_type='max')
    conv2= mx.symbol.Convolution(data = mp1, name='conv2', num_filter=32, kernel=(3,3), stride=(2,2))
    bn2 = mx.symbol.BatchNorm(data = conv2, name="bn2")
    act2 = mx.symbol.Activation(data = bn2, name='relu2', act_type="relu")
    mp2 = mx.symbol.Pooling(data = act2, name = 'mp2', kernel=(2,2), stride=(2,2), pool_type='max')
    fl = mx.symbol.Flatten(data = mp2, name="flatten")
    fc2 = mx.symbol.FullyConnected(data = fl, name='fc2', num_hidden=10)
    # name 'sm' must match the iterator's label_name 'sm_label' (see get_iters)
    softmax = mx.symbol.SoftmaxOutput(data = fc2, name = 'sm')
    num_epoch = 1
    ctx=mx.gpu() if use_gpu else mx.cpu()
    model = mx.model.FeedForward(softmax, ctx,
            num_epoch=num_epoch,
            learning_rate=0.1, wd=0.0001,
            momentum=0.9)
    return model
def get_iters():
    """Download MNIST (if needed) and return (train_iter, val_iter).

    The label name 'sm_label' matches the SoftmaxOutput symbol built in
    get_model().
    """
    # check data
    get_mnist_ubyte()
    batch_size = 100
    train_dataiter = mx.io.MNISTIter(
        image="data/train-images-idx3-ubyte",
        label="data/train-labels-idx1-ubyte",
        data_shape=(1, 28, 28),
        label_name='sm_label',
        batch_size=batch_size, shuffle=True, flat=False, silent=False, seed=10)
    val_dataiter = mx.io.MNISTIter(
        image="data/t10k-images-idx3-ubyte",
        label="data/t10k-labels-idx1-ubyte",
        data_shape=(1, 28, 28),
        label_name='sm_label',
        batch_size=batch_size, shuffle=True, flat=False, silent=False)
    return train_dataiter, val_dataiter
# run default with unit test framework
# run default with unit test framework
def test_mnist():
    """Unit-test entry point: train the CPU model on MNIST and check accuracy."""
    train_iter, val_iter = get_iters()
    exec_mnist(get_model(False), train_iter, val_iter)
def exec_mnist(model, train_dataiter, val_dataiter):
    """Fit *model* on the train iterator, then assert >94% validation accuracy."""
    # print logging by default
    logging.basicConfig(level=logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logging.getLogger('').addHandler(console)
    model.fit(X=train_dataiter,
              eval_data=val_dataiter)
    logging.info('Finish fit...')
    prob = model.predict(val_dataiter)
    logging.info('Finish predict...')
    # rewind the iterator so labels line up with the predictions above
    val_dataiter.reset()
    y = np.concatenate([batch.label[0].asnumpy() for batch in val_dataiter]).astype('int')
    py = np.argmax(prob, axis=1)
    acc1 = float(np.sum(py == y)) / len(y)
    logging.info('final accuracy = %f', acc1)
    assert(acc1 > 0.94)
# run as a script
# run as a script
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', action='store_true', help='use gpu to train')
    cli_args = parser.parse_args()
    train_iter, val_iter = get_iters()
    exec_mnist(get_model(cli_args.gpu), train_iter, val_iter)
|
eugene1g/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/CygwinDownloader/cygwin-downloader.py | 120 | #!/usr/bin/env python
import os, random, sys, time, urllib
#
# Options
#
# Command-line flags (no argparse to keep this bootstrap script stdlib-light).
# Membership in the (possibly empty) argument list is equivalent to the
# original "len(sys.argv) > 1 and ... in set(...)" dance.
dry_run = "--dry-run" in sys.argv[1:]
quiet = "--quiet" in sys.argv[1:]
#
# Functions and constants
#
def download_progress_hook(block_count, block_size, total_blocks):
    """urlretrieve reporthook: print a dot for roughly half the callbacks
    so the console shows activity without being flooded."""
    if not quiet and random.random() <= 0.5:
        sys.stdout.write(".")
        sys.stdout.flush()
def download_url_to_file(url, file, message):
if not quiet:
print message + " ",
if not dry_run:
dir = os.path.dirname(file)
if len(dir) and not os.path.exists(dir):
os.makedirs(dir)
urllib.urlretrieve(url, file, download_progress_hook)
if not quiet:
print
# This is mostly just the list of North America http mirrors from http://cygwin.com/mirrors.html,
# but a few have been removed that seemed unresponsive from Cupertino.
mirror_servers = ["http://cygwin.elite-systems.org/",
                  "http://mirror.mcs.anl.gov/cygwin/",
                  "http://cygwin.osuosl.org/",
                  "http://mirrors.kernel.org/sourceware/cygwin/",
                  "http://mirrors.xmission.com/cygwin/",
                  "http://sourceware.mirrors.tds.net/pub/sourceware.org/cygwin/"]
# random.choice picks uniformly, exactly like indexing with a random index.
package_mirror_url = random.choice(mirror_servers)
def download_package(package, message):
    """Fetch one parsed setup.ini package record from the chosen mirror,
    mirroring its relative path locally."""
    url = package_mirror_url + package["path"]
    download_url_to_file(url, package["path"], message)
# Packages WebKit development needs.  These are forced into the "Base"
# category while rewriting setup.ini (see the parse loop below) so that
# Cygwin's setup.exe installs them automatically.
required_packages = frozenset(["apache",
                               "bc",
                               "bison",
                               "curl",
                               "diffutils",
                               "e2fsprogs",
                               "emacs",
                               "flex",
                               "gcc",
                               "gperf",
                               "keychain",
                               "make",
                               "minires",
                               "nano",
                               "openssh",
                               "patch",
                               "perl",
                               "perl-libwin32",
                               "python",
                               "rebase",
                               "rsync",
                               "ruby",
                               "subversion",
                               "unzip",
                               "vim",
                               "zip"])
#
# Main
#
# Download setup.ini from the mirror and parse it line-by-line, building a
# dict of package records while writing out a modified copy in which every
# required package is re-categorized as "Base".
print "Using Cygwin mirror server " + package_mirror_url + " to download setup.ini..."
urllib.urlretrieve(package_mirror_url + "setup.ini", "setup.ini.orig")
downloaded_packages_file_path = "setup.ini.orig"
downloaded_packages_file = file(downloaded_packages_file_path, "r")
if not dry_run:
    modified_packages_file = file("setup.ini", "w")
packages = {}
current_package = ''
for line in downloaded_packages_file.readlines():
    if line[0] == "@":
        # "@ name" starts a new package stanza
        current_package = line[2:-1]
        packages[current_package] = {"name": current_package, "needs_download": False, "requires": [], "path": ""}
    elif line[:10] == "category: ":
        # force required packages into Base so setup.exe auto-installs them
        if current_package in required_packages:
            line = "category: Base\n"
        if "Base" in set(line[10:-1].split()):
            packages[current_package]["needs_download"] = True
    elif line[:10] == "requires: ":
        packages[current_package]["requires"] = line[10:].split()
        packages[current_package]["requires"].sort()
    elif line[:9] == "install: " and not len(packages[current_package]["path"]):
        # only the first install: line (current version) is recorded
        end_of_path = line.find(" ", 9)
        if end_of_path != -1:
            packages[current_package]["path"] = line[9:end_of_path]
    if not dry_run:
        modified_packages_file.write(line)
downloaded_packages_file.close()
os.remove(downloaded_packages_file_path)
if not dry_run:
    modified_packages_file.close()
names_to_download = set()
package_names = packages.keys()
package_names.sort()
def add_package_and_dependencies(name):
    """Mark *name* for download and recurse into its dependencies.

    names_to_download doubles as the visited set, so dependency cycles
    terminate; unknown package names are silently ignored.
    """
    if name in names_to_download or name not in packages:
        return
    names_to_download.add(name)
    packages[name]["needs_download"] = True
    for dependency in packages[name]["requires"]:
        add_package_and_dependencies(dependency)
# Expand the download set transitively, then fetch everything plus setup.exe
# and finally hand control over to the Cygwin installer.
for name in package_names:
    if packages[name]["needs_download"]:
        add_package_and_dependencies(name)
downloaded_so_far = 0
for name in package_names:
    if packages[name]["needs_download"]:
        downloaded_so_far += 1
        download_package(packages[name], "Downloading package %3d of %3d (%s)" % (downloaded_so_far, len(names_to_download), name))
download_url_to_file("http://cygwin.com/setup.exe", "setup.exe", "Downloading setup.exe")
seconds_to_sleep = 10
print """
Finished downloading Cygwin. In %d seconds,
I will run setup.exe. Select the "Install
from Local Directory" option and browse to
"%s"
when asked for the "Local Package Directory".
""" % (seconds_to_sleep, os.getcwd())
while seconds_to_sleep > 0:
    print "%d..." % seconds_to_sleep,
    sys.stdout.flush()
    time.sleep(1)
    seconds_to_sleep -= 1
print
# NOTE(review): os.execl is called without an explicit argv[0]; exec
# conventions normally expect one -- confirm this works on the target OS.
if not dry_run:
    os.execl("setup.exe")
|
vaginessa/pyload | refs/heads/stable | module/setup.py | 35 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
from getpass import getpass
import module.common.pylgettext as gettext
import os
from os import makedirs
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join
from subprocess import PIPE
from subprocess import call
import sys
from sys import exit
from module.utils import get_console_encoding
class Setup():
    """
    pyLoads initial setup configuration assistent
    """
    def __init__(self, path, config):
        # path: pyLoad installation root (used to locate locale files);
        # config: parsed configuration object -- may be None for the
        # standalone conf_path()/set_user() entry points.
        self.path = path
        self.config = config
        self.stdin_encoding = get_console_encoding(sys.stdin.encoding)
    def start(self):
        """Run the interactive console wizard.

        Returns True when setup finished and pyLoad should restart,
        False when the user aborted or a hard dependency is missing.
        """
        langs = self.config.getMetaData("general", "language")["type"].split(";")
        lang = self.ask(u"Choose your Language / Wähle deine Sprache", "en", langs)
        gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
        translation = gettext.translation("setup", join(self.path, "locale"), languages=[lang, "en"], fallback=True)
        # install(True) publishes _() into builtins (unicode on Python 2)
        translation.install(True)
        #Input shorthand for yes
        self.yes = _("y")
        #Input shorthand for no
        self.no = _("n")
        # print ""
        # print _("Would you like to configure pyLoad via Webinterface?")
        # print _("You need a Browser and a connection to this PC for it.")
        # viaweb = self.ask(_("Start initial webinterface for configuration?"), "y", bool=True)
        # if viaweb:
        #     try:
        #         from module.web import ServerThread
        #         ServerThread.setup = self
        #         from module.web import webinterface
        #         webinterface.run_simple()
        #         return False
        #     except Exception, e:
        #         print "Setup failed with this error: ", e
        #         print "Falling back to commandline setup."
        print ""
        print _("Welcome to the pyLoad Configuration Assistent.")
        print _("It will check your system and make a basic setup in order to run pyLoad.")
        print ""
        print _("The value in brackets [] always is the default value,")
        print _("in case you don't want to change it or you are unsure what to choose, just hit enter.")
        print _(
            "Don't forget: You can always rerun this assistent with --setup or -s parameter, when you start pyLoadCore.")
        print _("If you have any problems with this assistent hit STRG-C,")
        print _("to abort and don't let him start with pyLoadCore automatically anymore.")
        print ""
        print _("When you are ready for system check, hit enter.")
        raw_input()
        basic, ssl, captcha, gui, web, js = self.system_check()
        print ""
        if not basic:
            # hard requirements missing -- bail out before any configuration
            print _("You need pycurl, sqlite and python 2.5, 2.6 or 2.7 to run pyLoad.")
            print _("Please correct this and re-run pyLoad.")
            print _("Setup will now close.")
            raw_input()
            return False
        raw_input(_("System check finished, hit enter to see your status report."))
        print ""
        print _("## Status ##")
        print ""
        avail = []
        if self.check_module("Crypto"): avail.append(_("container decrypting"))
        if ssl: avail.append(_("ssl connection"))
        if captcha: avail.append(_("automatic captcha decryption"))
        if gui: avail.append(_("GUI"))
        if web: avail.append(_("Webinterface"))
        if js: avail.append(_("extended Click'N'Load"))
        string = ""
        for av in avail:
            string += ", " + av
        print _("Features available:") + string[1:]
        print ""
        if len(avail) < 5:
            # explain every optional feature that was found missing
            print _("Featues missing: ")
            print
            if not self.check_module("Crypto"):
                print _("no py-crypto available")
                print _("You need this if you want to decrypt container files.")
                print ""
            if not ssl:
                print _("no SSL available")
                print _("This is needed if you want to establish a secure connection to core or webinterface.")
                print _("If you only want to access locally to pyLoad ssl is not usefull.")
                print ""
            if not captcha:
                print _("no Captcha Recognition available")
                print _("Only needed for some hosters and as freeuser.")
                print ""
            if not gui:
                print _("Gui not available")
                print _("The Graphical User Interface.")
                print ""
            if not js:
                print _("no JavaScript engine found")
                print _("You will need this for some Click'N'Load links. Install Spidermonkey, ossp-js, pyv8 or rhino")
            print _("You can abort the setup now and fix some dependicies if you want.")
        con = self.ask(_("Continue with setup?"), self.yes, bool=True)
        if not con:
            return False
        print ""
        print _("Do you want to change the config path? Current is %s") % abspath("")
        print _(
            "If you use pyLoad on a server or the home partition lives on an iternal flash it may be a good idea to change it.")
        path = self.ask(_("Change config path?"), self.no, bool=True)
        if path:
            self.conf_path()
            #calls exit when changed
        print ""
        print _("Do you want to configure login data and basic settings?")
        print _("This is recommend for first run.")
        con = self.ask(_("Make basic setup?"), self.yes, bool=True)
        if con:
            self.conf_basic()
        if ssl:
            print ""
            print _("Do you want to configure ssl?")
            ssl = self.ask(_("Configure ssl?"), self.no, bool=True)
            if ssl:
                self.conf_ssl()
        if web:
            print ""
            print _("Do you want to configure webinterface?")
            web = self.ask(_("Configure webinterface?"), self.yes, bool=True)
            if web:
                self.conf_web()
        print ""
        print _("Setup finished successfully.")
        print _("Hit enter to exit and restart pyLoad")
        raw_input()
        return True
    def system_check(self):
        """ make a systemcheck and return the results"""
        # returns the tuple (basic, ssl, captcha, gui, web, js) of booleans
        print _("## System Check ##")
        if sys.version_info[:2] > (2, 7):
            print _("Your python version is to new, Please use Python 2.6/2.7")
            python = False
        elif sys.version_info[:2] < (2, 5):
            print _("Your python version is to old, Please use at least Python 2.5")
            python = False
        else:
            print _("Python Version: OK")
            python = True
        curl = self.check_module("pycurl")
        self.print_dep("pycurl", curl)
        sqlite = self.check_module("sqlite3")
        self.print_dep("sqlite3", sqlite)
        basic = python and curl and sqlite
        print ""
        crypto = self.check_module("Crypto")
        self.print_dep("pycrypto", crypto)
        ssl = self.check_module("OpenSSL")
        self.print_dep("py-OpenSSL", ssl)
        print ""
        pil = self.check_module("Image")
        self.print_dep("py-imaging", pil)
        # tesseract OCR binary: bundled copy on Windows, system binary elsewhere
        if os.name == "nt":
            tesser = self.check_prog([join(pypath, "tesseract", "tesseract.exe"), "-v"])
        else:
            tesser = self.check_prog(["tesseract", "-v"])
        self.print_dep("tesseract", tesser)
        captcha = pil and tesser
        print ""
        gui = self.check_module("PyQt4")
        self.print_dep("PyQt4", gui)
        print ""
        jinja = True
        try:
            import jinja2
            v = jinja2.__version__
            if v and "unknown" not in v:
                if not v.startswith("2.5") and not v.startswith("2.6"):
                    print _("Your installed jinja2 version %s seems too old.") % jinja2.__version__
                    print _("You can safely continue but if the webinterface is not working,")
                    print _("please upgrade or deinstall it, pyLoad includes a sufficient jinja2 libary.")
                    print
                    jinja = False
        except:
            pass
        self.print_dep("jinja2", jinja)
        beaker = self.check_module("beaker")
        self.print_dep("beaker", beaker)
        web = sqlite and beaker
        from module.common import JsEngine
        js = True if JsEngine.ENGINE else False
        self.print_dep(_("JS engine"), js)
        return basic, ssl, captcha, gui, web, js
    def conf_basic(self):
        """Interactive basic setup: login credentials and core settings."""
        print ""
        print _("## Basic Setup ##")
        print ""
        print _("The following logindata is valid for CLI, GUI and webinterface.")
        from module.database import DatabaseBackend
        db = DatabaseBackend(None)
        db.setup()
        username = self.ask(_("Username"), "User")
        password = self.ask("", "", password=True)
        db.addUser(username, password)
        db.shutdown()
        print ""
        print _("External clients (GUI, CLI or other) need remote access to work over the network.")
        print _("However, if you only want to use the webinterface you may disable it to save ram.")
        self.config["remote"]["activated"] = self.ask(_("Enable remote access"), self.yes, bool=True)
        print ""
        langs = self.config.getMetaData("general", "language")
        self.config["general"]["language"] = self.ask(_("Language"), "en", langs["type"].split(";"))
        self.config["general"]["download_folder"] = self.ask(_("Downloadfolder"), "Downloads")
        self.config["download"]["max_downloads"] = self.ask(_("Max parallel downloads"), "3")
        #print _("You should disable checksum proofing, if you have low hardware requirements.")
        #self.config["general"]["checksum"] = self.ask(_("Proof checksum?"), "y", bool=True)
        reconnect = self.ask(_("Use Reconnect?"), self.no, bool=True)
        self.config["reconnect"]["activated"] = reconnect
        if reconnect:
            self.config["reconnect"]["method"] = self.ask(_("Reconnect script location"), "./reconnect.sh")
    def conf_web(self):
        """Interactive webinterface setup: bind address, port and backend."""
        print ""
        print _("## Webinterface Setup ##")
        print ""
        self.config["webinterface"]["activated"] = self.ask(_("Activate webinterface?"), self.yes, bool=True)
        print ""
        print _("Listen address, if you use 127.0.0.1 or localhost, the webinterface will only accessible locally.")
        self.config["webinterface"]["host"] = self.ask(_("Address"), "0.0.0.0")
        self.config["webinterface"]["port"] = self.ask(_("Port"), "8000")
        print ""
        print _("pyLoad offers several server backends, now following a short explanation.")
        print "builtin:", _("Default server, best choice if you dont know which one to choose.")
        print "threaded:", _("This server offers SSL and is a good alternative to builtin.")
        print "fastcgi:", _(
            "Can be used by apache, lighttpd, requires you to configure them, which is not too easy job.")
        print "lightweight:", _("Very fast alternative written in C, requires libev and linux knowlegde.")
        print "\t", _("Get it from here: https://github.com/jonashaag/bjoern, compile it")
        print "\t", _("and copy bjoern.so to module/lib")
        print
        print _(
            "Attention: In some rare cases the builtin server is not working, if you notice problems with the webinterface")
        print _("come back here and change the builtin server to the threaded one here.")
        self.config["webinterface"]["server"] = self.ask(_("Server"), "builtin",
            ["builtin", "threaded", "fastcgi", "lightweight"])
    def conf_ssl(self):
        """Interactive SSL setup: print openssl howto and toggle activation."""
        print ""
        print _("## SSL Setup ##")
        print ""
        print _("Execute these commands from pyLoad config folder to make ssl certificates:")
        print ""
        print "openssl genrsa -out ssl.key 1024"
        print "openssl req -new -key ssl.key -out ssl.csr"
        print "openssl req -days 36500 -x509 -key ssl.key -in ssl.csr > ssl.crt "
        print ""
        print _("If you're done and everything went fine, you can activate ssl now.")
        self.config["ssl"]["activated"] = self.ask(_("Activate SSL?"), self.yes, bool=True)
    def set_user(self):
        """Standalone user management menu (create/list/remove users)."""
        gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
        translation = gettext.translation("setup", join(self.path, "locale"),
            languages=[self.config["general"]["language"], "en"], fallback=True)
        translation.install(True)
        from module.database import DatabaseBackend
        db = DatabaseBackend(None)
        db.setup()
        # only shut the db down if at least one action actually touched it
        noaction = True
        try:
            while True:
                print _("Select action")
                print _("1 - Create/Edit user")
                print _("2 - List users")
                print _("3 - Remove user")
                print _("4 - Quit")
                action = raw_input("[1]/2/3/4: ")
                if not action in ("1", "2", "3", "4"):
                    continue
                elif action == "1":
                    print ""
                    username = self.ask(_("Username"), "User")
                    password = self.ask("", "", password=True)
                    db.addUser(username, password)
                    noaction = False
                elif action == "2":
                    print ""
                    print _("Users")
                    print "-----"
                    users = db.listUsers()
                    noaction = False
                    for user in users:
                        print user
                    print "-----"
                    print ""
                elif action == "3":
                    print ""
                    username = self.ask(_("Username"), "")
                    if username:
                        db.removeUser(username)
                        noaction = False
                elif action == "4":
                    break
        finally:
            if not noaction:
                db.shutdown()
    def conf_path(self, trans=False):
        """Ask for a new config path, persist it, and exit for restart.

        :param trans: install translations first (used when called standalone).
        """
        if trans:
            gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
            translation = gettext.translation("setup", join(self.path, "locale"),
                languages=[self.config["general"]["language"], "en"], fallback=True)
            translation.install(True)
        print _("Setting new configpath, current configuration will not be transfered!")
        path = self.ask(_("Configpath"), abspath(""))
        try:
            path = join(pypath, path)
            if not exists(path):
                makedirs(path)
            f = open(join(pypath, "module", "config", "configdir"), "wb")
            f.write(path)
            f.close()
            print _("Configpath changed, setup will now close, please restart to go on.")
            print _("Press Enter to exit.")
            raw_input()
            exit()
        except Exception, e:
            print _("Setting config path failed: %s") % str(e)
    def print_dep(self, name, value):
        """Print Status of dependency"""
        if value:
            print _("%s: OK") % name
        else:
            print _("%s: missing") % name
    def check_module(self, module):
        """Return True if *module* can be imported."""
        try:
            __import__(module)
            return True
        except:
            return False
    def check_prog(self, command):
        """Return True if *command* (argument list) can be executed."""
        pipe = PIPE
        try:
            call(command, stdout=pipe, stderr=pipe)
            return True
        except:
            return False
    def ask(self, qst, default, answers=[], bool=False, password=False):
        """produce one line to asking for input"""
        # Build the hint shown in brackets/parentheses after the question.
        if answers:
            info = "("
            for i, answer in enumerate(answers):
                info += (", " if i != 0 else "") + str((answer == default and "[%s]" % answer) or answer)
            info += ")"
        elif bool:
            if default == self.yes:
                info = "([%s]/%s)" % (self.yes, self.no)
            else:
                info = "(%s/[%s])" % (self.yes, self.no)
        else:
            info = "[%s]" % default
        if password:
            # ask twice until both entries match and are long enough
            p1 = True
            p2 = False
            while p1 != p2:
                # getpass(_("Password: ")) will crash on systems with broken locales (Win, NAS)
                sys.stdout.write(_("Password: "))
                p1 = getpass("")
                if len(p1) < 4:
                    print _("Password too short. Use at least 4 symbols.")
                    continue
                sys.stdout.write(_("Password (again): "))
                p2 = getpass("")
            if p1 == p2:
                return p1
            else:
                print _("Passwords did not match.")
        while True:
            try:
                input = raw_input(qst + " %s: " % info)
            except KeyboardInterrupt:
                print "\nSetup interrupted"
                exit()
            input = input.decode(self.stdin_encoding)
            if input.strip() == "":
                input = default
            if bool:
                # yes, true,t are inputs for booleans with value true
                if input.lower().strip() in [self.yes, _("yes"), _("true"), _("t"), "yes"]:
                    return True
                # no, false,f are inputs for booleans with value false
                elif input.lower().strip() in [self.no, _("no"), _("false"), _("f"), "no"]:
                    return False
                else:
                    print _("Invalid Input")
                    continue
            if not answers:
                return input
            else:
                if input in answers:
                    return input
                else:
                    print _("Invalid Input")
if __name__ == "__main__":
    # Manual smoke test: run the assistant from a source checkout
    # (pyLoad root is one level above this module) without a config object.
    assistant = Setup(join(abspath(dirname(__file__)), ".."), None)
    assistant.start()
|
uglyboxer/linear_neuron | refs/heads/master | net-p3/lib/python3.5/site-packages/scipy/sparse/csgraph/setup.py | 137 | from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for scipy.sparse.csgraph.

    :param parent_package: dotted name of the parent package.
    :param top_path: path to the source-tree root.
    :return: a populated ``Configuration`` instance.
    """
    import numpy
    from numpy.distutils.misc_util import Configuration
    config = Configuration('csgraph', parent_package, top_path)
    config.add_data_dir('tests')
    # Every csgraph extension is a single generated C source compiled
    # against the numpy headers; register them uniformly instead of
    # repeating the same add_extension call five times.
    for ext_name in ('_shortest_path', '_traversal', '_min_spanning_tree',
                     '_reordering', '_tools'):
        config.add_extension(ext_name,
                             sources=[ext_name + '.c'],
                             include_dirs=[numpy.get_include()])
    return config
|
ilastikdev/ilastik | refs/heads/master | ilastik/applets/dataSelection/dataSelectionApplet.py | 1 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import os
import glob
import argparse
import collections
import logging
logger = logging.getLogger(__name__)
from lazyflow.utility import PathComponents, isUrl
from ilastik.applets.base.applet import Applet
from opDataSelection import OpMultiLaneDataSelectionGroup, DatasetInfo
from dataSelectionSerializer import DataSelectionSerializer, Ilastik05DataSelectionDeserializer
class DataSelectionApplet( Applet ):
    """
    This applet allows the user to select sets of input data,
    which are provided as outputs in the corresponding top-level applet operator.
    """
    # Default help text shown by the GUI when the workflow supplies none.
    DEFAULT_INSTRUCTIONS = "Use the controls shown to the right to add image files to this workflow."
    def __init__(self, workflow, title, projectFileGroupName, supportIlastik05Import=False, batchDataGui=False, force5d=False, instructionText=DEFAULT_INSTRUCTIONS, max_lanes=None):
        """
        :param workflow: Parent workflow; becomes the parent of the top-level operator.
        :param title: Applet title (forwarded to the ``Applet`` base class).
        :param projectFileGroupName: Project-file group name used by the serializer.
        :param supportIlastik05Import: If True, also register a deserializer for
            legacy ilastik-0.5 project files.
        :param batchDataGui: If True, the GUI is created in Batch mode (see
            :py:meth:`getMultiLaneGui`).
        :param force5d: Forwarded to ``OpMultiLaneDataSelectionGroup``.
        :param instructionText: Instruction text displayed in the GUI.
        :param max_lanes: Optional cap on the number of data lanes the GUI permits.
        """
        self.__topLevelOperator = OpMultiLaneDataSelectionGroup(parent=workflow, force5d=force5d)
        super(DataSelectionApplet, self).__init__( title, syncWithImageIndex=False )
        self._serializableItems = [ DataSelectionSerializer(self.topLevelOperator, projectFileGroupName) ]
        if supportIlastik05Import:
            self._serializableItems.append(Ilastik05DataSelectionDeserializer(self.topLevelOperator))
        self._instructionText = instructionText
        self._gui = None
        self._batchDataGui = batchDataGui
        self._title = title
        self._max_lanes = max_lanes
        self.busy = False
    #
    # GUI
    #
    def getMultiLaneGui( self ):
        """Lazily construct (on first call) and return the DataSelectionGui.

        The import is deferred so headless runs never pull in GUI modules.
        """
        if self._gui is None:
            from dataSelectionGui import DataSelectionGui, GuiMode
            guiMode = { True: GuiMode.Batch, False: GuiMode.Normal }[self._batchDataGui]
            self._gui = DataSelectionGui( self,
                                          self.topLevelOperator,
                                          self._serializableItems[0],
                                          self._instructionText,
                                          guiMode,
                                          self._max_lanes )
        return self._gui
    #
    # Top-level operator
    #
    @property
    def topLevelOperator(self):
        """The applet's ``OpMultiLaneDataSelectionGroup`` (owns all dataset state)."""
        return self.__topLevelOperator
    #
    # Project serialization
    #
    @property
    def dataSerializers(self):
        """Serializers used to save/load this applet's state in the project file."""
        return self._serializableItems
    def parse_known_cmdline_args(self, cmdline_args):
        """
        Helper function for headless workflows.
        Parses command-line args that can be used to configure the ``DataSelectionApplet`` top-level operator
        and returns ``(parsed_args, unused_args)``, similar to ``argparse.ArgumentParser.parse_known_args()``
        Relative paths are converted to absolute paths **according to ``os.getcwd()``**,
        not according to the project file location, since this more likely to be what headless users expect.
        .. note: If the top-level operator was configured with multiple 'roles', then the input files for
                 each role can be configured separately:
                 $ python ilastik.py [other workflow options] --my-role-A inputA1.png inputA2.png --my-role-B inputB1.png, inputB2.png
                 If the workflow has only one role (or only one required role), then the role-name flag can be omitted:
                 # python ilastik.py [other workflow options] input1.png input2.png
        See also: :py:meth:`configure_operator_with_parsed_args()`.
        """
        role_names = self.topLevelOperator.DatasetRoles.value
        arg_parser = argparse.ArgumentParser()
        if role_names:
            # One optional multi-value flag per role, e.g. --raw-data a.png b.png
            for role_name in role_names:
                arg_name = self._role_name_to_arg_name(role_name)
                arg_parser.add_argument('--' + arg_name, nargs='+', help='List of input files for the {} role'.format( role_name ))
        # Finally, a catch-all for role 0 (if the workflow only has one role, there's no need to provide role names
        arg_parser.add_argument('input_files', nargs='*', help='List of input files to process.')
        arg_parser.add_argument('--preconvert_stacks', help="Convert image stacks to temporary hdf5 files before loading them.", action='store_true', default=False)
        parsed_args, unused_args = arg_parser.parse_known_args(cmdline_args)
        for i, path in enumerate( parsed_args.input_files ):
            # Replace '~' with home dir
            parsed_args.input_files[i] = os.path.expanduser( path )
        # Check for errors: Do all input files exist?
        all_input_paths = list(parsed_args.input_files)
        for role_name in role_names:
            arg_name = self._role_name_to_arg_name(role_name)
            role_paths = getattr(parsed_args, arg_name)
            if role_paths:
                all_input_paths += role_paths
        error = False
        for p in all_input_paths:
            if isUrl(p):
                # Don't error-check urls in advance.
                continue
            p = PathComponents(p).externalPath
            if '*' in p:
                # Globstrings are valid as long as they match at least one file.
                if len(glob.glob(p)) == 0:
                    logger.error("Could not find any files for globstring: {}".format(p))
                    logger.error("Check your quotes!")
                    error = True
            elif not os.path.exists(p):
                logger.error("Input file does not exist: " + p)
                error = True
        if error:
            raise RuntimeError("Could not find one or more input files. See logged errors.")
        return parsed_args, unused_args
    def _role_name_to_arg_name(self, role_name):
        """Convert a role name (e.g. 'Raw Data') into a cmdline flag name (e.g. 'raw_data')."""
        arg_name = role_name
        arg_name = arg_name.lower()
        arg_name = arg_name.replace(' ', '_').replace('-', '_')
        return arg_name
    def configure_operator_with_parsed_args(self, parsed_args):
        """
        Helper function for headless workflows.
        Configures this applet's top-level operator according to the settings provided in ``parsed_args``.
        :param parsed_args: Must be an ``argparse.Namespace`` as returned by :py:meth:`parse_known_cmdline_args()`.
        """
        role_names = self.topLevelOperator.DatasetRoles.value
        role_paths = collections.OrderedDict()
        if role_names:
            for role_index, role_name in enumerate(role_names):
                arg_name = self._role_name_to_arg_name(role_name)
                input_paths = getattr(parsed_args, arg_name)
                role_paths[role_index] = input_paths
        if parsed_args.input_files:
            # We allow the file list to go to the 'default' role, but only if no other roles were explicitly configured.
            for role_index, input_paths in role_paths.items():
                if input_paths:
                    # FIXME: This error message could be more helpful.
                    role_args = map( self._role_name_to_arg_name, role_names )
                    role_args = map( lambda s: '--' + s, role_args )
                    role_args_str = ", ".join( role_args )
                    raise Exception("Invalid command line arguments: All roles must be configured explicitly.\n"
                                    "Use the following flags to specify which files are matched with which inputs:\n"
                                    + role_args_str )
            role_paths = { 0 : parsed_args.input_files }
        for role_index, input_paths in role_paths.items():
            # If the user doesn't want image stacks to be copied into the project file,
            # we generate hdf5 volumes in a temporary directory and use those files instead.
            if parsed_args.preconvert_stacks:
                import tempfile
                input_paths = self.convertStacksToH5( input_paths, tempfile.gettempdir() )
            # Build one DatasetInfo per input path for this role.
            input_infos = []
            for p in input_paths:
                info = DatasetInfo()
                info.location = DatasetInfo.Location.FileSystem
                info.filePath = p
                comp = PathComponents(p)
                # Convert all (non-url) paths to absolute
                # (otherwise they are relative to the project file, which probably isn't what the user meant)
                if not isUrl(p):
                    comp.externalPath = os.path.abspath(comp.externalPath)
                    info.filePath = comp.totalPath()
                info.nickname = comp.filenameBase
                # Remove globstring syntax.
                if '*' in info.nickname:
                    info.nickname = info.nickname.replace('*', '')
                if os.path.pathsep in info.nickname:
                    info.nickname = PathComponents(info.nickname.split(os.path.pathsep)[0]).fileNameBase
                input_infos.append(info)
            # Grow the lane list if needed, then assign each info to its lane/role slot.
            opDataSelection = self.topLevelOperator
            existing_lanes = len(opDataSelection.DatasetGroup)
            opDataSelection.DatasetGroup.resize( max(len(input_infos), existing_lanes) )
            for lane_index, info in enumerate(input_infos):
                opDataSelection.DatasetGroup[lane_index][role_index].setValue( info )
            # Warn once if any configured input reports a 2d-preferring storage layout.
            need_warning = False
            for lane_index in range(len(input_infos)):
                output_slot = opDataSelection.ImageGroup[lane_index][role_index]
                if output_slot.meta.prefer_2d:
                    need_warning = True
                    break
            if need_warning:
                logger.warn("*******************************************************************************************")
                logger.warn("Some of your input data is stored in a format that is not efficient for 3D access patterns.")
                logger.warn("Performance may suffer as a result. For best performance, use a chunked HDF5 volume.")
                logger.warn("*******************************************************************************************")
    @classmethod
    def convertStacksToH5(cls, filePaths, stackVolumeCacheDir):
        """
        If any of the files in filePaths appear to be globstrings for a stack,
        convert the given stack to hdf5 format.
        Return the filePaths list with globstrings replaced by the paths to the new hdf5 volumes.
        """
        import hashlib
        import pickle
        import h5py
        from lazyflow.graph import Graph
        from lazyflow.operators.ioOperators import OpStackToH5Writer
        filePaths = list(filePaths)
        for i, path in enumerate(filePaths):
            if '*' in path:
                globstring = path
                # Embrace paranoia:
                # We want to make sure we never re-use a stale cache file for a new dataset,
                # even if the dataset is located in the same location as a previous one and has the same globstring!
                # Create a sha-1 of the file name and modification date.
                sha = hashlib.sha1()
                files = [k.replace('\\', '/') for k in glob.glob( path )]
                for f in files:
                    # NOTE(review): update() is fed str objects here, which only
                    # works on Python 2; Python 3 would require bytes.
                    sha.update(f)
                    sha.update(pickle.dumps(os.stat(f).st_mtime))
                stackFile = sha.hexdigest() + '.h5'
                stackPath = os.path.join( stackVolumeCacheDir, stackFile ).replace('\\', '/')
                # Overwrite original path
                filePaths[i] = stackPath + "/volume/data"
                # Generate the hdf5 if it doesn't already exist
                if os.path.exists(stackPath):
                    logger.info( "Using previously generated hdf5 volume for stack {}".format(path) )
                    logger.info( "Volume path: {}".format(filePaths[i]) )
                else:
                    logger.info( "Generating hdf5 volume for stack {}".format(path) )
                    logger.info( "Volume path: {}".format(filePaths[i]) )
                    if not os.path.exists( stackVolumeCacheDir ):
                        os.makedirs( stackVolumeCacheDir )
                    with h5py.File(stackPath) as f:
                        # Configure the conversion operator
                        opWriter = OpStackToH5Writer( graph=Graph() )
                        opWriter.hdf5Group.setValue(f)
                        opWriter.hdf5Path.setValue("volume/data")
                        opWriter.GlobString.setValue(globstring)
                        # Initiate the write
                        success = opWriter.WriteImage.value
                        assert success, "Something went wrong when generating an hdf5 file from an image sequence."
        return filePaths
    def configureRoleFromJson(self, lane, role, dataset_info_namespace):
        """Configure one (lane, role) dataset slot from a JSON-derived namespace.

        If the file path contains a globstring ('*'), the stack is first imported
        into the project file as a local dataset, with coarse progress logging.
        """
        opDataSelection = self.topLevelOperator
        logger.debug( "Configuring dataset for role {}".format( role ) )
        logger.debug( "Params: {}".format(dataset_info_namespace) )
        datasetInfo = DatasetInfo()
        datasetInfo.updateFromJson( dataset_info_namespace )
        # Check for globstring, which means we need to import the stack first.
        if '*' in datasetInfo.filePath:
            totalProgress = [-100]
            def handleStackImportProgress( progress ):
                # Only log when progress crosses a 10% boundary.
                if progress / 10 != totalProgress[0] / 10:
                    totalProgress[0] = progress
                    logger.info( "Importing stack: {}%".format( totalProgress[0] ) )
            serializer = self.dataSerializers[0]
            serializer.progressSignal.connect( handleStackImportProgress )
            serializer.importStackAsLocalDataset( datasetInfo )
        opDataSelection.DatasetGroup[lane][role].setValue( datasetInfo )
|
kivy/kivy | refs/heads/master | examples/widgets/sequenced_images/uix/custom_button.py | 4 |
# A parenthesized bare string is NOT a tuple: the original
# `__all__ = ('AnimatedButton')` bound __all__ to the plain string
# 'AnimatedButton', so `from ... import *` would iterate it
# character-by-character.  The trailing comma makes it the intended 1-tuple.
__all__ = ('AnimatedButton', )
from kivy.factory import Factory
from kivy.uix.label import Label
from kivy.uix.image import Image
from kivy.properties import StringProperty, OptionProperty, \
ObjectProperty, BooleanProperty
class AnimatedButton(Label):
    '''A button-like Label that shows a (possibly animated) background image
    and dispatches `on_press` / `on_release` events in response to touches.
    '''
    # 'down' while a grabbed touch is active, 'normal' otherwise.
    state = OptionProperty('normal', options=('normal', 'down'))
    allow_stretch = BooleanProperty(True)
    keep_ratio = BooleanProperty(False)
    border = ObjectProperty(None)
    # Delay between animation frames of the background image.
    anim_delay = ObjectProperty(None)
    background_normal = StringProperty(
        'atlas://data/images/defaulttheme/button')
    # Texture of the internal image, exposed so drawing rules can use it.
    texture_background = ObjectProperty(None)
    background_down = StringProperty(
        'atlas://data/images/defaulttheme/button_pressed')
    def __init__(self, **kwargs):
        super(AnimatedButton, self).__init__(**kwargs)
        self.register_event_type('on_press')
        self.register_event_type('on_release')
        # borderImage.border by default is ...
        self.border = (16, 16, 16, 16)
        # Image to display depending on state
        self.img = Image(
            source=self.background_normal,
            allow_stretch=self.allow_stretch,
            keep_ratio=self.keep_ratio,
            mipmap=True)
        # reset animation if anim_delay is changed
        def anim_reset(*l):
            self.img.anim_delay = self.anim_delay
        self.bind(anim_delay=anim_reset)
        self.anim_delay = .1
        # update self.texture when image.texture changes
        self.img.bind(texture=self.on_tex_changed)
        self.on_tex_changed()
        # update image source when background image is changed
        def background_changed(*l):
            self.img.source = self.background_normal
            self.anim_delay = .1
        self.bind(background_normal=background_changed)
    def on_tex_changed(self, *largs):
        # Mirror the internal image's texture into the public property.
        self.texture_background = self.img.texture
    def _do_press(self):
        self.state = 'down'
    def _do_release(self):
        self.state = 'normal'
    def on_touch_down(self, touch):
        # Grab the touch, swap to the 'down' background (keeping the current
        # frame delay), and dispatch on_press.  Returns True if consumed.
        if not self.collide_point(touch.x, touch.y):
            return False
        if repr(self) in touch.ud:
            return False
        touch.grab(self)
        touch.ud[repr(self)] = True
        _animdelay = self.img.anim_delay
        self.img.source = self.background_down
        self.img.anim_delay = _animdelay
        self._do_press()
        self.dispatch('on_press')
        return True
    def on_touch_move(self, touch):
        # Only consume moves of touches we started tracking in on_touch_down.
        return repr(self) in touch.ud
    def on_touch_up(self, touch):
        # Release the grab, restore the 'normal' background, dispatch on_release.
        if touch.grab_current is not self:
            return
        assert(repr(self) in touch.ud)
        touch.ungrab(self)
        _animdelay = self.img._coreimage.anim_delay
        self.img.source = self.background_normal
        # Assigning self.anim_delay propagates to the image via the
        # anim_reset binding set up in __init__.
        self.anim_delay = _animdelay
        self._do_release()
        self.dispatch('on_release')
        return True
    def on_press(self):
        # Default (empty) handler; the event exists so users can bind to it.
        pass
    def on_release(self):
        # Default (empty) handler; the event exists so users can bind to it.
        pass
# Register with Kivy's Factory so the class can be referenced by name
# (e.g. from kv language rules).
Factory.register('AnimatedButton', cls=AnimatedButton)
|
ledtvavs/repository.ledtv | refs/heads/master | script.module.urlresolver/lib/urlresolver/plugins/rpnet.py | 3 | """
urlresolver XBMC Addon
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import json
from urlresolver import common
from urlresolver.common import i18n
from urlresolver.resolver import UrlResolver, ResolverError
# Module-level logger for this resolver; disabled right away so it stays
# silent unless the host application re-enables logging.
logger = common.log_utils.Logger.get_logger(__name__)
logger.disable()
class RPnetResolver(UrlResolver):
    """Premium-link resolver for the rpnet.biz service.

    NOTE(review): this module targets Python 2 (it relies on
    ``urllib.urlencode``, which moved in Python 3).
    """
    name = "RPnet"
    # '*' marks this as a universal (any-host) resolver; see isUniversal().
    domains = ["*"]
    def __init__(self):
        self.net = common.Net()
        # Lazily-populated caches used by valid_url().
        self.patterns = None
        self.hosts = None
    # UrlResolver methods
    def get_media_url(self, host, media_id):
        """Ask the rpnet client API to generate a premium link for media_id.

        :raises ResolverError: if the API reports an error or returns no link.
        """
        username = self.get_setting('username')
        password = self.get_setting('password')
        url = 'https://premium.rpnet.biz/client_api.php'
        query = urllib.urlencode({'username': username, 'password': password, 'action': 'generate', 'links': media_id})
        url = url + '?' + query
        response = self.net.http_GET(url).content
        response = json.loads(response)
        if 'links' in response and response['links']:
            link = response['links'][0]
            if 'generated' in link:
                return link['generated']
            elif 'error' in link:
                raise ResolverError(link['error'])
        else:
            msg = 'No Link Returned'
            if 'error' in response and response['error']:
                msg += ': %s' % (response['error'][0])
            raise ResolverError(msg)
    def get_url(self, host, media_id):
        # rpnet consumes the original URL verbatim as the media id.
        return media_id
    def get_host_and_id(self, url):
        return 'rpnet.biz', url
    @common.cache.cache_method(cache_limit=8)
    def get_all_hosters(self):
        """Fetch and compile the regex patterns of hosters rpnet supports.

        Returns an empty list (and logs) on any failure.
        """
        try:
            patterns = []
            url = 'http://premium.rpnet.biz/hoster.json'
            response = self.net.http_GET(url).content
            hosters = json.loads(response)
            logger.log_debug('rpnet patterns: %s' % (hosters))
            patterns = [re.compile(pattern) for pattern in hosters['supported']]
        except Exception as e:
            logger.log_error('Error getting RPNet patterns: %s' % (e))
        return patterns
    @common.cache.cache_method(cache_limit=8)
    def get_hosts(self):
        """Fetch the plain host-name list rpnet supports (empty list on failure)."""
        try:
            hosts = []
            url = 'http://premium.rpnet.biz/hoster2.json'
            response = self.net.http_GET(url).content
            hosts = json.loads(response)['supported']
            logger.log_debug('rpnet hosts: %s' % (hosts))
        except Exception as e:
            logger.log_error('Error getting RPNet hosts: %s' % (e))
        return hosts
    def valid_url(self, url, host):
        """True when the full url matches a hoster pattern, or (failing a url)
        the bare host appears in the supported-hosts list."""
        if url:
            if self.patterns is None:
                self.patterns = self.get_all_hosters()
            if any(pattern.search(url) for pattern in self.patterns):
                return True
        elif host:
            if self.hosts is None:
                self.hosts = self.get_hosts()
            if host.startswith('www.'): host = host.replace('www.', '')
            if any(host in item for item in self.hosts):
                return True
        return False
    @classmethod
    def get_settings_xml(cls):
        # Standard resolver settings plus login / username / password fields.
        xml = super(cls, cls).get_settings_xml()
        xml.append('<setting id="%s_login" type="bool" label="%s" default="false"/>' % (cls.__name__, i18n('login')))
        xml.append('<setting id="%s_username" enable="eq(-1,true)" type="text" label="%s" default=""/>' % (cls.__name__, i18n('username')))
        xml.append('<setting id="%s_password" enable="eq(-2,true)" type="text" label="%s" option="hidden" default=""/>' % (cls.__name__, i18n('password')))
        return xml
    @classmethod
    def isUniversal(self):
        # A universal resolver is consulted for every host (domains == ['*']).
        return True
|
joeybrown/MeetupRestApp | refs/heads/master | App/api/urls.py | 1 | from django.conf.urls import patterns, url, include
from django.contrib import admin
# Standard Django boilerplate: locate and import each installed app's admin
# module so its registrations run at URLconf load time.
admin.autodiscover()
urlpatterns = patterns('',
#SOAP-like URLS (Non-RESTful)
url(r'^get_group_info/((?P<group_name>.+)/)?$', 'App.api.views.get_group_info', name='get_group_info'),
url(r'^get_group_events/$', 'App.api.views.get_group_events', name='get_group_events'),
url(r'^get_user_meetup_id/$', 'App.api.views.get_user_meetup_id', name='get_user_meetup_id'),
#RESTful URLs
url(r'^rsvps(/(?P<rsvp_id>.+))?$', 'App.api.views.rsvp', name='rsvp'),
url(r'^events(/(?P<event_id>.+))?$', 'App.api.views.event', name='event'),
) |
andrei-alpha/seep-analytics | refs/heads/master | monitor/util.py | 1 | import re
import psutil
import requests
# This function will be called from Metrics or Resources class
def send(self, payload):
success = False
pause = 1
while not success and self.working:
try:
requests.post(self.host + '/event', data=payload)
success = True
except requests.ConnectionError:
self.log.debug('Connection problems. Retrying in', pause, ' sec..')
self.sleep(pause)
pause = min(60, pause * 2)
def getJVMArgs(out, pid):
  """Return the whitespace-split line for `pid` from process-listing output.

  `out` is an iterable of lines; the first line whose leading token equals
  `pid` is split on single spaces and returned.  Preserves the historical
  contract of returning an empty *string* (not a list) when nothing matches.
  """
  # A list comprehension instead of filter() keeps this correct on both
  # Python 2 (filter -> list) and Python 3, where filter() returns an
  # iterator and the original len(filter(...)) raised TypeError.
  matches = [line for line in out if re.search('^' + str(pid) + r'\s', line)]
  if matches:
    return matches[0].split(' ')
  return ''
def killAll(proc_pid):
try:
process = psutil.Process(proc_pid)
for proc in process.get_children(recursive=True):
proc.kill()
process.kill()
except:
pass |
mese79/three.js | refs/heads/dev | utils/exporters/blender/addons/io_three/exporter/scene.py | 32 | import os
from .. import constants, logger
from . import (
base_classes,
texture,
material,
geometry,
object as object_,
utilities,
io,
api
)
from bpy import context
class Scene(base_classes.BaseScene):
    """Class that handles the contruction of a Three scene"""
    def __init__(self, filepath, options=None):
        """
        :param filepath: destination path for the exported scene file
        :param options: export options mapping (defaults to an empty dict)
        """
        logger.debug("Scene().__init__(%s, %s)", filepath, options)
        # Default top-level keys of the exported scene document.
        self._defaults = {
            constants.METADATA: constants.DEFAULT_METADATA.copy(),
            constants.GEOMETRIES: [],
            constants.MATERIALS: [],
            constants.IMAGES: [],
            constants.TEXTURES: [],
            constants.ANIMATION: []
        }
        base_classes.BaseScene.__init__(self, filepath, options or {})
        source_file = api.scene_name()
        if source_file:
            self[constants.METADATA][constants.SOURCE_FILE] = source_file
        self.__init_animation()
    def __init_animation(self):
        # Seed the animation list with one default clip at the scene's fps.
        # NOTE(review): the trailing semicolon and 'pass' are harmless leftovers.
        self[constants.ANIMATION].append({
            constants.NAME: "default",
            constants.FPS : context.scene.render.fps,
            constants.KEYFRAMES: []
        });
        pass
    @property
    def valid_types(self):
        """
        :return: list of valid node types
        """
        # Meshes are always exported; empties, cameras and lights are opt-in.
        valid_types = [api.constants.MESH]
        if self.options.get(constants.HIERARCHY, False):
            valid_types.append(api.constants.EMPTY)
        if self.options.get(constants.CAMERAS):
            logger.info("Adding cameras to valid object types")
            valid_types.append(api.constants.CAMERA)
        if self.options.get(constants.LIGHTS):
            logger.info("Adding lights to valid object types")
            valid_types.append(api.constants.LAMP)
        return valid_types
    def geometry(self, value):
        """Find a geometry node that matches either a name
        or uuid value.
        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().geometry(%s)", value)
        return _find_node(value, self[constants.GEOMETRIES])
    def image(self, value):
        """Find a image node that matches either a name
        or uuid value.
        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().image%s)", value)
        return _find_node(value, self[constants.IMAGES])
    def material(self, value):
        """Find a material node that matches either a name
        or uuid value.
        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().material(%s)", value)
        return _find_node(value, self[constants.MATERIALS])
    def parse(self):
        """Execute the parsing of the scene"""
        logger.debug("Scene().parse()")
        # Textures/materials are optional; geometries and objects always parse.
        if self.options.get(constants.MAPS):
            self._parse_textures()
        if self.options.get(constants.MATERIALS):
            self._parse_materials()
        self._parse_geometries()
        self._parse_objects()
    def texture(self, value):
        """Find a texture node that matches either a name
        or uuid value.
        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().texture(%s)", value)
        return _find_node(value, self[constants.TEXTURES])
    def write(self):
        """Write the parsed scene to disk."""
        logger.debug("Scene().write()")
        data = {}
        embed_anim = self.options.get(constants.EMBED_ANIMATION, True)
        embed = self.options.get(constants.EMBED_GEOMETRY, True)
        compression = self.options.get(constants.COMPRESSION)
        extension = constants.EXTENSIONS.get(
            compression,
            constants.EXTENSIONS[constants.JSON])
        export_dir = os.path.dirname(self.filepath)
        for key, value in self.items():
            if key == constants.GEOMETRIES:
                # Geometries are either embedded in the scene file or written
                # out as sibling files and referenced by URL.
                geometries = []
                for geom in value:
                    if not embed_anim:
                        geom.write_animation(export_dir)
                    geom_data = geom.copy()
                    if not embed:
                        geom_data.pop(constants.DATA)
                        url = 'geometry.%s%s' % (geom.node, extension)
                        geometry_file = os.path.join(export_dir, url)
                        geom.write(filepath=geometry_file)
                        geom_data[constants.URL] = os.path.basename(url)
                    geometries.append(geom_data)
                data[key] = geometries
            elif isinstance(value, list):
                # Shallow-copy each node dict so serialization can't mutate us.
                data[key] = []
                for each in value:
                    data[key].append(each.copy())
            elif isinstance(value, dict):
                data[key] = value.copy()
        io.dump(self.filepath, data, options=self.options)
        if self.options.get(constants.EXPORT_TEXTURES) and not self.options.get(constants.EMBED_TEXTURES):
            texture_folder = self.options.get(constants.TEXTURE_FOLDER)
            for geo in self[constants.GEOMETRIES]:
                logger.info("Copying textures from %s", geo.node)
                geo.copy_textures(texture_folder)
    def _parse_geometries(self):
        """Locate all geometry nodes and parse them"""
        logger.debug("Scene()._parse_geometries()")
        # this is an important step. please refer to the doc string
        # on the function for more information
        api.object.prep_meshes(self.options)
        geometries = []
        # now iterate over all the extracted mesh nodes and parse each one
        for mesh in api.object.extracted_meshes():
            logger.info("Parsing geometry %s", mesh)
            geo = geometry.Geometry(mesh, self)
            geo.parse()
            geometries.append(geo)
        logger.info("Added %d geometry nodes", len(geometries))
        self[constants.GEOMETRIES] = geometries
    def _parse_materials(self):
        """Locate all non-orphaned materials and parse them"""
        logger.debug("Scene()._parse_materials()")
        materials = []
        for material_name in api.material.used_materials():
            logger.info("Parsing material %s", material_name)
            materials.append(material.Material(material_name, parent=self))
        logger.info("Added %d material nodes", len(materials))
        self[constants.MATERIALS] = materials
    def _parse_objects(self):
        """Locate all valid objects in the scene and parse them"""
        logger.debug("Scene()._parse_objects()")
        # NOTE(review): scene_name is computed here but never used below.
        try:
            scene_name = self[constants.METADATA][constants.SOURCE_FILE]
        except KeyError:
            scene_name = constants.SCENE
        # The root 'object' node represents the scene itself.
        self[constants.OBJECT] = object_.Object(None, parent=self)
        self[constants.OBJECT][constants.TYPE] = constants.SCENE.title()
        self[constants.UUID] = utilities.id()
        objects = []
        if self.options.get(constants.HIERARCHY, False):
            nodes = api.object.assemblies(self.valid_types, self.options)
        else:
            nodes = api.object.nodes(self.valid_types, self.options)
        for node in nodes:
            logger.info("Parsing object %s", node)
            obj = object_.Object(node, parent=self[constants.OBJECT])
            objects.append(obj)
        logger.info("Added %d object nodes", len(objects))
        self[constants.OBJECT][constants.CHILDREN] = objects
    def _parse_textures(self):
        """Locate all non-orphaned textures and parse them"""
        logger.debug("Scene()._parse_textures()")
        textures = []
        for texture_name in api.texture.textures():
            logger.info("Parsing texture %s", texture_name)
            tex_inst = texture.Texture(texture_name, self)
            textures.append(tex_inst)
        logger.info("Added %d texture nodes", len(textures))
        self[constants.TEXTURES] = textures
def _find_node(value, manifest):
    """Return the first node in *manifest* whose uuid or node name equals
    *value*; logs and returns ``None`` when nothing matches.

    :param value: name or uuid
    :param manifest: manifest of nodes to search
    :type value: str
    :type manifest: list
    """
    for candidate in manifest:
        matches_uuid = candidate.get(constants.UUID) == value
        matches_name = candidate.node == value
        if matches_uuid or matches_name:
            return candidate
    # Fell through the whole manifest without a match.
    logger.debug("No matching node for %s", value)
|
njpataki/weasl | refs/heads/qa | weasl/main.py | 1 | import argparse
from . import commands
def top_level_command():
    """Entry point for the ``weasl`` CLI.

    Builds an argparse parser with one sub-command per handler, parses
    ``sys.argv``, and dispatches to the selected handler's ``execute``.
    """
    COMMANDS = {'startproject': commands.StartProjectCommand(),
                'startclassifier': commands.StartClassifier(),
                'train': commands.Train()}
    parser = argparse.ArgumentParser(
        prog='weasl',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    subparsers = parser.add_subparsers(help='sub-command helps')
    # dict.iteritems() is Python-2-only; .items() behaves identically here
    # on both Python 2 and 3, keeping this entry point importable on 3.x.
    for command, handler in COMMANDS.items():
        command_subparser = subparsers.add_parser(command)
        command_subparser.set_defaults(handler=handler)
        # Let each handler register its own flags on its subparser.
        command_subparser = handler.setup_clparser(command_subparser)
    clargs = parser.parse_args()
    clargs.handler.execute(clargs)
|
Qalthos/ansible | refs/heads/devel | lib/ansible/module_utils/json_utils.py | 89 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
# changes are propagated there.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
for line in trailing_junk:
if line.strip():
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
break
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
|
semonte/intellij-community | refs/heads/master | python/lib/Lib/distutils/command/install_egg_info.py | 438 | """distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
    """Install an .egg-info file for the package"""
    description = "Install package's PKG-INFO metadata as an .egg-info file"
    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]
    def initialize_options(self):
        # Resolved in finalize_options from the 'install_lib' command.
        self.install_dir = None
    def finalize_options(self):
        self.set_undefined_options('install_lib',('install_dir','install_dir'))
        # e.g. "my_pkg-1.0-py2.7.egg-info"
        # NOTE(review): sys.version[:3] yields "3.1" on Python 3.10+; fine for
        # the Python 2 era this file targets, but wrong on modern interpreters.
        basename = "%s-%s-py%s.egg-info" % (
            to_filename(safe_name(self.distribution.get_name())),
            to_filename(safe_version(self.distribution.get_version())),
            sys.version[:3]
        )
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = [self.target]
    def run(self):
        """Remove any stale target (directory, link, or file), ensure the
        install dir exists, then write the distribution's PKG-INFO metadata."""
        target = self.target
        if os.path.isdir(target) and not os.path.islink(target):
            dir_util.remove_tree(target, dry_run=self.dry_run)
        elif os.path.exists(target):
            self.execute(os.unlink,(self.target,),"Removing "+target)
        elif not os.path.isdir(self.install_dir):
            self.execute(os.makedirs, (self.install_dir,),
                         "Creating "+self.install_dir)
        log.info("Writing %s", target)
        if not self.dry_run:
            f = open(target, 'w')
            self.distribution.metadata.write_pkg_file(f)
            f.close()
    def get_outputs(self):
        # Paths created by run(); used by the 'install' command's record keeping.
        return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name.

    Any run of characters other than ASCII letters, digits and '.' is
    collapsed into a single '-'.
    """
    return re.sub(r'[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string.

    Spaces become dots; every other run of non-alphanumeric, non-dot
    characters collapses into a single dash.
    """
    dotted = version.replace(' ', '.')
    return re.sub(r'[^A-Za-z0-9.]+', '-', dotted)
def to_filename(name):
    """Convert a project or version name to its filename-escaped form.

    Every '-' is replaced with '_' so names and versions can be joined
    with '-' unambiguously in egg-info file names.
    """
    return name.replace('-', '_')
|
zzzeek/sqlalchemy | refs/heads/master | lib/sqlalchemy/dialects/mysql/mariadb.py | 3 | from .base import MySQLDialect
class MariaDBDialect(MySQLDialect):
    # Marker flag consulted by the shared MySQL dialect code to enable
    # MariaDB-specific behavior.
    is_mariadb = True
    # Opt in to SQLAlchemy's compiled-statement caching.
    supports_statement_cache = True
    name = "mariadb"
def loader(driver):
    """Create a MariaDB dialect class for the given DBAPI *driver* name.

    Imports ``sqlalchemy.dialects.mysql.<driver>`` and builds a new class
    that combines ``MariaDBDialect`` with that driver's dialect, with
    statement caching enabled.
    """
    mysql_pkg = __import__(
        "sqlalchemy.dialects.mysql.%s" % driver
    ).dialects.mysql
    driver_dialect = getattr(mysql_pkg, driver).dialect
    bases = (MariaDBDialect, driver_dialect)
    return type(
        "MariaDBDialect_%s" % driver,
        bases,
        {"supports_statement_cache": True},
    )
|
blindroot/django | refs/heads/master | django/views/generic/__init__.py | 597 | from django.views.generic.base import RedirectView, TemplateView, View
from django.views.generic.dates import (
ArchiveIndexView, DateDetailView, DayArchiveView, MonthArchiveView,
TodayArchiveView, WeekArchiveView, YearArchiveView,
)
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView, DeleteView, FormView, UpdateView,
)
from django.views.generic.list import ListView
# Public names re-exported by django.views.generic: the class-based view
# hierarchy imported above plus the GenericViewError exception.
__all__ = [
    'View', 'TemplateView', 'RedirectView', 'ArchiveIndexView',
    'YearArchiveView', 'MonthArchiveView', 'WeekArchiveView', 'DayArchiveView',
    'TodayArchiveView', 'DateDetailView', 'DetailView', 'FormView',
    'CreateView', 'UpdateView', 'DeleteView', 'ListView', 'GenericViewError',
]
class GenericViewError(Exception):
    """Raised when a generic class-based view hits an unrecoverable problem."""
|
eduNEXT/edx-platform | refs/heads/master | openedx/core/djangoapps/credit/migrations/0008_creditrequirement_remove_order.py | 4 | from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``order`` column from CreditRequirement."""
    # Must run after 0007, which — judging by its name — copied the values
    # out of ``order`` before it is removed here.
    dependencies = [
        ('credit', '0007_creditrequirement_copy_values'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='creditrequirement',
            name='order',
        ),
    ]
|
Pablo126/SSBW | refs/heads/master | Tarea4/tarea4/lib/python3.5/site-packages/django/contrib/gis/db/backends/postgis/introspection.py | 330 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.postgresql.introspection import DatabaseIntrospection
class GeoIntrospectionError(Exception):
    """Internal signal: the column was not found in ``geometry_columns``."""
class PostGISIntrospection(DatabaseIntrospection):
    """Database introspection for PostGIS-enabled PostgreSQL databases."""
    # Reverse dictionary for PostGIS geometry types not populated until
    # introspection is actually performed.
    postgis_types_reverse = {}
    # PostGIS system/metadata tables that must never be introspected as
    # ordinary user tables.
    ignored_tables = DatabaseIntrospection.ignored_tables + [
        'geography_columns',
        'geometry_columns',
        'raster_columns',
        'spatial_ref_sys',
        'raster_overviews',
    ]
    # Overridden from parent to include raster indices in retrieval.
    # Raster indices have pg_index.indkey value 0 because they are an
    # expression over the raster column through the ST_ConvexHull function.
    # So the default query has to be adapted to include raster indices.
    _get_indexes_query = """
        SELECT DISTINCT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
        FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
            pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
        LEFT JOIN pg_catalog.pg_type t ON t.oid = attr.atttypid
        WHERE
            c.oid = idx.indrelid
            AND idx.indexrelid = c2.oid
            AND attr.attrelid = c.oid
            AND (
                attr.attnum = idx.indkey[0] OR
                (t.typname LIKE 'raster' AND idx.indkey = '0')
            )
            AND attr.attnum > 0
            AND c.relname = %s"""
    def get_postgis_types(self):
        """
        Returns a dictionary with keys that are the PostgreSQL object
        identification integers for the PostGIS geometry and/or
        geography types (if supported).
        """
        field_types = [
            ('geometry', 'GeometryField'),
            # The value for the geography type is actually a tuple
            # to pass in the `geography=True` keyword to the field
            # definition.
            ('geography', ('GeometryField', {'geography': True})),
        ]
        postgis_types = {}
        # The OID integers associated with the geometry type may
        # be different across versions; hence, this is why we have
        # to query the PostgreSQL pg_type table corresponding to the
        # PostGIS custom data types.
        oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
        cursor = self.connection.cursor()
        try:
            for field_type in field_types:
                cursor.execute(oid_sql, (field_type[0],))
                # A typname can map to multiple OIDs; record them all.
                for result in cursor.fetchall():
                    postgis_types[result[0]] = field_type[1]
        finally:
            cursor.close()
        return postgis_types
    def get_field_type(self, data_type, description):
        """Resolve *data_type* to a Django field name, lazily adding the
        PostGIS custom types to ``data_types_reverse`` on first use."""
        if not self.postgis_types_reverse:
            # If the PostGIS types reverse dictionary is not populated, do so
            # now. In order to prevent unnecessary requests upon connection
            # initialization, the `data_types_reverse` dictionary is not updated
            # with the PostGIS custom types until introspection is actually
            # performed -- in other words, when this function is called.
            self.postgis_types_reverse = self.get_postgis_types()
            self.data_types_reverse.update(self.postgis_types_reverse)
        return super(PostGISIntrospection, self).get_field_type(data_type, description)
    def get_geometry_type(self, table_name, geo_col):
        """
        The geometry type OID used by PostGIS does not indicate the particular
        type of field that a geometry column is (e.g., whether it's a
        PointField or a PolygonField). Thus, this routine queries the PostGIS
        metadata tables to determine the geometry type,
        """
        cursor = self.connection.cursor()
        try:
            try:
                # First seeing if this geometry column is in the `geometry_columns`
                cursor.execute('SELECT "coord_dimension", "srid", "type" '
                               'FROM "geometry_columns" '
                               'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                               (table_name, geo_col))
                row = cursor.fetchone()
                if not row:
                    raise GeoIntrospectionError
            except GeoIntrospectionError:
                # Fall back to `geography_columns` before giving up.
                cursor.execute('SELECT "coord_dimension", "srid", "type" '
                               'FROM "geography_columns" '
                               'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
                               (table_name, geo_col))
                row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
                                (table_name, geo_col))
            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django
            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            # Only record srid/dim when they differ from GeometryField's
            # defaults (srid=4326, dim=2).
            if srid != 4326:
                field_params['srid'] = srid
            if dim != 2:
                field_params['dim'] = dim
        finally:
            cursor.close()
        return field_type, field_params
|
SaFi2266/odoo-rtl | refs/heads/8.0 | report_rtl/models/__init__.py | 2 | # -*- coding: utf-8 -*-
##############################################################################
#
# Odoo RTL support
# Copyright (C) 2014 Mohammed Barsi.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report |
jasonzhong/linux | refs/heads/master | tools/perf/scripts/python/sctop.py | 1996 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
# Accept [comm] [interval], a lone [interval], or a lone [comm]; a single
# argument counts as the interval only if it parses as an int.
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
# Auto-vivifying dict (from perf's Core helpers) of per-syscall hit counts.
syscalls = autodict()
def trace_begin():
    # Called once by perf when tracing starts: spawn the background thread
    # that periodically prints and clears the syscall totals.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    common_callchain, id, args):
    # Tally one sys_enter event, optionally filtered to a single comm.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # autodict hands back a fresh non-numeric node for an unseen id;
        # the failing += tells us to initialize the counter instead.
        syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Compatibility entry point for the older tracepoint signature that has
    # no callchain argument; delegate to the raw handler.
    # Fix: bare **locals() omits common_callchain, which
    # raw_syscalls__sys_enter requires, so the forwarding call raised
    # TypeError on every event.  Supply an explicit None for it.
    raw_syscalls__sys_enter(common_callchain=None, **locals())
def print_syscall_totals(interval):
    # Runs forever on a background thread (Python 2 only: uses the ``thread``
    # module and py2 print statements): every *interval* seconds, clear the
    # terminal, print the current per-syscall counts sorted by count
    # descending, then reset the tallies.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
        # py2-only tuple-unpacking lambda; sorts by (count, id) descending.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
|
pgoeser/gnuradio | refs/heads/master | grc/python/Constants.py | 6 | """
Copyright 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
import stat
from gnuradio import gr
_gr_prefs = gr.prefs()
# Python executable for generated flow graphs; the PYTHONW environment
# variable overrides the GNU Radio config entry.
PYEXEC = os.environ.get('PYTHONW', _gr_prefs.get_string('grc', 'pythonw', ''))
# Setup paths: block definitions are searched across the env var, the
# local/global config entries, and the user's hier-blocks library.
PATH_SEP = ':'
DOCS_DIR = os.environ.get('GR_DOC_DIR', _gr_prefs.get_string('grc', 'doc_dir', ''))
HIER_BLOCKS_LIB_DIR = os.path.join(os.path.expanduser('~'), '.grc_gnuradio')
BLOCKS_DIRS = filter( #filter blank strings
    lambda x: x, PATH_SEP.join([
        os.environ.get('GRC_BLOCKS_PATH', ''),
        _gr_prefs.get_string('grc', 'local_blocks_path', ''),
        _gr_prefs.get_string('grc', 'global_blocks_path', ''),
    ]).split(PATH_SEP),
) + [HIER_BLOCKS_LIB_DIR]
# File creation modes: top blocks are world-readable and executable,
# hier blocks are data files (no execute bit).
TOP_BLOCK_FILE_MODE = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH
HIER_BLOCK_FILE_MODE = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH
# Data files shipped next to this module.
DATA_DIR = os.path.dirname(__file__)
FLOW_GRAPH_TEMPLATE = os.path.join(DATA_DIR, 'flow_graph.tmpl')
BLOCK_DTD = os.path.join(DATA_DIR, 'block.dtd')
DEFAULT_FLOW_GRAPH = os.path.join(DATA_DIR, 'default_flow_graph.grc')
# Coloring: hex color specs keyed by port data type.
COMPLEX_COLOR_SPEC = '#3399FF'
FLOAT_COLOR_SPEC = '#FF8C69'
INT_COLOR_SPEC = '#00FF99'
SHORT_COLOR_SPEC = '#FFFF66'
BYTE_COLOR_SPEC = '#FF66FF'
COMPLEX_VECTOR_COLOR_SPEC = '#3399AA'
FLOAT_VECTOR_COLOR_SPEC = '#CC8C69'
INT_VECTOR_COLOR_SPEC = '#00CC99'
SHORT_VECTOR_COLOR_SPEC = '#CCCC33'
BYTE_VECTOR_COLOR_SPEC = '#CC66CC'
ID_COLOR_SPEC = '#DDDDDD'
WILDCARD_COLOR_SPEC = '#FFFFFF'
MSG_COLOR_SPEC = '#777777'
|
joshimio/blog | refs/heads/master | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/terminal.py | 363 | # -*- coding: utf-8 -*-
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
Token: ('', ''),
Whitespace: ('lightgray', 'darkgray'),
Comment: ('lightgray', 'darkgray'),
Comment.Preproc: ('teal', 'turquoise'),
Keyword: ('darkblue', 'blue'),
Keyword.Type: ('teal', 'turquoise'),
Operator.Word: ('purple', 'fuchsia'),
Name.Builtin: ('teal', 'turquoise'),
Name.Function: ('darkgreen', 'green'),
Name.Namespace: ('_teal_', '_turquoise_'),
Name.Class: ('_darkgreen_', '_green_'),
Name.Exception: ('teal', 'turquoise'),
Name.Decorator: ('darkgray', 'lightgray'),
Name.Variable: ('darkred', 'red'),
Name.Constant: ('darkred', 'red'),
Name.Attribute: ('teal', 'turquoise'),
Name.Tag: ('blue', 'blue'),
String: ('brown', 'brown'),
Number: ('darkblue', 'blue'),
Generic.Deleted: ('red', 'red'),
Generic.Inserted: ('darkgreen', 'green'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*purple*', '*fuchsia*'),
Generic.Error: ('red', 'red'),
Error: ('_red_', '_red_'),
}
class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.
    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.
    Options accepted:
    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).
    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # darkbg selects index 1 (the dark-background column) of each
        # (lightbg, darkbg) color pair in the scheme.
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
    def format(self, tokensource, outfile):
        # hack: if the output is a terminal and has an encoding set,
        # use that to avoid unicode encode problems (Python 2 only)
        if not self.encoding and hasattr(outfile, "encoding") and \
           hasattr(outfile, "isatty") and outfile.isatty() and \
           sys.version_info < (3,):
            self.encoding = outfile.encoding
        return Formatter.format(self, tokensource, outfile)
    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            # Walk up the token hierarchy until a mapped ancestor is found;
            # the root Token maps to ('', ''), so the loop terminates.
            color = self.colorscheme.get(ttype)
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                color = color[self.darkbg]
                spl = value.split('\n')
                # Re-emit the escape sequence on every line so pagers that
                # reset attributes at newlines still show color.
                for line in spl[:-1]:
                    if line:
                        outfile.write(ansiformat(color, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write(ansiformat(color, spl[-1]))
            else:
                outfile.write(value)
|
sgraham/nope | refs/heads/master | third_party/skia/tools/test_gpuveto.py | 142 | #!/usr/bin/env python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to test out suitableForGpuRasterization (via gpuveto)"""
import argparse
import glob
import os
import re
import subprocess
import sys
# Set the PYTHONPATH to include the tools directory.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import find_run_binary
def list_files(dir_or_file):
    """Returns a list of all the files from the provided argument

    @param dir_or_file: either a directory or skp file
    @returns a list containing the files in the directory or a single file
    """
    collected = []
    for globbedpath in glob.iglob(dir_or_file):  # useful on win32
        if os.path.isfile(globbedpath):
            collected.append(globbedpath)
        elif os.path.isdir(globbedpath):
            # Only direct children that are regular files; no recursion.
            for child in os.listdir(globbedpath):
                candidate = os.path.join(globbedpath, child)
                if os.path.isfile(candidate):
                    collected.append(candidate)
    return collected
def execute_program(args):
    """Executes a process and waits for it to complete.

    @param args: is passed into subprocess.Popen().
    @returns a tuple of the process output (returncode, output)
    """
    process = subprocess.Popen(args, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    # communicate() waits for exit and captures both streams merged.
    captured, _ = process.communicate()
    return (process.returncode, captured)
class GpuVeto(object):
    """Compares gpuveto's suitability verdict against measured raster vs.
    GPU times for each SKP, accumulating confusion-matrix tallies."""
    def __init__(self):
        self.bench_pictures = find_run_binary.find_path_to_program(
            'bench_pictures')
        sys.stdout.write('Running: %s\n' % (self.bench_pictures))
        self.gpuveto = find_run_binary.find_path_to_program('gpuveto')
        assert os.path.isfile(self.bench_pictures)
        assert os.path.isfile(self.gpuveto)
        # Confusion-matrix counters updated by process_skp().
        self.indeterminate = 0
        self.truePositives = 0
        self.falsePositives = 0
        self.trueNegatives = 0
        self.falseNegatives = 0
    def process_skps(self, dir_or_file):
        # Evaluate every SKP, then print the aggregate tallies.
        for skp in enumerate(dir_or_file):
            self.process_skp(skp[1])
        sys.stdout.write('TP %d FP %d TN %d FN %d IND %d\n' % (self.truePositives,
                                                               self.falsePositives,
                                                               self.trueNegatives,
                                                               self.falseNegatives,
                                                               self.indeterminate))
    def process_skp(self, skp_file):
        # Classify one SKP; any tool failure silently skips the file.
        assert os.path.isfile(skp_file)
        #print skp_file
        # run gpuveto on the skp
        args = [self.gpuveto, '-r', skp_file]
        returncode, output = execute_program(args)
        if (returncode != 0):
            return
        # NOTE: check 'unsuitable' first — 'suitable' is a substring of it.
        if ('unsuitable' in output):
            suitable = False
        else:
            assert 'suitable' in output
            suitable = True
        # run raster config
        args = [self.bench_pictures, '-r', skp_file,
                '--repeat', '20',
                '--timers', 'w',
                '--config', '8888']
        returncode, output = execute_program(args)
        if (returncode != 0):
            return
        # Expect exactly one wall-time float in the benchmark output.
        matches = re.findall('[\d]+\.[\d]+', output)
        if len(matches) != 1:
            return
        rasterTime = float(matches[0])
        # run gpu config
        args2 = [self.bench_pictures, '-r', skp_file,
                 '--repeat', '20',
                 '--timers', 'w',
                 '--config', 'gpu']
        returncode, output = execute_program(args2)
        if (returncode != 0):
            return
        matches = re.findall('[\d]+\.[\d]+', output)
        if len(matches) != 1:
            return
        gpuTime = float(matches[0])
        # happens if page is too big it will not render
        if 0 == gpuTime:
            return
        # Times within +/-5% of each other are treated as a tie.
        tolerance = 0.05
        tol_range = tolerance * gpuTime
        if rasterTime > gpuTime - tol_range and rasterTime < gpuTime + tol_range:
            result = "NONE"
            self.indeterminate += 1
        elif suitable:
            if gpuTime < rasterTime:
                self.truePositives += 1
                result = "TP"
            else:
                self.falsePositives += 1
                result = "FP"
        else:
            if gpuTime < rasterTime:
                self.falseNegatives += 1
                result = "FN"
            else:
                self.trueNegatives += 1
                result = "TN"
        sys.stdout.write('%s: gpuveto: %d raster %.2f gpu: %.2f Result: %s\n' % (
            skp_file, suitable, rasterTime, gpuTime, result))
def main(main_argv):
    """Parse --skp_path and run the gpuveto comparison over the SKP(s)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--skp_path',
                        help='Path to the SKP(s). Can either be a directory ' \
                        'containing SKPs or a single SKP.',
                        required=True)
    # NOTE(review): main_argv is accepted but unused; parse_args() reads
    # sys.argv directly — confirm this is intended.
    args = parser.parse_args()
    GpuVeto().process_skps(list_files(args.skp_path))
if __name__ == '__main__':
sys.exit(main(sys.argv[1]))
|
adamreis/nyc-jazz | refs/heads/master | src/lib/wtforms/compat.py | 119 | import sys
# Version-dependent aliases so the rest of the package can treat text and
# dict iteration uniformly on Python 2 and 3.
if sys.version_info[0] >= 3:
    text_type = str
    string_types = str,
    iteritems = lambda o: o.items()
    itervalues = lambda o: o.values()
    izip = zip
else:
    # Python 2: unicode/basestring exist; dicts expose iter* methods.
    text_type = unicode
    string_types = basestring,
    iteritems = lambda o: o.iteritems()
    itervalues = lambda o: o.itervalues()
    from itertools import izip
def with_metaclass(meta, base=object):
    """Create an intermediate base class whose metaclass is *meta*.

    Subclassing the returned class gives the subclass metaclass *meta*
    without py2/py3 metaclass syntax differences.
    """
    name, bases, namespace = "NewBase", (base,), {}
    return meta(name, bases, namespace)
|
SravanthiSinha/edx-platform | refs/heads/master | common/test/acceptance/tests/lms/test_lms_problems.py | 11 | # -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for problems in the LMS
See also old lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
from textwrap import dedent
from ..helpers import UniqueCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.problem import ProblemPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import EventsTestMixin
class ProblemsTest(UniqueCourseTest):
    """
    Base class for tests of problems in the LMS.

    Subclasses must override get_problem() to supply the problem XBlock
    installed into the course fixture.
    """
    USERNAME = "joe_student"
    EMAIL = "joe@example.com"
    def setUp(self):
        super(ProblemsTest, self).setUp()
        self.xqueue_grade_response = None
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        # Install a course with a hierarchy and problems
        course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # The subclass-provided problem goes in one chapter/subsection.
        problem = self.get_problem()
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(problem)
            )
        ).install()
        # Auto-auth register for the course.
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
                     course_id=self.course_id, staff=False).visit()
    def get_problem(self):
        """ Subclasses should override this to complete the fixture """
        raise NotImplementedError()
class ProblemClarificationTest(ProblemsTest):
    """
    Tests the <clarification> element that can be used in problem XML.
    """
    def get_problem(self):
        """
        Create a problem with a <clarification>
        """
        xml = dedent("""
            <problem markdown="null">
                <text>
                    <p>
                        Given the data in Table 7 <clarification>Table 7: "Example PV Installation Costs",
                        Page 171 of Roberts textbook</clarification>, compute the ROI
                        <clarification>Return on Investment <strong>(per year)</strong></clarification> over 20 years.
                    </p>
                    <numericalresponse answer="6.5">
                        <textline label="Enter the annual ROI" trailing_text="%" />
                    </numericalresponse>
                </text>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'TOOLTIP TEST PROBLEM', data=xml)
    def test_clarification(self):
        """
        Test that we can see the <clarification> tooltips.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name, 'TOOLTIP TEST PROBLEM')
        problem_page.click_clarification(0)
        self.assertIn('"Example PV Installation Costs"', problem_page.visible_tooltip_text)
        problem_page.click_clarification(1)
        tooltip_text = problem_page.visible_tooltip_text
        self.assertIn('Return on Investment', tooltip_text)
        self.assertIn('per year', tooltip_text)
        # HTML tags inside the clarification should be rendered, not shown
        # literally in the tooltip text.
        self.assertNotIn('strong', tooltip_text)
class ProblemExtendedHintTest(ProblemsTest, EventsTestMixin):
    """
    Test that extended hint features plumb through to the page html and tracking log.
    """
    def get_problem(self):
        """
        Problem with extended hint features.
        """
        xml = dedent("""
            <problem>
            <p>question text</p>
            <stringresponse answer="A">
                <stringequalhint answer="B">hint</stringequalhint>
                <textline size="20"/>
            </stringresponse>
            <demandhint>
              <hint>demand-hint1</hint>
              <hint>demand-hint2</hint>
            </demandhint>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'TITLE', data=xml)
    def test_check_hint(self):
        """
        Test clicking Check shows the extended hint in the problem message.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_text[0], u'question text')
        # 'B' matches the stringequalhint answer, triggering the hint.
        problem_page.fill_answer('B')
        problem_page.click_check()
        self.assertEqual(problem_page.message_text, u'Incorrect: hint')
        # Check for corresponding tracking event
        actual_events = self.wait_for_events(
            event_filter={'event_type': 'edx.problem.hint.feedback_displayed'},
            number_of_matches=1
        )
        self.assert_events_match(
            [{'event': {'hint_label': u'Incorrect',
                        'trigger_type': 'single',
                        'student_answer': [u'B'],
                        'correctness': False,
                        'question_type': 'stringresponse',
                        'hints': [{'text': 'hint'}]}}],
            actual_events)
    def test_demand_hint(self):
        """
        Test clicking hint button shows the demand hint in its div.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        # The hint button rotates through multiple hints
        problem_page.click_hint()
        self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1')
        problem_page.click_hint()
        self.assertEqual(problem_page.hint_text, u'Hint (2 of 2): demand-hint2')
        # A third click wraps back to the first hint.
        problem_page.click_hint()
        self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1')
        # Check corresponding tracking events
        actual_events = self.wait_for_events(
            event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'},
            number_of_matches=3
        )
        self.assert_events_match(
            [
                {'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}},
                {'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'demand-hint2'}},
                {'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}}
            ],
            actual_events)
class ProblemWithMathjax(ProblemsTest):
    """
    Tests the <MathJax> used in problem
    """
    def get_problem(self):
        """
        Create a problem with a <MathJax> in body and hint
        """
        xml = dedent(r"""
            <problem>
                <p>Check mathjax has rendered [mathjax]E=mc^2[/mathjax]</p>
                <multiplechoiceresponse>
                    <choicegroup label="Answer this?" type="MultipleChoice">
                        <choice correct="true">Choice1 <choicehint>Correct choice message</choicehint></choice>
                        <choice correct="false">Choice2<choicehint>Wrong choice message</choicehint></choice>
                    </choicegroup>
                </multiplechoiceresponse>
                <demandhint>
                        <hint>mathjax should work1 \(E=mc^2\) </hint>
                        <hint>mathjax should work2 [mathjax]E=mc^2[/mathjax]</hint>
                </demandhint>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'MATHJAX TEST PROBLEM', data=xml)
    def test_mathjax_in_hint(self):
        """
        Test that MathJax have successfully rendered in problem hint
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name, "MATHJAX TEST PROBLEM")
        # Verify Mathjax have been rendered
        self.assertTrue(problem_page.mathjax_rendered_in_problem, "MathJax did not rendered in body")
        # The hint button rotates through multiple hints
        problem_page.click_hint()
        self.assertIn("Hint (1 of 2): mathjax should work1", problem_page.hint_text)
        self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not rendered in problem hint")
        # Rotate the hint and check the problem hint
        problem_page.click_hint()
        self.assertIn("Hint (2 of 2): mathjax should work2", problem_page.hint_text)
        self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not rendered in problem hint")
class ProblemPartialCredit(ProblemsTest):
    """
    Makes sure that the partial credit is appearing properly.
    """
    def get_problem(self):
        """
        Create a problem with partial credit.
        """
        xml = dedent("""
            <problem>
                <p>The answer is 1. Partial credit for -1.</p>
                <numericalresponse answer="1" partial_credit="list">
                    <formulaequationinput label="How many miles away from Earth is the sun? Use scientific notation to answer." />
                    <responseparam type="tolerance" default="0.01" />
                    <responseparam partial_answers="-1" />
                </numericalresponse>
            </problem>
        """)
        return XBlockFixtureDesc('problem', 'PARTIAL CREDIT TEST PROBLEM', data=xml)
    def test_partial_credit(self):
        """
        Test that we can see the partial credit value and feedback.
        """
        self.courseware_page.visit()
        problem_page = ProblemPage(self.browser)
        self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM')
        # -1 is listed in partial_answers, so it earns partial credit.
        problem_page.fill_answer_numerical('-1')
        problem_page.click_check()
        self.assertTrue(problem_page.simpleprob_is_partially_correct())
|
krzychb/rtd-test-bed | refs/heads/master | components/nghttp/nghttp2/python/wsgi.py | 20 | # nghttp2 - HTTP/2.0 C Library
# Copyright (c) 2013 Tatsuhiro Tsujikawa
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import io
import sys
from urllib.parse import urlparse
import nghttp2
def _dance_decode(b):
# TODO faster than looping through and mod-128'ing all unicode points?
return b.decode('utf-8').encode('latin1').decode('latin1')
class WSGIContainer(nghttp2.BaseRequestHandler):
    """nghttp2 request handler that adapts an HTTP/2 request into a WSGI
    (PEP 3333) environ, runs the wrapped app, and sends the response."""
    # Environ keys that are identical for every request.
    _BASE_ENVIRON = {
        'wsgi.version': (1,0),
        'wsgi.url_scheme': 'http', # FIXME
        'wsgi.multithread': True,  # TODO I think?
        'wsgi.multiprocess': False,  # TODO no idea
        'wsgi.run_once': True,  # TODO now I'm just guessing
        'wsgi.errors': sys.stderr,  # TODO will work for testing - is this even used by any frameworks?
    }
    def __init__(self, app, *args, **kwargs):
        super(WSGIContainer, self).__init__(*args, **kwargs)
        self.app = app
        # Request body chunks accumulated by on_data().
        self.chunks = []
    def on_data(self, chunk):
        # Buffer each incoming DATA frame until the request is complete.
        self.chunks.append(chunk)
    def on_request_done(self):
        # Build the WSGI environ from the buffered request, run the app,
        # and send the (fully buffered) response.
        environ = WSGIContainer._BASE_ENVIRON.copy()
        parsed = urlparse(self.path)
        environ['wsgi.input'] = io.BytesIO(b''.join(self.chunks))
        # HTTP headers become HTTP_* environ keys, CGI-style.
        for name, value in self.headers:
            mangled_name = b'HTTP_' + name.replace(b'-', b'_').upper()
            environ[_dance_decode(mangled_name)] = _dance_decode(value)
        environ.update(dict(
            REQUEST_METHOD=_dance_decode(self.method),
            # TODO SCRIPT_NAME? like APPLICATION_ROOT in Flask...
            PATH_INFO=_dance_decode(parsed.path),
            QUERY_STRING=_dance_decode(parsed.query),
            CONTENT_TYPE=environ.get('HTTP_CONTENT_TYPE', ''),
            CONTENT_LENGTH=environ.get('HTTP_CONTENT_LENGTH', ''),
            SERVER_NAME=_dance_decode(self.host),
            SERVER_PORT='',  # FIXME probably requires changes in nghttp2
            SERVER_PROTOCOL='HTTP/2.0',
        ))
        # Single-element lists so start_response (a closure) can rebind them.
        response_status = [None]
        response_headers = [None]
        response_chunks = []
        def start_response(status, headers, exc_info=None):
            if response_status[0] is not None:
                raise AssertionError('Response already started')
            exc_info = None  # avoid dangling circular ref - TODO is this necessary? borrowed from snippet in WSGI spec
            response_status[0] = status
            response_headers[0] = headers
            # TODO handle exc_info
            return lambda chunk: response_chunks.append(chunk)
        # TODO technically, this breaks the WSGI spec by buffering the status,
        # headers, and body until all are completely output from the app before
        # writing the response, but it looks like nghttp2 doesn't support any
        # other way for now
        # TODO disallow yielding/returning before start_response is called
        response_chunks.extend(self.app(environ, start_response))
        response_body = b''.join(response_chunks)
        # TODO automatically set content-length if not provided
        self.send_response(
            status=response_status[0],
            headers=response_headers[0],
            body=response_body,
        )
def wsgi_app(app):
    """Return a handler factory binding *app* into WSGIContainer instances."""
    def factory(*args, **kwargs):
        return WSGIContainer(app, *args, **kwargs)
    return factory
if __name__ == '__main__':
import ssl
from werkzeug.testapp import test_app
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_ctx.options = ssl.OP_ALL | ssl.OP_NO_SSLv2
ssl_ctx.load_cert_chain('server.crt', 'server.key')
server = nghttp2.HTTP2Server(('127.0.0.1', 8443), wsgi_app(test_app),
ssl=ssl_ctx)
server.serve_forever()
|
indictranstech/focal-erpnext | refs/heads/develop | erpnext/manufacturing/doctype/production_order/test_production_order.py | 5 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
class TestProductionOrder(unittest.TestCase):
    """Integration tests for the Production Order doctype: planned quantity
    tracking and over-production protection."""
    def test_planned_qty(self):
        # Submit a production order, move raw materials Stores -> WIP -> FG,
        # and verify produced_qty and the Bin's planned_qty delta.
        set_perpetual_inventory(0)
        planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
        pro_doc = frappe.copy_doc(test_records[0])
        pro_doc.insert()
        pro_doc.submit()
        # add raw materials to stores
        test_stock_entry.make_stock_entry("_Test Item", None, "Stores - _TC", 100, 100)
        test_stock_entry.make_stock_entry("_Test Item Home Desktop 100", None, "Stores - _TC", 100, 100)
        # from stores to wip
        s = frappe.get_doc(make_stock_entry(pro_doc.name, "Material Transfer", 4))
        for d in s.get("mtn_details"):
            d.s_warehouse = "Stores - _TC"
        s.insert()
        s.submit()
        # from wip to fg
        s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
        s.insert()
        s.submit()
        self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
            "produced_qty"), 4)
        planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
        # 10 ordered minus 4 produced leaves a planned delta of 6 —
        # NOTE(review): depends on the qty in test_records[0]; confirm.
        self.assertEqual(planned1 - planned0, 6)
        # Returned so test_over_production can continue with the same order.
        return pro_doc
    def test_over_production(self):
        # Manufacturing more than the remaining ordered qty must raise.
        from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
        pro_doc = self.test_planned_qty()
        test_stock_entry.make_stock_entry("_Test Item", None, "_Test Warehouse - _TC", 100, 100)
        test_stock_entry.make_stock_entry("_Test Item Home Desktop 100", None, "_Test Warehouse - _TC", 100, 100)
        s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
        s.insert()
        self.assertRaises(StockOverProductionError, s.submit)
test_records = frappe.get_test_records('Production Order')
|
saukrIppl/seahub | refs/heads/master | thirdpart/Django-1.8.10-py2.7.egg/django/utils/datastructures.py | 87 | import copy
import warnings
from collections import OrderedDict
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
class MergeDict(object):
    """
    A simple class for creating new "virtual" dictionaries that actually look
    up values in more than one dictionary, passed in the constructor.

    If a key appears in more than one of the given dictionaries, only the
    first occurrence will be used.
    """
    def __init__(self, *dicts):
        # Deprecated; emits a RemovedInDjango19Warning on construction.
        warnings.warn('`MergeDict` is deprecated, use `dict.update()` '
                      'instead.', RemovedInDjango19Warning, 2)
        self.dicts = dicts

    def __bool__(self):
        # Truthy when at least one underlying dict is truthy (non-empty).
        return any(self.dicts)

    def __nonzero__(self):
        # Python 2 truth protocol; delegates to __bool__.
        return type(self).__bool__(self)

    def __getitem__(self, key):
        # First dict containing the key wins.
        for dict_ in self.dicts:
            try:
                return dict_[key]
            except KeyError:
                pass
        raise KeyError(key)

    def __copy__(self):
        # Shallow: the underlying dicts are shared with the copy, not cloned.
        return self.__class__(*self.dicts)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    # This is used by MergeDicts of MultiValueDicts.
    def getlist(self, key):
        for dict_ in self.dicts:
            if key in dict_:
                return dict_.getlist(key)
        return []

    def _iteritems(self):
        # Yield items dict-by-dict, skipping keys already seen in an
        # earlier dict (first occurrence wins, matching __getitem__).
        seen = set()
        for dict_ in self.dicts:
            for item in six.iteritems(dict_):
                k = item[0]
                if k in seen:
                    continue
                seen.add(k)
                yield item

    def _iterkeys(self):
        for k, v in self._iteritems():
            yield k

    def _itervalues(self):
        for k, v in self._iteritems():
            yield v

    # Expose the lazy iterators under the naming convention of the running
    # Python: items()/keys()/values() return iterators on 3.x, while 2.x
    # gets iter* names plus list-returning items()/keys()/values().
    if six.PY3:
        items = _iteritems
        keys = _iterkeys
        values = _itervalues
    else:
        iteritems = _iteritems
        iterkeys = _iterkeys
        itervalues = _itervalues

        def items(self):
            return list(self.iteritems())

        def keys(self):
            return list(self.iterkeys())

        def values(self):
            return list(self.itervalues())

    def has_key(self, key):
        for dict_ in self.dicts:
            if key in dict_:
                return True
        return False

    __contains__ = has_key

    __iter__ = _iterkeys

    def copy(self):
        """Returns a copy of this object."""
        return self.__copy__()

    def __str__(self):
        '''
        Returns something like

            "{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"

        instead of the generic "<object meta-data>" inherited from object.
        '''
        return str(dict(self.items()))

    def __repr__(self):
        '''
        Returns something like

            MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})

        instead of generic "<object meta-data>" inherited from object.
        '''
        dictreprs = ', '.join(repr(d) for d in self.dicts)
        return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.

    Deprecated in favor of collections.OrderedDict; emits a
    RemovedInDjango19Warning on construction.
    """
    def __new__(cls, *args, **kwargs):
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        # keyOrder mirrors insertion order. Created in __new__ so it exists
        # even when __init__ is bypassed (e.g. during copying/unpickling).
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        warnings.warn(
            "SortedDict is deprecated and will be removed in Django 1.9.",
            RemovedInDjango19Warning, stacklevel=2
        )
        if data is None or isinstance(data, dict):
            data = data or []
            super(SortedDict, self).__init__(data)
            self.keyOrder = list(data) if data else []
        else:
            # data is a sequence of (key, value) pairs.
            super(SortedDict, self).__init__()
            super_set = super(SortedDict, self).__setitem__
            for key, value in data:
                # Take the ordering from first key
                if key not in self:
                    self.keyOrder.append(key)
                # But override with last value in data (dict() does this)
                super_set(key, value)

    def __deepcopy__(self, memo):
        return self.__class__([(key, copy.deepcopy(value, memo))
                               for key, value in self.items()])

    def __copy__(self):
        # The Python's default copy implementation will alter the state
        # of self. The reason for this seems complex but is likely related to
        # subclassing dict.
        return self.copy()

    def __setitem__(self, key, value):
        if key not in self:
            self.keyOrder.append(key)
        super(SortedDict, self).__setitem__(key, value)

    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        return iter(self.keyOrder)

    def __reversed__(self):
        return reversed(self.keyOrder)

    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(SortedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def _iteritems(self):
        for key in self.keyOrder:
            yield key, self[key]

    def _iterkeys(self):
        for key in self.keyOrder:
            yield key

    def _itervalues(self):
        for key in self.keyOrder:
            yield self[key]

    # Same Py2/Py3 naming split as MergeDict above: iterator-returning
    # items()/keys()/values() on 3.x, iter* plus list variants on 2.x.
    if six.PY3:
        items = _iteritems
        keys = _iterkeys
        values = _itervalues
    else:
        iteritems = _iteritems
        iterkeys = _iterkeys
        itervalues = _itervalues

        def items(self):
            return [(k, self[k]) for k in self.keyOrder]

        def keys(self):
            return self.keyOrder[:]

        def values(self):
            return [self[k] for k in self.keyOrder]

    def update(self, dict_):
        for k, v in six.iteritems(dict_):
            self[k] = v

    def setdefault(self, key, default):
        if key not in self:
            self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)

    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        return self.__class__(self)

    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join('%r: %r' % (k, v) for k, v in six.iteritems(self))

    def clear(self):
        super(SortedDict, self).clear()
        self.keyOrder = []
class OrderedSet(object):
    """
    A set which keeps the ordering of the inserted items.
    Currently backs onto OrderedDict.
    """

    def __init__(self, iterable=None):
        # Members are stored as OrderedDict keys; every value is None.
        if iterable:
            self.dict = OrderedDict((member, None) for member in iterable)
        else:
            self.dict = OrderedDict([])

    def add(self, item):
        self.dict[item] = None

    def remove(self, item):
        # KeyError propagates for missing members, like set.remove().
        del self.dict[item]

    def discard(self, item):
        # Like set.discard(): removing a missing member is a no-op.
        try:
            self.remove(item)
        except KeyError:
            pass

    def __iter__(self):
        return iter(self.dict.keys())

    def __contains__(self, item):
        return item in self.dict

    def __bool__(self):
        return bool(self.dict)

    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)
class MultiValueDictKeyError(KeyError):
    """Raised by MultiValueDict.__getitem__ for missing keys; subclasses
    KeyError so existing ``except KeyError`` handlers still catch it."""
    pass
class MultiValueDict(dict):
    """
    A subclass of dictionary customized to handle multiple values for the
    same key.

    >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    >>> d['name']
    'Simon'
    >>> d.getlist('name')
    ['Adrian', 'Simon']
    >>> d.getlist('doesnotexist')
    []
    >>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
    ['Adrian', 'Simon']
    >>> d.get('lastname', 'nonexistent')
    'nonexistent'
    >>> d.setlist('lastname', ['Holovaty', 'Willison'])

    This class exists to solve the irritating problem raised by cgi.parse_qs,
    which returns a list for every key, even though most Web forms submit
    single name-value pairs.
    """
    # Internally every value is a list; the plain-dict API (d[key], get(),
    # items(), values()) exposes only the *last* element of each list.

    def __init__(self, key_to_list_mapping=()):
        super(MultiValueDict, self).__init__(key_to_list_mapping)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__,
                             super(MultiValueDict, self).__repr__())

    def __getitem__(self, key):
        """
        Returns the last data value for this key, or [] if it's an empty list;
        raises KeyError if not found.
        """
        try:
            list_ = super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            raise MultiValueDictKeyError(repr(key))
        try:
            return list_[-1]
        except IndexError:
            return []

    def __setitem__(self, key, value):
        # Plain assignment replaces any existing list with a one-element list.
        super(MultiValueDict, self).__setitem__(key, [value])

    def __copy__(self):
        # The per-key lists are sliced so the copy doesn't share them.
        return self.__class__([
            (k, v[:])
            for k, v in self.lists()
        ])

    def __deepcopy__(self, memo=None):
        if memo is None:
            memo = {}
        result = self.__class__()
        memo[id(self)] = result
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo),
                             copy.deepcopy(value, memo))
        return result

    def __getstate__(self):
        # Pickle the complete per-key lists under '_data' so that the
        # multi-value semantics survive a pickle round-trip.
        obj_dict = self.__dict__.copy()
        obj_dict['_data'] = {k: self.getlist(k) for k in self}
        return obj_dict

    def __setstate__(self, obj_dict):
        data = obj_dict.pop('_data', {})
        for k, v in data.items():
            self.setlist(k, v)
        self.__dict__.update(obj_dict)

    def get(self, key, default=None):
        """
        Returns the last data value for the passed key. If key doesn't exist
        or value is an empty list, then default is returned.
        """
        try:
            val = self[key]
        except KeyError:
            return default
        if val == []:
            return default
        return val

    def getlist(self, key, default=None):
        """
        Returns the list of values for the passed key. If key doesn't exist,
        then a default value is returned.
        """
        try:
            return super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            if default is None:
                return []
            return default

    def setlist(self, key, list_):
        # Stores the list as-is (unlike __setitem__, which wraps the value).
        super(MultiValueDict, self).__setitem__(key, list_)

    def setdefault(self, key, default=None):
        if key not in self:
            self[key] = default
            # Do not return default here because __setitem__() may store
            # another value -- QueryDict.__setitem__() does. Look it up.
        return self[key]

    def setlistdefault(self, key, default_list=None):
        if key not in self:
            if default_list is None:
                default_list = []
            self.setlist(key, default_list)
            # Do not return default_list here because setlist() may store
            # another value -- QueryDict.setlist() does. Look it up.
        return self.getlist(key)

    def appendlist(self, key, value):
        """Appends an item to the internal list associated with key."""
        self.setlistdefault(key).append(value)

    def _iteritems(self):
        """
        Yields (key, value) pairs, where value is the last item in the list
        associated with the key.
        """
        for key in self:
            yield key, self[key]

    def _iterlists(self):
        """Yields (key, list) pairs."""
        return six.iteritems(super(MultiValueDict, self))

    def _itervalues(self):
        """Yield the last value on every key list."""
        for key in self:
            yield self[key]

    # Py2/Py3 naming split (see MergeDict): iterator-returning names on 3.x,
    # iter* plus list-returning variants on 2.x.
    if six.PY3:
        items = _iteritems
        lists = _iterlists
        values = _itervalues
    else:
        iteritems = _iteritems
        iterlists = _iterlists
        itervalues = _itervalues

        def items(self):
            return list(self.iteritems())

        def lists(self):
            return list(self.iterlists())

        def values(self):
            return list(self.itervalues())

    def copy(self):
        """Returns a shallow copy of this object."""
        return copy.copy(self)

    def update(self, *args, **kwargs):
        """
        update() extends rather than replaces existing key lists.
        Also accepts keyword args.
        """
        if len(args) > 1:
            raise TypeError("update expected at most 1 arguments, got %d" % len(args))
        if args:
            other_dict = args[0]
            if isinstance(other_dict, MultiValueDict):
                for key, value_list in other_dict.lists():
                    self.setlistdefault(key).extend(value_list)
            else:
                try:
                    for key, value in other_dict.items():
                        self.setlistdefault(key).append(value)
                except TypeError:
                    raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
        for key, value in six.iteritems(kwargs):
            self.setlistdefault(key).append(value)

    def dict(self):
        """
        Returns current object as a dict with singular values.
        """
        return {key: self[key] for key in self}
class ImmutableList(tuple):
    """
    A tuple-like object that raises useful errors when it is asked to mutate.

    Example::

        >>> a = ImmutableList(range(5), warning="You cannot mutate this.")
        >>> a[3] = '4'
        Traceback (most recent call last):
            ...
        AttributeError: You cannot mutate this.
    """

    def __new__(cls, *args, **kwargs):
        # Strip our custom 'warning' kwarg before tuple sees the arguments.
        warning = kwargs.pop('warning', 'ImmutableList object is immutable.')
        instance = tuple.__new__(cls, *args, **kwargs)
        instance.warning = warning
        return instance

    def complain(self, *args, **kwargs):
        # The warning may itself be an exception instance to raise verbatim;
        # otherwise raise AttributeError carrying the warning message.
        if isinstance(self.warning, Exception):
            raise self.warning
        raise AttributeError(self.warning)

    # Every list mutation method is routed to complain().
    __delitem__ = complain
    __delslice__ = complain
    __iadd__ = complain
    __imul__ = complain
    __setitem__ = complain
    __setslice__ = complain
    append = complain
    extend = complain
    insert = complain
    pop = complain
    remove = complain
    sort = complain
    reverse = complain
class DictWrapper(dict):
    """
    Wraps accesses to a dictionary so that certain values (those starting with
    the specified prefix) are passed through a function before being returned.
    The prefix is removed before looking up the real value.

    Used by the SQL construction code to ensure that values are correctly
    quoted before being used.
    """

    def __init__(self, data, func, prefix):
        super(DictWrapper, self).__init__(data)
        self.func = func
        self.prefix = prefix

    def __getitem__(self, key):
        """
        Retrieve the real value after stripping the prefix string (if
        present). If the prefix was present, pass the value through self.func
        before returning; otherwise return the raw value.
        """
        transform = key.startswith(self.prefix)
        if transform:
            key = key[len(self.prefix):]
        value = super(DictWrapper, self).__getitem__(key)
        return self.func(value) if transform else value
|
boh1996/LectioAPI | refs/heads/master | scrapers/groups.py | 1 | #!/usr/bin/python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import functions
def groups(config):
    """Scrape the list of groups (built-in and user-created) for a school branch.

    Args:
        config: dict containing at least "school_id" and "branch_id".

    Returns:
        On success: {"status": "ok", "groups": [...], "term": {...}}.
        On failure (expected tables missing): {"status": False, "error": ...}.
        Note the historical asymmetry ("ok" vs False) is kept for callers.
    """
    url = urls.groups_list.replace("{{SCHOOL_ID}}", str(config["school_id"])).replace("{{BRANCH_ID}}", str(config["branch_id"]))
    response = proxy.session.get(url)
    soup = Soup(response.text)

    # Both tables must be present: built-in groups and the user's own groups.
    if soup.find("table", attrs={"id": "m_Content_contenttbl"}) is None or soup.find("table", attrs={"id": "m_Content_contenttbl2"}) is None:
        return {
            "status": False,
            "error": "Data not found"
        }

    # Schedule links embed the school id, element type and group id.
    idProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/SkemaNy.aspx\?type=(?P<type_name>.*)&holdelementid=(?P<group_id>.*)")

    # Previously the two tables were parsed by two copy-pasted loops; the
    # shared logic now lives in _parse_group_rows.
    groupsList = []
    groupsList.extend(_parse_group_rows(
        config, soup.find("table", attrs={"id": "m_Content_contenttbl"}).findAll("a"), idProg, "build_in"))
    groupsList.extend(_parse_group_rows(
        config, soup.find("table", attrs={"id": "m_Content_contenttbl2"}).findAll("a"), idProg, "own_group"))

    # Currently selected school term from the term <select>.
    term_option = soup.find("select", attrs={"id": "m_ChooseTerm_term"}).select('option[selected="selected"]')[0]

    return {
        "status": "ok",
        "groups": groupsList,
        "term": {
            "value": term_option["value"],
            "years_string": term_option.text
        }
    }


def _parse_group_rows(config, rows, idProg, group_type):
    """Map anchor rows from one group table to group dicts.

    Args:
        config: same config dict passed to groups().
        rows: <a> tags from the table.
        idProg: compiled regex extracting type_name/group_id from the href.
        group_type: "build_in" or "own_group", recorded on each result.
    """
    parsed = []
    for row in rows:
        idGroups = idProg.match(row["href"])
        group_id = idGroups.group("group_id") if "group_id" in idGroups.groupdict() else ""
        parsed.append({
            "school_id": config["school_id"],
            "branch_id": config["branch_id"],
            "name": unicode(row.text),
            "group_id": group_id,
            "type": idGroups.group("type_name") if "type_name" in idGroups.groupdict() else "",
            "group_type": group_type,
            "context_card_id": ("HE" + group_id) if "group_id" in idGroups.groupdict() else ""
        })
    return parsed
Limags/MissionPlanner | refs/heads/master | Lib/HTMLParser.py | 50 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors."""

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        # position is a (lineno, offset) pair; either half may be None.
        self.lineno, self.offset = position

    def __str__(self):
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # offset is zero-based internally; report a one-based column.
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
class HTMLParser(markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag(). The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks). Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument. Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

    # Content of these elements is treated as CDATA: only the matching
    # end tag terminates it (see set_cdata_mode / interesting_cdata).
    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self):
        """Initialize and reset this instance."""
        self.reset()

    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        # All parse failures funnel through here as HTMLParseError.
        raise HTMLParseError(message, self.getpos())

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self):
        # Inside <script>/<style>: only end tags are interesting.
        self.interesting = interesting_cdata

    def clear_cdata_mode(self):
        self.interesting = interesting_normal

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i)  # < or &
            if match:
                j = match.start()
            else:
                j = n
            # Everything up to the next markup character is plain data.
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i):  # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    k = self.parse_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # Construct incomplete: wait for more data (or fail at EOF).
                    if end:
                        self.error("EOF in middle of construct")
                    break
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]:  # bail by consuming &#
                        self.handle_data(rawdata[0:2])
                        i = self.updatepos(i, 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        self.error("EOF in middle of entity or char ref")
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        # Keep the unprocessed tail for the next feed() call.
        self.rawdata = rawdata[i:]

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2)  # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i+1:k].lower()

        while k < endpos:
            m = attrfind.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip matching quotes before unescaping entities.
                attrvalue = attrvalue[1:-1]
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            self.error("junk characters in start tag: %r"
                       % (rawdata[k:endpos][:20],))
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode()
        return endpos

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        m = locatestarttagend.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                self.updatepos(i, j + 1)
                self.error("malformed empty start tag")
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            self.updatepos(i, j)
            self.error("malformed start tag")
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1)  # >
        if not match:
            return -1
        j = match.end()
        match = endtagfind.match(rawdata, i)  # </ + tag + >
        if not match:
            self.error("bad end tag: %r" % (rawdata[i:j],))
        tag = match.group(1)
        self.handle_endtag(tag.lower())
        # Any end tag leaves CDATA mode (the CDATA element is closed).
        self.clear_cdata_mode()
        return j

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        self.error("unknown declaration: %r" % (data,))

    # Internal -- helper to remove special character quoting
    # Lazily filled with the HTML entity table on first use (see unescape).
    entitydefs = None

    def unescape(self, s):
        if '&' not in s:
            return s
        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        c = int(s[1:], 16)
                    else:
                        c = int(s)
                    return unichr(c)
            except ValueError:
                return '&#'+s+';'
            else:
                # Cannot use name2codepoint directly, because HTMLParser supports apos,
                # which is not part of HTML 4
                import htmlentitydefs
                if HTMLParser.entitydefs is None:
                    entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
                    for k, v in htmlentitydefs.name2codepoint.iteritems():
                        entitydefs[k] = unichr(v)
                try:
                    return self.entitydefs[s]
                except KeyError:
                    return '&'+s+';'

        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
|
levythu/swift | refs/heads/master | test/unit/account/test_reaper.py | 7 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import random
import shutil
import tempfile
import unittest
from logging import DEBUG
from mock import patch, call, DEFAULT
import six
from swift.account import reaper
from swift.account.backend import DATADIR
from swift.common.exceptions import ClientException
from swift.common.utils import normalize_timestamp
from test import unit
from swift.common.storage_policy import StoragePolicy, POLICIES
class FakeLogger(object):
    """Minimal logger double: records the last message, captured exceptions
    and per-key statsd-style counters."""

    def __init__(self, *args, **kwargs):
        counter_names = ('return_codes.4', 'return_codes.2',
                         'objects_failures', 'objects_deleted',
                         'objects_remaining', 'objects_possibly_remaining',
                         'containers_failures', 'containers_deleted',
                         'containers_remaining',
                         'containers_possibly_remaining')
        # Every counter the reaper may bump starts at zero.
        self.inc = dict.fromkeys(counter_names, 0)
        self.exp = []

    def info(self, msg, *args):
        self.msg = msg

    def error(self, msg, *args):
        self.msg = msg

    def timing_since(*args, **kwargs):
        pass

    def getEffectiveLevel(self):
        return DEBUG

    def exception(self, *args):
        self.exp.append(args)

    def increment(self, key):
        self.inc[key] += 1
class FakeBroker(object):
    """Broker stub; tests assign into .info directly and get_info() returns
    that same mutable dict."""
    def __init__(self):
        self.info = {}

    def get_info(self):
        return self.info
class FakeAccountBroker(object):
    """Account-broker stub: a deleted, non-empty account holding a fixed
    list of container names."""

    def __init__(self, containers):
        self.containers = containers
        self.containers_yielded = []

    def get_info(self):
        # delete_timestamp is in the past so the reaper treats the
        # account as ripe for reaping.
        return {'account': 'a',
                'delete_timestamp': time.time() - 10}

    def list_containers_iter(self, *args):
        for name in self.containers:
            yield name, None, None, None

    def is_status_deleted(self):
        return True

    def empty(self):
        return False
class FakeRing(object):
    """Three-node ring stub; every lookup returns the same node list and the
    constant partition name 'partition'."""

    def __init__(self):
        self.nodes = [
            {'id': str(i),
             'ip': '10.10.10.%d' % i,
             'port': 6002,
             'device': None}
            for i in (1, 2, 3)
        ]

    def get_nodes(self, *args, **kwargs):
        return ('partition', self.nodes)

    def get_part_nodes(self, *args, **kwargs):
        return self.nodes
# Static three-replica node lists handed to the reap_* calls in the tests
# below; ip/port are left blank and all replicas share device 'sda1'.
acc_nodes = [{'device': 'sda1',
              'ip': '',
              'port': ''},
             {'device': 'sda1',
              'ip': '',
              'port': ''},
             {'device': 'sda1',
              'ip': '',
              'port': ''}]

cont_nodes = [{'device': 'sda1',
               'ip': '',
               'port': ''},
              {'device': 'sda1',
               'ip': '',
               'port': ''},
              {'device': 'sda1',
               'ip': '',
               'port': ''}]
@unit.patch_policies([StoragePolicy(0, 'zero', False,
object_ring=unit.FakeRing()),
StoragePolicy(1, 'one', True,
object_ring=unit.FakeRing(replicas=4))])
class TestReaper(unittest.TestCase):
def setUp(self):
self.to_delete = []
self.myexp = ClientException("", http_host=None,
http_port=None,
http_device=None,
http_status=404,
http_reason=None
)
def tearDown(self):
for todel in self.to_delete:
shutil.rmtree(todel)
def fake_direct_delete_object(self, *args, **kwargs):
if self.amount_fail < self.max_fail:
self.amount_fail += 1
raise self.myexp
def fake_direct_delete_container(self, *args, **kwargs):
if self.amount_delete_fail < self.max_delete_fail:
self.amount_delete_fail += 1
raise self.myexp
def fake_direct_get_container(self, *args, **kwargs):
if self.get_fail:
raise self.myexp
objects = [{'name': 'o1'},
{'name': 'o2'},
{'name': six.text_type('o3')},
{'name': ''}]
return None, objects
def fake_container_ring(self):
return FakeRing()
def fake_reap_object(self, *args, **kwargs):
if self.reap_obj_fail:
raise Exception
def prepare_data_dir(self, ts=False):
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, 'sda1', DATADIR)
os.makedirs(path)
path = os.path.join(path, '100',
'a86', 'a8c682d2472e1720f2d81ff8993aba6')
os.makedirs(path)
suffix = 'db'
if ts:
suffix = 'ts'
with open(os.path.join(path, 'a8c682203aba6.%s' % suffix), 'w') as fd:
fd.write('')
return devices_path
def init_reaper(self, conf=None, myips=None, fakelogger=False):
if conf is None:
conf = {}
if myips is None:
myips = ['10.10.10.1']
r = reaper.AccountReaper(conf)
r.stats_return_codes = {}
r.stats_containers_deleted = 0
r.stats_containers_remaining = 0
r.stats_containers_possibly_remaining = 0
r.stats_objects_deleted = 0
r.stats_objects_remaining = 0
r.stats_objects_possibly_remaining = 0
r.myips = myips
if fakelogger:
r.logger = unit.debug_logger('test-reaper')
return r
def fake_reap_account(self, *args, **kwargs):
self.called_amount += 1
def fake_account_ring(self):
return FakeRing()
def test_creation(self):
# later config should be extended to assert more config options
r = reaper.AccountReaper({'node_timeout': '3.5'})
self.assertEqual(r.node_timeout, 3.5)
def test_delay_reaping_conf_default(self):
r = reaper.AccountReaper({})
self.assertEqual(r.delay_reaping, 0)
r = reaper.AccountReaper({'delay_reaping': ''})
self.assertEqual(r.delay_reaping, 0)
def test_delay_reaping_conf_set(self):
r = reaper.AccountReaper({'delay_reaping': '123'})
self.assertEqual(r.delay_reaping, 123)
def test_delay_reaping_conf_bad_value(self):
self.assertRaises(ValueError, reaper.AccountReaper,
{'delay_reaping': 'abc'})
def test_reap_warn_after_conf_set(self):
conf = {'delay_reaping': '2', 'reap_warn_after': '3'}
r = reaper.AccountReaper(conf)
self.assertEqual(r.reap_not_done_after, 5)
def test_reap_warn_after_conf_bad_value(self):
self.assertRaises(ValueError, reaper.AccountReaper,
{'reap_warn_after': 'abc'})
def test_reap_delay(self):
time_value = [100]
def _time():
return time_value[0]
time_orig = reaper.time
try:
reaper.time = _time
r = reaper.AccountReaper({'delay_reaping': '10'})
b = FakeBroker()
b.info['delete_timestamp'] = normalize_timestamp(110)
self.assertFalse(r.reap_account(b, 0, None))
b.info['delete_timestamp'] = normalize_timestamp(100)
self.assertFalse(r.reap_account(b, 0, None))
b.info['delete_timestamp'] = normalize_timestamp(90)
self.assertFalse(r.reap_account(b, 0, None))
# KeyError raised immediately as reap_account tries to get the
# account's name to do the reaping.
b.info['delete_timestamp'] = normalize_timestamp(89)
self.assertRaises(KeyError, r.reap_account, b, 0, None)
b.info['delete_timestamp'] = normalize_timestamp(1)
self.assertRaises(KeyError, r.reap_account, b, 0, None)
finally:
reaper.time = time_orig
def test_reap_object(self):
conf = {
'mount_check': 'false',
}
r = reaper.AccountReaper(conf, logger=unit.debug_logger())
mock_path = 'swift.account.reaper.direct_delete_object'
for policy in POLICIES:
r.reset_stats()
with patch(mock_path) as fake_direct_delete:
with patch('swift.account.reaper.time') as mock_time:
mock_time.return_value = 1429117638.86767
r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
policy.idx)
mock_time.assert_called_once_with()
for i, call_args in enumerate(
fake_direct_delete.call_args_list):
cnode = cont_nodes[i % len(cont_nodes)]
host = '%(ip)s:%(port)s' % cnode
device = cnode['device']
headers = {
'X-Container-Host': host,
'X-Container-Partition': 'partition',
'X-Container-Device': device,
'X-Backend-Storage-Policy-Index': policy.idx,
'X-Timestamp': '1429117638.86767'
}
ring = r.get_object_ring(policy.idx)
expected = call(dict(ring.devs[i], index=i), 0,
'a', 'c', 'o',
headers=headers, conn_timeout=0.5,
response_timeout=10)
self.assertEqual(call_args, expected)
self.assertEqual(policy.object_ring.replicas - 1, i)
self.assertEqual(r.stats_objects_deleted,
policy.object_ring.replicas)
    def test_reap_object_fail(self):
        """One failed replica delete skews the per-replica stats counters.

        See the comment below for why deleted == replicas - 2 here.
        """
        r = self.init_reaper({}, fakelogger=True)
        self.amount_fail = 0
        self.max_fail = 1
        policy = random.choice(list(POLICIES))
        with patch('swift.account.reaper.direct_delete_object',
                   self.fake_direct_delete_object):
            r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
                          policy.idx)
        # IMHO, the stat handling in the node loop of reap object is
        # over indented, but no one has complained, so I'm not inclined
        # to move it. However it's worth noting we're currently keeping
        # stats on deletes per *replica* - which is rather obvious from
        # these tests, but this results is surprising because of some
        # funny logic to *skip* increments on successful deletes of
        # replicas until we have more successful responses than
        # failures. This means that while the first replica doesn't
        # increment deleted because of the failure, the second one
        # *does* get successfully deleted, but *also does not* increment
        # the counter (!?).
        #
        # In the three replica case this leaves only the last deleted
        # object incrementing the counter - in the four replica case
        # this leaves the last two.
        #
        # Basically this test will always result in:
        #   deleted == num_replicas - 2
        self.assertEqual(r.stats_objects_deleted,
                         policy.object_ring.replicas - 2)
        self.assertEqual(r.stats_objects_remaining, 1)
        self.assertEqual(r.stats_objects_possibly_remaining, 1)
def test_reap_object_non_exist_policy_index(self):
r = self.init_reaper({}, fakelogger=True)
r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 2)
self.assertEqual(r.stats_objects_deleted, 0)
self.assertEqual(r.stats_objects_remaining, 1)
self.assertEqual(r.stats_objects_possibly_remaining, 0)
    @patch('swift.account.reaper.Ring',
           lambda *args, **kwargs: unit.FakeRing())
    def test_reap_container(self):
        """reap_container deletes the listed object on every replica, then
        deletes the container on each account node.

        The first mocked time() value stamps the object deletes, the second
        stamps the container deletes.
        """
        policy = random.choice(list(POLICIES))
        r = self.init_reaper({}, fakelogger=True)
        with patch.multiple('swift.account.reaper',
                            direct_get_container=DEFAULT,
                            direct_delete_object=DEFAULT,
                            direct_delete_container=DEFAULT) as mocks:
            headers = {'X-Backend-Storage-Policy-Index': policy.idx}
            obj_listing = [{'name': 'o'}]
            # Yields the single object once, then an empty listing so the
            # reaper considers the container drained.
            def fake_get_container(*args, **kwargs):
                try:
                    obj = obj_listing.pop(0)
                except IndexError:
                    obj_list = []
                else:
                    obj_list = [obj]
                return headers, obj_list
            mocks['direct_get_container'].side_effect = fake_get_container
            with patch('swift.account.reaper.time') as mock_time:
                mock_time.side_effect = [1429117638.86767, 1429117639.67676]
                r.reap_container('a', 'partition', acc_nodes, 'c')
            # verify calls to direct_delete_object
            mock_calls = mocks['direct_delete_object'].call_args_list
            self.assertEqual(policy.object_ring.replicas, len(mock_calls))
            for call_args in mock_calls:
                _args, kwargs = call_args
                self.assertEqual(kwargs['headers']
                                 ['X-Backend-Storage-Policy-Index'],
                                 policy.idx)
                self.assertEqual(kwargs['headers']
                                 ['X-Timestamp'],
                                 '1429117638.86767')
            # verify calls to direct_delete_container
            self.assertEqual(mocks['direct_delete_container'].call_count, 3)
            for i, call_args in enumerate(
                    mocks['direct_delete_container'].call_args_list):
                # Account nodes are cycled round-robin, like object deletes.
                anode = acc_nodes[i % len(acc_nodes)]
                host = '%(ip)s:%(port)s' % anode
                device = anode['device']
                headers = {
                    'X-Account-Host': host,
                    'X-Account-Partition': 'partition',
                    'X-Account-Device': device,
                    'X-Account-Override-Deleted': 'yes',
                    'X-Timestamp': '1429117639.67676'
                }
                ring = r.get_object_ring(policy.idx)
                expected = call(dict(ring.devs[i], index=i), 0, 'a', 'c',
                                headers=headers, conn_timeout=0.5,
                                response_timeout=10)
                self.assertEqual(call_args, expected)
        self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas)
def test_reap_container_get_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = True
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 0
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container), \
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring), \
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 1)
self.assertEqual(r.stats_containers_deleted, 1)
def test_reap_container_partial_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 2
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container), \
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring), \
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 2)
self.assertEqual(r.stats_containers_possibly_remaining, 1)
def test_reap_container_full_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 3
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container), \
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring), \
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 3)
self.assertEqual(r.stats_containers_remaining, 1)
    @patch('swift.account.reaper.Ring',
           lambda *args, **kwargs: unit.FakeRing())
    def test_reap_container_non_exist_policy_index(self):
        """A container listing reporting an unknown policy index logs an
        error."""
        r = self.init_reaper({}, fakelogger=True)
        with patch.multiple('swift.account.reaper',
                            direct_get_container=DEFAULT,
                            direct_delete_object=DEFAULT,
                            direct_delete_container=DEFAULT) as mocks:
            # Policy index 2 is not a configured policy in this test setup.
            headers = {'X-Backend-Storage-Policy-Index': 2}
            obj_listing = [{'name': 'o'}]
            def fake_get_container(*args, **kwargs):
                # One object on the first call, then an empty listing.
                try:
                    obj = obj_listing.pop(0)
                except IndexError:
                    obj_list = []
                else:
                    obj_list = [obj]
                return headers, obj_list
            mocks['direct_get_container'].side_effect = fake_get_container
            r.reap_container('a', 'partition', acc_nodes, 'c')
        self.assertEqual(r.logger.get_lines_for_level('error'), [
            'ERROR: invalid storage policy index: 2'])
def fake_reap_container(self, *args, **kwargs):
self.called_amount += 1
self.r.stats_containers_deleted = 1
self.r.stats_objects_deleted = 1
self.r.stats_containers_remaining = 1
self.r.stats_objects_remaining = 1
self.r.stats_containers_possibly_remaining = 1
self.r.stats_objects_possibly_remaining = 1
def test_reap_account(self):
containers = ('c1', 'c2', 'c3', '')
broker = FakeAccountBroker(containers)
self.called_amount = 0
self.r = r = self.init_reaper({}, fakelogger=True)
r.start_time = time.time()
with patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring):
nodes = r.get_account_ring().get_part_nodes()
for container_shard, node in enumerate(nodes):
self.assertTrue(
r.reap_account(broker, 'partition', nodes,
container_shard=container_shard))
self.assertEqual(self.called_amount, 4)
info_lines = r.logger.get_lines_for_level('info')
self.assertEqual(len(info_lines), 6)
for start_line, stat_line in zip(*[iter(info_lines)] * 2):
self.assertEqual(start_line, 'Beginning pass on account a')
self.assertTrue(stat_line.find('1 containers deleted'))
self.assertTrue(stat_line.find('1 objects deleted'))
self.assertTrue(stat_line.find('1 containers remaining'))
self.assertTrue(stat_line.find('1 objects remaining'))
self.assertTrue(stat_line.find('1 containers possibly remaining'))
self.assertTrue(stat_line.find('1 objects possibly remaining'))
def test_reap_account_no_container(self):
broker = FakeAccountBroker(tuple())
self.r = r = self.init_reaper({}, fakelogger=True)
self.called_amount = 0
r.start_time = time.time()
with patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring):
nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes))
self.assertTrue(r.logger.get_lines_for_level(
'info')[-1].startswith('Completed pass'))
self.assertEqual(self.called_amount, 0)
def test_reap_device(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf)
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 1)
def test_reap_device_with_ts(self):
devices = self.prepare_data_dir(ts=True)
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf=conf)
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 0)
def test_reap_device_with_not_my_ip(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf, myips=['10.10.1.2'])
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 0)
    def test_reap_device_with_sharding(self):
        """reap_device passes this node's ring position as container_shard."""
        devices = self.prepare_data_dir()
        conf = {'devices': devices}
        r = self.init_reaper(conf, myips=['10.10.10.2'])
        container_shard_used = [-1]
        def fake_reap_account(*args, **kwargs):
            # Capture the shard index reap_device hands to reap_account.
            container_shard_used[0] = kwargs.get('container_shard')
        with patch('swift.account.reaper.AccountBroker',
                   FakeAccountBroker), \
            patch('swift.account.reaper.AccountReaper.get_account_ring',
                  self.fake_account_ring), \
            patch('swift.account.reaper.AccountReaper.reap_account',
                  fake_reap_account):
            r.reap_device('sda1')
        # 10.10.10.2 is second node from ring
        self.assertEqual(container_shard_used[0], 1)
    def test_reap_account_with_sharding(self):
        """Each container_shard value reaps a disjoint subset of containers.

        Of the three containers, shard 0 reaps one, shard 1 reaps two and
        shard 2 reaps none — presumably driven by container-to-node
        assignment; the exact split is pinned by the asserts below.
        """
        devices = self.prepare_data_dir()
        self.called_amount = 0
        conf = {'devices': devices}
        r = self.init_reaper(conf, myips=['10.10.10.2'])
        container_reaped = [0]
        def fake_list_containers_iter(self, *args):
            # Yield each container at most once across successive calls.
            for container in self.containers:
                if container in self.containers_yielded:
                    continue
                yield container, None, None, None
                self.containers_yielded.append(container)
        def fake_reap_container(self, account, account_partition,
                                account_nodes, container):
            container_reaped[0] += 1
        fake_ring = FakeRing()
        with patch('swift.account.reaper.AccountBroker',
                   FakeAccountBroker), \
            patch(
                'swift.account.reaper.AccountBroker.list_containers_iter',
                fake_list_containers_iter), \
            patch('swift.account.reaper.AccountReaper.reap_container',
                  fake_reap_container):
            fake_broker = FakeAccountBroker(['c', 'd', 'e'])
            r.reap_account(fake_broker, 10, fake_ring.nodes, 0)
            self.assertEqual(container_reaped[0], 1)
            fake_broker = FakeAccountBroker(['c', 'd', 'e'])
            container_reaped[0] = 0
            r.reap_account(fake_broker, 10, fake_ring.nodes, 1)
            self.assertEqual(container_reaped[0], 2)
            container_reaped[0] = 0
            fake_broker = FakeAccountBroker(['c', 'd', 'e'])
            r.reap_account(fake_broker, 10, fake_ring.nodes, 2)
            self.assertEqual(container_reaped[0], 0)
def test_run_once(self):
def prepare_data_dir():
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, 'sda1', DATADIR)
os.makedirs(path)
return devices_path
def init_reaper(devices):
r = reaper.AccountReaper({'devices': devices})
return r
devices = prepare_data_dir()
r = init_reaper(devices)
with patch('swift.account.reaper.ismount', lambda x: True):
with patch(
'swift.account.reaper.AccountReaper.reap_device') as foo:
r.run_once()
self.assertEqual(foo.called, 1)
with patch('swift.account.reaper.ismount', lambda x: False):
with patch(
'swift.account.reaper.AccountReaper.reap_device') as foo:
r.run_once()
self.assertFalse(foo.called)
def test_run_forever(self):
def fake_sleep(val):
self.val = val
def fake_random():
return 1
def fake_run_once():
raise Exception('exit')
def init_reaper():
r = reaper.AccountReaper({'interval': 1})
r.run_once = fake_run_once
return r
r = init_reaper()
with patch('swift.account.reaper.sleep', fake_sleep):
with patch('swift.account.reaper.random.random', fake_random):
try:
r.run_forever()
except Exception as err:
pass
self.assertEqual(self.val, 1)
self.assertEqual(str(err), 'exit')
if __name__ == '__main__':
unittest.main()
|
iproduct/course-social-robotics | refs/heads/master | 11-dnn-keras/venv/Lib/site-packages/defusedxml/expatbuilder.py | 3 | # defusedxml
#
# Copyright (c) 2013 by Christian Heimes <christian@python.org>
# Licensed to PSF under a Contributor Agreement.
# See https://www.python.org/psf/license for licensing details.
"""Defused xml.dom.expatbuilder
"""
from __future__ import print_function, absolute_import
from xml.dom.expatbuilder import ExpatBuilder as _ExpatBuilder
from xml.dom.expatbuilder import Namespaces as _Namespaces
from .common import DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden
__origin__ = "xml.dom.expatbuilder"
class DefusedExpatBuilder(_ExpatBuilder):
    """Defused document builder"""
    def __init__(
        self, options=None, forbid_dtd=False, forbid_entities=True, forbid_external=True
    ):
        # Entities and external references are forbidden by default; they
        # are the vectors for entity-expansion and external-entity (XXE)
        # attacks that defusedxml exists to block.
        _ExpatBuilder.__init__(self, options)
        self.forbid_dtd = forbid_dtd
        self.forbid_entities = forbid_entities
        self.forbid_external = forbid_external
    def defused_start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        """Reject any DOCTYPE declaration."""
        raise DTDForbidden(name, sysid, pubid)
    def defused_entity_decl(
        self, name, is_parameter_entity, value, base, sysid, pubid, notation_name
    ):
        """Reject internal and parameter entity declarations."""
        raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
    def defused_unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        """Reject unparsed entity declarations."""
        # expat 1.2
        raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)  # pragma: no cover
    def defused_external_entity_ref_handler(self, context, base, sysid, pubid):
        """Reject references to external entities."""
        raise ExternalReferenceForbidden(context, base, sysid, pubid)
    def install(self, parser):
        """Install the rejecting handlers on *parser* per the forbid_* flags."""
        _ExpatBuilder.install(self, parser)
        if self.forbid_dtd:
            parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl
        if self.forbid_entities:
            # if self._options.entities:
            parser.EntityDeclHandler = self.defused_entity_decl
            parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl
        if self.forbid_external:
            parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler
class DefusedExpatBuilderNS(_Namespaces, DefusedExpatBuilder):
    """Defused document builder that supports namespaces."""
    def install(self, parser):
        # Install the defusing handlers first, then the namespace hook.
        DefusedExpatBuilder.install(self, parser)
        if self._options.namespace_declarations:
            # Only forward namespace-declaration events when requested.
            parser.StartNamespaceDeclHandler = self.start_namespace_decl_handler
    def reset(self):
        DefusedExpatBuilder.reset(self)
        self._initNamespaces()
def parse(file, namespaces=True, forbid_dtd=False, forbid_entities=True, forbid_external=True):
    """Parse a document, returning the resulting Document node.

    'file' may be either a file name or an open file object.
    """
    if namespaces:
        build_builder = DefusedExpatBuilderNS
    else:
        build_builder = DefusedExpatBuilder
    builder = build_builder(
        forbid_dtd=forbid_dtd, forbid_entities=forbid_entities, forbid_external=forbid_external
    )
    if isinstance(file, str):
        # Idiom: a context manager replaces the try/finally pair and still
        # guarantees the file is closed if parseFile raises.
        with open(file, "rb") as fp:
            result = builder.parseFile(fp)
    else:
        result = builder.parseFile(file)
    return result
def parseString(
    string, namespaces=True, forbid_dtd=False, forbid_entities=True, forbid_external=True
):
    """Parse a document from a string, returning the resulting
    Document node.
    """
    builder_cls = DefusedExpatBuilderNS if namespaces else DefusedExpatBuilder
    builder = builder_cls(
        forbid_dtd=forbid_dtd,
        forbid_entities=forbid_entities,
        forbid_external=forbid_external,
    )
    return builder.parseString(string)
|
RydrDojo/Ridr_app | refs/heads/master | pylotVenv/lib/python2.7/site-packages/flask/testsuite/test_apps/moduleapp/apps/admin/__init__.py | 629 | from flask import Module, render_template
# Legacy Flask "Module" (pre-Blueprint API); every route mounts under /admin.
admin = Module(__name__, url_prefix='/admin')
@admin.route('/')
def index():
    # Renders the admin landing page.
    return render_template('admin/index.html')
@admin.route('/index2')
def index2():
    # Same template addressed with an explicit './' prefix — presumably
    # exercises relative template path lookup in the test suite.
    return render_template('./admin/index.html')
|
turtlemonvh/traffic-monitor | refs/heads/master | setup.py | 1 | from distutils.core import setup
import py2exe
"""
After adding py2exe to your python distribution (using eas_install with the executable to install into a specific version of python), build with:
python setup.py py2exe
The executable file is placed in /dist/route_test.exe
See more information about py2exe here:
http://www.py2exe.org/
http://www.py2exe.org/index.cgi/Tutorial
"""
setup(console=['route_test.py']) |
garg10may/youtube-dl | refs/heads/master | youtube_dl/extractor/eagleplatform.py | 65 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
)
class EaglePlatformIE(InfoExtractor):
    # Matches either the internal 'eagleplatform:<host>:<id>' scheme or an
    # embed/player URL carrying a record_id query parameter.
    _VALID_URL = r'''(?x)
                    (?:
                        eagleplatform:(?P<custom_host>[^/]+):|
                        https?://(?P<host>.+?\.media\.eagleplatform\.com)/index/player\?.*\brecord_id=
                    )
                    (?P<id>\d+)
                '''
    _TESTS = [{
        # http://lenta.ru/news/2015/03/06/navalny/
        'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201',
        'md5': '0b7994faa2bd5c0f69a3db6db28d078d',
        'info_dict': {
            'id': '227304',
            'ext': 'mp4',
            'title': 'Навальный вышел на свободу',
            'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 87,
            'view_count': int,
            'age_limit': 0,
        },
    }, {
        # http://muz-tv.ru/play/7129/
        # http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true
        'url': 'eagleplatform:media.clipyou.ru:12820',
        'md5': '6c2ebeab03b739597ce8d86339d5a905',
        'info_dict': {
            'id': '12820',
            'ext': 'mp4',
            'title': "'O Sole Mio",
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 216,
            'view_count': int,
        },
        'skip': 'Georestricted',
    }]
    def _handle_error(self, response):
        """Raise ExtractorError if the API response carries a non-200 status."""
        status = int_or_none(response.get('status', 200))
        if status != 200:
            raise ExtractorError(' '.join(response['errors']), expected=True)
    def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'):
        """Download JSON like the base class, then run the API error check."""
        response = super(EaglePlatformIE, self)._download_json(url_or_request, video_id, note)
        self._handle_error(response)
        return response
    def _real_extract(self, url):
        """Fetch player metadata and the m3u8 manifest, and build the
        info dict for the video."""
        mobj = re.match(self._VALID_URL, url)
        host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id')
        player_data = self._download_json(
            'http://%s/api/player_data?id=%s' % (host, video_id), video_id)
        # First media entry of the first viewport is the video itself.
        media = player_data['data']['playlist']['viewports'][0]['medialist'][0]
        title = media['title']
        description = media.get('description')
        thumbnail = media.get('snapshot')
        duration = int_or_none(media.get('duration'))
        view_count = int_or_none(media.get('views'))
        age_restriction = media.get('age_restriction')
        age_limit = None
        if age_restriction:
            # Anything other than 'allow_all' is treated as adult-only.
            age_limit = 0 if age_restriction == 'allow_all' else 18
        m3u8_data = self._download_json(
            media['sources']['secure_m3u8']['auto'],
            video_id, 'Downloading m3u8 JSON')
        formats = self._extract_m3u8_formats(
            m3u8_data['data'][0], video_id,
            'mp4', entry_protocol='m3u8_native')
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        }
|
relsi/pautai | refs/heads/master | models/menu.py | 17 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.logo = A(B('web',SPAN(2),'py'),XML('™ '),
_class="navbar-brand",_href="http://www.web2py.com/",
_id="web2py-logo")
response.title = request.application.replace('_',' ').title()
response.subtitle = ''
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Your Name <you@example.com>'
response.meta.description = 'a cool new app'
response.meta.keywords = 'web2py, python, framework'
response.meta.generator = 'Web2py Web Framework'
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('Home'), False, URL('default', 'index'), [])
]
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
    """Append the web2py development menu entries to response.menu.

    Pure data: links to the admin app for this application, web2py.com,
    the online book chapters and community resources.
    """
    # shortcuts
    app = request.application
    ctr = request.controller
    # useful links to internal and external resources
    response.menu += [
    (T('My Sites'), False, URL('admin', 'default', 'site')),
    (T('This App'), False, '#', [
            (T('Design'), False, URL('admin', 'default', 'design/%s' % app)),
            LI(_class="divider"),
            (T('Controller'), False,
             URL(
             'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
            (T('View'), False,
             URL(
             'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
            (T('DB Model'), False,
             URL(
             'admin', 'default', 'edit/%s/models/db.py' % app)),
            (T('Menu Model'), False,
             URL(
             'admin', 'default', 'edit/%s/models/menu.py' % app)),
            (T('Config.ini'), False,
             URL(
             'admin', 'default', 'edit/%s/private/appconfig.ini' % app)),
            (T('Layout'), False,
             URL(
             'admin', 'default', 'edit/%s/views/layout.html' % app)),
            (T('Stylesheet'), False,
             URL(
             'admin', 'default', 'edit/%s/static/css/web2py-bootstrap3.css' % app)),
            (T('Database'), False, URL(app, 'appadmin', 'index')),
            (T('Errors'), False, URL(
                'admin', 'default', 'errors/' + app)),
            (T('About'), False, URL(
                'admin', 'default', 'about/' + app)),
            ]),
    ('web2py.com', False, '#', [
            (T('Download'), False,
             'http://www.web2py.com/examples/default/download'),
            (T('Support'), False,
             'http://www.web2py.com/examples/default/support'),
            (T('Demo'), False, 'http://web2py.com/demo_admin'),
            (T('Quick Examples'), False,
             'http://web2py.com/examples/default/examples'),
            (T('FAQ'), False, 'http://web2py.com/AlterEgo'),
            (T('Videos'), False,
             'http://www.web2py.com/examples/default/videos/'),
            (T('Free Applications'),
             False, 'http://web2py.com/appliances'),
            (T('Plugins'), False, 'http://web2py.com/plugins'),
            (T('Recipes'), False, 'http://web2pyslices.com/'),
            ]),
    (T('Documentation'), False, '#', [
            (T('Online book'), False, 'http://www.web2py.com/book'),
            LI(_class="divider"),
            (T('Preface'), False,
             'http://www.web2py.com/book/default/chapter/00'),
            (T('Introduction'), False,
             'http://www.web2py.com/book/default/chapter/01'),
            (T('Python'), False,
             'http://www.web2py.com/book/default/chapter/02'),
            (T('Overview'), False,
             'http://www.web2py.com/book/default/chapter/03'),
            (T('The Core'), False,
             'http://www.web2py.com/book/default/chapter/04'),
            (T('The Views'), False,
             'http://www.web2py.com/book/default/chapter/05'),
            (T('Database'), False,
             'http://www.web2py.com/book/default/chapter/06'),
            (T('Forms and Validators'), False,
             'http://www.web2py.com/book/default/chapter/07'),
            (T('Email and SMS'), False,
             'http://www.web2py.com/book/default/chapter/08'),
            (T('Access Control'), False,
             'http://www.web2py.com/book/default/chapter/09'),
            (T('Services'), False,
             'http://www.web2py.com/book/default/chapter/10'),
            (T('Ajax Recipes'), False,
             'http://www.web2py.com/book/default/chapter/11'),
            (T('Components and Plugins'), False,
             'http://www.web2py.com/book/default/chapter/12'),
            (T('Deployment Recipes'), False,
             'http://www.web2py.com/book/default/chapter/13'),
            (T('Other Recipes'), False,
             'http://www.web2py.com/book/default/chapter/14'),
            (T('Helping web2py'), False,
             'http://www.web2py.com/book/default/chapter/15'),
            (T("Buy web2py's book"), False,
             'http://stores.lulu.com/web2py'),
            ]),
    (T('Community'), False, None, [
            (T('Groups'), False,
             'http://www.web2py.com/examples/default/usergroups'),
            (T('Twitter'), False, 'http://twitter.com/web2py'),
            (T('Live Chat'), False,
             'http://webchat.freenode.net/?channels=web2py'),
            ]),
    ]
# Only expose the development menu entries when the flag above is set.
if DEVELOPMENT_MENU: _()
# Add wiki menu entries when Auth has been configured by an earlier model.
if "auth" in locals(): auth.wikimenu()
|
mexeniz/django-oscar | refs/heads/master | src/oscar/templatetags/category_tags.py | 28 | from django import template
from oscar.core.loading import get_model
register = template.Library()
Category = get_model('catalogue', 'category')
@register.assignment_tag(name="category_tree")
def get_annotated_list(depth=None, parent=None):
    """
    Gets an annotated list from a tree branch.
    Borrows heavily from treebeard's get_annotated_list
    """
    # 'depth' is the backwards-compatible name for the template tag,
    # 'max_depth' is the better variable name.
    max_depth = depth
    annotated_categories = []
    start_depth, prev_depth = (None, None)
    if parent:
        categories = parent.get_descendants()
        if max_depth is not None:
            # Depth limit is relative to the parent, not absolute.
            max_depth += parent.get_depth()
    else:
        categories = Category.get_tree()
    info = {}
    for node in categories:
        node_depth = node.get_depth()
        if start_depth is None:
            start_depth = node_depth
        if max_depth is not None and node_depth > max_depth:
            continue
        # Update previous node's info
        # NOTE: at this point 'info' still refers to the PREVIOUS node's
        # dict (or the throwaway initial {}), so has_children/num_to_close
        # are deliberately written one iteration late.
        info['has_children'] = prev_depth is None or node_depth > prev_depth
        if prev_depth is not None and node_depth < prev_depth:
            info['num_to_close'] = list(range(0, prev_depth - node_depth))
        info = {'num_to_close': [],
                'level': node_depth - start_depth}
        annotated_categories.append((node, info,))
        prev_depth = node_depth
    if prev_depth is not None:
        # close last leaf
        info['num_to_close'] = list(range(0, prev_depth - start_depth))
        # NOTE(review): 'prev_depth > prev_depth' is always False, so the
        # last node is always marked childless — probably meant to be a
        # literal False; confirm before simplifying.
        info['has_children'] = prev_depth > prev_depth
    return annotated_categories
|
felixma/nova | refs/heads/master | nova/api/openstack/compute/volumes.py | 14 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes extension."""
from oslo_utils import strutils
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import volumes as volumes_schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
from nova import objects
from nova import volume
ALIAS = "os-volumes"
authorize = extensions.os_compute_authorizer(ALIAS)
authorize_attach = extensions.os_compute_authorizer('os-volumes-attachments')
def _translate_volume_detail_view(context, vol):
    """Maps keys for volumes details view."""
    # The detail view currently exposes exactly the summary fields; no
    # extra data or lookups are added.
    return _translate_volume_summary_view(context, vol)
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
if vol.get('volume_metadata'):
d['metadata'] = vol.get('volume_metadata')
else:
d['metadata'] = {}
return d
class VolumeController(wsgi.Controller):
    """The Volumes API controller for the OpenStack API."""
    def __init__(self):
        self.volume_api = volume.API()
        super(VolumeController, self).__init__()
    @extensions.expected_errors(404)
    def show(self, req, id):
        """Return data about the given volume.

        Raises HTTPNotFound if the volume does not exist.
        """
        context = req.environ['nova.context']
        authorize(context)
        try:
            vol = self.volume_api.get(context, id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return {'volume': _translate_volume_detail_view(context, vol)}
    @wsgi.response(202)
    @extensions.expected_errors(404)
    def delete(self, req, id):
        """Delete a volume.

        Returns 202 Accepted; raises HTTPNotFound for a missing volume.
        """
        context = req.environ['nova.context']
        authorize(context)
        try:
            self.volume_api.delete(context, id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
    @extensions.expected_errors(())
    def index(self, req):
        """Returns a summary list of volumes."""
        return self._items(req, entity_maker=_translate_volume_summary_view)
    @extensions.expected_errors(())
    def detail(self, req):
        """Returns a detailed list of volumes."""
        return self._items(req, entity_maker=_translate_volume_detail_view)
    def _items(self, req, entity_maker):
        """Returns a list of volumes, transformed through entity_maker."""
        context = req.environ['nova.context']
        authorize(context)
        volumes = self.volume_api.get_all(context)
        # Apply the standard request paging limits before translating.
        limited_list = common.limited(volumes, req)
        res = [entity_maker(context, vol) for vol in limited_list]
        return {'volumes': res}
    @extensions.expected_errors((400, 403, 404))
    @validation.schema(volumes_schema.create)
    def create(self, req, body):
        """Creates a new volume.

        Raises HTTPNotFound for a missing source snapshot, HTTPBadRequest
        for invalid input, and HTTPForbidden when over quota.
        """
        context = req.environ['nova.context']
        authorize(context)
        vol = body['volume']
        vol_type = vol.get('volume_type')
        metadata = vol.get('metadata')
        snapshot_id = vol.get('snapshot_id', None)
        if snapshot_id is not None:
            try:
                snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            except exception.SnapshotNotFound as e:
                raise exc.HTTPNotFound(explanation=e.format_message())
        else:
            snapshot = None
        size = vol.get('size', None)
        if size is None and snapshot is not None:
            # Default the new volume's size to the source snapshot's size.
            size = snapshot['volume_size']
        availability_zone = vol.get('availability_zone')
        try:
            new_volume = self.volume_api.create(
                context,
                size,
                vol.get('display_name'),
                vol.get('display_description'),
                snapshot=snapshot,
                volume_type=vol_type,
                metadata=metadata,
                availability_zone=availability_zone
                )
        except exception.InvalidInput as err:
            raise exc.HTTPBadRequest(explanation=err.format_message())
        except exception.OverQuota as err:
            raise exc.HTTPForbidden(explanation=err.format_message())
        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        retval = _translate_volume_detail_view(context, dict(new_volume))
        result = {'volume': retval}
        location = '%s/%s' % (req.url, new_volume['id'])
        return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
    """Maps keys for attachment details view."""
    # The detail view adds nothing on top of the summary view at the moment.
    return _translate_attachment_summary_view(
        volume_id, instance_uuid, mountpoint)
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
class VolumeAttachmentController(wsgi.Controller):
    """The volume attachment API controller for the OpenStack API.

    A child resource of the server. Note that we use the volume id
    as the ID of the attachment (though this is not guaranteed externally)
    """
    def __init__(self):
        # Policy is enforced in this controller via authorize()/
        # authorize_attach(), so the compute API layer skips its own check.
        self.compute_api = compute.API(skip_policy_check=True)
        self.volume_api = volume.API()
        super(VolumeAttachmentController, self).__init__()
    @extensions.expected_errors(404)
    def index(self, req, server_id):
        """Returns the list of volume attachments for a given instance."""
        context = req.environ['nova.context']
        authorize_attach(context, action='index')
        return self._items(req, server_id,
                           entity_maker=_translate_attachment_summary_view)
    @extensions.expected_errors(404)
    def show(self, req, server_id, id):
        """Return data about the given volume attachment.

        :param id: the volume id (doubles as the attachment id).
        """
        context = req.environ['nova.context']
        # NOTE(review): both the extension policy and the attach policy are
        # checked here, unlike index() which only checks the attach policy --
        # presumably intentional, but worth confirming.
        authorize(context)
        authorize_attach(context, action='show')
        volume_id = id
        instance = common.get_instance(self.compute_api, context, server_id)
        # Attachments are derived from the instance's block device mappings.
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        if not bdms:
            msg = _("Instance %s is not attached.") % server_id
            raise exc.HTTPNotFound(explanation=msg)
        assigned_mountpoint = None
        for bdm in bdms:
            if bdm.volume_id == volume_id:
                assigned_mountpoint = bdm.device_name
                break
        if assigned_mountpoint is None:
            msg = _("volume_id not found: %s") % volume_id
            raise exc.HTTPNotFound(explanation=msg)
        return {'volumeAttachment': _translate_attachment_detail_view(
            volume_id,
            instance.uuid,
            assigned_mountpoint)}
    @extensions.expected_errors((400, 404, 409))
    @validation.schema(volumes_schema.create_volume_attachment)
    def create(self, req, server_id, body):
        """Attach a volume to an instance."""
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='create')
        volume_id = body['volumeAttachment']['volumeId']
        # 'device' is optional; the compute API may choose one, and returns
        # the device name actually used.
        device = body['volumeAttachment'].get('device')
        instance = common.get_instance(self.compute_api, context, server_id)
        try:
            device = self.compute_api.attach_volume(context, instance,
                                                    volume_id, device)
        except exception.InstanceUnknownCell as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'attach_volume', server_id)
        except (exception.InvalidVolume,
                exception.InvalidDevicePath) as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        # The attach is async
        attachment = {}
        attachment['id'] = volume_id
        attachment['serverId'] = server_id
        attachment['volumeId'] = volume_id
        attachment['device'] = device
        # NOTE(justinsb): And now, we have a problem...
        # The attach is async, so there's a window in which we don't see
        # the attachment (until the attachment completes). We could also
        # get problems with concurrent requests. I think we need an
        # attachment state, and to write to the DB here, but that's a bigger
        # change.
        # For now, we'll probably have to rely on libraries being smart
        # TODO(justinsb): How do I return "accepted" here?
        return {'volumeAttachment': attachment}
    @wsgi.response(202)
    @extensions.expected_errors((400, 404, 409))
    @validation.schema(volumes_schema.update_volume_attachment)
    def update(self, req, server_id, id, body):
        """Swap the volume attached as *id* for the one named in the body."""
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='update')
        old_volume_id = id
        try:
            old_volume = self.volume_api.get(context, old_volume_id)
            new_volume_id = body['volumeAttachment']['volumeId']
            new_volume = self.volume_api.get(context, new_volume_id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        instance = common.get_instance(self.compute_api, context, server_id)
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        found = False
        try:
            # Locate the BDM for the old volume; only then attempt the swap.
            for bdm in bdms:
                if bdm.volume_id != old_volume_id:
                    continue
                try:
                    self.compute_api.swap_volume(context, instance, old_volume,
                                                 new_volume)
                    found = True
                    break
                except exception.VolumeUnattached:
                    # The volume is not attached. Treat it as NotFound
                    # by falling through.
                    pass
        except exception.InvalidVolume as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'swap_volume', server_id)
        if not found:
            msg = _("The volume was either invalid or not attached to the "
                    "instance.")
            raise exc.HTTPNotFound(explanation=msg)
    @wsgi.response(202)
    @extensions.expected_errors((400, 403, 404, 409))
    def delete(self, req, server_id, id):
        """Detach a volume from an instance.

        :param id: the volume id (doubles as the attachment id).
        """
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='delete')
        volume_id = id
        instance = common.get_instance(self.compute_api, context, server_id)
        try:
            volume = self.volume_api.get(context, volume_id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        if not bdms:
            msg = _("Instance %s is not attached.") % server_id
            raise exc.HTTPNotFound(explanation=msg)
        found = False
        try:
            for bdm in bdms:
                if bdm.volume_id != volume_id:
                    continue
                # Detaching the boot volume is explicitly forbidden.
                if bdm.is_root:
                    msg = _("Can't detach root device volume")
                    raise exc.HTTPForbidden(explanation=msg)
                try:
                    self.compute_api.detach_volume(context, instance, volume)
                    found = True
                    break
                except exception.VolumeUnattached:
                    # The volume is not attached. Treat it as NotFound
                    # by falling through.
                    pass
        except exception.InvalidVolume as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.InstanceUnknownCell as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'detach_volume', server_id)
        if not found:
            msg = _("volume_id not found: %s") % volume_id
            raise exc.HTTPNotFound(explanation=msg)
    def _items(self, req, server_id, entity_maker):
        """Returns a list of attachments, transformed through entity_maker."""
        context = req.environ['nova.context']
        authorize(context)
        instance = common.get_instance(self.compute_api, context, server_id)
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        limited_list = common.limited(bdms, req)
        results = []
        # Only BDMs that are backed by a volume count as attachments
        # (image/blank/swap mappings have no volume_id).
        for bdm in limited_list:
            if bdm.volume_id:
                results.append(entity_maker(bdm.volume_id,
                                            bdm.instance_uuid,
                                            bdm.device_name))
        return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
    """Maps keys for snapshots details view."""
    # NOTE(gagupta): No additional data / lookups at the moment
    return _translate_snapshot_summary_view(context, vol)
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
class SnapshotController(wsgi.Controller):
    """The Snapshots API controller for the OpenStack API."""
    def __init__(self):
        self.volume_api = volume.API()
        super(SnapshotController, self).__init__()
    @extensions.expected_errors(404)
    def show(self, req, id):
        """Return data about the given snapshot."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            snapshot = self.volume_api.get_snapshot(context, id)
        except exception.SnapshotNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return {'snapshot': _translate_snapshot_detail_view(context, snapshot)}
    @wsgi.response(202)
    @extensions.expected_errors(404)
    def delete(self, req, id):
        """Delete a snapshot."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            self.volume_api.delete_snapshot(context, id)
        except exception.SnapshotNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
    @extensions.expected_errors(())
    def index(self, req):
        """Returns a summary list of snapshots."""
        return self._items(req, entity_maker=_translate_snapshot_summary_view)
    @extensions.expected_errors(())
    def detail(self, req):
        """Returns a detailed list of snapshots."""
        return self._items(req, entity_maker=_translate_snapshot_detail_view)
    def _items(self, req, entity_maker):
        """Returns a list of snapshots, transformed through entity_maker."""
        context = req.environ['nova.context']
        authorize(context)
        page = common.limited(self.volume_api.get_all_snapshots(context), req)
        return {'snapshots': [entity_maker(context, snap) for snap in page]}
    @extensions.expected_errors(400)
    @validation.schema(volumes_schema.snapshot_create)
    def create(self, req, body):
        """Creates a new snapshot."""
        context = req.environ['nova.context']
        authorize(context)
        snapshot = body['snapshot']
        volume_id = snapshot['volume_id']
        # 'force' selects the variant that snapshots an in-use volume.
        force = strutils.bool_from_string(snapshot.get('force', False),
                                          strict=True)
        create_func = (self.volume_api.create_snapshot_force if force
                       else self.volume_api.create_snapshot)
        new_snapshot = create_func(context, volume_id,
                                   snapshot.get('display_name'),
                                   snapshot.get('display_description'))
        return {'snapshot': _translate_snapshot_detail_view(context,
                                                            new_snapshot)}
class Volumes(extensions.V21APIExtensionBase):
    """Volumes support."""
    name = "Volumes"
    alias = ALIAS
    version = 1
    def get_resources(self):
        """Register the volume, boot, attachment and snapshot resources."""
        volumes_res = extensions.ResourceExtension(
            ALIAS, VolumeController(), collection_actions={'detail': 'GET'})
        # os-volumes_boot reuses the servers controller wholesale.
        boot_res = extensions.ResourceExtension('os-volumes_boot',
                                                inherits='servers')
        # Attachments are a child resource of an individual server.
        attach_res = extensions.ResourceExtension(
            'os-volume_attachments',
            VolumeAttachmentController(),
            parent=dict(member_name='server', collection_name='servers'))
        snapshot_res = extensions.ResourceExtension(
            'os-snapshots', SnapshotController(),
            collection_actions={'detail': 'GET'})
        return [volumes_res, boot_res, attach_res, snapshot_res]
    def get_controller_extensions(self):
        """This plugin contributes no controller extensions."""
        return []
|
squarerootfury/distrochooser | refs/heads/master | backend/distrochooser/migrations/0043_auto_20191213_1534.py | 2 | # Generated by Django 2.2.3 on 2019-12-13 14:34
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: refreshes the UserSession.dateTime
    # default and adds single-column lookup indexes.
    dependencies = [
        ('distrochooser', '0042_auto_20191213_1533'),
    ]
    operations = [
        migrations.AlterField(
            model_name='usersession',
            name='dateTime',
            # NOTE(review): the default is a datetime literal frozen at
            # makemigrations time, which suggests the model declares
            # default=datetime.now() (evaluated once at import) rather than
            # a callable such as timezone.now -- confirm in the model.
            field=models.DateTimeField(default=datetime.datetime(2019, 12, 13, 15, 34, 44, 368201)),
        ),
        migrations.AddIndex(
            model_name='distribution',
            index=models.Index(fields=['name'], name='distrochoos_name_04f653_idx'),
        ),
        migrations.AddIndex(
            model_name='distribution',
            index=models.Index(fields=['identifier'], name='distrochoos_identif_107970_idx'),
        ),
        migrations.AddIndex(
            model_name='selectionreason',
            index=models.Index(fields=['resultSelection'], name='distrochoos_resultS_eb35e3_idx'),
        ),
    ]
|
abtink/openthread | refs/heads/master | tests/scripts/thread-cert/Cert_5_6_03_NetworkDataRegisterAfterAttachLeader.py | 4 | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
from pktverify.consts import MLE_ADVERTISEMENT, MLE_DATA_RESPONSE, MLE_CHILD_ID_RESPONSE, MLE_CHILD_UPDATE_REQUEST, ADDR_SOL_URI, MLE_CHILD_UPDATE_RESPONSE, MODE_TLV, LEADER_DATA_TLV, ROUTE64_TLV, SOURCE_ADDRESS_TLV, ACTIVE_TIMESTAMP_TLV, ADDRESS16_TLV, NETWORK_DATA_TLV, ADDRESS_REGISTRATION_TLV, LINK_LOCAL_ALL_NODES_MULTICAST_ADDRESS
from pktverify.packet_verifier import PacketVerifier
from pktverify.addrs import Ipv6Addr
# Node ids used to key the TOPOLOGY dictionary below.
LEADER = 1
ROUTER = 2
ED1 = 3
SED1 = 4
# The MTD (minimal thread device) children in this topology.
MTDS = [ED1, SED1]
class Cert_5_6_3_NetworkDataRegisterAfterAttachLeader(thread_cert.TestCase):
    """Thread cert 5.6.3: network data registered on the Leader after attach
    propagates to the router's MED and SED children.
    """
    TOPOLOGY = {
        LEADER: {
            'name': 'LEADER',
            'mode': 'rdn',
            'panid': 0xface,
            'allowlist': [ROUTER]
        },
        ROUTER: {
            'name': 'ROUTER',
            'mode': 'rdn',
            'panid': 0xface,
            'router_selection_jitter': 1,
            'allowlist': [LEADER, ED1, SED1]
        },
        ED1: {
            'name': 'MED',
            'is_mtd': True,
            'mode': 'rn',
            'panid': 0xface,
            'allowlist': [ROUTER]
        },
        SED1: {
            'name': 'SED',
            'is_mtd': True,
            'mode': '-',
            'panid': 0xface,
            'timeout': config.DEFAULT_CHILD_TIMEOUT,
            'allowlist': [ROUTER]
        },
    }
    def test(self):
        """Form the network, add two prefixes on the Leader, and verify the
        children configure (and can be pinged on) the expected addresses.
        """
        # Bring the network up: leader, router, then both children.
        self.nodes[LEADER].start()
        self.simulator.go(4)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[ROUTER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
        self.nodes[ED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED1].get_state(), 'child')
        self.nodes[SED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[SED1].get_state(), 'child')
        # Prefix 1 is stable ('s' flag); Prefix 2 is not, so it should not
        # reach the sleepy child, which only receives stable network data.
        self.nodes[LEADER].add_prefix('2001:2:0:1::/64', 'paros')
        self.nodes[LEADER].add_prefix('2001:2:0:2::/64', 'paro')
        self.nodes[LEADER].register_netdata()
        # Set lowpan context of sniffer
        self.simulator.set_lowpan_context(1, '2001:2:0:1::/64')
        self.simulator.set_lowpan_context(2, '2001:2:0:2::/64')
        self.simulator.go(10)
        # MED should auto-configure addresses on both prefixes.
        addrs = self.nodes[ED1].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))
        # SED should only have an address on the stable prefix.
        addrs = self.nodes[SED1].get_addrs()
        self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
        self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
        for addr in addrs:
            if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
                self.assertTrue(self.nodes[LEADER].ping(addr))
    def verify(self, pv):
        """Check the captured packets against the cert-test steps."""
        pkts = pv.pkts
        pv.summary.show()
        ROUTER = pv.vars['ROUTER']
        MED = pv.vars['MED']
        SED = pv.vars['SED']
        # Independent cursors over the router's packets, one per child.
        _rpkts = pkts.filter_wpan_src64(ROUTER)
        _rpkts_med = _rpkts.copy()
        _rpkts_sed = _rpkts.copy()
        # Step 3: The DUT MUST multicast a MLE Data Response for each
        # prefix sent by the Leader (Prefix 1 and Prefix 2)
        _rpkts.filter_mle_cmd(MLE_DATA_RESPONSE).filter_ipv6_dst(LINK_LOCAL_ALL_NODES_MULTICAST_ADDRESS).must_next(
        ).must_verify(lambda p: {Ipv6Addr('2001:2:0:1::'), Ipv6Addr('2001:2:0:2::')} == set(p.thread_nwd.tlv.prefix)
                      and p.thread_nwd.tlv.border_router.flag.p == [1, 1] and p.thread_nwd.tlv.border_router.flag.s ==
                      [1, 1] and p.thread_nwd.tlv.border_router.flag.r == [1, 1] and p.thread_nwd.tlv.border_router.
                      flag.o == [1, 1] and p.thread_nwd.tlv.stable == [0, 1, 1, 1, 0, 0, 0])
        # Step 5: The DUT MUST send a unicast MLE Child Update
        # Response to MED_1
        _rpkts_med.filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).filter_wpan_dst64(MED).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, LEADER_DATA_TLV, ADDRESS_REGISTRATION_TLV} < set(p.mle.tlv.type))
        # Step 6: The DUT MUST send a unicast MLE Child Update
        # Request to SED_1
        _rpkts_sed.filter_mle_cmd(MLE_CHILD_UPDATE_REQUEST).filter_wpan_dst64(SED).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, ACTIVE_TIMESTAMP_TLV} == set(
                p.mle.tlv.type) and {Ipv6Addr('2001:2:0:1::')} == set(p.thread_nwd.tlv.prefix) and p.thread_nwd.tlv.
            border_router.flag.p == [1] and p.thread_nwd.tlv.border_router.flag.s == [1] and p.thread_nwd.tlv.
            border_router.flag.r == [1] and p.thread_nwd.tlv.border_router.flag.o == [1] and p.thread_nwd.tlv.stable ==
            [1, 1, 1] and p.thread_nwd.tlv.border_router_16 == [0xFFFE])
        # Step 8: The DUT MUST send a unicast MLE Child Update
        # Response to SED_1
        _rpkts_sed.filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).filter_wpan_dst64(SED).must_next().must_verify(
            lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, LEADER_DATA_TLV, ADDRESS_REGISTRATION_TLV} < set(p.mle.tlv.type))
# Allow running this certification test directly as a script.
if __name__ == '__main__':
    unittest.main()
|
mukgupta/django-fast-deploy | refs/heads/master | fab_deploy/config_templates/django_wsgi.py | 1 | import os
import sys
import site
# prevent errors with 'print' commands
# (presumably because mod_wsgi restricts writing to stdout -- confirm)
sys.stdout = sys.stderr
# adopted from http://code.google.com/p/modwsgi/wiki/VirtualEnvironments
def add_to_path(dirs):
    """Register each entry of *dirs* as a site directory (processing any
    .pth files) and move every sys.path entry this introduces to the front,
    preserving the order in which they were added.
    """
    # Snapshot sys.path so we can tell which entries are new afterwards.
    original_entries = list(sys.path)
    for site_dir in dirs:
        site.addsitedir(site_dir)
    # Pull the newly appended entries out and splice them in at the front,
    # keeping their relative order.
    fresh_entries = [entry for entry in list(sys.path)
                     if entry not in original_entries]
    for entry in fresh_entries:
        sys.path.remove(entry)
    sys.path[:0] = fresh_entries
# The {{ ... }} placeholders below are substituted by fab_deploy's template
# rendering before this file is installed on the server.
add_to_path([
    os.path.normpath('{{ ENV_DIR }}/lib/python2.5/site-packages'),
    os.path.normpath('{{ ENV_DIR }}/lib/python2.6/site-packages'),
    os.path.normpath('{{ ENV_DIR }}/lib/python2.7/site-packages'),
    os.path.normpath('{{ PROJECT_DIR }}' + '/..'), # <- remove this line if django >= 1.4
    '{{ PROJECT_DIR }}',
])
# django < 1.4 wsgi setup
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
# django >= 1.4 wsgi setup: remove "<1.4 wsgi setup" above, uncomment the
# following line and change "my_project" to your project's name:
# from my_project.wsgi import application
schelleg/PYNQ | refs/heads/master | tests/test_gpio.py | 4 | import os
import pynq
import pytest
import shutil
import importlib
@pytest.fixture(params=[pynq.ps.ZU_ARCH, pynq.ps.ZYNQ_ARCH])
def gpio(request):
    """Yield pynq.gpio reloaded as if running on each supported CPU arch."""
    old_arch = pynq.ps.CPU_ARCH
    pynq.ps.CPU_ARCH = request.param
    # Reload so module-level, arch-dependent values are recomputed.
    new_gpio = importlib.reload(pynq.gpio)
    yield new_gpio
    # Restore the real architecture for subsequent tests.
    pynq.ps.CPU_ARCH = old_arch
# Expected first user-accessible GPIO pin index, per architecture.
expected_min_pins = {
    pynq.ps.ZYNQ_ARCH: 54,
    pynq.ps.ZU_ARCH: 78
}
def test_gpio_offset(gpio):
    """The reloaded module exposes the arch-specific minimum user pin."""
    assert gpio.GPIO._GPIO_MIN_USER_PIN == expected_min_pins[pynq.ps.CPU_ARCH]
def test_gpio_warning():
    """Reloading pynq.gpio here is expected to emit a ResourceWarning."""
    with pytest.warns(ResourceWarning):
        importlib.reload(pynq.gpio)
# (base index, number of pins, chip label) tuples for the fake gpiochips.
ZYNQ_CHIPS = [
    (338, 96, 'zynq_gpio'),
    (120, 32, 'ti-gpio')
]
# Offset the base so user index is the same
ZU_CHIPS = [
    (314, 96, 'zynq_gpio'),
    (120, 32, 'ti-gpio')
]
# Chip layout to use for each simulated architecture.
chip_dict = {
    pynq.ps.ZYNQ_ARCH: ZYNQ_CHIPS,
    pynq.ps.ZU_ARCH: ZU_CHIPS
}
def be_root():
    """Stand-in for os.geteuid that always reports uid 0 (root)."""
    return 0
@pytest.fixture
def as_root(monkeypatch):
    """Make os.geteuid report root for the duration of a test."""
    monkeypatch.setattr(os, 'geteuid', be_root)
def export_hook(f):
    """Fake-sysfs side effect: exporting pin N creates gpioN/value.

    *f* is the fake-filesystem file object (presumably pyfakefs -- confirm)
    for /sys/class/gpio/export; its byte_contents hold the pin number that
    was just written.
    """
    f.filesystem.create_file(
        '/sys/class/gpio/gpio' + f.byte_contents.decode() + '/value',
        contents='1')
def unexport_hook(f):
    """Fake-sysfs side effect: unexporting pin N removes its gpioN tree."""
    shutil.rmtree('/sys/class/gpio/gpio' + f.byte_contents.decode())
def _create_gpiofs(fs, chips=None):
    """Populate the fake filesystem with a sysfs-style GPIO tree.

    *fs* is the fake-filesystem fixture; *chips* is a list of
    (base, width, label) tuples, defaulting to the current arch's layout.
    """
    if chips is None:
        chips = chip_dict[pynq.ps.CPU_ARCH]
    # export/unexport mimic the kernel by creating/removing gpioN entries.
    fs.create_file('/sys/class/gpio/export', side_effect=export_hook)
    fs.create_file('/sys/class/gpio/unexport', side_effect=unexport_hook)
    # A non-gpiochip entry, presumably so scanning code must skip it -- confirm.
    os.mkdir('/sys/class/gpio/other_dir')
    for base, width, name in chips:
        chippath = os.path.join('/sys/class/gpio/gpiochip' + str(base))
        fs.create_file(os.path.join(chippath, 'label'), contents=name)
        fs.create_file(os.path.join(chippath, 'ngpio'), contents=str(width))
def _file_contents(path):
with open(path, 'r') as f:
return f.read()
def _set_contents(path, contents):
with open(path, 'w') as f:
f.write(contents)
def test_get_base(gpio, fs, as_root):
    """Pin and width lookups resolve against the fake gpiochip entries."""
    _create_gpiofs(fs)
    assert gpio.GPIO.get_gpio_pin(10) == 402
    assert gpio.GPIO.get_gpio_npins() == 96
    # Lookups can target a specific chip by label.
    assert gpio.GPIO.get_gpio_pin(10, 'ti-gpio') == 130
    assert gpio.GPIO.get_gpio_npins('ti-gpio') == 32
    # assert gpio.GPIO.get_gpio_pin(10, 'unknown') == None
    # Unknown labels yield None rather than raising.
    assert gpio.GPIO.get_gpio_npins('unknown') is None
    assert gpio.GPIO.get_gpio_base('unknonw') is None
def test_gpio_in(gpio, fs, as_root):
    """An input pin can be requested, read, and released."""
    _create_gpiofs(fs)
    pin = gpio.GPIO(400, 'in')
    # Requesting the pin writes its index to the sysfs export file.
    assert _file_contents('/sys/class/gpio/export') == '400'
    assert _file_contents('/sys/class/gpio/gpio400/direction') == 'in'
    assert pin.path == '/sys/class/gpio/gpio400/'
    assert pin.index == 400
    assert pin.direction == 'in'
    assert pin.read() == 1
    _set_contents('/sys/class/gpio/gpio400/value', '0')
    assert pin.read() == 0
    # Writing to an input pin is not allowed.
    with pytest.raises(AttributeError):
        pin.write(1)
    # Releasing unexports the pin, removing its sysfs directory.
    pin.release()
    assert os.path.exists('/sys/class/gpio/gpio400') is False
def test_gpio_out(gpio, fs, as_root):
    """An output pin can be requested, written, and released."""
    _create_gpiofs(fs)
    pin = gpio.GPIO(400, 'out')
    assert _file_contents('/sys/class/gpio/export') == '400'
    assert _file_contents('/sys/class/gpio/gpio400/direction') == 'out'
    assert pin.path == '/sys/class/gpio/gpio400/'
    assert pin.index == 400
    assert pin.direction == 'out'
    # Writes land in the sysfs value file.
    pin.write(1)
    assert _file_contents('/sys/class/gpio/gpio400/value') == '1'
    pin.write(0)
    assert _file_contents('/sys/class/gpio/gpio400/value') == '0'
    # Reading an output pin is not allowed.
    with pytest.raises(AttributeError):
        pin.read()
    # Only 0 and 1 are valid output values.
    with pytest.raises(ValueError):
        pin.write(2)
    pin.release()
    assert os.path.exists('/sys/class/gpio/gpio400') is False
def test_gpio_exists(gpio, fs, as_root):
    """Direction conflicts and sharing of an already-exported pin."""
    _create_gpiofs(fs)
    # Pre-export pin 400, as if something else had already claimed it.
    _set_contents('/sys/class/gpio/export', '400')
    in_gpio = gpio.GPIO(400, 'in')
    # Re-requesting with a different direction must fail while still held.
    with pytest.raises(AttributeError):
        out_gpio = gpio.GPIO(400, 'out')
    in_gpio.release()
    out_gpio = gpio.GPIO(400, 'out')
    out_gpio2 = gpio.GPIO(400, 'out')
    # Same pin and direction share one underlying implementation object.
    assert out_gpio._impl == out_gpio2._impl
    out_gpio.release()
    assert os.path.exists('/sys/class/gpio/gpio400') is False
    out_gpio2.release()
def test_permissions_check(gpio):
    """Requesting a pin without root privileges raises EnvironmentError."""
    with pytest.raises(EnvironmentError):
        gpio.GPIO(400, 'out')
def test_invalid_direction(gpio, as_root):
    """An unrecognised direction string is rejected."""
    with pytest.raises(ValueError):
        gpio.GPIO(400, 'direction')
|
jplusui/xfly | refs/heads/master | xfly/node/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 505 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes.  In 2.6, sha is deprecated.  Import hashlib if
# available, avoiding a deprecation warning under 2.6.  Import sha otherwise,
# preserving 2.4 compatibility.
# Either way, _new_sha1 is the SHA-1 factory used by XCObject.ComputeIDs.
try:
  import hashlib
  _new_sha1 = hashlib.sha1
except ImportError:
  import sha
  _new_sha1 = sha.new
# See XCObject._EncodeString.  This pattern is used to determine when a string
# can be printed unquoted.  Strings that match this pattern may be printed
# unquoted.  Strings that do not match must be quoted and may be further
# transformed to be properly encoded.  Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
#
# Raw string literals are used for all patterns so that regex escapes such as
# \$ and \( are not also interpreted as (invalid) Python string escapes.
_unquoted = re.compile(r'^[A-Za-z0-9$./_]+$')

# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile(r'___')

# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString.  See that function.
_escaped = re.compile(r'[\\"]|[\x00-\x1f]')


# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
  """Splits input_path into a (source_tree, output_path) tuple.

  Examples:
    input_path     (source_tree, output_path)
    '$(VAR)/path'  ('VAR', 'path')
    '$(VAR)'       ('VAR', None)
    'path'         (None, 'path')
  """

  match = _path_leading_variable.match(input_path)
  if not match:
    # No leading "$(...)" variable reference: the whole input is a plain path.
    return (None, input_path)

  # group(1) is the variable name; group(3) is whatever followed "$(VAR)/"
  # and may be None when the input was just "$(VAR)".
  return (match.group(1), match.group(3))
def ConvertVariablesToShellSyntax(input_string):
  """Translates Xcode-style variable references to shell syntax.

  Every "$(VAR)" occurrence in input_string is rewritten as "${VAR}".
  """
  # The raw string keeps the regex escapes \$ and \( from being treated as
  # (invalid) Python string escapes.
  return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)
class XCObject(object):
  """The abstract base of all class types used in Xcode project files.

  Class variables:
    _schema: A dictionary defining the properties of this class.  The keys to
             _schema are string property keys as used in project files.  Values
             are a list of four or five elements:
             [ is_list, property_type, is_strong, is_required, default ]
             is_list: True if the property described is a list, as opposed
                      to a single element.
             property_type: The type to use as the value of the property,
                            or if is_list is True, the type to use for each
                            element of the value's list.  property_type must
                            be an XCObject subclass, or one of the built-in
                            types str, int, or dict.
             is_strong: If property_type is an XCObject subclass, is_strong
                        is True to assert that this class "owns," or serves
                        as parent, to the property value (or, if is_list is
                        True, values).  is_strong must be False if
                        property_type is not an XCObject subclass.
             is_required: True if the property is required for the class.
                          Note that is_required being True does not preclude
                          an empty string ("", in the case of property_type
                          str) or list ([], in the case of is_list True) from
                          being set for the property.
             default: Optional.  If is_required is True, default may be set
                      to provide a default value for objects that do not supply
                      their own value.  If is_required is True and default
                      is not provided, users of the class must supply their own
                      value for the property.
             Note that although the values of the array are expressed in
             boolean terms, subclasses provide values as integers to conserve
             horizontal space.
    _should_print_single_line: False in XCObject.  Subclasses whose objects
                               should be written to the project file in the
                               alternate single-line format, such as
                               PBXFileReference and PBXBuildFile, should
                               set this to True.
    _encode_transforms: Used by _EncodeString to encode unprintable characters.
                        The index into this list is the ordinal of the
                        character to transform; each value is a string
                        used to represent the character in the output.  XCObject
                        provides an _encode_transforms list suitable for most
                        XCObject subclasses.
    _alternate_encode_transforms: Provided for subclasses that wish to use
                                  the alternate encoding rules.  Xcode seems
                                  to use these rules when printing objects in
                                  single-line format.  Subclasses that desire
                                  this behavior should set _encode_transforms
                                  to _alternate_encode_transforms.
    _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
                to construct this object's ID.  Most classes that need custom
                hashing behavior should do it by overriding Hashables,
                but in some cases an object's parent may wish to push a
                hashable value into its child, and it can do so by appending
                to _hashables.

  Attributes:
    id: The object's identifier, a 24-character uppercase hexadecimal string.
        Usually, objects being created should not set id until the entire
        project file structure is built.  At that point, ComputeIDs() should
        be called on the root object to assign deterministic values for id to
        each object in the tree.
    parent: The object's parent.  This is set by a parent XCObject when a child
            object is added to it.
    _properties: The object's property dictionary.  An object's properties are
                 described by its class' _schema variable.
  """

  _schema = {}
  _should_print_single_line = False

  # See _EncodeString.  Initialize every ASCII control character (ordinals 0
  # through 31) to its generic "\Uxxxx" escape, then override the characters
  # that have dedicated single-character escapes.
  _encode_transforms = []
  i = 0
  while i < ord(' '):
    _encode_transforms.append('\\U%04x' % i)
    i = i + 1
  _encode_transforms[7] = '\\a'
  _encode_transforms[8] = '\\b'
  _encode_transforms[9] = '\\t'
  _encode_transforms[10] = '\\n'
  _encode_transforms[11] = '\\v'
  _encode_transforms[12] = '\\f'
  # Carriage return (13) is intentionally encoded as "\n", mirroring Xcode's
  # own output; see the table in _EncodeString.
  _encode_transforms[13] = '\\n'

  # The alternate table (used for single-line objects) passes HT (9), NL (10),
  # and VT (11) through unescaped.
  _alternate_encode_transforms = list(_encode_transforms)
  _alternate_encode_transforms[9] = chr(9)
  _alternate_encode_transforms[10] = chr(10)
  _alternate_encode_transforms[11] = chr(11)

  def __init__(self, properties=None, id=None, parent=None):
    """Sets up an empty object, applies schema defaults, and merges in any
    supplied properties (validated against _schema by UpdateProperties).
    """
    self.id = id
    self.parent = parent
    self._properties = {}
    self._hashables = []
    self._SetDefaultsFromSchema()
    self.UpdateProperties(properties)

  def __repr__(self):
    try:
      name = self.Name()
    except NotImplementedError:
      # Nameless objects are represented by class and memory address alone.
      return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Copy(self):
    """Make a copy of this object.

    The new object will have its own copy of lists and dicts.  Any XCObject
    objects owned by this object (marked "strong") will be copied in the
    new object, even those found in lists.  If this object has any weak
    references to other XCObjects, the same references are added to the new
    object without making a copy.
    """

    that = self.__class__(id=self.id, parent=self.parent)
    for key, value in self._properties.iteritems():
      is_strong = self._schema[key][2]

      if isinstance(value, XCObject):
        if is_strong:
          new_value = value.Copy()
          new_value.parent = that
          that._properties[key] = new_value
        else:
          that._properties[key] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        that._properties[key] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe to
          # call Copy.
          that._properties[key] = []
          for item in value:
            new_item = item.Copy()
            new_item.parent = that
            that._properties[key].append(new_item)
        else:
          that._properties[key] = value[:]
      elif isinstance(value, dict):
        # dicts are never strong.
        if is_strong:
          raise TypeError, 'Strong dict for key ' + key + ' in ' + \
                           self.__class__.__name__
        else:
          that._properties[key] = value.copy()
      else:
        raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \
                         ' for key ' + key + ' in ' + self.__class__.__name__

    return that

  def Name(self):
    """Return the name corresponding to an object.

    Not all objects necessarily need to be nameable, and not all that do have
    a "name" property.  Override as needed.
    """

    # If the schema indicates that "name" is required, try to access the
    # property even if it doesn't exist.  This will result in a KeyError
    # being raised for the property that should be present, which seems more
    # appropriate than NotImplementedError in this case.
    if 'name' in self._properties or \
        ('name' in self._schema and self._schema['name'][3]):
      return self._properties['name']

    raise NotImplementedError, \
          self.__class__.__name__ + ' must implement Name'

  def Comment(self):
    """Return a comment string for the object.

    Most objects just use their name as the comment, but PBXProject uses
    different values.

    The returned comment is not escaped and does not have any comment marker
    strings applied to it.
    """

    return self.Name()

  def Hashables(self):
    """Returns the list of values hashed by ComputeIDs to derive this
    object's id: the class name, the object's name (if any), and anything
    pushed into _hashables.
    """
    hashables = [self.__class__.__name__]

    name = self.Name()
    if name != None:
      hashables.append(name)

    hashables.extend(self._hashables)

    return hashables

  def HashablesForChild(self):
    # Returning None tells ComputeIDs to seed children with this object's own
    # accumulated hash rather than a separate hashable set; subclasses (such
    # as PBXGroup) override this to supply different child seeds.
    return None

  def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
    """Set "id" properties deterministically.

    An object's "id" property is set based on a hash of its class type and
    name, as well as the class type and name of all ancestor objects.  As
    such, it is only advisable to call ComputeIDs once an entire project file
    tree is built.

    If recursive is True, recurse into all descendant objects and update their
    hashes.

    If overwrite is True, any existing value set in the "id" property will be
    replaced.
    """

    def _HashUpdate(hash, data):
      """Update hash with data's length and contents.

      If the hash were updated only with the value of data, it would be
      possible for clowns to induce collisions by manipulating the names of
      their objects.  By adding the length, it's exceedingly less likely that
      ID collisions will be encountered, intentionally or not.
      """

      hash.update(struct.pack('>i', len(data)))
      hash.update(data)

    if seed_hash is None:
      seed_hash = _new_sha1()

    hash = seed_hash.copy()

    hashables = self.Hashables()
    assert len(hashables) > 0
    for hashable in hashables:
      _HashUpdate(hash, hashable)

    if recursive:
      hashables_for_child = self.HashablesForChild()
      if hashables_for_child is None:
        # Children inherit this object's accumulated hash directly.
        child_hash = hash
      else:
        assert len(hashables_for_child) > 0
        child_hash = seed_hash.copy()
        for hashable in hashables_for_child:
          _HashUpdate(child_hash, hashable)

      for child in self.Children():
        child.ComputeIDs(recursive, overwrite, child_hash)

    if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
      # is 160 bits.  Instead of throwing out 64 bits of the digest, xor them
      # into the portion that gets used.
      assert hash.digest_size % 4 == 0
      # Integer division under Python 2: the number of 32-bit words in the
      # digest (5 for SHA-1).
      digest_int_count = hash.digest_size / 4
      digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
      id_ints = [0, 0, 0]
      for index in xrange(0, digest_int_count):
        id_ints[index % 3] ^= digest_ints[index]
      self.id = '%08X%08X%08X' % tuple(id_ints)

  def EnsureNoIDCollisions(self):
    """Verifies that no two objects have the same ID.  Checks all descendants.
    """

    ids = {}
    descendants = self.Descendants()
    for descendant in descendants:
      if descendant.id in ids:
        other = ids[descendant.id]
        raise KeyError, \
              'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
              (descendant.id, str(descendant._properties),
               str(other._properties), self._properties['rootObject'].Name())
      ids[descendant.id] = descendant

  def Children(self):
    """Returns a list of all of this object's owned (strong) children."""

    children = []
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong) = attributes[0:3]
      if is_strong and property in self._properties:
        if not is_list:
          children.append(self._properties[property])
        else:
          children.extend(self._properties[property])
    return children

  def Descendants(self):
    """Returns a list of all of this object's descendants, including this
    object.
    """

    children = self.Children()
    descendants = [self]
    for child in children:
      descendants.extend(child.Descendants())
    return descendants

  def PBXProjectAncestor(self):
    """Walks up the parent chain looking for a PBXProject; returns None when
    there is no parent.
    """
    # The base case for recursion is defined at PBXProject.PBXProjectAncestor.
    if self.parent:
      return self.parent.PBXProjectAncestor()
    return None

  def _EncodeComment(self, comment):
    """Encodes a comment to be placed in the project file output, mimicking
    Xcode behavior.
    """

    # This mimics Xcode behavior by wrapping the comment in "/*" and "*/".  If
    # the string already contains a "*/", it is turned into "(*)/".  This keeps
    # the file writer from outputting something that would be treated as the
    # end of a comment in the middle of something intended to be entirely a
    # comment.

    return '/* ' + comment.replace('*/', '(*)/') + ' */'

  def _EncodeTransform(self, match):
    # This function works closely with _EncodeString.  It will only be called
    # by re.sub with match.group(0) containing a character matched by the
    # the _escaped expression.
    char = match.group(0)

    # Backslashes (\) and quotation marks (") are always replaced with a
    # backslash-escaped version of the same.  Everything else gets its
    # replacement from the class' _encode_transforms array.
    if char == '\\':
      return '\\\\'
    if char == '"':
      return '\\"'
    return self._encode_transforms[ord(char)]

  def _EncodeString(self, value):
    """Encodes a string to be placed in the project file output, mimicking
    Xcode behavior.
    """

    # Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
    # $ (dollar sign), . (period), and _ (underscore) is present.  Also use
    # quotation marks to represent empty strings.
    #
    # Escape " (double-quote) and \ (backslash) by preceding them with a
    # backslash.
    #
    # Some characters below the printable ASCII range are encoded specially:
    #      7 ^G BEL is encoded as "\a"
    #      8 ^H BS  is encoded as "\b"
    #     11 ^K VT  is encoded as "\v"
    #     12 ^L NP  is encoded as "\f"
    #    127 ^? DEL is passed through as-is without escaping
    #  - In PBXFileReference and PBXBuildFile objects:
    #      9 ^I HT  is passed through as-is without escaping
    #     10 ^J NL  is passed through as-is without escaping
    #     13 ^M CR  is passed through as-is without escaping
    #  - In other objects:
    #      9 ^I HT  is encoded as "\t"
    #     10 ^J NL  is encoded as "\n"
    #     13 ^M CR  is encoded as "\n" rendering it indistinguishable from
    #               10 ^J NL
    # All other characters within the ASCII control character range (0 through
    # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
    # in hexadecimal.  For example, character 14 (^N SO) is encoded as "\U000e".
    # Characters above the ASCII range are passed through to the output encoded
    # as UTF-8 without any escaping.  These mappings are contained in the
    # class' _encode_transforms list.

    if _unquoted.search(value) and not _quoted.search(value):
      return value

    return '"' + _escaped.sub(self._EncodeTransform, value) + '"'

  def _XCPrint(self, file, tabs, line):
    """Writes line to file, preceded by tabs tab characters."""
    file.write('\t' * tabs + line)

  def _XCPrintableValue(self, tabs, value, flatten_list=False):
    """Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.

    _XCPrintableValue can handle str and int values, XCObjects (which are
    made printable by returning their id property), and list and dict objects
    composed of any of the above types.  When printing a list or dict, and
    _should_print_single_line is False, the tabs parameter is used to determine
    how much to indent the lines corresponding to the items in the list or
    dict.

    If flatten_list is True, single-element lists will be transformed into
    strings.
    """

    printable = ''
    comment = None

    if self._should_print_single_line:
      sep = ' '
      element_tabs = ''
      end_tabs = ''
    else:
      sep = '\n'
      element_tabs = '\t' * (tabs + 1)
      end_tabs = '\t' * tabs

    if isinstance(value, XCObject):
      printable += value.id
      comment = value.Comment()
    elif isinstance(value, str):
      printable += self._EncodeString(value)
    elif isinstance(value, unicode):
      printable += self._EncodeString(value.encode('utf-8'))
    elif isinstance(value, int):
      printable += str(value)
    elif isinstance(value, list):
      if flatten_list and len(value) <= 1:
        if len(value) == 0:
          printable += self._EncodeString('')
        else:
          printable += self._EncodeString(value[0])
      else:
        printable = '(' + sep
        for item in value:
          printable += element_tabs + \
                       self._XCPrintableValue(tabs + 1, item, flatten_list) + \
                       ',' + sep
        printable += end_tabs + ')'
    elif isinstance(value, dict):
      printable = '{' + sep
      for item_key, item_value in sorted(value.iteritems()):
        printable += element_tabs + \
            self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
            self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
            sep
      printable += end_tabs + '}'
    else:
      raise TypeError, "Can't make " + value.__class__.__name__ + ' printable'

    if comment != None:
      printable += ' ' + self._EncodeComment(comment)

    return printable

  def _XCKVPrint(self, file, tabs, key, value):
    """Prints a key and value, members of an XCObject's _properties dictionary,
    to file.

    tabs is an int identifying the indentation level.  If the class'
    _should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
    """

    if self._should_print_single_line:
      printable = ''
      after_kv = ' '
    else:
      printable = '\t' * tabs
      after_kv = '\n'

    # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
    # objects without comments.  Sometimes it prints them with comments, but
    # the majority of the time, it doesn't.  To avoid unnecessary changes to
    # the project file after Xcode opens it, don't write comments for
    # remoteGlobalIDString.  This is a sucky hack and it would certainly be
    # cleaner to extend the schema to indicate whether or not a comment should
    # be printed, but since this is the only case where the problem occurs and
    # Xcode itself can't seem to make up its mind, the hack will suffice.
    #
    # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
    if key == 'remoteGlobalIDString' and isinstance(self,
                                                    PBXContainerItemProxy):
      value_to_print = value.id
    else:
      value_to_print = value

    # PBXBuildFile's settings property is represented in the output as a dict,
    # but a hack here has it represented as a string.  Arrange to strip off the
    # quotes so that it shows up in the output as expected.
    if key == 'settings' and isinstance(self, PBXBuildFile):
      strip_value_quotes = True
    else:
      strip_value_quotes = False

    # In another one-off, let's set flatten_list on buildSettings properties
    # of XCBuildConfiguration objects, because that's how Xcode treats them.
    if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
      flatten_list = True
    else:
      flatten_list = False

    try:
      printable_key = self._XCPrintableValue(tabs, key, flatten_list)
      printable_value = self._XCPrintableValue(tabs, value_to_print,
                                               flatten_list)
      if strip_value_quotes and len(printable_value) > 1 and \
          printable_value[0] == '"' and printable_value[-1] == '"':
        printable_value = printable_value[1:-1]
      printable += printable_key + ' = ' + printable_value + ';' + after_kv
    except TypeError, e:
      gyp.common.ExceptionAppend(e,
                                 'while printing key "%s"' % key)
      raise

    self._XCPrint(file, 0, printable)

  def Print(self, file=sys.stdout):
    """Prints a representation of this object to file, adhering to Xcode output
    formatting.
    """

    self.VerifyHasRequiredProperties()

    if self._should_print_single_line:
      # When printing an object in a single line, Xcode doesn't put any space
      # between the beginning of a dictionary (or presumably a list) and the
      # first contained item, so you wind up with snippets like
      #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
      # If it were me, I would have put a space in there after the opening
      # curly, but I guess this is just another one of those inconsistencies
      # between how Xcode prints PBXFileReference and PBXBuildFile objects as
      # compared to other objects.  Mimic Xcode's behavior here by using an
      # empty string for sep.
      sep = ''
      end_tabs = 0
    else:
      sep = '\n'
      end_tabs = 2

    # Start the object.  For example, '\t\tPBXProject = {\n'.
    self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)

    # "isa" isn't in the _properties dictionary, it's an intrinsic property
    # of the class which the object belongs to.  Xcode always outputs "isa"
    # as the first element of an object dictionary.
    self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)

    # The remaining elements of an object dictionary are sorted alphabetically.
    for property, value in sorted(self._properties.iteritems()):
      self._XCKVPrint(file, 3, property, value)

    # End the object.
    self._XCPrint(file, end_tabs, '};\n')

  def UpdateProperties(self, properties, do_copy=False):
    """Merge the supplied properties into the _properties dictionary.

    The input properties must adhere to the class schema or a KeyError or
    TypeError exception will be raised.  If adding an object of an XCObject
    subclass and the schema indicates a strong relationship, the object's
    parent will be set to this object.

    If do_copy is True, then lists, dicts, strong-owned XCObjects, and
    strong-owned XCObjects in lists will be copied instead of having their
    references added.
    """

    if properties is None:
      return

    for property, value in properties.iteritems():
      # Make sure the property is in the schema.
      if not property in self._schema:
        raise KeyError, property + ' not in ' + self.__class__.__name__

      # Make sure the property conforms to the schema.
      (is_list, property_type, is_strong) = self._schema[property][0:3]
      if is_list:
        if value.__class__ != list:
          raise TypeError, \
                property + ' of ' + self.__class__.__name__ + \
                ' must be list, not ' + value.__class__.__name__
        for item in value:
          if not isinstance(item, property_type) and \
             not (item.__class__ == unicode and property_type == str):
            # Accept unicode where str is specified.  str is treated as
            # UTF-8-encoded.
            raise TypeError, \
                  'item of ' + property + ' of ' + self.__class__.__name__ + \
                  ' must be ' + property_type.__name__ + ', not ' + \
                  item.__class__.__name__
      elif not isinstance(value, property_type) and \
           not (value.__class__ == unicode and property_type == str):
        # Accept unicode where str is specified.  str is treated as
        # UTF-8-encoded.
        raise TypeError, \
              property + ' of ' + self.__class__.__name__ + ' must be ' + \
              property_type.__name__ + ', not ' + value.__class__.__name__

      # Checks passed, perform the assignment.
      if do_copy:
        if isinstance(value, XCObject):
          if is_strong:
            self._properties[property] = value.Copy()
          else:
            self._properties[property] = value
        elif isinstance(value, str) or isinstance(value, unicode) or \
             isinstance(value, int):
          self._properties[property] = value
        elif isinstance(value, list):
          if is_strong:
            # If is_strong is True, each element is an XCObject, so it's safe
            # to call Copy.
            self._properties[property] = []
            for item in value:
              self._properties[property].append(item.Copy())
          else:
            self._properties[property] = value[:]
        elif isinstance(value, dict):
          self._properties[property] = value.copy()
        else:
          raise TypeError, "Don't know how to copy a " + \
                           value.__class__.__name__ + ' object for ' + \
                           property + ' in ' + self.__class__.__name__
      else:
        self._properties[property] = value

      # Set up the child's back-reference to this object.  Don't use |value|
      # any more because it may not be right if do_copy is true.
      if is_strong:
        if not is_list:
          self._properties[property].parent = self
        else:
          for item in self._properties[property]:
            item.parent = self

  def HasProperty(self, key):
    """Returns True if property key is set on this object."""
    return key in self._properties

  def GetProperty(self, key):
    """Returns the value of property key; raises KeyError if unset."""
    return self._properties[key]

  def SetProperty(self, key, value):
    """Sets property key to value, subject to the schema validation performed
    by UpdateProperties.
    """
    self.UpdateProperties({key: value})

  def DelProperty(self, key):
    """Removes property key if present; no-op otherwise."""
    if key in self._properties:
      del self._properties[key]

  def AppendProperty(self, key, value):
    """Appends value to the list-valued property key, validating against the
    schema and establishing parent ownership for strong properties.
    """
    # TODO(mark): Support ExtendProperty too (and make this call that)?

    # Schema validation.
    if not key in self._schema:
      raise KeyError, key + ' not in ' + self.__class__.__name__

    (is_list, property_type, is_strong) = self._schema[key][0:3]
    if not is_list:
      raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list'
    if not isinstance(value, property_type):
      raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \
                       ' must be ' + property_type.__name__ + ', not ' + \
                       value.__class__.__name__

    # If the property doesn't exist yet, create a new empty list to receive the
    # item.
    if not key in self._properties:
      self._properties[key] = []

    # Set up the ownership link.
    if is_strong:
      value.parent = self

    # Store the item.
    self._properties[key].append(value)

  def VerifyHasRequiredProperties(self):
    """Ensure that all properties identified as required by the schema are
    set.
    """

    # TODO(mark): A stronger verification mechanism is needed.  Some
    # subclasses need to perform validation beyond what the schema can enforce.
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong, is_required) = attributes[0:4]
      if is_required and not property in self._properties:
        raise KeyError, self.__class__.__name__ + ' requires ' + property

  def _SetDefaultsFromSchema(self):
    """Assign object default values according to the schema.  This will not
    overwrite properties that have already been set."""

    defaults = {}
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong, is_required) = attributes[0:4]
      if is_required and len(attributes) >= 5 and \
          not property in self._properties:
        default = attributes[4]

        defaults[property] = default

    if len(defaults) > 0:
      # Use do_copy=True so that each new object gets its own copy of strong
      # objects, lists, and dicts.
      self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
  """Abstract base for PBXGroup and PBXFileReference.  Not represented in a
  project file."""

  # TODO(mark): Do name and path belong here?  Probably so.
  # If path is set and name is not, name may have a default value.  Name will
  # be set to the basename of path, if the basename of path is different from
  # the full value of path.  If path is already just a leaf name, name will
  # not be set.
  _schema = XCObject._schema.copy()
  _schema.update({
    'comments':       [0, str, 0, 0],
    'fileEncoding':   [0, str, 0, 0],
    'includeInIndex': [0, int, 0, 0],
    'indentWidth':    [0, int, 0, 0],
    'lineEnding':     [0, int, 0, 0],
    'sourceTree':     [0, str, 0, 1, '<group>'],
    'tabWidth':       [0, int, 0, 0],
    'usesTabs':       [0, int, 0, 0],
    'wrapsLines':     [0, int, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    """Initializes the element, deriving a default name from path and
    splitting any leading "$(VAR)" out of path into sourceTree.
    """
    # super
    XCObject.__init__(self, properties, id, parent)
    if 'path' in self._properties and not 'name' in self._properties:
      path = self._properties['path']
      name = posixpath.basename(path)
      if name != '' and path != name:
        self.SetProperty('name', name)

    if 'path' in self._properties and \
        (not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>'):
      # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
      # the variable out and make the path be relative to that variable by
      # assigning the variable name as the sourceTree.
      (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
      if source_tree != None:
        self._properties['sourceTree'] = source_tree
      if path != None:
        self._properties['path'] = path
      if source_tree != None and path is None and \
         not 'name' in self._properties:
        # The path was of the form "$(SDKROOT)" with no path following it.
        # This object is now relative to that variable, so it has no path
        # attribute of its own.  It does, however, keep a name.
        del self._properties['path']
        self._properties['name'] = source_tree

  def Name(self):
    """Returns the name property, falling back to path; returns None for the
    nameless, pathless root PBXGroup.
    """
    if 'name' in self._properties:
      return self._properties['name']
    elif 'path' in self._properties:
      return self._properties['path']
    else:
      # This happens in the case of the root PBXGroup.
      return None

  def Hashables(self):
    """Custom hashables for XCHierarchicalElements.

    XCHierarchicalElements are special.  Generally, their hashes shouldn't
    change if the paths don't change.  The normal XCObject implementation of
    Hashables adds a hashable for each object, which means that if
    the hierarchical structure changes (possibly due to changes caused when
    TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
    the hashes will change.  For example, if a project file initially contains
    a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
    a/b.  If someone later adds a/f2 to the project file, a/b can no longer be
    collapsed, and f1 winds up with parent b and grandparent a.  That would
    be sufficient to change f1's hash.

    To counteract this problem, hashables for all XCHierarchicalElements except
    for the main group (which has neither a name nor a path) are taken to be
    just the set of path components.  Because hashables are inherited from
    parents, this provides assurance that a/b/f1 has the same set of hashables
    whether its parent is b or a/b.

    The main group is a special case.  As it is permitted to have no name or
    path, it is permitted to use the standard XCObject hash mechanism.  This
    is not considered a problem because there can be only one main group.
    """

    if self == self.PBXProjectAncestor()._properties['mainGroup']:
      # super
      return XCObject.Hashables(self)

    hashables = []

    # Put the name in first, ensuring that if TakeOverOnlyChild collapses
    # children into a top-level group like "Source", the name always goes
    # into the list of hashables without interfering with path components.
    if 'name' in self._properties:
      # Make it less likely for people to manipulate hashes by following the
      # pattern of always pushing an object type value onto the list first.
      hashables.append(self.__class__.__name__ + '.name')
      hashables.append(self._properties['name'])

    # NOTE: This still has the problem that if an absolute path is encountered,
    # including paths with a sourceTree, they'll still inherit their parents'
    # hashables, even though the paths aren't relative to their parents.  This
    # is not expected to be much of a problem in practice.
    path = self.PathFromSourceTreeAndPath()
    if path != None:
      components = path.split(posixpath.sep)
      for component in components:
        hashables.append(self.__class__.__name__ + '.path')
        hashables.append(component)

    hashables.extend(self._hashables)

    return hashables

  def Compare(self, other):
    """Compares self to other for sorting: groups sort ahead of files, and
    objects of equal rank are ordered by name.
    """
    # Allow comparison of these types.  PBXGroup has the highest sort rank;
    # PBXVariantGroup is treated as equal to PBXFileReference.
    valid_class_types = {
      PBXFileReference: 'file',
      PBXGroup:         'group',
      PBXVariantGroup:  'file',
    }
    self_type = valid_class_types[self.__class__]
    other_type = valid_class_types[other.__class__]

    if self_type == other_type:
      # If the two objects are of the same sort rank, compare their names.
      return cmp(self.Name(), other.Name())

    # Otherwise, sort groups before everything else.
    if self_type == 'group':
      return -1
    return 1

  def CompareRootGroup(self, other):
    """Compares direct children of a PBXProject's mainGroup, placing the
    well-known top-level groups ahead of everything else in a fixed order.
    """
    # This function should be used only to compare direct children of the
    # containing PBXProject's mainGroup.  These groups should appear in the
    # listed order.
    # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
    # generator should have a way of influencing this list rather than having
    # to hardcode for the generator here.
    order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
             'Build']

    # If the groups aren't in the listed order, do a name comparison.
    # Otherwise, groups in the listed order should come before those that
    # aren't.
    self_name = self.Name()
    other_name = other.Name()
    self_in = isinstance(self, PBXGroup) and self_name in order
    # Fixed: the original tested isinstance(self, PBXGroup) here, which
    # checked self's class instead of other's when classifying other.
    other_in = isinstance(other, PBXGroup) and other_name in order
    if not self_in and not other_in:
      return self.Compare(other)
    if self_name in order and not other_name in order:
      return -1
    if other_name in order and not self_name in order:
      return 1

    # If both groups are in the listed order, go by the defined order.
    self_index = order.index(self_name)
    other_index = order.index(other_name)
    if self_index < other_index:
      return -1
    if self_index > other_index:
      return 1
    return 0

  def PathFromSourceTreeAndPath(self):
    """Combines the sourceTree and path properties into a single flat path
    string; a non-"<group>" sourceTree is rendered as "$(...)".  Returns None
    when neither contributes a component.
    """
    components = []
    if self._properties['sourceTree'] != '<group>':
      components.append('$(' + self._properties['sourceTree'] + ')')
    if 'path' in self._properties:
      components.append(self._properties['path'])

    if len(components) > 0:
      return posixpath.join(*components)

    return None

  def FullPath(self):
    """Returns a full path to self relative to the project file, or relative
    to some other source tree.
    """
    # Start with self, and walk up the chain of parents prepending their
    # paths, if any, until no more parents are available (project-relative
    # path) or until a path relative to some source tree is found.
    xche = self
    path = None
    while isinstance(xche, XCHierarchicalElement) and \
          (path is None or \
           (not path.startswith('/') and not path.startswith('$'))):
      this_path = xche.PathFromSourceTreeAndPath()
      if this_path != None and path != None:
        path = posixpath.join(this_path, path)
      elif this_path != None:
        path = this_path
      xche = xche.parent

    return path
class PBXGroup(XCHierarchicalElement):
  """A group node in the Xcode project hierarchy.

  Attributes:
    _children_by_path: Maps pathnames of children of this PBXGroup to the
      actual child XCHierarchicalElement objects.
    _variant_children_by_name_and_path: Maps (name, path) tuples of
      PBXVariantGroup children to the actual child PBXVariantGroup objects.
  """

  _schema = XCHierarchicalElement._schema.copy()
  _schema.update({
    'children': [1, XCHierarchicalElement, 1, 1, []],
    'name':     [0, str, 0, 0],
    'path':     [0, str, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCHierarchicalElement.__init__(self, properties, id, parent)
    self._children_by_path = {}
    self._variant_children_by_name_and_path = {}
    for child in self._properties.get('children', []):
      self._AddChildToDicts(child)

  def Hashables(self):
    # super
    hashables = XCHierarchicalElement.Hashables(self)

    # It is not sufficient to just rely on name and parent to build a unique
    # hashable: a node could have two child PBXGroups sharing a common name.
    # To add entropy the hashable is enhanced with the names of all its
    # children.
    for child in self._properties.get('children', []):
      child_name = child.Name()
      if child_name != None:
        hashables.append(child_name)
    return hashables

  def HashablesForChild(self):
    # To avoid a circular reference the hashables used to compute a child id
    # do not include the child names.
    return XCHierarchicalElement.Hashables(self)

  def _AddChildToDicts(self, child):
    """Sets up this PBXGroup object's dicts to reference the child properly.

    Raises:
      ValueError: if a child with the same path, or a PBXVariantGroup child
          with the same (name, path) key, is already present.
    """
    child_path = child.PathFromSourceTreeAndPath()
    if child_path:
      if child_path in self._children_by_path:
        raise ValueError('Found multiple children with path ' + child_path)
      self._children_by_path[child_path] = child

    if isinstance(child, PBXVariantGroup):
      child_name = child._properties.get('name', None)
      key = (child_name, child_path)
      if key in self._variant_children_by_name_and_path:
        raise ValueError('Found multiple PBXVariantGroup children with ' +
                         'name ' + str(child_name) + ' and path ' +
                         str(child_path))
      self._variant_children_by_name_and_path[key] = child

  def AppendChild(self, child):
    """Adds child to this group's children, maintaining the lookup dicts.

    Callers should use this instead of calling
    AppendProperty('children', child) directly because this function
    maintains the group's dicts.
    """
    self.AppendProperty('children', child)
    self._AddChildToDicts(child)

  def GetChildByName(self, name):
    """Returns the first child whose Name() equals name, or None.

    This is not currently optimized with a dict as GetChildByPath is because
    it has few callers.  Most callers probably want GetChildByPath.  This
    function is only useful to get children that have names but no paths,
    which is rare.  The children of the main group ("Source", "Products",
    etc.) is pretty much the only case where this is likely to come up.
    """
    # TODO(mark): Maybe this should raise an error if more than one child is
    # present with the same name.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if child.Name() == name:
        return child

    return None

  def GetChildByPath(self, path):
    """Returns the child registered under path, or None."""
    if not path:
      return None

    if path in self._children_by_path:
      return self._children_by_path[path]

    return None

  def GetChildByRemoteObject(self, remote_object):
    """Returns this group's PBXReferenceProxy child that proxies
    remote_object, or None.

    This method is a little bit esoteric.  Given a remote_object, which
    should be a PBXFileReference in another project file, this method will
    return this group's PBXReferenceProxy object serving as a local proxy
    for the remote PBXFileReference.

    This function might benefit from a dict optimization as GetChildByPath
    for some workloads, but profiling shows that it's not currently a
    problem.
    """
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if not isinstance(child, PBXReferenceProxy):
        continue

      container_proxy = child._properties['remoteRef']
      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
        return child

    return None

  def AddOrGetFileByPath(self, path, hierarchical):
    """Returns an existing or new file reference corresponding to path.

    If hierarchical is True, this method will create or use the necessary
    hierarchical group structure corresponding to path.  Otherwise, it will
    look in and create an item in the current group only.

    If an existing matching reference is found, it is returned, otherwise, a
    new one will be created, added to the correct group, and returned.

    If path identifies a directory by virtue of carrying a trailing slash,
    this method returns a PBXFileReference of "folder" type.  If path
    identifies a variant, by virtue of it identifying a file inside a
    directory with an ".lproj" extension, this method returns a
    PBXVariantGroup containing the variant named by path, and possibly other
    variants.  For all other paths, a "normal" PBXFileReference will be
    returned.
    """
    # Adding or getting a directory?  Directories end with a trailing slash.
    is_dir = False
    if path.endswith('/'):
      is_dir = True
    path = posixpath.normpath(path)
    if is_dir:
      # normpath strips the trailing slash; restore it so the directory is
      # still recognizable as such downstream.
      path = path + '/'

    # Adding or getting a variant?  Variants are files inside directories
    # with an ".lproj" extension.  Xcode uses variants for localization.  For
    # a variant path/to/Language.lproj/MainMenu.nib, put a variant group
    # named MainMenu.nib inside path/to, and give it a variant named
    # Language.  In this example, grandparent would be set to path/to and
    # parent_root would be set to Language.
    variant_name = None
    parent = posixpath.dirname(path)
    grandparent = posixpath.dirname(parent)
    parent_basename = posixpath.basename(parent)
    (parent_root, parent_ext) = posixpath.splitext(parent_basename)
    if parent_ext == '.lproj':
      variant_name = parent_root
    if grandparent == '':
      grandparent = None

    # Putting a directory inside a variant group is not currently supported.
    assert not is_dir or variant_name is None

    path_split = path.split(posixpath.sep)
    if len(path_split) == 1 or \
       ((is_dir or variant_name != None) and len(path_split) == 2) or \
       not hierarchical:
      # The PBXFileReference or PBXVariantGroup will be added to or gotten
      # from this PBXGroup, no recursion necessary.
      if variant_name is None:
        # Add or get a PBXFileReference.
        file_ref = self.GetChildByPath(path)
        if file_ref != None:
          assert file_ref.__class__ == PBXFileReference
        else:
          file_ref = PBXFileReference({'path': path})
          self.AppendChild(file_ref)
      else:
        # Add or get a PBXVariantGroup.  The variant group name is the same
        # as the basename (MainMenu.nib in the example above).  grandparent
        # specifies the path to the variant group itself, and path_split[-2:]
        # is the path of the specific variant relative to its group.
        variant_group_name = posixpath.basename(path)
        variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
            variant_group_name, grandparent)
        variant_path = posixpath.sep.join(path_split[-2:])
        variant_ref = variant_group_ref.GetChildByPath(variant_path)
        if variant_ref != None:
          assert variant_ref.__class__ == PBXFileReference
        else:
          variant_ref = PBXFileReference({'name': variant_name,
                                          'path': variant_path})
          variant_group_ref.AppendChild(variant_ref)

        # The caller is interested in the variant group, not the specific
        # variant file.
        file_ref = variant_group_ref
      return file_ref
    else:
      # Hierarchical recursion.  Add or get a PBXGroup corresponding to the
      # outermost path component, and then recurse into it, chopping off
      # that path component.
      next_dir = path_split[0]
      group_ref = self.GetChildByPath(next_dir)
      if group_ref != None:
        assert group_ref.__class__ == PBXGroup
      else:
        group_ref = PBXGroup({'path': next_dir})
        self.AppendChild(group_ref)
      return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
                                          hierarchical)

  def AddOrGetVariantGroupByNameAndPath(self, name, path):
    """Returns an existing or new PBXVariantGroup for name and path.

    If a PBXVariantGroup identified by the name and path arguments is
    already present as a child of this object, it is returned.  Otherwise, a
    new PBXVariantGroup with the correct properties is created, added as a
    child, and returned.

    This method will generally be called by AddOrGetFileByPath, which knows
    when to create a variant group based on the structure of the pathnames
    passed to it.
    """
    key = (name, path)
    if key in self._variant_children_by_name_and_path:
      variant_group_ref = self._variant_children_by_name_and_path[key]
      assert variant_group_ref.__class__ == PBXVariantGroup
      return variant_group_ref

    variant_group_properties = {'name': name}
    if path != None:
      variant_group_properties['path'] = path
    variant_group_ref = PBXVariantGroup(variant_group_properties)
    self.AppendChild(variant_group_ref)

    return variant_group_ref

  def TakeOverOnlyChild(self, recurse=False):
    """If this PBXGroup has only one child and it's also a PBXGroup, take
    it over by making all of its children this object's children.

    This function will continue to take over only children when those
    children are groups.  If there are three PBXGroups representing a, b,
    and c, with c inside b and b inside a, and a and b have no other
    children, this will result in a taking over both b and c, forming a
    PBXGroup for a/b/c.

    If recurse is True, this function will recurse into children and ask
    them to collapse themselves by taking over only children as well.
    Assuming an example hierarchy with files at a/b/c/d1, a/b/c/d2, and
    a/b/c/d3/e/f (d1, d2, and f are files, the rest are groups), recursion
    will result in a group for a/b/c containing a group for d3/e.
    """
    # At this stage, check that child class types are PBXGroup exactly,
    # instead of using isinstance.  The only subclass of PBXGroup,
    # PBXVariantGroup, should not participate in reparenting in the same
    # way: reparenting by merging different object types would be wrong.
    while len(self._properties['children']) == 1 and \
          self._properties['children'][0].__class__ == PBXGroup:
      # Loop to take over the innermost only-child group possible.

      child = self._properties['children'][0]

      # Assume the child's properties, including its children.  Save a copy
      # of this object's old properties, because they'll still be needed.
      # This object retains its existing id and parent attributes.
      # NOTE(review): _variant_children_by_name_and_path is not adopted from
      # the child here -- confirm variant groups never appear under
      # collapsed groups.
      old_properties = self._properties
      self._properties = child._properties
      self._children_by_path = child._children_by_path

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent.  Fix up the path.  Note that
        # children with a sourceTree other than "<group>" are not relative
        # to their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it.  If the
      # original parent didn't have a name but the child did, let the
      # child's name live on.  If the name attribute seems unnecessary now,
      # get rid of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    """Sorts this group's children by Compare(), then recurses into
    child PBXGroups."""
    # functools.cmp_to_key adapts the cmp-style Compare method; the old
    # sorted(..., cmp=...) argument is Python 2-only.  Imported locally
    # because the module-level import block is outside this section.
    import functools
    self._properties['children'] = \
        sorted(self._properties['children'],
               key=functools.cmp_to_key(lambda x, y: x.Compare(y)))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
  """Abstract base for objects that can be used as the fileRef property of
  PBXBuildFile."""

  def PathHashables(self):
    """Returns hashables identifying this element by its full path.

    A PBXBuildFile that refers to this object will call this method to
    obtain additional hashables specific to this XCFileLikeElement.  Don't
    just use this object's hashables, they're not specific and unique
    enough on their own (without access to the parent hashables).  Instead,
    provide hashables that identify this object by path by getting its
    hashables as well as the hashables of ancestor XCHierarchicalElement
    objects.
    """
    hashables = []
    xche = self
    while xche != None and isinstance(xche, XCHierarchicalElement):
      # Prepend each ancestor's hashables so the outermost ancestor's
      # contribution ends up first.  Slice assignment replaces the original
      # Python 2-only xrange/insert loop with one equivalent operation.
      hashables[:0] = xche.Hashables()
      xche = xche.parent
    return hashables
class XCContainerPortal(XCObject):
  """Abstract base for objects that can be used as the containerPortal
  property of PBXContainerItemProxy."""
  pass
class XCRemoteObject(XCObject):
  """Abstract base for objects that can be used as the remoteGlobalIDString
  property of PBXContainerItemProxy."""
  pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  """A reference to a file (or folder) on disk.

  On construction, strips a trailing slash from 'path' (remembering that the
  reference names a directory) and, when no file type was supplied, infers
  lastKnownFileType/explicitFileType from the path's extension.
  """

  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType':  [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name':              [0, str, 0, 0],
    'path':              [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
  _should_print_single_line = True
  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  # Maps lowercased filename extensions to Xcode file types.  Hoisted to
  # class level so the dict is built once instead of on every instantiation.
  # TODO(mark): This is the replacement for a replacement for a quick hack.
  # It is no longer incredibly sucky, but this list needs to be extended.
  _extension_map = {
    'a':           'archive.ar',
    'app':         'wrapper.application',
    'bdic':        'file',
    'bundle':      'wrapper.cfbundle',
    'c':           'sourcecode.c.c',
    'cc':          'sourcecode.cpp.cpp',
    'cpp':         'sourcecode.cpp.cpp',
    'css':         'text.css',
    'cxx':         'sourcecode.cpp.cpp',
    'dart':        'sourcecode',
    'dylib':       'compiled.mach-o.dylib',
    'framework':   'wrapper.framework',
    'gyp':         'sourcecode',
    'gypi':        'sourcecode',
    'h':           'sourcecode.c.h',
    'hxx':         'sourcecode.cpp.h',
    'icns':        'image.icns',
    'java':        'sourcecode.java',
    'js':          'sourcecode.javascript',
    'm':           'sourcecode.c.objc',
    'mm':          'sourcecode.cpp.objcpp',
    'nib':         'wrapper.nib',
    'o':           'compiled.mach-o.objfile',
    'pdf':         'image.pdf',
    'pl':          'text.script.perl',
    'plist':       'text.plist.xml',
    'pm':          'text.script.perl',
    'png':         'image.png',
    'py':          'text.script.python',
    'r':           'sourcecode.rez',
    'rez':         'sourcecode.rez',
    's':           'sourcecode.asm',
    'storyboard':  'file.storyboard',
    'strings':     'text.plist.strings',
    'ttf':         'file',
    'xcconfig':    'text.xcconfig',
    'xcdatamodel': 'wrapper.xcdatamodel',
    'xib':         'file.xib',
    'y':           'sourcecode.yacc',
  }

  # Extensions whose inferred type is stored in explicitFileType rather than
  # lastKnownFileType.
  _prop_map = {
    'dart': 'explicitFileType',
    'gyp':  'explicitFileType',
    'gypi': 'explicitFileType',
  }

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True
    else:
      is_dir = False

    if 'path' in self._properties and \
       not 'lastKnownFileType' in self._properties and \
       not 'explicitFileType' in self._properties:
      if is_dir:
        file_type = 'folder'
        prop_name = 'lastKnownFileType'
      else:
        basename = posixpath.basename(self._properties['path'])
        (root, ext) = posixpath.splitext(basename)
        # Check the map using a lowercase extension.
        # TODO(mark): Maybe it should try with the original case first and
        # fall back to lowercase, in case there are any instances where case
        # matters.  There currently aren't.
        if ext != '':
          ext = ext[1:].lower()

        # TODO(mark): "text" is the default value, but "file" is appropriate
        # for unrecognized files not containing text.  Xcode seems to choose
        # based on content.
        file_type = self._extension_map.get(ext, 'text')
        prop_name = self._prop_map.get(ext, 'lastKnownFileType')

      self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations.

  Each child is one localized variant of the same logical file (e.g. the
  MainMenu.nib found inside each Language.lproj directory).
  """
  # No additions to the schema relative to PBXGroup.
  pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
  """A single named build configuration carrying a buildSettings dict."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings':              [0, dict, 0, 1, {}],
    'name':                       [0, str, 0, 1],
  })

  def HasBuildSetting(self, key):
    """Returns whether key is present in this configuration's settings."""
    return key in self._properties['buildSettings']

  def GetBuildSetting(self, key):
    """Returns the value stored for key; raises KeyError if absent."""
    return self._properties['buildSettings'][key]

  def SetBuildSetting(self, key, value):
    """Stores value under key, replacing any previous value."""
    # TODO(mark): If a list, copy?
    self._properties['buildSettings'][key] = value

  def AppendBuildSetting(self, key, value):
    """Appends value to the list stored under key, creating it if needed."""
    settings = self._properties['buildSettings']
    if not key in settings:
      settings[key] = []
    settings[key].append(value)

  def DelBuildSetting(self, key):
    """Removes key from the settings; a missing key is a no-op."""
    settings = self._properties['buildSettings']
    if key in settings:
      del settings[key]

  def SetBaseConfiguration(self, value):
    """Points baseConfigurationReference at the given file reference."""
    self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
  """An ordered list of XCBuildConfiguration objects with a named default.

  NOTE(review): _configs and the mutable defaults in _schema are class-level
  objects shared by every XCConfigurationList relying on defaults;
  presumably the schema machinery copies defaults before mutation -- verify
  before depending on it.
  """

  # _configs is the default list of configurations.
  _configs = [ XCBuildConfiguration({'name': 'Debug'}),
               XCBuildConfiguration({'name': 'Release'}) ]

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildConfigurations':           [1, XCBuildConfiguration, 1, 1, _configs],
    'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
    'defaultConfigurationName':      [0, str, 0, 1, 'Release'],
  })

  def Name(self):
    return 'Build configuration list for ' + \
           self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'

  def ConfigurationNamed(self, name):
    """Convenience accessor to obtain an XCBuildConfiguration by name.

    Raises:
      KeyError: if no child configuration has the given name.
    """
    for configuration in self._properties['buildConfigurations']:
      if configuration._properties['name'] == name:
        return configuration

    raise KeyError(name)

  def DefaultConfiguration(self):
    """Convenience accessor to obtain the default XCBuildConfiguration."""
    return self.ConfigurationNamed(self._properties['defaultConfigurationName'])

  def HasBuildSetting(self, key):
    """Determines the state of a build setting in all XCBuildConfiguration
    child objects.

    If all child objects have key in their build settings, and the value is
    the same in all child objects, returns 1.

    If no child objects have the key in their build settings, returns 0.

    If some, but not all, child objects have the key in their build
    settings, or if any children have different values for the key, returns
    -1.
    """
    has = None
    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_has = configuration.HasBuildSetting(key)
      if has is None:
        has = configuration_has
      elif has != configuration_has:
        return -1

      if configuration_has:
        configuration_value = configuration.GetBuildSetting(key)
        if value is None:
          value = configuration_value
        elif value != configuration_value:
          return -1

    if not has:
      return 0
    return 1

  def GetBuildSetting(self, key):
    """Gets the build setting for key.

    All child XCBuildConfiguration objects must have the same value set for
    the setting, or a ValueError will be raised.
    """
    # TODO(mark): This is wrong for build settings that are lists.  The list
    # contents should be compared (and a list copy returned?)
    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_value = configuration.GetBuildSetting(key)
      if value is None:
        value = configuration_value
      elif value != configuration_value:
        raise ValueError('Variant values for ' + key)

    return value

  def SetBuildSetting(self, key, value):
    """Sets the build setting for key to value in all child
    XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBuildSetting(key, value)

  def AppendBuildSetting(self, key, value):
    """Appends value to the build setting for key, which is treated as a
    list, in all child XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.AppendBuildSetting(key, value)

  def DelBuildSetting(self, key):
    """Deletes the build setting key from all child XCBuildConfiguration
    objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.DelBuildSetting(key)

  def SetBaseConfiguration(self, value):
    """Sets the base configuration in all child XCBuildConfiguration
    objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
  """Membership of an XCFileLikeElement in a build phase's files list."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'fileRef':  [0, XCFileLikeElement, 0, 1],
    'settings': [0, str, 0, 0],  # hack, it's a dict
  })

  # Weird output rules for PBXBuildFile.
  _should_print_single_line = True
  _encode_transforms = XCObject._alternate_encode_transforms

  def Name(self):
    """Returns e.g. "main.cc in Sources"."""
    return '%s in %s' % (self._properties['fileRef'].Name(),
                         self.parent.Name())

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Relying on Name() alone is not sufficient because it is not a complete
    # pathname, so mix in the fileRef's path-based hashables.  PathHashables
    # returns hashables unique enough that no two PBXBuildFiles should wind
    # up with the same set, unless someone adds the same file multiple times
    # to the same target -- which would be considered invalid anyway.
    hashables.extend(self._properties['fileRef'].PathHashables())
    return hashables
class XCBuildPhase(XCObject):
  """Abstract base for build phase classes.  Not represented in a project
  file.

  Attributes:
    _files_by_path: A dict mapping each path of a child in the files list by
      path (keys) to the corresponding PBXBuildFile children (values).
    _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement
      (keys) to the corresponding PBXBuildFile children (values).
  """

  # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
  # actually have a "files" list.  XCBuildPhase should not have "files" but
  # another abstract subclass of it should provide this, and concrete build
  # phase types that do have "files" lists should be derived from that new
  # abstract subclass.  XCBuildPhase should only provide buildActionMask and
  # runOnlyForDeploymentPostprocessing, and not files or the various
  # file-related methods and attributes.

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildActionMask':                    [0, int, 0, 1, 0x7fffffff],
    'files':                              [1, PBXBuildFile, 1, 1, []],
    'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)

    self._files_by_path = {}
    self._files_by_xcfilelikeelement = {}
    for pbxbuildfile in self._properties.get('files', []):
      self._AddBuildFileToDicts(pbxbuildfile)

  def FileGroup(self, path):
    """Returns the (group, hierarchical) destination for path.

    Subclasses must override this by returning a two-element tuple.  The
    first item in the tuple should be the PBXGroup to which "path" should be
    added, either as a child or deeper descendant.  The second item should
    be a boolean indicating whether files should be added into hierarchical
    groups or one single flat group.
    """
    raise NotImplementedError(
        self.__class__.__name__ + ' must implement FileGroup')

  def _AddPathToDict(self, pbxbuildfile, path):
    """Adds path to the dict tracking paths belonging to this build phase.

    If the path is already a member of this build phase, raises an
    exception.
    """
    if path in self._files_by_path:
      raise ValueError('Found multiple build files with path ' + path)
    self._files_by_path[path] = pbxbuildfile

  def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
    """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.

    If path is specified, then it is the path that is being added to the
    phase, and pbxbuildfile must contain either a PBXFileReference directly
    referencing that path, or it must contain a PBXVariantGroup that itself
    contains a PBXFileReference referencing the path.

    If path is not specified, either the PBXFileReference's path or the
    paths of all children of the PBXVariantGroup are taken as being added to
    the phase.

    If the path is already present in the phase, raises an exception.

    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    are already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception.  This does not raise an exception when a
    PBXFileReference or PBXVariantGroup reappear and are referenced by the
    same PBXBuildFile that has already introduced them, because in the case
    of PBXVariantGroup objects, they may correspond to multiple paths that
    are not all added simultaneously.  When this situation occurs, the path
    needs to be added to _files_by_path, but nothing needs to change in
    _files_by_xcfilelikeelement, and the caller should have avoided adding
    the PBXBuildFile if it is already present in the list of children.
    """
    xcfilelikeelement = pbxbuildfile._properties['fileRef']

    paths = []
    if path != None:
      # It's best when the caller provides the path.
      # NOTE(review): only PBXVariantGroup fileRefs record a caller-supplied
      # path here; a plain PBXFileReference with an explicit path argument
      # is not added to _files_by_path -- confirm this asymmetry is
      # intentional.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        paths.append(path)
    else:
      # If the caller didn't provide a path, there can be either multiple
      # paths (PBXVariantGroup) or one.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        for variant in xcfilelikeelement._properties['children']:
          paths.append(variant.FullPath())
      else:
        paths.append(xcfilelikeelement.FullPath())

    # Add the paths first, because if something's going to raise, the
    # messages provided by _AddPathToDict are more useful owing to its
    # having access to a real pathname and not just an object's Name().
    for a_path in paths:
      self._AddPathToDict(pbxbuildfile, a_path)

    # If another PBXBuildFile references this XCFileLikeElement, there's a
    # problem.
    if xcfilelikeelement in self._files_by_xcfilelikeelement and \
       self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
      raise ValueError('Found multiple build files for ' +
                       xcfilelikeelement.Name())
    self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile

  def AppendBuildFile(self, pbxbuildfile, path=None):
    """Adds pbxbuildfile to this phase, maintaining the lookup dicts.

    Callers should use this instead of calling
    AppendProperty('files', pbxbuildfile) directly because this function
    maintains the object's dicts.  Better yet, callers can just call AddFile
    with a pathname and not worry about building their own PBXBuildFile
    objects.
    """
    self.AppendProperty('files', pbxbuildfile)
    self._AddBuildFileToDicts(pbxbuildfile, path)

  def AddFile(self, path, settings=None):
    """Adds path to this phase, creating a PBXBuildFile when necessary."""
    (file_group, hierarchical) = self.FileGroup(path)
    file_ref = file_group.AddOrGetFileByPath(path, hierarchical)

    if file_ref in self._files_by_xcfilelikeelement and \
       isinstance(file_ref, PBXVariantGroup):
      # There's already a PBXBuildFile in this phase corresponding to the
      # PBXVariantGroup.  path just provides a new variant that belongs to
      # the group.  Add the path to the dict.
      pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
      self._AddBuildFileToDicts(pbxbuildfile, path)
    else:
      # Add a new PBXBuildFile to get file_ref into the phase.
      if settings is None:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
      else:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref,
                                     'settings': settings})
      self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
  """The "Copy Headers" build phase."""
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the phase name shown by Xcode."""
    return 'Headers'

  def FileGroup(self, path):
    """Headers land in the project root group appropriate for path."""
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
  """The "Copy Bundle Resources" build phase."""
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the phase name shown by Xcode."""
    return 'Resources'

  def FileGroup(self, path):
    """Resources land in the project root group appropriate for path."""
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
  """The "Compile Sources" build phase."""
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the phase name shown by Xcode."""
    return 'Sources'

  def FileGroup(self, path):
    """Sources land in the project root group appropriate for path."""
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
  """The "Link Binary With Libraries" build phase."""
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the phase name shown by Xcode."""
    return 'Frameworks'

  def FileGroup(self, path):
    """Chooses the group for path; .o files are routed like sources."""
    unused_root, extension = posixpath.splitext(path)
    if extension:
      extension = extension[1:].lower()
    if extension == 'o':
      # .o files are added to Xcode Frameworks phases, but conceptually
      # aren't frameworks; they're more like sources or intermediates.
      # Redirect them to show up in one of those other groups.
      return self.PBXProjectAncestor().RootGroupForPath(path)
    return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
  """The "Run Script" build phase."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'inputPaths':       [1, str, 0, 1, []],
    'name':             [0, str, 0, 0],
    'outputPaths':      [1, str, 0, 1, []],
    'shellPath':        [0, str, 0, 1, '/bin/sh'],
    'shellScript':      [0, str, 0, 1],
    'showEnvVarsInLog': [0, int, 0, 0],
  })

  def Name(self):
    """Returns the user-supplied name, or 'ShellScript' if none was set."""
    return self._properties.get('name', 'ShellScript')
class PBXCopyFilesBuildPhase(XCBuildPhase):
  """The "Copy Files" build phase: copies its files to a destination
  described by dstSubfolderSpec and dstPath."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'dstPath':          [0, str, 0, 1],
    'dstSubfolderSpec': [0, int, 0, 1],
    'name':             [0, str, 0, 0],
  })

  # path_tree_re matches "$(DIR)/path" or just "$(DIR)".  Match group 1 is
  # "DIR", match group 3 is "path" or None.  Written as a raw string to
  # avoid the doubled-backslash escaping of the previous form.
  path_tree_re = re.compile(r'^\$\((.*)\)(/(.*)|)$')

  # path_tree_to_subfolder maps names of Xcode variables to the associated
  # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
  path_tree_to_subfolder = {
    'BUILT_PRODUCTS_DIR': 16,  # Products Directory
    # Other types that can be chosen via the Xcode UI.
    # TODO(mark): Map Xcode variable names to these.
    # : 1,  # Wrapper
    # : 6,  # Executables: 6
    # : 7,  # Resources
    # : 15,  # Java Resources
    # : 10,  # Frameworks
    # : 11,  # Shared Frameworks
    # : 12,  # Shared Support
    # : 13,  # PlugIns
  }

  def Name(self):
    """Returns the user-supplied name, or 'CopyFiles' if none was set."""
    if 'name' in self._properties:
      return self._properties['name']

    return 'CopyFiles'

  def FileGroup(self, path):
    """Copied files are grouped in the project root group for path."""
    return self.PBXProjectAncestor().RootGroupForPath(path)

  def SetDestination(self, path):
    """Set the dstSubfolderSpec and dstPath properties from path.

    path may be specified in the same notation used for
    XCHierarchicalElements, specifically, "$(DIR)/path".

    Raises:
      ValueError: if path is relative and not anchored at an Xcode variable.
    """
    path_tree_match = self.path_tree_re.search(path)
    if path_tree_match:
      # Everything else needs to be relative to an Xcode variable.
      path_tree = path_tree_match.group(1)
      relative_path = path_tree_match.group(3)

      if path_tree in self.path_tree_to_subfolder:
        subfolder = self.path_tree_to_subfolder[path_tree]
        if relative_path is None:
          relative_path = ''
      else:
        # The path starts with an unrecognized Xcode variable
        # name like $(SRCROOT).  Xcode will still handle this
        # as an "absolute path" that starts with the variable.
        subfolder = 0
        relative_path = path
    elif path.startswith('/'):
      # Special case.  Absolute paths are in dstSubfolderSpec 0.
      subfolder = 0
      relative_path = path[1:]
    else:
      raise ValueError('Can\'t use path %s in a %s' %
                       (path, self.__class__.__name__))

    self._properties['dstPath'] = relative_path
    self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
  """A custom rule mapping a file type (or pattern) to a build script."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'compilerSpec': [0, str, 0, 1],
    'filePatterns': [0, str, 0, 0],
    'fileType':     [0, str, 0, 1],
    'isEditable':   [0, int, 0, 1, 1],
    'outputFiles':  [1, str, 0, 1, []],
    'script':       [0, str, 0, 0],
  })

  def Name(self):
    """Returns the class name; not very inspired, but it's what Xcode
    uses."""
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Mix in the weak references this rule holds: the file type always, plus
    # the file patterns when present.
    properties = self._properties
    hashables.append(properties['fileType'])
    if 'filePatterns' in properties:
      hashables.append(properties['filePatterns'])
    return hashables
class PBXContainerItemProxy(XCObject):
  """A proxy standing in for an object that may live in another project.

  When referencing an item in this project file, containerPortal is the
  PBXProject root object of this project file.  When referencing an item in
  another project file, containerPortal is a PBXFileReference identifying
  the other project file.

  When serving as a proxy to an XCTarget (in this project file or another),
  proxyType is 1.  When serving as a proxy to a PBXFileReference (in
  another project file), proxyType is 2.  Type 2 is used for references to
  the products of the other project file's targets.

  Xcode is weird about remoteGlobalIDString.  Usually, it's printed without
  a comment, indicating that it's tracked internally simply as a string,
  but sometimes it's printed with a comment (usually when the object is
  initially created), indicating that it's tracked as a project file object
  at least sometimes.  This module always tracks it as an object, but
  contains a hack to prevent it from printing the comment in the project
  file output.  See _XCKVPrint.
  """

  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal':      [0, XCContainerPortal, 0, 1],
    'proxyType':            [0, int, 0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
    'remoteInfo':           [0, str, 0, 1],
  })

  def __repr__(self):
    properties = self._properties
    portal_name = properties['containerPortal'].Name()
    name = '%s.gyp:%s' % (portal_name, properties['remoteInfo'])
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    """Returns the class name; admittedly not the best name, but it's what
    Xcode uses."""
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Use the hashables of the weak objects that this object refers to.
    hashables.extend(self._properties['containerPortal'].Hashables())
    hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
    return hashables
class PBXTargetDependency(XCObject):
  # The "target" property accepts an XCTarget object, and obviously not
  # NoneType.  But XCTarget is defined below, so it can't be put into the
  # schema yet.  The definition of PBXTargetDependency can't be moved below
  # XCTarget because XCTarget's own schema references PBXTargetDependency.
  # Python doesn't deal well with this circular relationship, and doesn't
  # have a real way to do forward declarations.  To work around, the type of
  # the "target" property is reset below, after XCTarget is defined.
  #
  # At least one of "name" and "target" is required.
  _schema = XCObject._schema.copy()
  _schema.update({
    'name': [0, str, 0, 0],
    'target': [0, None.__class__, 0, 0],
    'targetProxy': [0, PBXContainerItemProxy, 1, 1],
  })

  def __repr__(self):
    # Prefer an explicit "name"; fall back to the target object's own name.
    name = self._properties.get('name') or self._properties['target'].Name()
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    """Returns the hashable data used to compute this object's ID.

    The targetProxy's hashables are mixed in so that dependencies on
    different targets hash to different IDs.
    """
    # super
    hashables = XCObject.Hashables(self)

    # Use the hashables of the object that this object refers to.
    hashables.extend(self._properties['targetProxy'].Hashables())
    return hashables
class PBXReferenceProxy(XCFileLikeElement):
  """A stand-in for a file (typically a product) owned by another project
  file, reached through the PBXContainerItemProxy held in "remoteRef".
  """

  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'fileType': [0, str, 0, 1],
    'path': [0, str, 0, 1],
    'remoteRef': [0, PBXContainerItemProxy, 1, 1],
  })
class XCTarget(XCRemoteObject):
  # An XCTarget is really just an XCObject, the XCRemoteObject thing is just
  # to allow PBXProject to be used in the remoteGlobalIDString property of
  # PBXContainerItemProxy.
  #
  # Setting a "name" property at instantiation may also affect "productName",
  # which may in turn affect the "PRODUCT_NAME" build setting in children of
  # "buildConfigurationList".  See __init__ below.
  _schema = XCRemoteObject._schema.copy()
  _schema.update({
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'buildPhases': [1, XCBuildPhase, 1, 1, []],
    'dependencies': [1, PBXTargetDependency, 1, 1, []],
    'name': [0, str, 0, 1],
    'productName': [0, str, 0, 1],
  })

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    # super
    XCRemoteObject.__init__(self, properties, id, parent)

    # Set up additional defaults not expressed in the schema.  If a "name"
    # property was supplied, set "productName" if it is not present.  Also set
    # the "PRODUCT_NAME" build setting in each configuration, but only if
    # the setting is not present in any build configuration.
    if 'name' in self._properties:
      if not 'productName' in self._properties:
        self.SetProperty('productName', self._properties['name'])

    if 'productName' in self._properties:
      if 'buildConfigurationList' in self._properties:
        configs = self._properties['buildConfigurationList']
        if configs.HasBuildSetting('PRODUCT_NAME') == 0:
          configs.SetBuildSetting('PRODUCT_NAME',
                                  self._properties['productName'])

  def AddDependency(self, other):
    """Creates a dependency from this target on the target "other".

    Targets in the same project file are linked via a type-1 proxy that also
    carries a direct "target" reference; targets in different project files
    are linked through the PBXFileReference to the other project (see
    AddOrGetProjectReference) and are identified by name only.
    """
    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()
    if pbxproject == other_pbxproject:
      # Add a dependency to another target in the same project file.
      container = PBXContainerItemProxy({'containerPortal': pbxproject,
                                         'proxyType': 1,
                                         'remoteGlobalIDString': other,
                                         'remoteInfo': other.Name()})
      dependency = PBXTargetDependency({'target': other,
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)
    else:
      # Add a dependency to a target in a different project file.
      other_project_ref = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
      container = PBXContainerItemProxy({
            'containerPortal': other_project_ref,
            'proxyType': 1,
            'remoteGlobalIDString': other,
            'remoteInfo': other.Name(),
          })
      dependency = PBXTargetDependency({'name': other.Name(),
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)

  # Proxy all of these through to the build configuration list.

  def ConfigurationNamed(self, name):
    return self._properties['buildConfigurationList'].ConfigurationNamed(name)

  def DefaultConfiguration(self):
    return self._properties['buildConfigurationList'].DefaultConfiguration()

  def HasBuildSetting(self, key):
    return self._properties['buildConfigurationList'].HasBuildSetting(key)

  def GetBuildSetting(self, key):
    return self._properties['buildConfigurationList'].GetBuildSetting(key)

  def SetBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].SetBuildSetting(key, \
                                                                      value)

  def AppendBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
                                                                         value)

  def DelBuildSetting(self, key):
    return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property.  See PBXTargetDependency._schema
# above: the property was declared with a NoneType placeholder because
# XCTarget did not exist yet; now that it does, fix up the schema in place.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
  """A target that produces a product (application, library, framework,
  tool, ...), as opposed to an aggregate target.
  """

  # buildPhases is overridden in the schema to be able to set defaults.
  #
  # NOTE: Contrary to most objects, it is advisable to set parent when
  # constructing PBXNativeTarget.  A parent of an XCTarget must be a
  # PBXProject object.  A parent reference is required for a PBXNativeTarget
  # during construction to be able to set up the target defaults for
  # productReference, because a PBXBuildFile object must be created for the
  # target and it must be added to the PBXProject's mainGroup hierarchy.
  _schema = XCTarget._schema.copy()
  _schema.update({
    'buildPhases': [1, XCBuildPhase, 1, 1,
                    [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
    'buildRules': [1, PBXBuildRule, 1, 1, []],
    'productReference': [0, PBXFileReference, 0, 1],
    'productType': [0, str, 0, 1],
  })

  # Mapping from Xcode product-types to settings.  The settings are:
  #  filetype : used for explicitFileType in the project file
  #  prefix : the prefix for the file name
  #  suffix : the suffix for the file name
  _product_filetypes = {
    'com.apple.product-type.application': ['wrapper.application',
                                           '', '.app'],
    'com.apple.product-type.bundle': ['wrapper.cfbundle',
                                      '', '.bundle'],
    'com.apple.product-type.framework': ['wrapper.framework',
                                         '', '.framework'],
    'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
                                               'lib', '.dylib'],
    'com.apple.product-type.library.static': ['archive.ar',
                                              'lib', '.a'],
    'com.apple.product-type.tool': ['compiled.mach-o.executable',
                                    '', ''],
    'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
                                                '', '.xctest'],
    'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
                                        '', '.so'],
  }

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initializes the target and, when enough information is available,
    creates the productReference file in the project's Products group.

    force_outdir/force_prefix/force_extension override the defaults implied
    by the product type.
    """
    # super
    XCTarget.__init__(self, properties, id, parent)

    if 'productName' in self._properties and \
       'productType' in self._properties and \
       not 'productReference' in self._properties and \
       self._properties['productType'] in self._product_filetypes:
      products_group = None
      pbxproject = self.PBXProjectAncestor()
      if pbxproject != None:
        products_group = pbxproject.ProductsGroup()

      if products_group != None:
        (filetype, prefix, suffix) = \
            self._product_filetypes[self._properties['productType']]

        # Xcode does not have a distinct type for loadable modules that are
        # pure BSD targets (not in a bundle wrapper).  GYP allows such modules
        # to be specified by setting a target type to loadable_module without
        # having mac_bundle set.  These are mapped to the pseudo-product type
        # com.googlecode.gyp.xcode.bundle.
        #
        # By picking up this special type and converting it to a dynamic
        # library (com.apple.product-type.library.dynamic) with fix-ups,
        # single-file loadable modules can be produced.
        #
        # MACH_O_TYPE is changed to mh_bundle to produce the proper file type
        # (as opposed to mh_dylib).  In order for linking to succeed,
        # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
        # cleared.  They are meaningless for type mh_bundle.
        #
        # Finally, the .so extension is forcibly applied over the default
        # (.dylib), unless another forced extension is already selected.
        # .dylib is plainly wrong, and .bundle is used by loadable_modules in
        # bundle wrappers (com.apple.product-type.bundle).  .so seems an odd
        # choice because it's used as the extension on many other systems that
        # don't distinguish between linkable shared libraries and non-linkable
        # loadable modules, but there's precedent: Python loadable modules on
        # Mac OS X use an .so extension.
        if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
          self._properties['productType'] = \
              'com.apple.product-type.library.dynamic'
          self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
          self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
          self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
          if force_extension is None:
            force_extension = suffix[1:]

        # BUGFIX: this previously compared against the misspelled
        # 'com.apple.product-type-bundle.unit.test', which is not a valid
        # product type and never matched, so unit-test bundles did not get
        # their default forced extension.  The schema key spelled in
        # _product_filetypes above is the correct one.
        if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
          if force_extension is None:
            force_extension = suffix[1:]

        if force_extension is not None:
          # If it's a wrapper (bundle), set WRAPPER_EXTENSION.
          if filetype.startswith('wrapper.'):
            self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
          else:
            # Extension override.
            suffix = '.' + force_extension
            self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)

          if filetype.startswith('compiled.mach-o.executable'):
            # Executables carry the suffix in the product name itself, not
            # as a separate path suffix.
            product_name = self._properties['productName']
            product_name += suffix
            suffix = ''
            self.SetProperty('productName', product_name)
            self.SetBuildSetting('PRODUCT_NAME', product_name)

        # Xcode handles most prefixes based on the target type, however there
        # are exceptions.  If a "BSD Dynamic Library" target is added in the
        # Xcode UI, Xcode sets EXECUTABLE_PREFIX.  This check duplicates that
        # behavior.
        if force_prefix is not None:
          prefix = force_prefix

        if filetype.startswith('wrapper.'):
          self.SetBuildSetting('WRAPPER_PREFIX', prefix)
        else:
          self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)

        if force_outdir is not None:
          self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)

        # TODO(tvl): Remove the below hack.
        #    http://code.google.com/p/gyp/issues/detail?id=122
        # Some targets include the prefix in the target_name.  These targets
        # really should just add a product_name setting that doesn't include
        # the prefix.  For example:
        #  target_name = 'libevent', product_name = 'event'
        # This check cleans up for them.
        product_name = self._properties['productName']
        prefix_len = len(prefix)
        if prefix_len and (product_name[:prefix_len] == prefix):
          product_name = product_name[prefix_len:]
          self.SetProperty('productName', product_name)
          self.SetBuildSetting('PRODUCT_NAME', product_name)

        ref_props = {
          'explicitFileType': filetype,
          'includeInIndex': 0,
          'path': prefix + product_name + suffix,
          'sourceTree': 'BUILT_PRODUCTS_DIR',
        }
        file_ref = PBXFileReference(ref_props)
        products_group.AppendChild(file_ref)
        self.SetProperty('productReference', file_ref)

  def GetBuildPhaseByType(self, type):
    """Returns this target's unique build phase of class "type", or None.

    Asserts if more than one phase of the requested class is present: the
    phases this is used for (sources, frameworks, headers, resources) may
    only appear once per target.
    """
    if not 'buildPhases' in self._properties:
      return None

    the_phase = None
    for phase in self._properties['buildPhases']:
      if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly, and this function is intended as an aid to callers that
        # rely on that uniqueness.  Loop over the entire list of phases and
        # assert if more than one of the desired type is found.
        assert the_phase is None
        the_phase = phase

    return the_phase

  def HeadersPhase(self):
    """Returns the PBXHeadersBuildPhase, creating it if needed."""
    headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
    if headers_phase is None:
      headers_phase = PBXHeadersBuildPhase()

      # The headers phase should come before the resources, sources, and
      # frameworks phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXResourcesBuildPhase) or \
           isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, headers_phase)
      headers_phase.parent = self

    return headers_phase

  def ResourcesPhase(self):
    """Returns the PBXResourcesBuildPhase, creating it if needed."""
    resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
    if resources_phase is None:
      resources_phase = PBXResourcesBuildPhase()

      # The resources phase should come before the sources and frameworks
      # phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, resources_phase)
      resources_phase.parent = self

    return resources_phase

  def SourcesPhase(self):
    """Returns the PBXSourcesBuildPhase, creating it if needed."""
    sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
    if sources_phase is None:
      sources_phase = PBXSourcesBuildPhase()
      self.AppendProperty('buildPhases', sources_phase)

    return sources_phase

  def FrameworksPhase(self):
    """Returns the PBXFrameworksBuildPhase, creating it if needed."""
    frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
    if frameworks_phase is None:
      frameworks_phase = PBXFrameworksBuildPhase()
      self.AppendProperty('buildPhases', frameworks_phase)

    return frameworks_phase

  def AddDependency(self, other):
    """Creates a dependency on "other" and, when "other" produces a linkable
    product (static library, or non-bundle dynamic library/framework), also
    adds that product to this target's frameworks phase so it gets linked.
    """
    # super
    XCTarget.AddDependency(self, other)

    static_library_type = 'com.apple.product-type.library.static'
    shared_library_type = 'com.apple.product-type.library.dynamic'
    framework_type = 'com.apple.product-type.framework'
    if isinstance(other, PBXNativeTarget) and \
       'productType' in self._properties and \
       self._properties['productType'] != static_library_type and \
       'productType' in other._properties and \
       (other._properties['productType'] == static_library_type or \
        ((other._properties['productType'] == shared_library_type or \
          other._properties['productType'] == framework_type) and \
         ((not other.HasBuildSetting('MACH_O_TYPE')) or
          other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):

      file_ref = other.GetProperty('productReference')

      pbxproject = self.PBXProjectAncestor()
      other_pbxproject = other.PBXProjectAncestor()
      if pbxproject != other_pbxproject:
        # Cross-project link: use the PBXReferenceProxy for the product that
        # lives in this project's reference group for the other project.
        other_project_product_group = \
            pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
        file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)

      self.FrameworksPhase().AppendProperty('files',
                                            PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
  """A target that only aggregates other targets (via dependencies and build
  phases) and produces no product of its own.
  """
  pass
class PBXProject(XCContainerPortal):
  # A PBXProject is really just an XCObject, the XCContainerPortal thing is
  # just to allow PBXProject to be used in the containerPortal property of
  # PBXContainerItemProxy.
  """
  Attributes:
    path: "sample.xcodeproj".  TODO(mark) Document me!
    _other_pbxprojects: A dictionary, keyed by other PBXProject objects.  Each
                        value is a reference to the dict in the
                        projectReferences list associated with the keyed
                        PBXProject.
  """

  _schema = XCContainerPortal._schema.copy()
  _schema.update({
    'attributes': [0, dict, 0, 0],
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
    'hasScannedForEncodings': [0, int, 0, 1, 1],
    'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
    'projectDirPath': [0, str, 0, 1, ''],
    'projectReferences': [1, dict, 0, 0],
    'projectRoot': [0, str, 0, 1, ''],
    'targets': [1, XCTarget, 1, 1, []],
  })

  def __init__(self, properties=None, id=None, parent=None, path=None):
    # path is the filesystem location of the .xcodeproj wrapper.  It is not
    # part of the serialized schema, so it lives directly on the instance.
    self.path = path
    self._other_pbxprojects = {}
    # super
    return XCContainerPortal.__init__(self, properties, id, parent)

  def Name(self):
    """Returns the project name: basename of path minus ".xcodeproj"."""
    name = self.path
    if name[-10:] == '.xcodeproj':
      name = name[:-10]
    return posixpath.basename(name)

  def Path(self):
    """Returns the filesystem path to the .xcodeproj wrapper."""
    return self.path

  def Comment(self):
    # Fixed comment used when serializing the root object.
    return 'Project object'

  def Children(self):
    """Returns the strong children, including the per-reference product
    groups that the schema doesn't know about.
    """
    # super
    children = XCContainerPortal.Children(self)

    # Add children that the schema doesn't know about.  Maybe there's a more
    # elegant way around this, but this is the only case where we need to own
    # objects in a dictionary (that is itself in a list), and three lines for
    # a one-off isn't that big a deal.
    if 'projectReferences' in self._properties:
      for reference in self._properties['projectReferences']:
        children.append(reference['ProductGroup'])

    return children

  def PBXProjectAncestor(self):
    # A PBXProject is its own PBXProject ancestor (ends the parent walk).
    return self

  def _GroupByName(self, name):
    """Returns the top-level PBXGroup called "name", creating both the main
    group and the named group on demand.
    """
    if not 'mainGroup' in self._properties:
      self.SetProperty('mainGroup', PBXGroup())

    main_group = self._properties['mainGroup']
    group = main_group.GetChildByName(name)
    if group is None:
      group = PBXGroup({'name': name})
      main_group.AppendChild(group)

    return group

  # SourceGroup and ProductsGroup are created by default in Xcode's own
  # templates.
  def SourceGroup(self):
    return self._GroupByName('Source')

  def ProductsGroup(self):
    return self._GroupByName('Products')

  # IntermediatesGroup is used to collect source-like files that are
  # generated by rules or script phases and are placed in intermediate
  # directories such as DerivedSources.
  def IntermediatesGroup(self):
    return self._GroupByName('Intermediates')

  # FrameworksGroup and ProjectsGroup are top-level groups used to collect
  # frameworks and projects.
  def FrameworksGroup(self):
    return self._GroupByName('Frameworks')

  def ProjectsGroup(self):
    return self._GroupByName('Projects')

  def RootGroupForPath(self, path):
    """Returns a PBXGroup child of this object to which path should be added.

    This method is intended to choose between SourceGroup and
    IntermediatesGroup on the basis of whether path is present in a source
    directory or an intermediates directory.  For the purposes of this
    determination, any path located within a derived file directory such as
    PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
    directory.

    The returned value is a two-element tuple.  The first element is the
    PBXGroup, and the second element specifies whether that group should be
    organized hierarchically (True) or as a single flat list (False).
    """
    # TODO(mark): make this a class variable and bind to self on call?
    # Also, this list is nowhere near exhaustive.
    # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
    # gyp.generator.xcode.  There should probably be some way for that module
    # to push the names in, rather than having to hard-code them here.
    source_tree_groups = {
      'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
      'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
    }

    (source_tree, path) = SourceTreeAndPathFromPath(path)
    if source_tree != None and source_tree in source_tree_groups:
      (group_func, hierarchical) = source_tree_groups[source_tree]
      group = group_func()
      return (group, hierarchical)

    # TODO(mark): make additional choices based on file extension.

    return (self.SourceGroup(), True)

  def AddOrGetFileInRootGroup(self, path):
    """Returns a PBXFileReference corresponding to path in the correct group
    according to RootGroupForPath's heuristics.

    If an existing PBXFileReference for path exists, it will be returned.
    Otherwise, one will be created and returned.
    """
    (group, hierarchical) = self.RootGroupForPath(path)
    return group.AddOrGetFileByPath(path, hierarchical)

  def RootGroupsTakeOverOnlyChildren(self, recurse=False):
    """Calls TakeOverOnlyChild for all groups in the main group."""
    for group in self._properties['mainGroup']._properties['children']:
      if isinstance(group, PBXGroup):
        group.TakeOverOnlyChild(recurse)

  def SortGroups(self):
    """Sorts the main group's children into canonical (Xcode-like) order."""
    # Sort the children of the mainGroup (like "Source" and "Products")
    # according to their defined order.
    self._properties['mainGroup']._properties['children'] = \
        sorted(self._properties['mainGroup']._properties['children'],
               cmp=lambda x,y: x.CompareRootGroup(y))

    # Sort everything else by putting group before files, and going
    # alphabetically by name within sections of groups and files.  SortGroup
    # is recursive.
    for group in self._properties['mainGroup']._properties['children']:
      if not isinstance(group, PBXGroup):
        continue

      if group.Name() == 'Products':
        # The Products group is a special case.  Instead of sorting
        # alphabetically, sort things in the order of the targets that
        # produce the products.  To do this, just build up a new list of
        # products based on the targets.
        products = []
        for target in self._properties['targets']:
          if not isinstance(target, PBXNativeTarget):
            continue
          product = target._properties['productReference']
          # Make sure that the product is already in the products group.
          assert product in group._properties['children']
          products.append(product)

        # Make sure that this process doesn't miss anything that was already
        # in the products group.
        assert len(products) == len(group._properties['children'])
        group._properties['children'] = products
      else:
        group.SortGroup()

  def AddOrGetProjectReference(self, other_pbxproject):
    """Add a reference to another project file (via PBXProject object) to this
    one.

    Returns [ProductGroup, ProjectRef].  ProductGroup is a PBXGroup object in
    this project file that contains a PBXReferenceProxy object for each
    product of each PBXNativeTarget in the other project file.  ProjectRef is
    a PBXFileReference to the other project file.

    If this project file already references the other project file, the
    existing ProductGroup and ProjectRef are returned.  The ProductGroup will
    still be updated if necessary.
    """
    if not 'projectReferences' in self._properties:
      self._properties['projectReferences'] = []

    product_group = None
    project_ref = None

    if not other_pbxproject in self._other_pbxprojects:
      # This project file isn't yet linked to the other one.  Establish the
      # link.
      product_group = PBXGroup({'name': 'Products'})

      # ProductGroup is strong.
      product_group.parent = self

      # There's nothing unique about this PBXGroup, and if left alone, it
      # will wind up with the same set of hashables as all other PBXGroup
      # objects owned by the projectReferences list.  Add the hashables of
      # the remote PBXProject that it's related to.
      product_group._hashables.extend(other_pbxproject.Hashables())

      # The other project reports its path as relative to the same directory
      # that this project's path is relative to.  The other project's path
      # is not necessarily already relative to this project.  Figure out the
      # pathname that this project needs to use to refer to the other one.
      this_path = posixpath.dirname(self.Path())
      projectDirPath = self.GetProperty('projectDirPath')
      if projectDirPath:
        if posixpath.isabs(projectDirPath[0]):
          this_path = projectDirPath
        else:
          this_path = posixpath.join(this_path, projectDirPath)
      other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)

      # ProjectRef is weak (it's owned by the mainGroup hierarchy).
      project_ref = PBXFileReference({
            'lastKnownFileType': 'wrapper.pb-project',
            'path': other_path,
            'sourceTree': 'SOURCE_ROOT',
          })
      self.ProjectsGroup().AppendChild(project_ref)

      ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
      self._other_pbxprojects[other_pbxproject] = ref_dict
      self.AppendProperty('projectReferences', ref_dict)

      # Xcode seems to sort this list case-insensitively
      self._properties['projectReferences'] = \
          sorted(self._properties['projectReferences'], cmp=lambda x,y:
                 cmp(x['ProjectRef'].Name().lower(),
                     y['ProjectRef'].Name().lower()))
    else:
      # The link already exists.  Pull out the relevant data.
      project_ref_dict = self._other_pbxprojects[other_pbxproject]
      product_group = project_ref_dict['ProductGroup']
      project_ref = project_ref_dict['ProjectRef']

    self._SetUpProductReferences(other_pbxproject, product_group, project_ref)

    return [product_group, project_ref]

  def _SetUpProductReferences(self, other_pbxproject, product_group,
                              project_ref):
    """Ensures product_group contains a PBXReferenceProxy for each product of
    each PBXNativeTarget in other_pbxproject.
    """
    # TODO(mark): This only adds references to products in other_pbxproject
    # when they don't exist in this pbxproject.  Perhaps it should also
    # remove references from this pbxproject that are no longer present in
    # other_pbxproject.  Perhaps it should update various properties if they
    # change.
    for target in other_pbxproject._properties['targets']:
      if not isinstance(target, PBXNativeTarget):
        continue

      other_fileref = target._properties['productReference']
      if product_group.GetChildByRemoteObject(other_fileref) is None:
        # Xcode sets remoteInfo to the name of the target and not the name
        # of its product, despite this proxy being a reference to the
        # product.
        container_item = PBXContainerItemProxy({
              'containerPortal': project_ref,
              'proxyType': 2,
              'remoteGlobalIDString': other_fileref,
              'remoteInfo': target.Name()
            })
        # TODO(mark): Does sourceTree get copied straight over from the other
        # project?  Can the other project ever have lastKnownFileType here
        # instead of explicitFileType?  (Use it if so?)  Can path ever be
        # unset?  (I don't think so.)  Can other_fileref have name set, and
        # does it impact the PBXReferenceProxy if so?  These are the
        # questions that perhaps will be answered one day.
        reference_proxy = PBXReferenceProxy({
              'fileType': other_fileref._properties['explicitFileType'],
              'path': other_fileref._properties['path'],
              'sourceTree': other_fileref._properties['sourceTree'],
              'remoteRef': container_item,
            })

        product_group.AppendChild(reference_proxy)

  def SortRemoteProductReferences(self):
    # For each remote project file, sort the associated ProductGroup in the
    # same order that the targets are sorted in the remote project file.
    # This is the sort order used by Xcode.

    def CompareProducts(x, y, remote_products):
      # x and y are PBXReferenceProxy objects.  Go through their associated
      # PBXContainerItem to get the remote PBXFileReference, which will be
      # present in the remote_products list.
      x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
      y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
      x_index = remote_products.index(x_remote)
      y_index = remote_products.index(y_remote)

      # Use the order of each remote PBXFileReference in remote_products to
      # determine the sort order.
      return cmp(x_index, y_index)

    for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
      # Build up a list of products in the remote project file, ordered the
      # same as the targets that produce them.
      remote_products = []
      for target in other_pbxproject._properties['targets']:
        if not isinstance(target, PBXNativeTarget):
          continue
        remote_products.append(target._properties['productReference'])

      # Sort the PBXReferenceProxy children according to the list of remote
      # products.
      product_group = ref_dict['ProductGroup']
      product_group._properties['children'] = sorted(
          product_group._properties['children'],
          cmp=lambda x, y: CompareProducts(x, y, remote_products))
class XCProjectFile(XCObject):
  """The root of a serialized project file: wraps the PBXProject held in
  "rootObject" together with the archive/object version headers.
  """

  _schema = XCObject._schema.copy()
  _schema.update({
    'archiveVersion': [0, int, 0, 1, 1],
    'classes': [0, dict, 0, 1, {}],
    'objectVersion': [0, int, 0, 1, 45],
    'rootObject': [0, PBXProject, 1, 1],
  })

  def SetXcodeVersion(self, version):
    """Sets compatibilityVersion and objectVersion to match an Xcode release.

    Raises:
      Exception: if version is not one of the known Xcode versions.
    """
    version_to_object_version = {
      '2.4': 45,
      '3.0': 45,
      '3.1': 45,
      '3.2': 46,
    }
    if not version in version_to_object_version:
      supported_str = ', '.join(sorted(version_to_object_version.keys()))
      raise Exception(
        'Unsupported Xcode version %s (supported: %s)' %
        ( version, supported_str ) )
    compatibility_version = 'Xcode %s' % version
    self._properties['rootObject'].SetProperty('compatibilityVersion',
                                               compatibility_version)
    self.SetProperty('objectVersion', version_to_object_version[version]);

  def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
    # Although XCProjectFile is implemented here as an XCObject, it's not a
    # proper object in the Xcode sense, and it certainly doesn't have its own
    # ID.  Pass through an attempt to update IDs to the real root object.
    if recursive:
      self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)

  def Print(self, file=sys.stdout):
    """Serializes the entire project to "file" in Xcode plist format."""
    self.VerifyHasRequiredProperties()

    # Add the special "objects" property, which will be caught and handled
    # separately during printing.  This structure allows a fairly standard
    # loop do the normal printing.
    self._properties['objects'] = {}
    self._XCPrint(file, 0, '// !$*UTF8*$!\n')
    if self._should_print_single_line:
      self._XCPrint(file, 0, '{ ')
    else:
      self._XCPrint(file, 0, '{\n')
    for property, value in sorted(self._properties.iteritems(),
                                  cmp=lambda x, y: cmp(x, y)):
      if property == 'objects':
        self._PrintObjects(file)
      else:
        self._XCKVPrint(file, 1, property, value)
    self._XCPrint(file, 0, '}\n')
    # Remove the synthetic property again so repeated prints stay correct.
    del self._properties['objects']

  def _PrintObjects(self, file):
    # Prints the "objects" section: every descendant object, grouped by
    # class name, each group sorted by object ID.
    if self._should_print_single_line:
      self._XCPrint(file, 0, 'objects = {')
    else:
      self._XCPrint(file, 1, 'objects = {\n')

    objects_by_class = {}
    for object in self.Descendants():
      if object == self:
        continue
      class_name = object.__class__.__name__
      if not class_name in objects_by_class:
        objects_by_class[class_name] = []
      objects_by_class[class_name].append(object)

    for class_name in sorted(objects_by_class):
      self._XCPrint(file, 0, '\n')
      self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
      for object in sorted(objects_by_class[class_name],
                           cmp=lambda x, y: cmp(x.id, y.id)):
        object.Print(file)
      self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')

    if self._should_print_single_line:
      self._XCPrint(file, 0, '}; ')
    else:
      self._XCPrint(file, 1, '};\n')
|
GbalsaC/bitnamiP | refs/heads/master | venv/lib/python2.7/site-packages/lepl/apps/rfc3696.py | 2 |
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is LEPL (http://www.acooke.org/lepl)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2009-2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
'''
Matchers for validating URIs and related objects, taken from RFC3696.
IMPORTANT - the emphasis here is on validation of user input.
These matchers are not exact matches for the underlying specs - they are
just useful practical approximations. Read RFC3696 to see what I mean
(or the quotes from that doc in the source below).
'''
from re import compile as compile_
from string import ascii_letters, digits, printable, whitespace
from lepl import *
# All characters that may appear in a hexadecimal digit (both cases).
_HEX = digits + 'abcdef' + 'ABCDEF'
def _guarantee_bool(function):
'''
A decorator that guarantees a true/false response.
'''
def wrapper(*args, **kargs):
try:
return bool(function(*args, **kargs))
except:
return False
return wrapper
def _matcher_to_validator(factory):
    '''
    Generate a validator based on the given matcher factory.

    The factory is invoked once and the resulting matcher is compiled to a
    regular expression without memoisation.  The returned validator yields
    True only when the value contains no CR/LF characters and the matcher
    parses it successfully; any failure (including the assertion below) is
    converted to False by _guarantee_bool.
    '''
    matcher = factory()
    matcher.config.compile_to_re().no_memoize()
    @_guarantee_bool
    def validator(value):
        # Newlines are rejected up front; the AssertionError raised here is
        # deliberately caught by _guarantee_bool and reported as False.
        for char in '\n\r':
            assert char not in value
        return matcher.parse(value)
    return validator
def _LimitLength(matcher, length):
    '''
    Wrap a matcher so that any result longer than ``length`` is rejected.
    '''
    def short_enough(results):
        # results[0] is the matched text
        return len(results[0]) <= length
    return PostCondition(matcher, short_enough)
def _RejectRegexp(matcher, pattern):
    '''
    Wrap a matcher so that results matching ``pattern`` (ie some other
    regular expression) are rejected.
    '''
    regexp = compile_(pattern)
    def does_not_match(results):
        return regexp.match(results[0]) is None
    return PostCondition(matcher, does_not_match)
def _LimitIntValue(matcher, limit):
    '''
    Reject a match if its integer value exceeds ``limit``.

    The parameter was renamed from ``max`` to ``limit`` to avoid shadowing
    the builtin; all in-file callers pass it positionally.
    '''
    return PostCondition(matcher, lambda results: int(results[0]) <= limit)
def _LimitCount(matcher, char, limit):
    '''
    Reject a match if the number of times a particular character occurs
    exceeds ``limit``.

    The parameter was renamed from ``max`` to ``limit`` to avoid shadowing
    the builtin; all in-file callers pass it positionally.
    '''
    return PostCondition(matcher, lambda results: results[0].count(char) <= limit)
def _PreferredFullyQualifiedDnsName():
    '''
    Matcher for "preferred form" fully qualified DNS names, per RFC 3696
    section 2 (the "LDH rule"): labels contain only ASCII letters, digits
    and hyphens, may not begin or end with a hyphen, and are at most 63
    octets each.  A complete name must contain at least one period, its
    top-level domain must not be all-numeric, and the whole name is limited
    to 255 octets.
    '''
    letter_or_digit = Any(ascii_letters + digits)
    label_char = letter_or_digit | '-'
    # a label starts and ends with a letter/digit; hyphens only in between
    label = letter_or_digit + Optional(label_char[:] + letter_or_digit)
    label63 = _LimitLength(label, 63)
    # the TLD must not consist solely of digits
    tld = _RejectRegexp(label63, r'^[0-9]+$')
    dotted_name = label63[1:, r'\.', ...] + '.' + tld
    non_numeric = _RejectRegexp(dotted_name, r'^[0-9\.]+$')
    return _LimitLength(non_numeric, 255)
def _IpV4Address():
    '''
    Matcher for dotted-quad IPv4 addresses.

    RFC 3696 says little about these, and RFC 2396 does not mention
    numerical limits either — but each octet is necessarily at most 255.
    '''
    decimal_octet = _LimitIntValue(Any(digits)[1:, ...], 255)
    return decimal_octet[4, '.', ...]
def _Ipv6Address():
    '''
    Matcher for IPv6 addresses, per RFC 2373 (addresses) and RFC 2732
    (URLs).  Three textual forms are accepted:

    1. the preferred form ``x:x:x:x:x:x:x:x`` of eight hexadecimal
       16-bit groups (leading zeros in a group may be omitted);
    2. the compressed form, where a single ``::`` stands for one or more
       runs of zero groups (eg ``1080::8:800:200C:417A``, ``::1``, ``::``);
    3. the mixed form ``x:x:x:x:x:x:d.d.d.d``, with the four low-order
       bytes written as a dotted-quad IPv4 address
       (eg ``::FFFF:129.144.52.38``).
    '''
    group = Any(_HEX)[1:4, ...]
    preferred_form = group[8, ':', ...]
    # The compressed form must not expand past 8 groups; bounding the
    # number of ':' characters is the simplest guard, but '::' at either
    # end needs its own alternative:
    #     1::2:3:4:5:6:7  has 7 ':' characters
    #     1:2:3:4:5:6:7:: has 8 ':' characters
    compressed_form = Or(_LimitCount(group[1:6, ':', ...] + '::' + group[1:6, ':', ...],
                                     ':', 7),
                         '::' + group[1:7, ':', ...],
                         group[1:7, ':', ...] + '::',
                         '::')
    # As above, but with an IPv4 tail; take care over the separator
    # between the v6 and v4 parts.
    mixed_form = \
        Or(group[6, ':', ...] + ':',
           _LimitCount(group[1:4, ':', ...] + '::' + group[1:4, ':', ...],
                       ':', 5),
           '::' + group[1:5, ':', ...] + ':',
           group[1:5, ':', ...] + '::',
           '::') + _IpV4Address()
    return (preferred_form | compressed_form | mixed_form)
def _EmailLocalPart():
    '''
    Matcher for the local part ("username") of an email address, per
    RFC 3696 section 3.

    Unquoted local parts may use letters, digits and a fixed set of special
    characters; anything else (``@ \\ " , [ ]`` and space) must be escaped
    with a backslash.  A double-quoted string admits a wider set directly.
    A period may appear, but not at the start or end and never doubled.
    The whole local part is limited to 64 octets.
    '''
    plain_chars = ascii_letters + digits + "!#$%&'*+-/=?^_`.{|}~"
    escapable = plain_chars + r'@\",[] '
    quotable = plain_chars + r'@\,[] '
    bare_form = (('\\' + Any(escapable))
                 | Any(plain_chars))[1:, ...]
    quoted_form = '"' + Any(quotable)[1:, ...] + '"'
    either_form = quoted_form | bare_form
    # no period at the start or end of the local part...
    no_outer_dot = _RejectRegexp(either_form, r'"?\..*\."?')
    # ...and no two consecutive periods anywhere
    no_double_dot = _RejectRegexp(no_outer_dot, r'.*\."*\..*')
    return _LimitLength(no_double_dot, 64)
def _Email():
    '''
    Matcher for complete email addresses: a local part, an at-sign, and a
    fully qualified DNS name.
    '''
    local = _EmailLocalPart()
    domain = _PreferredFullyQualifiedDnsName()
    return local + '@' + domain
def Email():
    '''
    Build an email validator per RFC 3696: the returned callable yields
    True for a valid email address and False otherwise.
    '''
    validator = _matcher_to_validator(_Email)
    return validator
def _HttpUrl():
    '''
    Matcher for absolute HTTP URLs, per RFC 3696 section 4.1:

        http://host[:port][/path][?search][#fragment]

    The host is an IPv4 address, a bracketed IPv6 address, or a DNS name.
    Reserved and unsafe characters must be %-encoded; an unencoded '/' is
    permitted inside the path to designate hierarchy.
    '''
    # printable non-whitespace characters, minus those reserved in a path
    segment_chars = ''.join(set(printable).difference(set(whitespace))
                            .difference('/;?<>#%'))
    # the search/fragment parts additionally allow an unencoded '/'
    query_chars = segment_chars + '/'
    segment_text = ('%' + Any(_HEX)[2, ...] | Any(segment_chars))[1:, ...]
    query_text = ('%' + Any(_HEX)[2, ...] | Any(query_chars))[1:, ...]
    host = _IpV4Address() | ('[' + _Ipv6Address() + ']') | \
           _PreferredFullyQualifiedDnsName()
    port = Optional(':' + Any(digits)[1:, ...])
    tail = Optional('/' +
                    Optional(segment_text[1:, '/', ...] + Optional('/')) +
                    Optional('?' + query_text) +
                    Optional('#' + query_text))
    return 'http://' + host + port + tail
def HttpUrl():
    '''
    Generate a validator for HTTP URLs, according to RFC3696, which returns
    True if the URL is valid, and False otherwise.
    '''
    return _matcher_to_validator(_HttpUrl)
def MailToUrl():
    '''
    Generate a validator for MAILTO URLs, according to RFC3696, which
    returns True if the URL is valid, and False otherwise.
    RFC 3696:
    The following characters may appear in MAILTO URLs only with the
    specific defined meanings given. If they appear in an email address
    (i.e., for some other purpose), they must be encoded:
    : The colon in "mailto:"
    < > # " % { } | \ ^ ~ `
    These characters are "unsafe" in any URL, and must always be
    encoded.
    The following characters must also be encoded if they appear in a
    MAILTO URL
    ? & =
    Used to delimit headers and their values when these are encoded
    into URLs.
    ----------
    The RFC isn't that great a guide here. The best approach, I think, is
    to check the URL for "forbidden" characters, then decode it, and finally
    validate the decoded email. So we implement the validator directly (ie
    this is not a matcher).
    '''
    MAIL_TO = 'mailto:'
    # splits the URL on candidate %xx escapes, keeping them as separate chunks
    encoded_token = compile_('(%.{0,2})')
    email = _Email()
    email.config.compile_to_re().no_memoize()
    @_guarantee_bool
    def validator(url):
        # must start with the scheme; strip it before further checks
        assert url.startswith(MAIL_TO)
        url = url[len(MAIL_TO):]
        # characters that must always appear encoded in a MAILTO URL
        for char in r':<>#"{}|\^~`':
            assert char not in url
        def unpack(chunk):
            # decode a single %xx escape; malformed escapes (wrong length
            # or bad hex) raise and therefore report the URL as invalid
            if chunk.startswith('%'):
                assert len(chunk) == 3
                return chr(int(chunk[1:], 16))
            else:
                return chunk
        url = ''.join(unpack(chunk) for chunk in encoded_token.split(url))
        # an empty address is not valid
        assert url
        # finally validate the decoded text as an email address
        return email.parse(url)
    return validator
|
davisein/jitsudone | refs/heads/master | django/django/contrib/gis/geos/io.py | 623 | """
Module that holds classes for performing I/O operations on GEOS geometry
objects. Specifically, this has Python implementations of WKB/WKT
reader and writer classes.
"""
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.prototypes.io import _WKTReader, _WKBReader, WKBWriter, WKTWriter
# Public classes for (WKB|WKT)Reader, which return GEOSGeometry
class WKBReader(_WKBReader):
    def read(self, wkb):
        "Return a GEOSGeometry built from the given WKB buffer."
        raw_geom = super(WKBReader, self).read(wkb)
        return GEOSGeometry(raw_geom)
class WKTReader(_WKTReader):
    def read(self, wkt):
        "Return a GEOSGeometry built from the given WKT string."
        raw_geom = super(WKTReader, self).read(wkt)
        return GEOSGeometry(raw_geom)
|
jeremiahyan/odoo | refs/heads/master | addons/sale_quotation_builder/__manifest__.py | 11 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo addon manifest: metadata and resource declarations for the
# Quotation Builder module.
{
    'name': 'Quotation Builder',
    'category': 'Sales/Sales',
    'summary': 'Build great quotation templates',
    'website': 'https://www.odoo.com/page/quote-builder',
    'version': '1.0',
    'description': "Design great quotation templates with building blocks to significantly boost your success rate.",
    # modules that must be installed before this one
    'depends': ['website', 'sale_management', 'website_mail'],
    # XML data/view files loaded on installation, in order
    'data': [
        'data/sale_order_template_data.xml',
        'views/sale_portal_templates.xml',
        'views/sale_order_template_views.xml',
        'views/res_config_settings_views.xml',
        'views/sale_order_views.xml',
    ],
    'installable': True,
}
|
ismangil/pjproject | refs/heads/master | pjsip-apps/src/pygui/accountsetting.py | 6 | # $Id$
#
# pjsua Python GUI Demo
#
# Copyright (C)2013 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
if sys.version_info[0] >= 3: # Python 3
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox as msgbox
else:
import Tkinter as tk
import tkMessageBox as msgbox
import ttk
import pjsua2 as pj
import endpoint
import application
class Dialog(tk.Toplevel):
"""
This implements account settings dialog to manipulate account settings.
"""
def __init__(self, parent, cfg):
tk.Toplevel.__init__(self, parent)
self.transient(parent)
self.parent = parent
self.geometry("+100+100")
self.title('Account settings')
self.frm = ttk.Frame(self)
self.frm.pack(expand='yes', fill='both')
self.isOk = False
self.cfg = cfg
self.createWidgets()
def doModal(self):
if self.parent:
self.parent.wait_window(self)
else:
self.wait_window(self)
return self.isOk
def createWidgets(self):
# The notebook
self.frm.rowconfigure(0, weight=1)
self.frm.rowconfigure(1, weight=0)
self.frm.columnconfigure(0, weight=1)
self.frm.columnconfigure(1, weight=1)
self.wTab = ttk.Notebook(self.frm)
self.wTab.grid(column=0, row=0, columnspan=2, padx=10, pady=10, ipadx=20, ipady=20, sticky=tk.N+tk.S+tk.W+tk.E)
# Main buttons
btnOk = ttk.Button(self.frm, text='Ok', command=self.onOk)
btnOk.grid(column=0, row=1, sticky=tk.E, padx=20, pady=10)
btnCancel = ttk.Button(self.frm, text='Cancel', command=self.onCancel)
btnCancel.grid(column=1, row=1, sticky=tk.W, padx=20, pady=10)
# Tabs
self.createBasicTab()
self.createSipTab()
self.createMediaTab()
self.createMediaNatTab()
def createBasicTab(self):
# Prepare the variables to set/receive values from GUI
self.cfgPriority = tk.IntVar(value=self.cfg.priority)
self.cfgAccId = tk.StringVar(value=self.cfg.idUri)
self.cfgRegistrar = tk.StringVar(value=self.cfg.regConfig.registrarUri)
self.cfgRegisterOnAdd = tk.BooleanVar(value=self.cfg.regConfig.registerOnAdd)
self.cfgUsername = tk.StringVar()
self.cfgPassword = tk.StringVar()
if len(self.cfg.sipConfig.authCreds):
self.cfgUsername.set( self.cfg.sipConfig.authCreds[0].username )
self.cfgPassword.set( self.cfg.sipConfig.authCreds[0].data )
self.cfgProxy = tk.StringVar()
if len(self.cfg.sipConfig.proxies):
self.cfgProxy.set( self.cfg.sipConfig.proxies[0] )
# Build the tab page
frm = ttk.Frame(self.frm)
frm.columnconfigure(0, weight=1)
frm.columnconfigure(1, weight=2)
row = 0
ttk.Label(frm, text='Priority:').grid(row=row, column=0, sticky=tk.E, pady=2)
tk.Spinbox(frm, from_=0, to=9, textvariable=self.cfgPriority, width=2).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='ID (URI):').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Entry(frm, textvariable=self.cfgAccId, width=32).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='Registrar URI:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Entry(frm, textvariable=self.cfgRegistrar, width=32).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Checkbutton(frm, text='Register on add', variable=self.cfgRegisterOnAdd).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Label(frm, text='Optional proxy URI:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Entry(frm, textvariable=self.cfgProxy, width=32).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='Auth username:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Entry(frm, textvariable=self.cfgUsername, width=16).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='Password:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Entry(frm, textvariable=self.cfgPassword, show='*', width=16).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
self.wTab.add(frm, text='Basic Settings')
def createSipTab(self):
# Prepare the variables to set/receive values from GUI
self.cfgPrackUse = tk.IntVar(value=self.cfg.callConfig.prackUse)
self.cfgTimerUse = tk.IntVar(value=self.cfg.callConfig.timerUse)
self.cfgTimerExpires = tk.IntVar(value=self.cfg.callConfig.timerSessExpiresSec)
self.cfgPublish = tk.BooleanVar(value=self.cfg.presConfig.publishEnabled)
self.cfgMwiEnabled = tk.BooleanVar(value=self.cfg.mwiConfig.enabled)
self.cfgEnableContactRewrite = tk.BooleanVar(value=self.cfg.natConfig.contactRewriteUse != 0)
self.cfgEnableViaRewrite = tk.BooleanVar(value=self.cfg.natConfig.viaRewriteUse != 0)
self.cfgEnableSdpRewrite = tk.BooleanVar(value=self.cfg.natConfig.sdpNatRewriteUse != 0)
self.cfgEnableSipOutbound = tk.BooleanVar(value=self.cfg.natConfig.sipOutboundUse != 0)
self.cfgKaInterval = tk.IntVar(value=self.cfg.natConfig.udpKaIntervalSec)
# Build the tab page
frm = ttk.Frame(self.frm)
frm.columnconfigure(0, weight=1)
frm.columnconfigure(1, weight=2)
row = 0
ttk.Label(frm, text='100rel/PRACK:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Radiobutton(frm, text='Only offer PRACK', value=pj.PJSUA_100REL_NOT_USED, variable=self.cfgPrackUse).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Offer and use if remote supports', value=pj.PJSUA_100REL_OPTIONAL, variable=self.cfgPrackUse).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Required', value=pj.PJSUA_100REL_MANDATORY, variable=self.cfgPrackUse).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='Session Timer:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Radiobutton(frm, text='Not offered', value=pj.PJSUA_SIP_TIMER_INACTIVE, variable=self.cfgTimerUse).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Optional', value=pj.PJSUA_SIP_TIMER_OPTIONAL, variable=self.cfgTimerUse).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Required', value=pj.PJSUA_SIP_TIMER_REQUIRED, variable=self.cfgTimerUse).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text="Always use", value=pj.PJSUA_SIP_TIMER_ALWAYS, variable=self.cfgTimerUse).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='Session Timer Expiration:').grid(row=row, column=0, sticky=tk.E, pady=2)
tk.Spinbox(frm, from_=90, to=7200, textvariable=self.cfgTimerExpires, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
ttk.Label(frm, text='(seconds)').grid(row=row, column=1, sticky=tk.E)
row += 1
ttk.Label(frm, text='Presence:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Checkbutton(frm, text='Enable PUBLISH', variable=self.cfgPublish).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Label(frm, text='Message Waiting Indication:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Checkbutton(frm, text='Enable MWI', variable=self.cfgMwiEnabled).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Label(frm, text='NAT Traversal:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Checkbutton(frm, text='Enable Contact Rewrite', variable=self.cfgEnableContactRewrite).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Checkbutton(frm, text='Enable Via Rewrite', variable=self.cfgEnableViaRewrite).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Checkbutton(frm, text='Enable SDP IP Address Rewrite', variable=self.cfgEnableSdpRewrite).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Checkbutton(frm, text='Enable SIP Outbound Extension', variable=self.cfgEnableSipOutbound).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Label(frm, text='UDP Keep-Alive Interval:').grid(row=row, column=0, sticky=tk.E, pady=2)
tk.Spinbox(frm, from_=0, to=3600, textvariable=self.cfgKaInterval, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
ttk.Label(frm, text='(seconds) Zero to disable.').grid(row=row, column=1, sticky=tk.E)
self.wTab.add(frm, text='SIP Features')
def createMediaTab(self):
# Prepare the variables to set/receive values from GUI
self.cfgMedPort = tk.IntVar(value=self.cfg.mediaConfig.transportConfig.port)
self.cfgMedPortRange = tk.IntVar(value=self.cfg.mediaConfig.transportConfig.portRange)
self.cfgMedLockCodec = tk.BooleanVar(value=self.cfg.mediaConfig.lockCodecEnabled)
self.cfgMedSrtp = tk.IntVar(value=self.cfg.mediaConfig.srtpUse)
self.cfgMedSrtpSecure = tk.IntVar(value=self.cfg.mediaConfig.srtpSecureSignaling)
self.cfgMedIpv6 = tk.BooleanVar(value=self.cfg.mediaConfig.ipv6Use==pj.PJSUA_IPV6_ENABLED)
# Build the tab page
frm = ttk.Frame(self.frm)
frm.columnconfigure(0, weight=1)
frm.columnconfigure(1, weight=21)
row = 0
ttk.Label(frm, text='Secure RTP (SRTP):').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Radiobutton(frm, text='Disable', value=pj.PJMEDIA_SRTP_DISABLED, variable=self.cfgMedSrtp).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Mandatory', value=pj.PJMEDIA_SRTP_MANDATORY, variable=self.cfgMedSrtp).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Optional (non-standard)', value=pj.PJMEDIA_SRTP_OPTIONAL, variable=self.cfgMedSrtp).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='SRTP signaling:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Radiobutton(frm, text='Does not require secure signaling', value=0, variable=self.cfgMedSrtpSecure).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Require secure next hop (TLS)', value=1, variable=self.cfgMedSrtpSecure).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Require secure end-to-end (SIPS)', value=2, variable=self.cfgMedSrtpSecure).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='RTP transport start port:').grid(row=row, column=0, sticky=tk.E, pady=2)
tk.Spinbox(frm, from_=0, to=65535, textvariable=self.cfgMedPort, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
ttk.Label(frm, text='(0: any)').grid(row=row, column=1, sticky=tk.E, pady=2)
row += 1
ttk.Label(frm, text='Port range:').grid(row=row, column=0, sticky=tk.E, pady=2)
tk.Spinbox(frm, from_=0, to=65535, textvariable=self.cfgMedPortRange, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
ttk.Label(frm, text='(0: not limited)').grid(row=row, column=1, sticky=tk.E, pady=2)
row += 1
ttk.Label(frm, text='Lock codec:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Checkbutton(frm, text='Enable', variable=self.cfgMedLockCodec).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Label(frm, text='Use IPv6:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Checkbutton(frm, text='Yes', variable=self.cfgMedIpv6).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
self.wTab.add(frm, text='Media settings')
def createMediaNatTab(self):
# Prepare the variables to set/receive values from GUI
self.cfgSipUseStun = tk.IntVar(value = self.cfg.natConfig.sipStunUse)
self.cfgMediaUseStun = tk.IntVar(value = self.cfg.natConfig.mediaStunUse)
self.cfgIceEnabled = tk.BooleanVar(value = self.cfg.natConfig.iceEnabled)
self.cfgIceAggressive = tk.BooleanVar(value = self.cfg.natConfig.iceAggressiveNomination)
self.cfgAlwaysUpdate = tk.BooleanVar(value = True if self.cfg.natConfig.iceAlwaysUpdate else False)
self.cfgIceNoHostCands = tk.BooleanVar(value = True if self.cfg.natConfig.iceMaxHostCands == 0 else False)
self.cfgTurnEnabled = tk.BooleanVar(value = self.cfg.natConfig.turnEnabled)
self.cfgTurnServer = tk.StringVar(value = self.cfg.natConfig.turnServer)
self.cfgTurnConnType = tk.IntVar(value = self.cfg.natConfig.turnConnType)
self.cfgTurnUser = tk.StringVar(value = self.cfg.natConfig.turnUserName)
self.cfgTurnPasswd = tk.StringVar(value = self.cfg.natConfig.turnPassword)
# Build the tab page
frm = ttk.Frame(self.frm)
frm.columnconfigure(0, weight=1)
frm.columnconfigure(1, weight=2)
row = 0
ttk.Label(frm, text='SIP STUN Usage:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Radiobutton(frm, text='Default', value=pj.PJSUA_STUN_USE_DEFAULT, variable=self.cfgSipUseStun).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Disable', value=pj.PJSUA_STUN_USE_DISABLED, variable=self.cfgSipUseStun).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='Media STUN Usage:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Radiobutton(frm, text='Default', value=pj.PJSUA_STUN_USE_DEFAULT, variable=self.cfgMediaUseStun).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='Disable', value=pj.PJSUA_STUN_USE_DISABLED, variable=self.cfgMediaUseStun).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='ICE:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Checkbutton(frm, text='Enable', variable=self.cfgIceEnabled).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Checkbutton(frm, text='Use aggresive nomination', variable=self.cfgIceAggressive).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Checkbutton(frm, text='Always re-INVITE after negotiation', variable=self.cfgAlwaysUpdate).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Checkbutton(frm, text='Disable host candidates', variable=self.cfgIceNoHostCands).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Label(frm, text='TURN:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Checkbutton(frm, text='Enable', variable=self.cfgTurnEnabled).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
row += 1
ttk.Label(frm, text='TURN server:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Entry(frm, textvariable=self.cfgTurnServer, width=20).grid(row=row, column=1, sticky=tk.W, padx=6)
ttk.Label(frm, text='host[:port]').grid(row=row, column=1, sticky=tk.E, pady=6)
row += 1
ttk.Label(frm, text='TURN connection:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Radiobutton(frm, text='UDP', value=pj.PJ_TURN_TP_UDP, variable=self.cfgTurnConnType).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Radiobutton(frm, text='TCP', value=pj.PJ_TURN_TP_TCP, variable=self.cfgTurnConnType).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='TURN username:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Entry(frm, textvariable=self.cfgTurnUser, width=16).grid(row=row, column=1, sticky=tk.W, padx=6)
row += 1
ttk.Label(frm, text='TURN password:').grid(row=row, column=0, sticky=tk.E, pady=2)
ttk.Entry(frm, textvariable=self.cfgTurnPasswd, show='*', width=16).grid(row=row, column=1, sticky=tk.W, padx=6)
self.wTab.add(frm, text='NAT settings')
def onOk(self):
# Check basic settings
errors = "";
if not self.cfgAccId.get():
errors += "Account ID is required\n"
if self.cfgAccId.get():
if not endpoint.validateSipUri(self.cfgAccId.get()):
errors += "Invalid SIP ID URI: '%s'\n" % (self.cfgAccId.get())
if self.cfgRegistrar.get():
if not endpoint.validateSipUri(self.cfgRegistrar.get()):
errors += "Invalid SIP registrar URI: '%s'\n" % (self.cfgRegistrar.get())
if self.cfgProxy.get():
if not endpoint.validateSipUri(self.cfgProxy.get()):
errors += "Invalid SIP proxy URI: '%s'\n" % (self.cfgProxy.get())
if self.cfgTurnEnabled.get():
if not self.cfgTurnServer.get():
errors += "TURN server is required\n"
if errors:
msgbox.showerror("Error detected:", errors)
return
# Basic settings
self.cfg.priority = self.cfgPriority.get()
self.cfg.idUri = self.cfgAccId.get()
self.cfg.regConfig.registrarUri = self.cfgRegistrar.get()
self.cfg.regConfig.registerOnAdd = self.cfgRegisterOnAdd.get()
while len(self.cfg.sipConfig.authCreds):
self.cfg.sipConfig.authCreds.pop()
if self.cfgUsername.get():
cred = pj.AuthCredInfo()
cred.scheme = "digest"
cred.realm = "*"
cred.username = self.cfgUsername.get()
cred.data = self.cfgPassword.get()
self.cfg.sipConfig.authCreds.append(cred)
while len(self.cfg.sipConfig.proxies):
self.cfg.sipConfig.proxies.pop()
if self.cfgProxy.get():
self.cfg.sipConfig.proxies.append(self.cfgProxy.get())
# SIP features
self.cfg.callConfig.prackUse = self.cfgPrackUse.get()
self.cfg.callConfig.timerUse = self.cfgTimerUse.get()
self.cfg.callConfig.timerSessExpiresSec = self.cfgTimerExpires.get()
self.cfg.presConfig.publishEnabled = self.cfgPublish.get()
self.cfg.mwiConfig.enabled = self.cfgMwiEnabled.get()
self.cfg.natConfig.contactRewriteUse = 1 if self.cfgEnableContactRewrite.get() else 0
self.cfg.natConfig.viaRewriteUse = 1 if self.cfgEnableViaRewrite.get() else 0
self.cfg.natConfig.sdpNatRewriteUse = 1 if self.cfgEnableSdpRewrite.get() else 0
self.cfg.natConfig.sipOutboundUse = 1 if self.cfgEnableSipOutbound.get() else 0
self.cfg.natConfig.udpKaIntervalSec = self.cfgKaInterval.get()
# Media
self.cfg.mediaConfig.transportConfig.port = self.cfgMedPort.get()
self.cfg.mediaConfig.transportConfig.portRange = self.cfgMedPortRange.get()
self.cfg.mediaConfig.lockCodecEnabled = self.cfgMedLockCodec.get()
self.cfg.mediaConfig.srtpUse = self.cfgMedSrtp.get()
self.cfg.mediaConfig.srtpSecureSignaling = self.cfgMedSrtpSecure.get()
self.cfg.mediaConfig.ipv6Use = pj.PJSUA_IPV6_ENABLED if self.cfgMedIpv6.get() else pj.PJSUA_IPV6_DISABLED
# NAT
self.cfg.natConfig.sipStunUse = self.cfgSipUseStun.get()
self.cfg.natConfig.mediaStunUse = self.cfgMediaUseStun.get()
self.cfg.natConfig.iceEnabled = self.cfgIceEnabled.get()
self.cfg.natConfig.iceAggressiveNomination = self.cfgIceAggressive .get()
self.cfg.natConfig.iceAlwaysUpdate = self.cfgAlwaysUpdate.get()
self.cfg.natConfig.iceMaxHostCands = 0 if self.cfgIceNoHostCands.get() else -1
self.cfg.natConfig.turnEnabled = self.cfgTurnEnabled.get()
self.cfg.natConfig.turnServer = self.cfgTurnServer.get()
self.cfg.natConfig.turnConnType = self.cfgTurnConnType.get()
self.cfg.natConfig.turnUserName = self.cfgTurnUser.get()
self.cfg.natConfig.turnPasswordType = 0
self.cfg.natConfig.turnPassword = self.cfgTurnPasswd.get()
self.isOk = True
self.destroy()
def onCancel(self):
    """Dismiss the dialog without saving; self.isOk is left unset/False."""
    self.destroy()
# Script entry point: delegate to the application module's main().
if __name__ == '__main__':
    application.main()
|
filipealmeida/probespawner | refs/heads/master | jelh.py | 1 | # Any copyright is dedicated to the Public Domain.
# http://creativecommons.org/publicdomain/zero/1.0/
#Jython Elasticsearch Little Helper
#import org.apache.tomcat.jdbc.pool as dbpool
import org.elasticsearch.common.transport.InetSocketTransportAddress as InetSocketTransportAddress
import org.elasticsearch.client.transport.TransportClient as TransportClient
import org.elasticsearch.common.settings.ImmutableSettings as ImmutableSettings
import org.elasticsearch.client.transport.NoNodeAvailableException as NoNodeAvailableException
import org.elasticsearch.indices.IndexAlreadyExistsException as IndexAlreadyExistsException
import traceback
import time
import datetime
import com.xhaus.jyson.JysonCodec as json
import threading
import sys
import logging
logger = logging.getLogger(__name__)
class Elasticsearch():
def __init__(self, config):
if isinstance(config, basestring):
self.config = json.loads(config.decode('utf-8'))
else:
self.config = config
self.runtime = {}
#TODO: ugly ugly initialization all around, review, make it sane
clusterName = "elasticsearch"
host = "localhost"
port = 9300
if "host" in self.config:
host = self.config["host"]
else:
host = "localhost"
if "port" in self.config:
port = self.config["port"]
else:
port = "9300"
if "bulkActions" not in self.config:
self.config["bulkActions"] = 1000
if "bulkSize" not in self.config:
self.config["bulkSize"] = 107374182400
if "flushInterval" not in self.config:
self.config["flushInterval"] = 60000
if "concurrentRequests" not in self.config:
self.config["concurrentRequests"] = 1
if "actionRetryTimeout" not in self.config:
self.config["actionRetryTimeout"] = 5
if "type" not in self.config:
self.config["type"] = "logs"
if "indexPrefix" not in self.config:
self.config["indexPrefix"] = "sampleindex"
if "indexSuffix" not in self.config:
self.config["indexSuffix"] = "-%Y.%m.%d"
logger.debug("Initializing elasticsearch output %s: %s", self.config["indexPrefix"], json.dumps("self.config"))
self.config["settings"] = ImmutableSettings.settingsBuilder();
if "options" not in self.config:
self.config["options"] = {}
if "cluster" in self.config:
self.config["options"]["cluster.name"] = self.config["cluster"]
else:
self.config["options"]["cluster.name"] = "elasticsearch"
else:
if "cluster.name" not in self.config["options"]:
if "cluster" in self.config:
self.config["options"]["cluster.name"] = self.config["cluster"]
else:
self.config["options"]["cluster.name"] = "elasticsearch"
for setting in self.config["options"]:
value = self.config["options"][setting]
logger.info("Setting Elasticsearch options: %s = %s", setting, value)
self.config["settings"].put(setting, value)
self.config["settings"].build()
self.runtime["client"] = TransportClient(self.config["settings"])
if "host" in self.config:
self.runtime["client"].addTransportAddress(InetSocketTransportAddress(host, port))
if "hosts" in self.config:
for hostport in self.config["hosts"]:
host, port = hostport.split(":")
logger.info("Setting Elasticsearch host: %s = %s", host, port)
self.runtime["client"].addTransportAddress(InetSocketTransportAddress(str(host), int(port)))
self.readyBulk()
self.runtime["indices"] = {}
def createIndex(self, indexName):
if indexName not in self.runtime["indices"]:
if self.runtime["client"].admin().indices().prepareExists(indexName).execute().actionGet().exists:
logger.debug("Index \"%s\" already exists", indexName)
self.runtime["indices"][indexName] = time.time()
return False
else:
logger.info("Creating index %s", indexName)
if "index_settings" in self.config:
self.config["indexSettings"] = self.config["index_settings"]
if "type_mapping" in self.config:
self.config["typeMapping"] = self.config["type_mapping"]
try:
if "indexSettings" in self.config:
settingsJsonStr = json.dumps(self.config["indexSettings"])
logger.info("Index settings: %s", settingsJsonStr)
self.runtime["client"].admin().indices().prepareCreate(indexName).setSettings(settingsJsonStr).execute().actionGet()
else:
self.runtime["client"].admin().indices().prepareCreate(indexName).execute().actionGet()
except IndexAlreadyExistsException, ex:
logger.warning(ex)
logger.warning("Index %s already exists, this should be harmless", indexName)
if "typeMapping" in self.config:
mappingJsonStr = json.dumps(self.config["typeMapping"])
logger.info("Setting mapping for %s/%s - %s", indexName, self.config["type"], mappingJsonStr)
self.runtime["client"].admin().indices().preparePutMapping().setIndices(indexName).setType(self.config["type"]).setSource(mappingJsonStr).execute().actionGet()
self.runtime["indices"][indexName] = time.time()
logger.debug("Created index: \"%s\"", indexName)
return True
logger.debug("Index already initialized: \"%s\"", indexName)
return False
def writeDocument(self, data, force):
bulkRequest = self.runtime["bulkRequest"]
client = self.runtime["client"]
if data != None:
indexName = self.getIndexName(data)
if "_id" in data:
bulkRequest.add(client.prepareIndex(indexName, self.config["type"], data["_id"]).setSource(json.dumps(data)))
else:
bulkRequest.add(client.prepareIndex(indexName, self.config["type"]).setSource(json.dumps(data)))
self.runtime["requestsPending"] = self.runtime["requestsPending"] + 1
#TIME TO FLUSH
if (self.runtime["requestsPending"] > 0) and ((self.runtime["requestsPending"] >= self.config["bulkActions"]) or (force == True)):
logger.info("Flushing %d records", self.runtime["requestsPending"])
#TODO: handle failure: org.elasticsearch.client.transport.NoNodeAvailableException
#TODO: use JodaTime instead of jython's datetime/time
bulkReady = False
while not bulkReady:
try:
bulkResponse = bulkRequest.execute().actionGet();
bulkReady = True
except NoNodeAvailableException, ex:
logger.error(ex)
logger.warning("Bad bulk response, sleeping %d seconds before retrying, execution paused", self.config["actionRetryTimeout"])
time.sleep(self.config["actionRetryTimeout"]);
raise
if bulkResponse.hasFailures():
logger.warning("Failures indexing!")
logger.warning(bulkResponse.buildFailureMessage())
self.readyBulk()
return True
def readyBulk(self):
self.runtime["bulkRequest"] = self.runtime["client"].prepareBulk()
self.runtime["requestsPending"] = 0
def getIndexName(self):
return self.config["indexPrefix"]
def getIndexName(self, data):
indexName = self.config["indexPrefix"]
if "indexSuffix" in self.config:
indexSuffix = self.config["indexSuffix"]
if indexSuffix in data:
indexSuffixStr = ""
logger.debug("Index suffix is a fieldname: %s, %s", indexSuffix, data[indexSuffix])
#TODO: deal with the timezone
try:
dateobj = datetime.datetime.strptime(data[indexSuffix], "%Y-%m-%d")
indexSuffixStr = dateobj.strftime("%Y.%m.%d")
except Exception, e:
indexSuffixStr = data[indexSuffix].lower()
indexName = indexName + "-" + indexSuffixStr
else:
logger.debug("Index suffix is will be treated as datetime format: %s", indexSuffix)
indexName = indexName + datetime.datetime.utcnow().strftime(indexSuffix)
logger.debug("Going for index creation: %s", indexName)
#TODO: handle other failures
indexReady = False
while not indexReady:
try:
self.createIndex(indexName)
indexReady = True
except NoNodeAvailableException, ex:
logger.error(ex)
logger.warning("Failed to initialize index, sleeping a %d seconds before retrying, execution paused", self.config["actionRetryTimeout"])
time.sleep(self.config["actionRetryTimeout"])
return indexName
def flush(self):
return self.writeDocument(None, True)
def cleanup(self):
return False |
toastdriven/pubsubittyhub | refs/heads/master | bitty.py | 1 | # -*- coding: utf-8 -*-
"""
A tiny database layer.
Why another database layer? I wanted one that was small (both in terms of a
single file and in actual kloc), tested and could handle multiple data stores.
And because it was fun.
Example::
from bitty import *
bit = Bitty('sqlite:///home/code/my_database.db')
bitty.add('people', name='Claris', says='Moof!', age=37)
bitty.add('people', name='John Doe', says='No comment.', age=37)
# Select all.
for row in bitty.find('people'):
print row['name']
bit.close()
You're responsible for your own schema. bitty does the smallest amount of
introspection it can to get by. bitty supports the usual CRUD methods.
Tastes great when used with itty. Serious Python Programmers™ with Enterprise
Requirements need not apply.
"""
import re
__author__ = 'Daniel Lindsley'
__version__ = ('0', '5', '0', 'alpha')
# DSN for file-backed stores, e.g. ``sqlite:///path/to.db``.
FILESYSTEM_DSN = re.compile(r'^(?P<adapter>\w+)://(?P<path>.*)$')
# DSN for client/server stores, e.g. ``mysql://user:pass@host/dbname``.
DAEMON_DSN = re.compile(r'^(?P<adapter>\w+)://(?P<user>[\w\d_.-]+):(?P<password>[\w\d_.-]*?)@(?P<host>.*?)/(?P<database>.*?)$')
class BittyError(Exception):
    """Base class for all bitty errors."""
    pass

class QueryError(BittyError):
    """Raised for malformed or unsupported queries."""
    pass

class InvalidDSN(BittyError):
    """Raised when a DSN string cannot be parsed or matched to an adapter."""
    pass
class BaseSQLAdapter(object):
    """Shared SQL plumbing: query building plus CRUD over a DB-API connection.

    Subclasses supply ``get_connection`` and ``_get_column_names`` and may
    override ``BINDING_OP`` / ``FILTER_OPTIONS`` for their dialect.
    """

    BINDING_OP = '%s'
    FILTER_OPTIONS = {
        'lt': "%s < %s",
        'lte': "%s <= %s",
        'gt': "%s > %s",
        'gte': "%s >= %s",
        'startswith': "%s LIKE %s",
        'endswith': "%s LIKE %s",
        'contains': "%s LIKE %s",
    }

    def __init__(self, dsn):
        self.connection = self.get_connection(dsn)
        # Cache of table name -> sorted column names.
        self._tables = {}

    def get_connection(self, dsn):
        """Open and return a DB-API connection for ``dsn``. Adapter-specific."""
        raise NotImplementedError("Subclasses must implement the 'get_connection' method.")

    def raw(self, query, params=[], commit=True):
        """Execute ``query`` with ``params``; commit on success, roll back on error."""
        cursor = self.connection.cursor()
        try:
            cursor.execute(query, params)
            if commit:
                self.connection.commit()
        except:
            self.connection.rollback()
            raise
        return cursor

    def _get_column_names(self, **kwargs):
        raise NotImplementedError("Subclasses must implement the '_get_column_names' method.")

    def _build_insert_query(self, table, **kwargs):
        """Return ``(sql, values)`` for an INSERT of ``kwargs`` into ``table``."""
        columns = sorted(kwargs)
        row_values = [kwargs[col] for col in columns]
        placeholders = ', '.join(self.BINDING_OP for _ in row_values)
        sql = "INSERT INTO %s (%s) VALUES (%s)" % (table, ', '.join(columns), placeholders)
        return sql, row_values

    def _build_where_clause(self, **kwargs):
        """Translate ``column`` / ``column__op`` kwargs into a WHERE clause.

        Returns ``(clause, bind_params)``; the clause is '' with no kwargs.
        """
        if not kwargs:
            return '', []

        clauses, bind_params = [], []

        for spec in sorted(kwargs):
            value = kwargs[spec]
            parts = spec.split('__')

            if len(parts) > 2:
                raise QueryError("'%s' is not a supported lookup. Only one set of '__' is allowed." % spec)

            if len(parts) == 1:
                # Plain equality lookup.
                clauses.append("%s = %s" % (parts[0], self.BINDING_OP))
                bind_params.append(value)
                continue

            column, op = parts

            if op == 'in':
                binds = ', '.join(self.BINDING_OP for _ in value)
                clauses.append("%s IN (%s)" % (column, binds))
                bind_params.extend(list(value))
            elif op in self.FILTER_OPTIONS:
                clauses.append(self.FILTER_OPTIONS[op] % (column, self.BINDING_OP))
                # LIKE lookups need wildcard decoration on the bound value.
                if op in ('startswith', 'contains'):
                    value = "%s%%" % value
                if op in ('endswith', 'contains'):
                    value = "%%%s" % value
                bind_params.append(value)
            else:
                # Unrecognized suffix: fall back to an exact lookup.
                clauses.append("%s = %s" % (column, self.BINDING_OP))
                bind_params.append(value)

        return "WHERE %s" % ' AND '.join(clauses), bind_params

    def _build_update_query(self, table, pk, **kwargs):
        """Return ``(sql, values)`` for an UPDATE of row ``pk`` in ``table``."""
        columns = sorted(kwargs)
        bound_values = [kwargs[col] for col in columns]
        # The pk binds to the trailing "WHERE id = ?" placeholder.
        bound_values.append(pk)
        assignments = ["%s = %s" % (col, self.BINDING_OP) for col in columns]
        sql = "UPDATE %s SET %s WHERE id = %s" % (table, ', '.join(assignments), self.BINDING_OP)
        return sql, bound_values

    def _build_delete_query(self, table, pk):
        """Return ``(sql, values)`` for a DELETE of row ``pk`` from ``table``."""
        return "DELETE FROM %s WHERE id = %s" % (table, self.BINDING_OP), [pk]

    def _build_select_query(self, table, **kwargs):
        """Return ``(sql, values)`` selecting all columns, filtered by kwargs."""
        all_columns = self._get_column_names(table)
        where_clause, where_values = self._build_where_clause(**kwargs)
        sql = "SELECT %s FROM %s" % (', '.join(all_columns), table)
        if kwargs:
            sql = "%s %s" % (sql, where_clause)
        return sql, where_values

    def add(self, table, **kwargs):
        """Insert a row; returns True when exactly one row was created."""
        if not kwargs:
            raise QueryError("The 'add' method requires at least one pair of kwargs.")
        sql, values = self._build_insert_query(table, **kwargs)
        return self.raw(sql, params=values).rowcount == 1

    def update(self, table, pk, **kwargs):
        """Update row ``pk``; returns True when exactly one row changed."""
        sql, values = self._build_update_query(table, pk, **kwargs)
        return self.raw(sql, params=values).rowcount == 1

    def delete(self, table, pk):
        """Delete row ``pk``; returns True when exactly one row was removed."""
        sql, values = self._build_delete_query(table, pk)
        return self.raw(sql, params=values).rowcount == 1

    def find(self, table, **kwargs):
        """Return matching rows as a list of column-name -> value dicts."""
        sql, values = self._build_select_query(table, **kwargs)
        cursor = self.raw(sql, params=values, commit=False)
        column_names = self._get_column_names(table)
        return [dict(zip(column_names, row)) for row in cursor.fetchall()]

    def close(self, commit=True):
        """Optionally commit, then close the underlying connection."""
        if commit:
            self.connection.commit()
        return self.connection.close()
class SQLiteAdapter(BaseSQLAdapter):
    """sqlite3-backed adapter. DSN form: ``sqlite://<filesystem path>``."""

    BINDING_OP = '?'
    FILTER_OPTIONS = {
        'lt': "%s < %s",
        'lte': "%s <= %s",
        'gt': "%s > %s",
        'gte': "%s >= %s",
        'startswith': "%s LIKE %s ESCAPE '\\'",
        'endswith': "%s LIKE %s ESCAPE '\\'",
        'contains': "%s LIKE %s ESCAPE '\\'",
    }

    def get_connection(self, dsn):
        """Parse a filesystem DSN and open the sqlite database it names."""
        matched = FILESYSTEM_DSN.match(dsn)
        if matched is None:
            raise InvalidDSN("'sqlite' adapter received an invalid DSN '%s'." % dsn)
        import sqlite3
        return sqlite3.connect(matched.groupdict()['path'])

    def raw(self, query, params=[], commit=True):
        """Execute a query; sqlite's execute() returns a cursor, so return that."""
        cursor = self.connection.cursor()
        try:
            result = cursor.execute(query, params)
            if commit:
                self.connection.commit()
        except:
            self.connection.rollback()
            raise
        return result

    def _get_column_names(self, table):
        """Return (and cache) the sorted column names of ``table``."""
        if table not in self._tables:
            description = self.raw("SELECT * FROM %s" % table).description
            self._tables[table] = sorted(col[0] for col in description)
        return self._tables[table]
class PostgresAdapter(BaseSQLAdapter):
    """psycopg2-backed adapter. DSN form: ``postgres://user:pass@host/db``."""

    def get_connection(self, dsn):
        """Parse a daemon DSN and open a psycopg2 connection."""
        matched = DAEMON_DSN.match(dsn)
        if matched is None:
            raise InvalidDSN("'postgres' adapter received an invalid DSN '%s'." % dsn)
        details = matched.groupdict()
        import psycopg2
        return psycopg2.connect("dbname='%s' user='%s' host='%s' password='%s'" % (details['database'], details['user'], details['host'], details['password']))

    def _get_column_names(self, table):
        """Return (and cache) the sorted, non-dropped column names of ``table``.

        Introspects pg_catalog directly.
        NOTE(review): the table name is string-interpolated, not bound --
        fine for trusted table names, unsafe for untrusted input.
        """
        # Backslash continuations keep this a single line of SQL.
        query = "SELECT a.attname AS column \
            FROM pg_catalog.pg_attribute a \
            WHERE a.attnum > 0 \
            AND NOT a.attisdropped \
            AND a.attrelid = ( \
                SELECT c.oid \
                FROM pg_catalog.pg_class c \
                LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \
                WHERE c.relname ~ '^(%s)$' \
                AND pg_catalog.pg_table_is_visible(c.oid) \
            );" % table
        if table not in self._tables:
            result = self.raw(query, commit=False)
            if not result:
                raise QueryError("Table '%s' was not found or has no columns." % table)
            self._tables[table] = sorted(col[0] for col in result.fetchall())
        return self._tables[table]
class MySQLAdapter(BaseSQLAdapter):
    """MySQLdb-backed adapter. DSN form: ``mysql://user:pass@host/db``."""

    def get_connection(self, dsn):
        """Parse a daemon DSN and open a MySQLdb connection."""
        matched = DAEMON_DSN.match(dsn)
        if matched is None:
            raise InvalidDSN("'mysql' adapter received an invalid DSN '%s'." % dsn)
        details = matched.groupdict()
        # MySQLdb spells the kwargs 'db'/'passwd' rather than
        # 'database'/'password'; the regex's 'adapter' group is dropped.
        key_map = {
            'database': 'db',
            'user': 'user',
            'host': 'host',
            'password': 'passwd',
        }
        connection_details = {}
        for source_key, target_key in key_map.items():
            if source_key in details:
                connection_details[target_key] = details[source_key]
        import MySQLdb
        return MySQLdb.connect(**connection_details)

    def _get_column_names(self, table):
        """Return (and cache) the sorted column names of ``table`` via DESC."""
        if table not in self._tables:
            result = self.raw("DESC %s;" % table, commit=False)
            if not result:
                raise QueryError("Table '%s' was not found or has no columns." % table)
            self._tables[table] = sorted(col[0] for col in result.fetchall())
        return self._tables[table]
class Bitty(object):
    """Public facade: parse the DSN, pick an adapter, delegate CRUD to it.

    Valid DSNs::

        * sqlite:///Users/daniellindsley/test.db
        * postgres://daniel:my_p4ss@localhost:5432/test_db
        * mysql://daniel:my_p4ss@localhost/test_db
    """

    ADAPTERS = {
        'sqlite': SQLiteAdapter,
        # 'json': JSONAdapter,
        'mysql': MySQLAdapter,
        'postgres': PostgresAdapter,
    }

    def __init__(self, dsn):
        self.dsn = dsn
        self.adapter = self.get_adapter()

    def get_adapter(self, dsn=None):
        """Instantiate the adapter whose scheme name prefixes ``dsn``."""
        dsn = self.dsn if dsn is None else dsn
        chosen = None
        for name in self.ADAPTERS:
            if dsn.startswith(name):
                chosen = name
        if chosen is None:
            raise InvalidDSN("'%s' is not a recognizable DSN." % dsn)
        return self.ADAPTERS[chosen](dsn)

    def add(self, table, **kwargs):
        """Insert a row built from ``kwargs``."""
        return self.adapter.add(table, **kwargs)

    def update(self, table, pk, **kwargs):
        """Update row ``pk`` with ``kwargs``."""
        return self.adapter.update(table, pk, **kwargs)

    def delete(self, table, pk):
        """Delete row ``pk``."""
        return self.adapter.delete(table, pk)

    def find(self, table, **kwargs):
        """Return all rows matching ``kwargs`` as dicts."""
        return self.adapter.find(table, **kwargs)

    def get(self, table, **kwargs):
        """Return the first matching row, or None when nothing matches."""
        matches = self.find(table, **kwargs)
        return matches[0] if matches else None

    def raw(self, query, **kwargs):
        """Escape hatch: run ``query`` directly on the adapter."""
        return self.adapter.raw(query, **kwargs)

    def close(self, commit=True):
        """Close (and by default commit) the underlying connection."""
        return self.adapter.close(commit=commit)
|
valkjsaaa/sl4a | refs/heads/master | python/src/Lib/test/test_crypt.py | 58 | from test import test_support
import unittest
import crypt
class CryptTestCase(unittest.TestCase):
    """Smoke test for the crypt module."""

    def test_crypt(self):
        # crypt.crypt(word, salt); the result format depends on the platform
        # libc, so we only check that the call succeeds.
        c = crypt.crypt('mypassword', 'ab')
        if test_support.verbose:
            print 'Test encryption: ', c
def test_main():
    """Entry point used by regrtest to run this module's tests."""
    test_support.run_unittest(CryptTestCase)
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
|
almeidapaulopt/frappe | refs/heads/develop | frappe/data_migration/doctype/data_migration_connector/connectors/frappe_connection.py | 16 | from __future__ import unicode_literals
import frappe
from frappe.frappeclient import FrappeClient
from .base import BaseConnection
class FrappeConnection(BaseConnection):
    """Data-migration connection backed by a remote Frappe site's REST client."""

    def __init__(self, connector):
        self.connector = connector
        self.connection = FrappeClient(self.connector.hostname,
            self.connector.username, self.get_password())
        # Remote field that identifies a document.
        self.name_field = 'name'

    def _tagged(self, doctype, doc):
        """Coerce ``doc`` into a frappe._dict carrying its doctype."""
        record = frappe._dict(doc)
        record.doctype = doctype
        return record

    def insert(self, doctype, doc):
        """Create ``doc`` as a new ``doctype`` document on the remote site."""
        return self.connection.insert(self._tagged(doctype, doc))

    def update(self, doctype, doc, migration_id):
        """Overwrite the remote document identified by ``migration_id``."""
        record = self._tagged(doctype, doc)
        record.name = migration_id
        return self.connection.update(record)

    def delete(self, doctype, migration_id):
        """Delete the remote document identified by ``migration_id``."""
        return self.connection.delete(doctype, migration_id)

    def get(self, doctype, fields='"*"', filters=None, start=0, page_length=20):
        """Fetch a page of remote documents matching ``filters``."""
        return self.connection.get_list(doctype, fields=fields, filters=filters,
            limit_start=start, limit_page_length=page_length)
|
thjashin/tensorflow | refs/heads/master | tensorflow/python/debug/lib/stepper_test.py | 39 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests of the tfdbg Stepper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.debug.lib.stepper import NodeStepper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
class StepperTest(test_util.TensorFlowTestCase):
def setUp(self):
  """Build the small graph used by every test.

  e = (a*a) * (a*b), f = b / 0.30, plus a cross-linked trio where x feeds
  both y and z while y = -x and z = x * y.
  """
  self.a = variables.Variable(2.0, name="a")
  self.b = variables.Variable(3.0, name="b")

  self.c = math_ops.multiply(self.a, self.b, name="c")  # Should be 6.0.
  self.d = math_ops.multiply(self.a, self.a, name="d")  # Should be 4.0.

  self.e = math_ops.multiply(self.d, self.c, name="e")  # Should be 24.0.

  self.f_y = constant_op.constant(0.30, name="f_y")
  self.f = math_ops.div(self.b, self.f_y, name="f")  # Should be 10.0.

  # The three nodes x, y and z form a graph with "cross-links" in it. I.e., x
  # and y are both direct inputs to z, but x is also a direct input to y.
  self.x = variables.Variable(2.0, name="x")  # Should be 2.0
  self.y = math_ops.negative(self.x, name="y")  # Should be -2.0.

  self.z = math_ops.multiply(self.x, self.y, name="z")  # Should be -4.0.

  self.sess = session.Session()
  self.sess.run(variables.global_variables_initializer())
def tearDown(self):
  """Reset the default graph so per-test node names don't collide."""
  ops.reset_default_graph()
def testContToFetchNotInTransitiveClosureShouldError(self):
  """cont() must reject targets outside the fetch's transitive closure."""
  with NodeStepper(self.sess, "e:0") as stepper:
    sorted_nodes = stepper.sorted_nodes()
    self.assertEqual(7, len(sorted_nodes))
    # Topological ordering: every node precedes its consumers.
    self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("a/read"))
    self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("b/read"))
    self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("c"))
    self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("c"))
    self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("d"))
    self.assertLess(sorted_nodes.index("d"), sorted_nodes.index("e"))
    self.assertLess(sorted_nodes.index("c"), sorted_nodes.index("e"))

    self.assertSetEqual(
        {"e:0", "d:0", "c:0", "a/read:0", "b/read:0", "b:0", "a:0"},
        set(stepper.closure_elements()))

    # "f:0" is reachable only from the "f:0" fetch, not from "e:0".
    with self.assertRaisesRegexp(
        ValueError,
        "Target \"f:0\" is not in the transitive closure for the fetch of "
        "the stepper"):
      stepper.cont("f:0")
def testContToNodeNameShouldReturnTensorValue(self):
  """cont() accepts a bare node name and returns its output tensor's value."""
  with NodeStepper(self.sess, "e:0") as stepper:
    self.assertAllClose(6.0, stepper.cont("c"))
def testUsingNamesNotUsingIntermediateTensors(self):
  """String tensor names work with cont(); a later cont() reuses handles."""
  with NodeStepper(self.sess, "e:0") as stepper:
    # The first cont() call should have used no feeds.
    result = stepper.cont("c:0")
    self.assertAllClose(6.0, result)
    self.assertItemsEqual(["a/read:0", "b/read:0"],
                          stepper.intermediate_tensor_names())
    self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
    self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
    self.assertEqual({}, stepper.last_feed_types())

    # The second cont() call should have used the tensor handle from the
    # previous cont() call.
    result = stepper.cont("e:0")
    self.assertAllClose(24.0, result)
    self.assertItemsEqual(["a/read:0", "b/read:0", "d:0"],
                          stepper.intermediate_tensor_names())
    self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
    self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
    self.assertAllClose(4.0, stepper.get_tensor_value("d:0"))
    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_HANDLE,
        "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
    }, stepper.last_feed_types())
def testUsingNodesNotUsingIntermediateTensors(self):
  """Tensor objects (not names) work with cont(); handles appear as cached."""
  with NodeStepper(self.sess, self.e) as stepper:
    # There should be no handles before any cont() calls.
    self.assertEqual([], stepper.handle_names())
    self.assertSetEqual(set(), stepper.handle_node_names())

    # Before the cont() call, the stepper should not have access to the value
    # of c:0.
    with self.assertRaisesRegexp(
        ValueError,
        "This stepper instance does not have access to the value of tensor "
        "\"c:0\""):
      stepper.get_tensor_value("c:0")

    # Using the node/tensor itself, instead of the name str, should work on
    # cont().
    result = stepper.cont(self.c)
    self.assertItemsEqual(["a/read:0", "b/read:0"],
                          stepper.intermediate_tensor_names())
    self.assertAllClose(6.0, result)
    self.assertEqual({}, stepper.last_feed_types())

    self.assertEqual(["c:0"], stepper.handle_names())
    self.assertEqual({"c"}, stepper.handle_node_names())

    # After the cont() call, the stepper should have access to the value of
    # c:0 via a tensor handle.
    self.assertAllClose(6.0, stepper.get_tensor_value("c:0"))

    result = stepper.cont(self.e)
    self.assertAllClose(24.0, result)
    self.assertItemsEqual(["a/read:0", "b/read:0", "d:0"],
                          stepper.intermediate_tensor_names())
    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_HANDLE,
        "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
    }, stepper.last_feed_types())
def testContToTensorWithIntermediateDumpShouldUseDump(self):
  """Intermediate tensors dumped by one cont() feed subsequent cont() calls."""
  with NodeStepper(self.sess, ["e:0", "f:0"]) as stepper:
    stepper.cont("c:0")
    self.assertItemsEqual(["a/read:0", "b/read:0"],
                          stepper.intermediate_tensor_names())
    self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
    self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))

    self.assertAllClose(2.0, stepper.cont("a/read:0"))
    self.assertEqual({
        "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
    }, stepper.last_feed_types())

    self.assertAllClose(10.0, stepper.cont("f:0"))
    self.assertEqual({
        "b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
    }, stepper.last_feed_types())
def testDisablingUseDumpedIntermediatesWorks(self):
  """use_dumped_intermediates=False makes cont() recompute from scratch."""
  with NodeStepper(self.sess, ["e:0", "f:0"]) as stepper:
    stepper.cont("c:0")
    self.assertItemsEqual(["a/read:0", "b/read:0"],
                          stepper.intermediate_tensor_names())
    self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
    self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))

    self.assertAllClose(10.0,
                        stepper.cont("f:0", use_dumped_intermediates=False))
    self.assertEqual({}, stepper.last_feed_types())
def testIsFeedableShouldGiveCorrectAnswers(self):
  """Every non-fetch tensor in the closure should be feedable."""
  with NodeStepper(self.sess, self.e) as stepper:
    self.assertTrue(stepper.is_feedable("a/read:0"))
    self.assertTrue(stepper.is_feedable("b/read:0"))
    self.assertTrue(stepper.is_feedable("c:0"))
    self.assertTrue(stepper.is_feedable("d:0"))
def testOverrideValue(self):
  """Overriding a tensor invalidates its handle and feeds downstream nodes."""
  with NodeStepper(self.sess, self.e) as stepper:
    result = stepper.cont(self.c)
    self.assertAllClose(6.0, result)
    self.assertEqual({}, stepper.last_feed_types())

    # There should be no overrides before any cont() calls.
    self.assertEqual([], stepper.override_names())

    # Calling cont() on c again should lead to use of the handle.
    result = stepper.cont(self.c)
    self.assertAllClose(6.0, result)
    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_HANDLE
    }, stepper.last_feed_types())

    # Override c:0.
    stepper.override_tensor("c:0", 7.0)

    # After the overriding, calling get_tensor_value() on c:0 should yield the
    # overriding value.
    self.assertEqual(7.0, stepper.get_tensor_value("c:0"))

    # Now c:0 should have only an override value, but no cached handle,
    # because the handle should have been invalidated.
    self.assertEqual([], stepper.handle_names())
    self.assertSetEqual(set(), stepper.handle_node_names())
    self.assertEqual(["c:0"], stepper.override_names())

    # Run a downstream tensor after the value override.
    result = stepper.cont(self.e)
    self.assertAllClose(28.0, result)  # Should reflect the overriding value.

    # Should use override, instead of the handle.
    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_OVERRIDE,
        "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
    }, stepper.last_feed_types())
def testOverrideValueTwice(self):
  """A second override invalidates handles cached since the first one."""
  with NodeStepper(self.sess, self.e) as stepper:
    # Override once.
    stepper.override_tensor("c:0", 7.0)
    self.assertAllClose(28.0, stepper.cont(self.e))
    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_OVERRIDE
    }, stepper.last_feed_types())

    self.assertEqual(["e:0"], stepper.handle_names())
    self.assertSetEqual({"e"}, stepper.handle_node_names())
    self.assertEqual(["c:0"], stepper.override_names())

    # Calling cont(self.e) again. This time the cached tensor handle of e
    # should be used.
    self.assertEqual(28.0, stepper.cont(self.e))
    self.assertEqual({
        "e:0": NodeStepper.FEED_TYPE_HANDLE
    }, stepper.last_feed_types())

    # Override c again. This should have invalidated the cache for e.
    stepper.override_tensor("c:0", 8.0)

    self.assertEqual([], stepper.handle_names())
    self.assertEqual(set(), stepper.handle_node_names())
    self.assertEqual(["c:0"], stepper.override_names())

    self.assertAllClose(32.0, stepper.cont(self.e))
    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_OVERRIDE,
        "d:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
    }, stepper.last_feed_types())
def testRemoveOverrideValue(self):
  """Removing an override restores original values and invalidates handles."""
  with NodeStepper(self.sess, self.e) as stepper:
    result = stepper.cont(self.c)
    self.assertAllClose(6.0, result)
    self.assertEqual({}, stepper.last_feed_types())

    # The previous cont() step should have generated a cached tensor handle.
    self.assertEqual(["c:0"], stepper.handle_names())
    self.assertSetEqual({"c"}, stepper.handle_node_names())

    # Override c:0.
    stepper.override_tensor("c:0", 7.0)

    # The overriding should have invalidated the tensor handle.
    self.assertEqual([], stepper.handle_names())
    self.assertSetEqual(set(), stepper.handle_node_names())
    self.assertEqual(["c:0"], stepper.override_names())

    result = stepper.cont(self.e)
    self.assertAllClose(28.0, result)  # Should reflect the overriding value.
    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_OVERRIDE,
        "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
    }, stepper.last_feed_types())

    # The handle to tensor e:0 should have been cached, even though its
    # transitive closure contains an override.
    self.assertIn("e:0", stepper.handle_names())
    self.assertSetEqual({"e"}, stepper.handle_node_names())

    # Remove the override.
    stepper.remove_override("c:0")
    # c:0 should not be in the overrides anymore.
    self.assertEqual([], stepper.override_names())

    # Removing the override should have invalidated the tensor handle for c.
    self.assertNotIn("e:0", stepper.handle_names())
    self.assertNotIn("e", stepper.handle_node_names())

    # Should reflect the non-overriding value.
    self.assertAllClose(24.0, stepper.cont(self.e))

    # This time, the handle to tensor e:0 should have been cached again, even
    # thought its transitive closure contains an override.
    self.assertIn("e:0", stepper.handle_names())
    self.assertIn("e", stepper.handle_node_names())

    # Calling cont(self.e) again should have used the tensor handle to e:0.
    self.assertAllClose(24.0, stepper.cont(self.e))
    self.assertEqual({
        "e:0": NodeStepper.FEED_TYPE_HANDLE,
    }, stepper.last_feed_types())
def testOverrideAndContToSameTensor(self):
  """cont() on an overridden tensor returns the override, not the handle."""
  with NodeStepper(self.sess, self.e) as stepper:
    result = stepper.cont(self.c)
    self.assertAllClose(6.0, result)
    self.assertEqual({}, stepper.last_feed_types())
    self.assertEqual(["c:0"], stepper.handle_names())
    self.assertSetEqual({"c"}, stepper.handle_node_names())

    self.assertAllClose(6.0, stepper.cont(self.c))

    # The last cont() call should use the tensor handle directly.
    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_HANDLE
    }, stepper.last_feed_types())

    # Override c:0.
    stepper.override_tensor("c:0", 7.0)

    # As a result of the override, the tensor handle should have been
    # invalidated.
    self.assertEqual([], stepper.handle_names())
    self.assertSetEqual(set(), stepper.handle_node_names())

    result = stepper.cont(self.c)
    self.assertAllClose(7.0, result)

    self.assertEqual({
        "c:0": NodeStepper.FEED_TYPE_OVERRIDE
    }, stepper.last_feed_types())
def testFinalizeWithPreviousOverrides(self):
  """finalize() recomputes the fetch while ignoring any overrides."""
  with NodeStepper(self.sess, self.e) as stepper:
    stepper.override_tensor("a/read:0", 20.0)
    self.assertEqual(["a/read:0"], stepper.override_names())

    # Should reflect the overriding value.
    self.assertAllClose(24000.0, stepper.cont("e:0"))
    self.assertEqual({
        "a/read:0": NodeStepper.FEED_TYPE_OVERRIDE
    }, stepper.last_feed_types())

    # Finalize call should have ignored the overriding value.
    self.assertAllClose(24.0, stepper.finalize())
def testRemoveNonexistentOverrideValue(self):
  """Removing an override that was never set must raise ValueError."""
  with NodeStepper(self.sess, self.e) as stepper:
    self.assertEqual([], stepper.override_names())

    with self.assertRaisesRegexp(
        ValueError, "No overriding value exists for tensor \"c:0\""):
      stepper.remove_override("c:0")
def testAttemptToOverrideInvalidTensor(self):
stepper = NodeStepper(self.sess, self.e)
with self.assertRaisesRegexp(ValueError, "Cannot override tensor \"f:0\""):
stepper.override_tensor("f:0", 42.0)
def testInvalidOverrideArgumentType(self):
  """override_tensor() requires a string tensor name, not a Tensor object."""
  with NodeStepper(self.sess, self.e) as stepper:
    with self.assertRaisesRegexp(TypeError, "Expected type str; got type"):
      stepper.override_tensor(self.a, 42.0)
def testTransitiveClosureWithCrossLinksShouldHaveCorrectOrder(self):
  """sorted_nodes() must be topologically ordered despite cross-links."""
  with NodeStepper(self.sess, "z:0") as stepper:
    sorted_nodes = stepper.sorted_nodes()
    self.assertEqual(4, len(sorted_nodes))
    # Every producer node must appear before each of its consumers.
    self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("x/read"))
    self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("y"))
    self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("z"))
    self.assertLess(sorted_nodes.index("y"), sorted_nodes.index("z"))
def testNodeStepperConstructorShouldAllowListOrTupleOrDictOfFetches(self):
  """NodeStepper accepts nested lists, tuples and dicts of fetches.

  Fetches may be Tensor objects or tensor-name strings; the structure of
  the finalize() result mirrors the structure of the fetches argument.
  """
  fetch_variants = [
      [self.e, [self.f, self.z]],
      (self.e, (self.f, self.z)),
      {"e": self.e, "fz": {"f": self.f, "z": self.z}},
      ["e:0", ["f:0", "z:0"]],
      ("e:0", ("f:0", "z:0")),
      {"e": "e:0", "fz": {"f": "f:0", "z": "z:0"}},
  ]
  for fetches in fetch_variants:
    with NodeStepper(self.sess, fetches) as stepper:
      sorted_nodes = stepper.sorted_nodes()
      self.assertEqual(13, len(sorted_nodes))

      # Check the topological order of the sorted nodes.
      self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("x/read"))
      self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("y"))
      self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("z"))
      self.assertLess(sorted_nodes.index("y"), sorted_nodes.index("z"))
      self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("a/read"))
      self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("b/read"))
      self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("c"))
      self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("c"))
      self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("d"))
      self.assertLess(sorted_nodes.index("d"), sorted_nodes.index("e"))
      self.assertLess(sorted_nodes.index("c"), sorted_nodes.index("e"))
      self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("f"))
      self.assertLess(sorted_nodes.index("f_y"), sorted_nodes.index("f"))

      closure_elements = stepper.closure_elements()
      self.assertIn("x/read:0", closure_elements)
      self.assertIn("e:0", closure_elements)
      self.assertIn("f:0", closure_elements)
      self.assertEqual([0], stepper.output_slots_in_closure("x/read"))
      self.assertEqual([0], stepper.output_slots_in_closure("e"))
      self.assertEqual([0], stepper.output_slots_in_closure("f"))

      result = stepper.finalize()
      if isinstance(fetches, dict):
        # Dict-shaped fetches produce a dict-shaped result.
        self.assertAllClose(24.0, result["e"])
        self.assertAllClose(10.0, result["fz"]["f"])
        self.assertAllClose(-4.0, result["fz"]["z"])
      else:
        # List/tuple-shaped fetches produce an indexable result.
        self.assertAllClose(24.0, result[0])
        self.assertAllClose(10.0, result[1][0])
        self.assertAllClose(-4.0, result[1][1])
class StepperTestWithPlaceHolders(test_util.TensorFlowTestCase):
  """Tests for NodeStepper on a graph whose inputs are placeholders."""

  def setUp(self):
    # y = matmul(ph0, ph1) + ph1, with ph0 a 2x2 and ph1 a 2x1 matrix.
    self.ph0 = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph0")
    self.ph1 = array_ops.placeholder(dtypes.float32, shape=(2, 1), name="ph1")

    self.x = math_ops.matmul(self.ph0, self.ph1, name="x")
    self.y = math_ops.add(self.x, self.ph1, name="y")

    self.sess = session.Session()

  def tearDown(self):
    ops.reset_default_graph()

  def testGetTensorValueWorksOnPlaceholder(self):
    """get_tensor_value() should return the client-fed placeholder value."""
    with NodeStepper(
        self.sess,
        self.y,
        feed_dict={
            self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
            self.ph1: [[-1.0], [0.5]]
        }) as stepper:
      # Both the node name and the tensor name resolve to output slot 0.
      self.assertAllClose([[1.0, 2.0], [-3.0, 5.0]],
                          stepper.get_tensor_value("ph0"))
      self.assertAllClose([[1.0, 2.0], [-3.0, 5.0]],
                          stepper.get_tensor_value("ph0:0"))
      # A nonexistent output slot raises KeyError.
      with self.assertRaisesRegexp(
          KeyError,
          r"The name 'ph0:1' refers to a Tensor which does not exist"):
        stepper.get_tensor_value("ph0:1")

  def testIsPlaceholdersShouldGiveCorrectAnswers(self):
    with NodeStepper(self.sess, self.y) as stepper:
      self.assertTrue(stepper.is_placeholder(self.ph0.name))
      self.assertTrue(stepper.is_placeholder(self.ph1.name))

      self.assertFalse(stepper.is_placeholder(self.x.name))
      self.assertFalse(stepper.is_placeholder(self.y.name))

      # Names outside the transitive closure are rejected, not just False.
      with self.assertRaisesRegexp(ValueError,
                                   "A is not in the transitive closure"):
        self.assertFalse(stepper.is_placeholder("A"))

  def testPlaceholdersShouldGiveCorrectAnswers(self):
    with NodeStepper(self.sess, self.y) as stepper:
      self.assertSetEqual({"ph0", "ph1"}, set(stepper.placeholders()))

  def testContWithPlaceholders(self):
    """cont() should feed placeholders from the client feed_dict."""
    with NodeStepper(
        self.sess,
        self.y,
        feed_dict={
            self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
            self.ph1: [[-1.0], [0.5]]
        }) as stepper:
      self.assertEqual(4, len(stepper.sorted_nodes()))
      self.assertSetEqual({"ph0:0", "ph1:0", "x:0", "y:0"},
                          set(stepper.closure_elements()))

      result = stepper.cont(self.x)
      self.assertAllClose([[0.0], [5.5]], result)
      self.assertEqual({
          "ph0:0": NodeStepper.FEED_TYPE_CLIENT,
          "ph1:0": NodeStepper.FEED_TYPE_CLIENT,
      }, stepper.last_feed_types())

      # cont() to x caches x's handle, which the next cont() reuses.
      self.assertEqual(["x:0"], stepper.handle_names())
      self.assertSetEqual({"x"}, stepper.handle_node_names())

      result = stepper.cont(self.y)
      self.assertAllClose([[-1.0], [6.0]], result)
      self.assertEqual({
          "x:0": NodeStepper.FEED_TYPE_HANDLE,
          "ph1:0": NodeStepper.FEED_TYPE_CLIENT,
      }, stepper.last_feed_types())

  def testAttemptToContToPlaceholderWithTensorFeedKeysShouldWork(self):
    """Continuing to a placeholder should be allowed, using client feed."""

    ph0_feed = [[1.0, 2.0], [-3.0, 5.0]]
    ph1_feed = [[-1.0], [0.5]]
    with NodeStepper(
        self.sess, self.y, feed_dict={
            self.ph0: ph0_feed,
            self.ph1: ph1_feed,
        }) as stepper:
      self.assertAllClose(ph0_feed, stepper.cont(self.ph0))
      self.assertEqual({
          self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      self.assertAllClose(ph1_feed, stepper.cont(self.ph1))
      self.assertEqual({
          self.ph1.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      # cont() also accepts the Operation object of the placeholder.
      ph0_node = self.sess.graph.as_graph_element("ph0")
      self.assertAllClose(ph0_feed, stepper.cont(ph0_node))
      self.assertEqual({
          self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      self.assertAllClose([[-1.0], [6.0]], stepper.finalize())

  def testAttemptToContToPlaceholderWithTensorNameFeedKeysShouldWork(self):
    """Same as above, but the feed_dict is keyed by tensor-name strings."""

    ph0_feed = [[1.0, 2.0], [-3.0, 5.0]]
    ph1_feed = [[-1.0], [0.5]]

    with NodeStepper(
        self.sess,
        self.y,
        feed_dict={
            self.ph0.name: ph0_feed,
            self.ph1.name: ph1_feed,
        }) as stepper:
      self.assertAllClose(ph0_feed, stepper.cont(self.ph0))
      self.assertEqual({
          self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      self.assertAllClose(ph1_feed, stepper.cont(self.ph1))
      self.assertEqual({
          self.ph1.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      ph0_node = self.sess.graph.as_graph_element("ph0")
      self.assertAllClose(ph0_feed, stepper.cont(ph0_node))
      self.assertEqual({
          self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      self.assertAllClose([[-1.0], [6.0]], stepper.finalize())
class StepperAssignAddTest(test_util.TensorFlowTestCase):
  """Tests for NodeStepper on a graph that updates a Variable (assign_add)."""

  def setUp(self):
    # q = (v + v) ** 2; v_add performs v += delta (delta == 2.0).
    self.v = variables.Variable(10.0, name="v")
    self.p = math_ops.add(self.v, self.v, name="p")
    self.q = math_ops.multiply(self.p, self.p, name="q")
    self.delta = constant_op.constant(2.0, name="delta")
    self.v_add = state_ops.assign_add(self.v, self.delta, name="v_add")
    self.v_add_plus_one = math_ops.add(self.v_add,
                                       1.0,
                                       name="v_add_plus_one")

    self.sess = session.Session()
    self.sess.run(self.v.initializer)

  def tearDown(self):
    ops.reset_default_graph()

  def testLastUpdatedVariablesReturnsNoneBeforeAnyContCalls(self):
    with NodeStepper(self.sess, [self.q, self.v_add]) as stepper:
      self.assertIsNone(stepper.last_updated())

  def testContToUpdateInvalidatesDumpedIntermedates(self):
    """Updating a Variable invalidates intermediates downstream of it."""
    with NodeStepper(self.sess, [self.q, self.v_add]) as stepper:
      self.assertAllClose(400.0, stepper.cont("q:0"))
      self.assertItemsEqual(["v/read:0", "p:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(10.0, stepper.get_tensor_value("v/read:0"))
      self.assertAllClose(20.0, stepper.get_tensor_value("p:0"))

      self.assertAllClose(
          12.0, stepper.cont(
              self.v_add, invalidate_from_updated_variables=True))
      self.assertAllClose(12.0, self.sess.run(self.v))
      self.assertSetEqual({self.v.name}, stepper.last_updated())
      self.assertItemsEqual(["v:0"], stepper.dirty_variables())
      # Updating the value of v by calling v_add should have invalidated the
      # dumped intermediate tensors for v/read:0 and p:0.
      self.assertItemsEqual(["delta:0"], stepper.intermediate_tensor_names())
      with self.assertRaisesRegexp(
          ValueError,
          r"This stepper instance does not have access to the value of tensor "
          r"\"p:0\""):
        stepper.get_tensor_value("p:0")

      # The next cont to q should not have used any dumped intermediate tensors
      # and its result should reflect the updated value.
      self.assertAllClose(576.0, stepper.cont("q:0"))
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual({}, stepper.last_feed_types())

  def testOverridingUpstreamTensorInvalidatesDumpedIntermediates(self):
    with NodeStepper(self.sess, self.q) as stepper:
      self.assertAllClose(400.0, stepper.cont("q:0"))
      self.assertItemsEqual(["v/read:0", "p:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(10.0, stepper.get_tensor_value("v/read:0"))
      self.assertAllClose(20.0, stepper.get_tensor_value("p:0"))

      stepper.override_tensor("v/read:0", 11.0)
      self.assertItemsEqual(["v/read:0"], stepper.override_names())
      # Overriding the upstream v/read:0 should have invalidated the dumped
      # intermediate tensor for the downstream p:0.
      self.assertItemsEqual([], stepper.intermediate_tensor_names())

      # The next cont to q should not have used any dumped intermediate tensors
      # and its result should reflect the overriding value.
      self.assertAllClose(484.0, stepper.cont("q:0"))
      self.assertEqual({
          "v/read:0": NodeStepper.FEED_TYPE_OVERRIDE
      }, stepper.last_feed_types())

  def testRemovingOverrideToUpstreamTensorInvalidatesDumpedIntermediates(self):
    with NodeStepper(self.sess, self.q) as stepper:
      stepper.override_tensor("v/read:0", 9.0)
      self.assertItemsEqual(["v/read:0"], stepper.override_names())
      self.assertAllClose(324.0, stepper.cont(self.q))
      self.assertItemsEqual(["p:0"], stepper.intermediate_tensor_names())

      stepper.remove_override("v/read:0")
      self.assertItemsEqual([], stepper.override_names())
      # Removing the pre-existing override to v/read:0 should have invalidated
      # the dumped intermediate tensor.
      self.assertItemsEqual([], stepper.intermediate_tensor_names())

  def testRepeatedCallsToAssignAddDoesNotUpdateVariableAgain(self):
    """A second cont() to the same update reuses its handle, not the op."""
    with NodeStepper(self.sess, self.v_add) as stepper:
      stepper.cont(self.v_add)
      self.assertSetEqual({self.v.name}, stepper.last_updated())
      self.assertAllClose(12.0, stepper.cont(self.v))
      stepper.cont(self.v_add)
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual({"v_add:0": NodeStepper.FEED_TYPE_HANDLE},
                       stepper.last_feed_types())
      self.assertAllClose(12.0, stepper.cont(self.v))

  def testRepeatedCallsToAssignAddDownStreamDoesNotUpdateVariableAgain(self):
    with NodeStepper(self.sess, self.v_add_plus_one) as stepper:
      stepper.cont(self.v_add_plus_one)
      self.assertSetEqual({self.v.name}, stepper.last_updated())
      self.assertAllClose(12.0, stepper.cont(self.v))
      stepper.cont(self.v_add_plus_one)
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual({"v_add_plus_one:0": NodeStepper.FEED_TYPE_HANDLE},
                       stepper.last_feed_types())
      self.assertAllClose(12.0, stepper.cont(self.v))
class StepperBackwardRunTest(test_util.TensorFlowTestCase):

  def setUp(self):
    """Test setup.

    Structure of the forward graph:
              f
             | |
          ----- -----
          |         |
          d         e
         | |       | |
       ---  --------- ---
       |        |       |
       a        b       c

    Construct a backward graph using the GradientDescentOptimizer.
    """

    self.a = variables.Variable(1.0, name="a")
    self.b = variables.Variable(2.0, name="b")
    self.c = variables.Variable(4.0, name="c")
    self.d = math_ops.multiply(self.a, self.b, name="d")
    self.e = math_ops.multiply(self.b, self.c, name="e")
    self.f = math_ops.multiply(self.d, self.e, name="f")

    # Gradient descent optimizer that minimizes f.
    gradient_descent.GradientDescentOptimizer(0.01).minimize(
        self.f, name="optim")

    self.sess = session.Session()
    self.sess.run(variables.global_variables_initializer())

  def tearDown(self):
    ops.reset_default_graph()
def testContToUpdateA(self):
  """cont() to the ApplyGradientDescent op for a marks a as dirty."""
  with NodeStepper(self.sess, "optim") as stepper:
    result = stepper.cont("a:0")
    self.assertAllClose(1.0, result)
    self.assertEqual({}, stepper.last_feed_types())

    result = stepper.cont("optim/learning_rate:0")
    self.assertAllClose(0.01, result)
    self.assertEqual({}, stepper.last_feed_types())

    # Before any cont calls on ApplyGradientDescent, there should be no
    # "dirty" variables.
    self.assertEqual(set(), stepper.dirty_variables())

    # Continue to the op that applies the gradient-descent update to a.
    result = stepper.cont("optim/update_a/ApplyGradientDescent",
                          invalidate_from_updated_variables=True)

    # Now variable a should have been marked as dirty due to the update
    # by optim/update_a/ApplyGradientDescent.
    self.assertSetEqual({"a:0"}, stepper.last_updated())
    self.assertEqual({"a:0"}, stepper.dirty_variables())
    self.assertIsNone(result)
    self.assertEqual({
        "optim/learning_rate:0": NodeStepper.FEED_TYPE_HANDLE
    }, stepper.last_feed_types())

    # Check that Variable "a" has been updated properly, but "b", "c" and "d"
    # remain the same.
    # For backprop on Variable a:
    #   Because f = a * b * b * c, df / da = b * b * c.
    #   1.0 - learning_rate * b * b * c
    #     = 1.0 - 0.01 * 2.0 * 2.0 * 4.0 = 0.84.
    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(2.0, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
def testContToUpdateB(self):
  """cont() to the ApplyGradientDescent op for b marks b as dirty."""
  with NodeStepper(self.sess, "optim") as stepper:
    result = stepper.cont("optim/update_b/ApplyGradientDescent",
                          invalidate_from_updated_variables=True)
    self.assertIsNone(result)
    self.assertSetEqual({"b:0"}, stepper.last_updated())
    self.assertEqual(set(["b:0"]), stepper.dirty_variables())

    # For backprop on Variable b:
    #   Because f = a * b * b * c, df / db = 2 * a * b * c.
    #   2.0 - learning_rate * 2 * a * b * c
    #     = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
    self.assertAllClose(1.0, self.sess.run(self.a))
    self.assertAllClose(1.84, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
def testContAfterUpdateWithoutRestoringVariableValue(self):
  """A later update sees the value left behind by an earlier update."""
  with NodeStepper(self.sess, "optim") as stepper:
    # First, update Variable a from 1.0 to 0.84.
    result = stepper.cont(
        "optim/update_a/ApplyGradientDescent",
        invalidate_from_updated_variables=True,
        restore_variable_values=True)
    self.assertIsNone(result)
    self.assertSetEqual({"a:0"}, stepper.last_updated())
    self.assertEqual(set(["a:0"]), stepper.dirty_variables())
    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(2.0, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
    # Tracking of the updated variables should have invalidated all
    # intermediate tensors downstream to a:0.
    self.assertNotIn("a/read:0", stepper.intermediate_tensor_names())
    self.assertNotIn("d:0", stepper.intermediate_tensor_names())

    # Second, update Variable b without the default restore_variable_values.
    result = stepper.cont(
        "optim/update_b/ApplyGradientDescent", restore_variable_values=False)
    self.assertIsNone(result)
    # For the backprop on Variable b under the updated value of a:
    #   2.0 - learning_rate * 2 * a' * b * c
    #     = 2.0 - 0.01 * 2 * 0.84 * 2.0 * 4.0 = 1.8656
    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(1.8656, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
def testContNotInvalidatingFromVariableUpdatesWorksForNextUpdate(self):
  """With invalidation off, stale intermediates feed the next update."""
  with NodeStepper(self.sess, "optim") as stepper:
    self.assertIsNone(stepper.cont(
        "optim/update_a/ApplyGradientDescent",
        invalidate_from_updated_variables=False))
    # Even though invalidate_from_updated_variables is set to False, dirty
    # variables should still have been tracked.
    self.assertSetEqual({"a:0"}, stepper.last_updated())
    self.assertEqual({"a:0"}, stepper.dirty_variables())
    # The dumped intermediates survive because invalidation was disabled.
    self.assertIn("a/read:0", stepper.intermediate_tensor_names())
    self.assertIn("b/read:0", stepper.intermediate_tensor_names())
    self.assertIn("c/read:0", stepper.intermediate_tensor_names())
    self.assertIn("d:0", stepper.intermediate_tensor_names())
    self.assertIn("e:0", stepper.intermediate_tensor_names())
    self.assertIn("optim/learning_rate:0",
                  stepper.intermediate_tensor_names())
    self.assertNotIn("a:0", stepper.intermediate_tensor_names())
    self.assertNotIn("b:0", stepper.intermediate_tensor_names())
    self.assertNotIn("c:0", stepper.intermediate_tensor_names())

    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(2.0, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))

    # For the backprop on Variable b, the result should reflect the original
    # value of Variable a, even though Variable a has actually been updated.
    #   2.0 - learning_rate * 2 * a * b * c
    #     = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
    self.assertIsNone(stepper.cont(
        "optim/update_b/ApplyGradientDescent",
        invalidate_from_updated_variables=False,
        restore_variable_values=False))
    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(1.84, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
def testUpdateTwiceRestoreVariable(self):
  """restore_variable_values undoes earlier updates before the next cont."""
  with NodeStepper(self.sess, "optim") as stepper:
    result = stepper.cont(
        "optim/update_a/ApplyGradientDescent",
        invalidate_from_updated_variables=True,
        restore_variable_values=True)
    self.assertIsNone(result)
    self.assertSetEqual({"a:0"}, stepper.last_updated())
    self.assertEqual({"a:0"}, stepper.dirty_variables())

    result = stepper.cont(
        "optim/update_b/ApplyGradientDescent",
        invalidate_from_updated_variables=True,
        restore_variable_values=True)
    self.assertIsNone(result)
    # Variables a and c should have been restored and hence no longer dirty.
    # Variable b should have been marked as dirty.
    self.assertSetEqual({"b:0"}, stepper.last_updated())
    self.assertEqual({"b:0"}, stepper.dirty_variables())

    # The result of the update should be identical to as if only update_b is
    # run.
    self.assertAllClose(1.0, self.sess.run(self.a))
    self.assertAllClose(1.84, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
def testSelectiveHandleUsageDependingOnTransitiveCleanliness(self):
  """Tensor handles are used only while the transitive closure is clean.

  "clean" means no Variables have been updated by preceding cont() calls.
  """
  with NodeStepper(self.sess, "optim") as stepper:
    # First, call cont() on the two tensors on the intermediate level: d and
    # e.
    result = stepper.cont("d:0")
    self.assertAllClose(2.0, result)
    self.assertEqual({}, stepper.last_feed_types())
    self.assertItemsEqual(["a/read:0", "b/read:0"],
                          stepper.intermediate_tensor_names())
    self.assertItemsEqual(["d:0"], stepper.handle_names())
    self.assertSetEqual(set(), stepper.last_updated())
    self.assertEqual(set(), stepper.dirty_variables())

    result = stepper.cont("e:0")
    self.assertAllClose(8.0, result)
    self.assertEqual({
        "b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
    }, stepper.last_feed_types())
    self.assertItemsEqual(["d:0", "e:0"], stepper.handle_names())
    self.assertItemsEqual(["a/read:0", "b/read:0", "c/read:0"],
                          stepper.intermediate_tensor_names())
    self.assertSetEqual(set(), stepper.last_updated())
    self.assertEqual(set(), stepper.dirty_variables())

    # Now run update_a, so as to let Variable a be dirty.
    result = stepper.cont(
        "optim/update_a/ApplyGradientDescent",
        invalidate_from_updated_variables=True,
        restore_variable_values=True)
    self.assertIsNone(result)
    # Due to the update to the value of a:0, the dumped intermediate a/read:0
    # should have been invalidated.
    self.assertNotIn("a/read:0", stepper.intermediate_tensor_names())
    self.assertSetEqual({"a:0"}, stepper.last_updated())
    self.assertEqual({"a:0"}, stepper.dirty_variables())

    # Now, run update_b.
    result = stepper.cont(
        "optim/update_b/ApplyGradientDescent", restore_variable_values=True)
    self.assertIsNone(result)

    # The last cont() run should have use the handle of tensor e, but not the
    # handle of tensor d, because the transitive closure of e is clean,
    # whereas that of d is dirty due to the update to a in the previous cont()
    # call.
    last_feed_types = stepper.last_feed_types()
    self.assertNotIn("d:0", last_feed_types)
    self.assertEqual(NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
                     last_feed_types["b/read:0"])
    self.assertEqual(NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
                     last_feed_types["c/read:0"])

    # The result of the update_b should be identical to as if no other
    # update_* cont() calls have occurred before.
    self.assertAllClose(1.0, self.sess.run(self.a))
    self.assertAllClose(1.84, self.sess.run(self.b))
    self.assertAllClose(4.0, self.sess.run(self.c))
def testRestoreVariableValues(self):
  """Test restore_variable_values() restores the old values of variables."""
  with NodeStepper(self.sess, "optim") as stepper:
    # The update changes b from 2.0 to 1.84.
    stepper.cont(
        "optim/update_b/ApplyGradientDescent",
        invalidate_from_updated_variables=True,
        restore_variable_values=True)
    self.assertAllClose(1.84, self.sess.run(self.b))

    # Explicit restore rolls b back to its pre-update value.
    stepper.restore_variable_values()
    self.assertAllClose(2.0, self.sess.run(self.b))
def testFinalize(self):
  """Test finalize() to restore variables and run the original fetch."""
  with NodeStepper(self.sess, "optim") as stepper:
    # Invoke update_b before calling finalize.
    stepper.cont(
        "optim/update_b/ApplyGradientDescent",
        invalidate_from_updated_variables=True,
        restore_variable_values=True)

    result = stepper.finalize()
    self.assertIsNone(result)

    # The results of the Variable updates should be the same as if no cont()
    # call has occurred on update_b.
    self.assertAllClose(0.84, self.sess.run(self.a))
    self.assertAllClose(1.84, self.sess.run(self.b))
    self.assertAllClose(3.96, self.sess.run(self.c))
def testOverrideThenContToUpdateThenRemoveOverrideThenUpdateAgain(self):
  """Test cont() to update nodes after overriding tensor values."""
  with NodeStepper(self.sess, "optim") as stepper:
    result = stepper.cont("d:0")
    self.assertAllClose(2.0, result)
    self.assertEqual({}, stepper.last_feed_types())
    self.assertSetEqual(set(), stepper.last_updated())
    self.assertEqual(set(), stepper.dirty_variables())
    self.assertEqual(["d:0"], stepper.handle_names())
    self.assertSetEqual({"d"}, stepper.handle_node_names())

    # Override the value of a/read:0 from 1.0 to 10.0.
    stepper.override_tensor("a/read:0", 10.0)
    self.assertEqual(["a/read:0"], stepper.override_names())

    result = stepper.cont(
        "optim/update_c/ApplyGradientDescent",
        invalidate_from_updated_variables=True,
        restore_variable_values=True)
    self.assertIsNone(result)

    # The last cont() call should have not used the tensor handle to d:0,
    # because the transitive closure of d:0 contains an override tensor.
    self.assertEqual({
        "a/read:0": NodeStepper.FEED_TYPE_OVERRIDE,
        "b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
    }, stepper.last_feed_types())

    # The tensor handle to d:0 should have been removed due to the dirty
    # transitive closure.
    self.assertEqual([], stepper.handle_names())
    self.assertSetEqual(set(), stepper.handle_node_names())

    # For this backprop on c, the overriding value of a/read:0 should have
    # been used:
    #   4.0 - learning_rate * a * b * b
    #     = 4.0 - 0.01 * 10.0 * 2.0 * 2.0 = 3.6.
    self.assertAllClose(3.6, self.sess.run(self.c))

    # Now remove the overriding value of a/read:0.
    stepper.remove_override("a/read:0")
    self.assertEqual([], stepper.override_names())

    # Obtain the tensor handle to d:0 again.
    result = stepper.cont("d:0")
    self.assertAllClose(2.0, result)
    self.assertEqual(["d:0"], stepper.handle_names())
    self.assertSetEqual({"d"}, stepper.handle_node_names())
    self.assertNotIn("a/read:0", stepper.last_feed_types())

    # Then call update_c again, without restoring c.
    result = stepper.cont("optim/update_c/ApplyGradientDescent",
                          restore_variable_values=False)
    self.assertIsNone(result)
    self.assertNotIn("a/read:0", stepper.last_feed_types())

    # This time, the d:0 tensor handle should have been used, because its
    # transitive closure is clean.
    self.assertEqual({
        "b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
        "d:0": NodeStepper.FEED_TYPE_HANDLE,
        "optim/learning_rate:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
    }, stepper.last_feed_types())

    # For this backprop on c, the original (no longer overridden) value of
    # a/read:0 should have been used:
    #   3.6 - learning_rate * a * b * b
    #     = 3.6 - 0.01 * 1.0 * 2.0 * 2.0 = 3.56.
    self.assertAllClose(3.56, self.sess.run(self.c))
def testContToNodeWithOutputTensors(self):
  """cont() to an op should cache its output tensors if appropriate."""
  with NodeStepper(self.sess, "optim") as stepper:
    # In the transitive closure of the stepper, look for an op of which the
    # output tensor also is in the transitive closure.
    # Do not assume a specific op, e.g., "gradients/e_grad/Reshape_1",
    # because it may vary between builds.
    closure_elements = stepper.closure_elements()
    op_with_output_in_closure = None
    for element_name in closure_elements:
      if element_name + ":0" in closure_elements:
        op_with_output_in_closure = str(element_name)
        break

    # Fix: assert the search succeeded *before* using the result. Previously
    # the None-check came after the output_slots_in_closure() call, so a
    # failed search surfaced as a confusing error from that call instead of
    # a clear assertion failure.
    self.assertIsNotNone(op_with_output_in_closure)
    self.assertEqual(
        [0], stepper.output_slots_in_closure(op_with_output_in_closure))

    output_tensor = op_with_output_in_closure + ":0"

    # The op "gradients/?_grad/Reshape_1" is in the transitive closure of the
    # stepper, because it is the control input to another op. However, its
    # output tensor "gradients/?_grad/Reshape_1:0" is also in the transitive
    # closure, because it is the (non-control) input of certain ops. Calling
    # cont() on the op should lead to the caching of the tensor handle for
    # the output tensor.
    stepper.cont(op_with_output_in_closure)

    self.assertEqual([output_tensor], stepper.handle_names())
    self.assertSetEqual({op_with_output_in_closure},
                        stepper.handle_node_names())

    # Do a cont() call that uses the cached tensor of
    # "gradients/?_grad/Reshape_1:0".
    stepper.cont(output_tensor)
    self.assertEqual({
        output_tensor: NodeStepper.FEED_TYPE_HANDLE
    }, stepper.last_feed_types())
# Run the tests when this file is executed as a script.
if __name__ == "__main__":
  googletest.main()
|
seecr/meresco-lucene | refs/heads/master | meresco/lucene/remote/__init__.py | 1 | ## begin license ##
#
# "Meresco Lucene" is a set of components and tools to integrate Lucene into Meresco
#
# Copyright (C) 2015 Koninklijke Bibliotheek (KB) http://www.kb.nl
# Copyright (C) 2015, 2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2021 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2021 SURF https://www.surf.nl
# Copyright (C) 2021 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Lucene"
#
# "Meresco Lucene" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Lucene" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Lucene"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from ._remote import LuceneRemote
from ._service import LuceneRemoteService
from ._conversion import Conversion |
JoakimLindbom/agocontrol | refs/heads/master | devices/gc100/agogc100.py | 4 | #!/usr/bin/python
import time
import threading
import socket
import struct
import re
import agoclient
GC100_ANNOUNCE_MCAST_IP="239.255.250.250"
GC100_ANNOUNCE_PORT=9131
GC100_COMM_PORT=4998
BUFFER_SIZE=8192
client = agoclient.AgoConnection("gc100")
devices = {}
def sendcommand(host, port, command):
    """Open a TCP connection to a GC100, send one command, return the reply.

    host/port address the unit's command interface (port 4998); command is
    the raw, CR-terminated protocol string. Returns the raw reply bytes.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
        # sendall() retries until the whole buffer is written; plain send()
        # may transmit only a prefix of the command.
        s.sendall(command)
        data = s.recv(BUFFER_SIZE)
    finally:
        # Close the socket even if connect/sendall/recv raises; the original
        # code leaked the socket on any network error.
        s.close()
    return data
def getdevices(host, port):
    """Ask the GC100 for its module list; commands are CR-terminated."""
    return sendcommand(host, port, "getdevices\r")
def getir(host, port, addr):
    """Query the IR/sensor configuration of connector *addr* (e.g. "4:1")."""
    # Fix: GC100 protocol commands are carriage-return terminated, as in
    # getdevices() and setstate(); the "\r" terminator was missing here.
    return sendcommand(host, port, "get_IR,%s\r" % addr)
def setstate(host, port, addr, state):
    """Set relay connector *addr* (e.g. "3:1") to *state* (0 or 1)."""
    return sendcommand(host, port, "setstate,%s,%i\r" % (addr, state))
def discover(arg, stop):
    """Listen for GC100 multicast announcements until *stop* is set.

    Populates the module-level ``devices`` dict (IP address -> model
    string). Intended to run in a background thread; *arg* is unused.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('', GC100_ANNOUNCE_PORT))
    # Join the GC100 announcement multicast group on all interfaces.
    mreq = struct.pack("4sl", socket.inet_aton(GC100_ANNOUNCE_MCAST_IP), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    print "Listening for GC100 devices"
    while(not stop.is_set()):
        # Announcements look like "...<-Model=GC-100-12>...<Config-URL=http://1.2.3.4>..."
        data, addr = sock.recvfrom(1024)
        m = re.search("<-Model=(.*?)>", data)
        model = m.group(1)
        m = re.search("<Config-URL=http://(.*?)>", data)
        address = m.group(1)
        if address not in devices:
            print "Found", model, "on", address
            devices[address]=model;
# Run discovery in a daemon thread for 63 seconds (GC100 units announce
# themselves periodically via multicast), then signal it to stop.
stop = threading.Event()
t = threading.Thread(target=discover,args=(1,stop))
t.daemon = True
t.start()
time.sleep(63)
stop.set()
# NOTE(review): discover() blocks in recvfrom(), so this join() may wait
# for one more announcement packet to arrive — confirm this is acceptable.
t.join()
print "finished discovery"
class WatchInputs(threading.Thread):
    """Background thread that listens for GC100 sensor state notifications."""

    def __init__(self, addr):
        threading.Thread.__init__(self)
        self.addr = addr  # IP address of the GC100 unit to watch

    def run(self):
        print "Watching inputs on", self.addr
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.addr, GC100_COMM_PORT))
        while(True):
            data = s.recv(BUFFER_SIZE)
            print data
            try:
                # Notification format: "statechange,<module>:<connector>,<state>"
                event, module, state = str(data).split(',')
                if 'statechange' in event:
                    # State "0" is reported as 255 (on) — presumably the
                    # inputs are active-low; TODO confirm against the GC100
                    # sensor wiring.
                    if '0' in state:
                        client.emitEvent("%s/%s" % (self.addr, module), "event.device.statechanged", 255, "")
                    else:
                        client.emitEvent("%s/%s" % (self.addr, module), "event.device.statechanged", 0, "")
            except ValueError, e:
                # Non-notification traffic does not split into three fields.
                print "value error", e, data
        s.close()  # Unreachable: the loop above never exits.
# Enumerate the modules of every discovered GC100 and register agocontrol
# devices: relays become switches, sensor-configured IR connectors become
# binary sensors. A WatchInputs thread is started per unit for notifications.
for addr in devices:
    print "Scanning", devices[addr], "on", addr
    # Reply is a CR-separated list like "device,3,3 RELAY\r...endlistdevices".
    devicestr= str(getdevices(addr, GC100_COMM_PORT))
    for device in devicestr.split('\r'):
        if 'endlistdevices' in device:
            break;
        print device
        try:
            dev, module, type = device.split(',')
            if '3 RELAY' in type:
                # Each relay module exposes three connectors (1..3).
                for x in range(1, 4):
                    client.addDevice("%s/%s:%i" % (addr, module, x), "switch")
            if '3 IR' in type:
                for x in range(1, 4):
                    # Only IR connectors configured for sensor notification
                    # are registered as devices.
                    if 'SENSOR_NOTIFY' in getir(addr, GC100_COMM_PORT, "%s:%i" % (module, x)):
                        client.addDevice("%s/%s:%i" % (addr, module, x), "binarysensor")
        except ValueError, e:
            print "value error", e, device
    notificationThread = WatchInputs(addr)
    notificationThread.setDaemon(True)
    notificationThread.start()
def messageHandler(internalid, content):
addr, connector = internalid.split('/')
if "command" in content:
if content["command"] == "on":
print "switching on: " + internalid
reply = setstate(addr, GC100_COMM_PORT, connector, 1)
# state,3:1,1
name, tmpconn, state = reply.split(',')
if "1" in state:
client.emitEvent(internalid, "event.device.statechanged", "255", "")
if content["command"] == "off":
print "switching off: " + internalid
reply = setstate(addr, GC100_COMM_PORT, connector, 0)
name, tmpconn, state = reply.split(',')
if "0" in state:
client.emitEvent(internalid, "event.device.statechanged", "0", "")
# Register the command handler and hand control to the agoclient main loop.
client.addHandler(messageHandler)
print "Waiting for messages"
client.run()
|
steebchen/youtube-dl | refs/heads/master | youtube_dl/extractor/pornovoisines.py | 64 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class PornoVoisinesIE(InfoExtractor):
    """Extractor for video pages on pornovoisines.com.

    Metadata comes from two sources: a per-video settings JSON document
    obtained via the site's API (formats, duration, VTT subtitle tracks)
    and the HTML page itself (title, description, thumbnail, upload date,
    view count, rating, categories).
    """
    _VALID_URL = r'https?://(?:www\.)?pornovoisines\.com/videos/show/(?P<id>\d+)/(?P<display_id>[^/.]+)'
    _TEST = {
        'url': 'http://www.pornovoisines.com/videos/show/919/recherche-appartement.html',
        'md5': '6f8aca6a058592ab49fe701c8ba8317b',
        'info_dict': {
            'id': '919',
            'display_id': 'recherche-appartement',
            'ext': 'mp4',
            'title': 'Recherche appartement',
            'description': 'md5:fe10cb92ae2dd3ed94bb4080d11ff493',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20140925',
            'duration': 120,
            'view_count': int,
            'average_rating': float,
            'categories': ['Débutante', 'Débutantes', 'Scénario', 'Sodomie'],
            'age_limit': 18,
            'subtitles': {
                'fr': [{
                    'ext': 'vtt',
                }]
            },
        }
    }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')
        # The API call returns the URL of the per-video settings JSON.
        settings_url = self._download_json(
            'http://www.pornovoisines.com/api/video/%s/getsettingsurl/' % video_id,
            video_id, note='Getting settings URL')['video_settings_url']
        settings = self._download_json(settings_url, video_id)['data']
        formats = []
        for kind, data in settings['variants'].items():
            if kind == 'HLS':
                # 'HLS' maps to a single m3u8 manifest URL.
                formats.extend(self._extract_m3u8_formats(
                    data, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
            elif kind == 'MP4':
                # 'MP4' maps to a list of progressive-download variants.
                for item in data:
                    formats.append({
                        'url': item['url'],
                        'height': item.get('height'),
                        'bitrate': item.get('bitrate'),
                    })
        self._sort_formats(formats)
        webpage = self._download_webpage(url, video_id)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        # The webpage has a bug - there's no space between "thumb" and src=
        thumbnail = self._html_search_regex(
            r'<img[^>]+class=([\'"])thumb\1[^>]*src=([\'"])(?P<url>[^"]+)\2',
            webpage, 'thumbnail', fatal=False, group='url')
        # Upload date appears on the page as "Le <b>dd/mm/yyyy".
        upload_date = unified_strdate(self._search_regex(
            r'Le\s*<b>([\d/]+)', webpage, 'upload date', fatal=False))
        duration = settings.get('main', {}).get('duration')
        view_count = int_or_none(self._search_regex(
            r'(\d+) vues', webpage, 'view count', fatal=False))
        average_rating = self._search_regex(
            r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False)
        if average_rating:
            # The site uses a decimal comma; normalize before float parsing.
            average_rating = float_or_none(average_rating.replace(',', '.'))
        categories = self._html_search_regex(
            r'(?s)Catégories\s*:\s*<b>(.+?)</b>', webpage, 'categories', fatal=False)
        if categories:
            categories = [category.strip() for category in categories.split(',')]
        # French VTT subtitle tracks are listed in the settings document.
        subtitles = {'fr': [{
            'url': subtitle,
        } for subtitle in settings.get('main', {}).get('vtt_tracks', {}).values()]}
        return {
            'id': video_id,
            'display_id': display_id,
            'formats': formats,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'average_rating': average_rating,
            'categories': categories,
            'age_limit': 18,
            'subtitles': subtitles,
        }
|
freeflightsim/ffs-app-engine | refs/heads/master | google_appengine/google/net/proto2/python/internal/__init__.py | 1333 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
eshijia/magnum | refs/heads/master | magnum/common/pythonk8sclient/swagger_client/models/v1_status.py | 5 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class V1Status(object):
    """Swagger model for a Kubernetes v1 Status API response.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        """Initialise an empty V1Status with its type/JSON-key metadata.

        ``swagger_types`` maps each attribute name to its declared type;
        ``attribute_map`` maps each attribute name to its JSON field name.
        All backing fields start out as None.
        """
        self.swagger_types = {
            'kind': 'str',
            'api_version': 'str',
            'metadata': 'V1ListMeta',
            'status': 'str',
            'message': 'str',
            'reason': 'str',
            'details': 'V1StatusDetails',
            'code': 'int'
        }

        self.attribute_map = {
            'kind': 'kind',
            'api_version': 'apiVersion',
            'metadata': 'metadata',
            'status': 'status',
            'message': 'message',
            'reason': 'reason',
            'details': 'details',
            'code': 'code'
        }

        # Create one private backing field per declared attribute.
        for attr_name in self.swagger_types:
            setattr(self, '_%s' % attr_name, None)

    @property
    def kind(self):
        """str: object kind, in CamelCase; cannot be updated."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1Status (str)."""
        self._kind = kind

    @property
    def api_version(self):
        """str: version of the schema the object should have."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1Status (str)."""
        self._api_version = api_version

    @property
    def metadata(self):
        """V1ListMeta: standard list metadata."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1Status (V1ListMeta)."""
        self._metadata = metadata

    @property
    def status(self):
        """str: status of the operation; either Success or Failure."""
        return self._status

    @status.setter
    def status(self, status):
        """Set the status of this V1Status (str)."""
        self._status = status

    @property
    def message(self):
        """str: human-readable description of the operation's status."""
        return self._message

    @message.setter
    def message(self, message):
        """Set the message of this V1Status (str)."""
        self._message = message

    @property
    def reason(self):
        """str: machine-readable reason for a 'Failure' status.

        Empty when no information is available; a reason clarifies an
        HTTP status code but does not override it.
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the reason of this V1Status (str)."""
        self._reason = reason

    @property
    def details(self):
        """V1StatusDetails: optional extended data associated with the reason.

        Each reason may define its own extended details; the data is not
        guaranteed to conform to any schema except that of the reason type.
        """
        return self._details

    @details.setter
    def details(self, details):
        """Set the details of this V1Status (V1StatusDetails)."""
        self._details = details

    @property
    def code(self):
        """int: suggested HTTP return code for this status; 0 if not set."""
        return self._code

    @code.setter
    def code(self, code):
        """Set the code of this V1Status (int)."""
        self._code = code

    def to_dict(self):
        """Return this model's properties as a plain dict."""
        def _convert(value):
            # Recursively serialise nested models and lists of models.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return a pretty-printed string of this model's properties."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.