repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
chdecultot/erpnext
refs/heads/develop
erpnext/buying/doctype/supplier_scorecard/supplier_scorecard.py
23
# -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import throw, _ from frappe.model.document import Document import time from datetime import timedelta from frappe.utils import nowdate, get_last_day, getdate, add_days, add_years from erpnext.buying.doctype.supplier_scorecard_period.supplier_scorecard_period import make_supplier_scorecard class SupplierScorecard(Document): def validate(self): self.validate_standings() self.validate_criteria_weights() self.calculate_total_score() self.update_standing() def on_update(self): score = make_all_scorecards(self.name) if score > 0: self.save() def validate_standings(self): # Check that there are no overlapping scores and check that there are no missing scores score = 0 for c1 in self.standings: for c2 in self.standings: if c1 != c2: if (c1.max_grade > c2.min_grade and c1.min_grade < c2.max_grade): throw(_('Overlap in scoring between {0} and {1}').format(c1.standing_name,c2.standing_name)) if c2.min_grade == score: score = c2.max_grade if score < 100: throw(_('Unable to find score starting at {0}. 
You need to have standing scores covering 0 to 100').format(score)) def validate_criteria_weights(self): weight = 0 for c in self.criteria: weight += c.weight if weight != 100: throw(_('Criteria weights must add up to 100%')) def calculate_total_score(self): scorecards = frappe.db.sql(""" SELECT scp.name FROM `tabSupplier Scorecard Period` scp WHERE scp.scorecard = %(sc)s ORDER BY scp.end_date DESC""", {"sc": self.name}, as_dict=1) period = 0 total_score = 0 total_max_score = 0 for scp in scorecards: my_sc = frappe.get_doc('Supplier Scorecard Period', scp.name) my_scp_weight = self.weighting_function my_scp_weight = my_scp_weight.replace('{period_number}', str(period)) my_scp_maxweight = my_scp_weight.replace('{total_score}', '100') my_scp_weight = my_scp_weight.replace('{total_score}', str(my_sc.total_score)) max_score = my_sc.calculate_weighted_score(my_scp_maxweight) score = my_sc.calculate_weighted_score(my_scp_weight) total_score += score total_max_score += max_score period += 1 if total_max_score > 0: self.supplier_score = round(100.0 * (total_score / total_max_score) ,1) else: self.supplier_score = 100 def update_standing(self): # Get the setup document for standing in self.standings: if (not standing.min_grade or (standing.min_grade <= self.supplier_score)) and \ (not standing.max_grade or (standing.max_grade > self.supplier_score)): self.status = standing.standing_name self.indicator_color = standing.standing_color self.notify_supplier = standing.notify_supplier self.notify_employee = standing.notify_employee self.employee_link = standing.employee_link #Update supplier standing info for fieldname in ('prevent_pos', 'prevent_rfqs','warn_rfqs','warn_pos'): self.set(fieldname, standing.get(fieldname)) frappe.db.set_value("Supplier", self.supplier, fieldname, self.get(fieldname)) @frappe.whitelist() def get_timeline_data(doctype, name): # Get a list of all the associated scorecards scs = frappe.get_doc(doctype, name) out = {} timeline_data = {} scorecards = 
frappe.db.sql(""" SELECT sc.name FROM `tabSupplier Scorecard Period` sc WHERE sc.scorecard = %(scs)s""", {"scs": scs.name}, as_dict=1) for sc in scorecards: start_date, end_date, total_score = frappe.db.get_value('Supplier Scorecard Period', sc.name, ['start_date', 'end_date', 'total_score']) for single_date in daterange(start_date, end_date): timeline_data[time.mktime(single_date.timetuple())] = total_score out['timeline_data'] = timeline_data return out def daterange(start_date, end_date): for n in range(int ((end_date - start_date).days)+1): yield start_date + timedelta(n) def refresh_scorecards(): scorecards = frappe.db.sql(""" SELECT sc.name FROM `tabSupplier Scorecard` sc""", {}, as_dict=1) for sc in scorecards: # Check to see if any new scorecard periods are created if make_all_scorecards(sc.name) > 0: # Save the scorecard to update the score and standings sc.save() @frappe.whitelist() def make_all_scorecards(docname): sc = frappe.get_doc('Supplier Scorecard', docname) supplier = frappe.get_doc('Supplier',sc.supplier) start_date = getdate(supplier.creation) end_date = get_scorecard_date(sc.period, start_date) todays = getdate(nowdate()) scp_count = 0 first_start_date = todays last_end_date = todays while (start_date < todays) and (end_date <= todays): # check to make sure there is no scorecard period already created scorecards = frappe.db.sql(""" SELECT scp.name FROM `tabSupplier Scorecard Period` scp WHERE scp.scorecard = %(sc)s AND ( (scp.start_date > %(end_date)s AND scp.end_date < %(start_date)s) OR (scp.start_date < %(end_date)s AND scp.end_date > %(start_date)s)) ORDER BY scp.end_date DESC""", {"sc": docname, "start_date": start_date, "end_date": end_date, "supplier": supplier}, as_dict=1) if len(scorecards) == 0: period_card = make_supplier_scorecard(docname, None) period_card.start_date = start_date period_card.end_date = end_date period_card.save() scp_count = scp_count + 1 if start_date < first_start_date: first_start_date = start_date 
last_end_date = end_date start_date = getdate(add_days(end_date,1)) end_date = get_scorecard_date(sc.period, start_date) if scp_count > 0: frappe.msgprint(_("Created {0} scorecards for {1} between: ").format(scp_count, sc.supplier) + str(first_start_date) + " - " + str(last_end_date)) return scp_count def get_scorecard_date(period, start_date): if period == 'Per Week': end_date = getdate(add_days(start_date,7)) elif period == 'Per Month': end_date = get_last_day(start_date) elif period == 'Per Year': end_date = add_days(add_years(start_date,1), -1) return end_date def make_default_records(): install_variable_docs = [ {"param_name": "total_accepted_items", "variable_label": "Total Accepted Items", \ "path": "get_total_accepted_items"}, {"param_name": "total_accepted_amount", "variable_label": "Total Accepted Amount", \ "path": "get_total_accepted_amount"}, {"param_name": "total_rejected_items", "variable_label": "Total Rejected Items", \ "path": "get_total_rejected_items"}, {"param_name": "total_rejected_amount", "variable_label": "Total Rejected Amount", \ "path": "get_total_rejected_amount"}, {"param_name": "total_received_items", "variable_label": "Total Received Items", \ "path": "get_total_received_items"}, {"param_name": "total_received_amount", "variable_label": "Total Received Amount", \ "path": "get_total_received_amount"}, {"param_name": "rfq_response_days", "variable_label": "RFQ Response Days", \ "path": "get_rfq_response_days"}, {"param_name": "sq_total_items", "variable_label": "SQ Total Items", \ "path": "get_sq_total_items"}, {"param_name": "sq_total_number", "variable_label": "SQ Total Number", \ "path": "get_sq_total_number"}, {"param_name": "rfq_total_number", "variable_label": "RFQ Total Number", \ "path": "get_rfq_total_number"}, {"param_name": "rfq_total_items", "variable_label": "RFQ Total Items", \ "path": "get_rfq_total_items"}, {"param_name": "tot_item_days", "variable_label": "Total Item Days", \ "path": "get_item_workdays"}, 
{"param_name": "on_time_shipment_num", "variable_label": "# of On Time Shipments", "path": \ "get_on_time_shipments"}, {"param_name": "cost_of_delayed_shipments", "variable_label": "Cost of Delayed Shipments", \ "path": "get_cost_of_delayed_shipments"}, {"param_name": "cost_of_on_time_shipments", "variable_label": "Cost of On Time Shipments", \ "path": "get_cost_of_on_time_shipments"}, {"param_name": "total_working_days", "variable_label": "Total Working Days", \ "path": "get_total_workdays"}, {"param_name": "tot_cost_shipments", "variable_label": "Total Cost of Shipments", \ "path": "get_total_cost_of_shipments"}, {"param_name": "tot_days_late", "variable_label": "Total Days Late", \ "path": "get_total_days_late"}, {"param_name": "total_shipments", "variable_label": "Total Shipments", \ "path": "get_total_shipments"} ] install_standing_docs = [ {"min_grade": 0.0, "prevent_rfqs": 1, "notify_supplier": 0, "max_grade": 30.0, "prevent_pos": 1, \ "standing_color": "Red", "notify_employee": 0, "standing_name": "Very Poor"}, {"min_grade": 30.0, "prevent_rfqs": 1, "notify_supplier": 0, "max_grade": 50.0, "prevent_pos": 0, \ "standing_color": "Red", "notify_employee": 0, "standing_name": "Poor"}, {"min_grade": 50.0, "prevent_rfqs": 0, "notify_supplier": 0, "max_grade": 80.0, "prevent_pos": 0, \ "standing_color": "Green", "notify_employee": 0, "standing_name": "Average"}, {"min_grade": 80.0, "prevent_rfqs": 0, "notify_supplier": 0, "max_grade": 100.0, "prevent_pos": 0, \ "standing_color": "Blue", "notify_employee": 0, "standing_name": "Excellent"}, ] for d in install_variable_docs: try: d['doctype'] = "Supplier Scorecard Variable" frappe.get_doc(d).insert() except frappe.NameError: pass for d in install_standing_docs: try: d['doctype'] = "Supplier Scorecard Standing" frappe.get_doc(d).insert() except frappe.NameError: pass
ychfan/tensorflow
refs/heads/master
tensorflow/python/keras/_impl/keras/utils/io_utils_test.py
34
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for io_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import numpy as np from tensorflow.python.keras._impl import keras from tensorflow.python.platform import test try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None def create_dataset(h5_path='test.h5'): x = np.random.randn(200, 10).astype('float32') y = np.random.randint(0, 2, size=(200, 1)) f = h5py.File(h5_path, 'w') # Creating dataset to store features x_dset = f.create_dataset('my_data', (200, 10), dtype='f') x_dset[:] = x # Creating dataset to store labels y_dset = f.create_dataset('my_labels', (200, 1), dtype='i') y_dset[:] = y f.close() class TestIOUtils(test.TestCase): def test_HDF5Matrix(self): if h5py is None: return temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir) h5_path = os.path.join(temp_dir, 'test.h5') create_dataset(h5_path) # Instantiating HDF5Matrix for the training set, # which is a slice of the first 150 elements x_train = keras.utils.io_utils.HDF5Matrix( h5_path, 'my_data', start=0, end=150) y_train = keras.utils.io_utils.HDF5Matrix( h5_path, 'my_labels', start=0, end=150) # Likewise for the test set x_test = keras.utils.io_utils.HDF5Matrix( h5_path, 'my_data', 
start=150, end=200) y_test = keras.utils.io_utils.HDF5Matrix( h5_path, 'my_labels', start=150, end=200) # HDF5Matrix behave more or less like Numpy matrices # with regard to indexing self.assertEqual(y_train.shape, (150, 1)) # But they do not support negative indices, so don't try print(x_train[-1]) self.assertEqual(y_train.dtype, np.dtype('i')) self.assertEqual(y_train.ndim, 2) self.assertEqual(y_train.size, 150) model = keras.models.Sequential() model.add(keras.layers.Dense(64, input_shape=(10,), activation='relu')) model.add(keras.layers.Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='sgd') # Note: you have to use shuffle='batch' or False with HDF5Matrix model.fit(x_train, y_train, batch_size=32, shuffle='batch', verbose=False) # test that evalutation and prediction # don't crash and return reasonable results out_pred = model.predict(x_test, batch_size=32, verbose=False) out_eval = model.evaluate(x_test, y_test, batch_size=32, verbose=False) self.assertEqual(out_pred.shape, (50, 1)) self.assertEqual(out_eval.shape, ()) self.assertGreater(out_eval, 0) if __name__ == '__main__': test.main()
nsone/nsone-cli
refs/heads/develop
ns1cli/commands/cmd_monitor.py
3
import click from ns1cli.cli import cli from ns1cli.util import Formatter from nsone.rest.resource import ResourceException class MonitorFormatter(Formatter): def print_monitor(self, mdata): regions = mdata.pop('regions') status = mdata.pop('status') rules = mdata.pop('rules') self.pretty_print(mdata) click.secho('REGIONS:', bold=True) for r in regions: self.out(' ' + r) click.secho('STATUS:', bold=True) for s in status: self.pretty_print(status[s], 4) click.secho('RULES:', bold=True) for r in rules: self.pretty_print(r, 4) @click.group('monitor', short_help='View monitoring jobs') @click.pass_context def cli(ctx): """View monitoring jobs.""" ctx.obj.formatter = MonitorFormatter(ctx.obj.get_config('output_format')) ctx.obj.monitor_api = ctx.obj.rest.monitors() @cli.command('list', short_help='List all active monitors') @click.option('--include', multiple=True, help='Display additional data', type=click.Choice(['id', 'job_type'])) @click.pass_context def list(ctx, include): """List of all monitoring jobs for the account, including configuration and current status details. Job status is shown per region, including the "global" region indicating the overal status of the monitoring job computed based on the status policy from the regional statuses. Status values both globally and per region include up, down, and pending. 
\b EXAMPLES: monitor list monitor list --include id monitor list --include id --include job_type """ try: mlist = ctx.obj.monitor_api.list() except ResourceException as e: raise click.ClickException('REST API: %s' % e.message) else: if ctx.obj.formatter.output_format == 'json': ctx.obj.formatter.out_json(mlist) return click.secho('MONITORS:', bold=True) for m in mlist: ctx.obj.formatter.out(' name: ' + m['name']) if 'id' in include: ctx.obj.formatter.out(' id: ' + m['id']) if 'job_type' in include: ctx.obj.formatter.out(' job_type: ' + m['job_type']) ctx.obj.formatter.out('') @cli.command('info', short_help='Get monitor details') @click.argument('JOBID') @click.pass_context def info(ctx, jobid): """Display details for a specific monitoring job based on its JOBID, including configuration and current status details. Job status is shown per region, including the "global" region indicating the overall status of the monitoring job computed based on the status policy from the regional statuses. Status values both globally and per region include up, down, and pending. \b EXAMPLES: monitor info 531a047f830f7803d5f0d2ca """ try: mdata = ctx.obj.monitor_api.retrieve(jobid) except ResourceException as e: raise click.ClickException('REST API: %s' % e.message) else: if ctx.obj.formatter.output_format == 'json': ctx.obj.formatter.out_json(mdata) return ctx.obj.formatter.print_monitor(mdata)
sorenh/cc
refs/heads/master
vendor/Twisted-10.0.0/twisted/__init__.py
13
# -*- test-case-name: twisted -*- # Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. """ Twisted: The Framework Of Your Internet. """ # Ensure the user is running the version of python we require. import sys if not hasattr(sys, "version_info") or sys.version_info < (2,3): raise RuntimeError("Twisted requires Python 2.3 or later.") del sys # Ensure compat gets imported from twisted.python import compat del compat # setup version from twisted._version import version __version__ = version.short()
nexiles/odoo
refs/heads/8.0
addons/auth_oauth/auth_oauth.py
321
from openerp.osv import osv, fields class auth_oauth_provider(osv.osv): """Class defining the configuration values of an OAuth2 provider""" _name = 'auth.oauth.provider' _description = 'OAuth2 provider' _order = 'name' _columns = { 'name' : fields.char('Provider name', required=True), # Name of the OAuth2 entity, Google, LinkedIn, etc 'client_id' : fields.char('Client ID'), # Our identifier 'auth_endpoint' : fields.char('Authentication URL', required=True), # OAuth provider URL to authenticate users 'scope' : fields.char('Scope'), # OAUth user data desired to access 'validation_endpoint' : fields.char('Validation URL', required=True),# OAuth provider URL to validate tokens 'data_endpoint' : fields.char('Data URL'), 'enabled' : fields.boolean('Allowed'), 'css_class' : fields.char('CSS class'), 'body' : fields.char('Body', required=True), 'sequence' : fields.integer(), } _defaults = { 'enabled' : False, 'css_class' : "zocial", }
ericlink/adms-server
refs/heads/master
playframework-dist/1.1-src/python/Lib/lib-tk/FileDialog.py
4
"""File selection dialog classes. Classes: - FileDialog - LoadFileDialog - SaveFileDialog """ from Tkinter import * from Dialog import Dialog import os import fnmatch dialogstates = {} class FileDialog: """Standard file selection dialog -- no checks on selected file. Usage: d = FileDialog(master) fname = d.go(dir_or_file, pattern, default, key) if fname is None: ...canceled... else: ...open file... All arguments to go() are optional. The 'key' argument specifies a key in the global dictionary 'dialogstates', which keeps track of the values for the directory and pattern arguments, overriding the values passed in (it does not keep track of the default argument!). If no key is specified, the dialog keeps no memory of previous state. Note that memory is kept even when the dialog is canceled. (All this emulates the behavior of the Macintosh file selection dialogs.) """ title = "File Selection Dialog" def __init__(self, master, title=None): if title is None: title = self.title self.master = master self.directory = None self.top = Toplevel(master) self.top.title(title) self.top.iconname(title) self.botframe = Frame(self.top) self.botframe.pack(side=BOTTOM, fill=X) self.selection = Entry(self.top) self.selection.pack(side=BOTTOM, fill=X) self.selection.bind('<Return>', self.ok_event) self.filter = Entry(self.top) self.filter.pack(side=TOP, fill=X) self.filter.bind('<Return>', self.filter_command) self.midframe = Frame(self.top) self.midframe.pack(expand=YES, fill=BOTH) self.filesbar = Scrollbar(self.midframe) self.filesbar.pack(side=RIGHT, fill=Y) self.files = Listbox(self.midframe, exportselection=0, yscrollcommand=(self.filesbar, 'set')) self.files.pack(side=RIGHT, expand=YES, fill=BOTH) btags = self.files.bindtags() self.files.bindtags(btags[1:] + btags[:1]) self.files.bind('<ButtonRelease-1>', self.files_select_event) self.files.bind('<Double-ButtonRelease-1>', self.files_double_event) self.filesbar.config(command=(self.files, 'yview')) self.dirsbar = 
Scrollbar(self.midframe) self.dirsbar.pack(side=LEFT, fill=Y) self.dirs = Listbox(self.midframe, exportselection=0, yscrollcommand=(self.dirsbar, 'set')) self.dirs.pack(side=LEFT, expand=YES, fill=BOTH) self.dirsbar.config(command=(self.dirs, 'yview')) btags = self.dirs.bindtags() self.dirs.bindtags(btags[1:] + btags[:1]) self.dirs.bind('<ButtonRelease-1>', self.dirs_select_event) self.dirs.bind('<Double-ButtonRelease-1>', self.dirs_double_event) self.ok_button = Button(self.botframe, text="OK", command=self.ok_command) self.ok_button.pack(side=LEFT) self.filter_button = Button(self.botframe, text="Filter", command=self.filter_command) self.filter_button.pack(side=LEFT, expand=YES) self.cancel_button = Button(self.botframe, text="Cancel", command=self.cancel_command) self.cancel_button.pack(side=RIGHT) self.top.protocol('WM_DELETE_WINDOW', self.cancel_command) # XXX Are the following okay for a general audience? self.top.bind('<Alt-w>', self.cancel_command) self.top.bind('<Alt-W>', self.cancel_command) def go(self, dir_or_file=os.curdir, pattern="*", default="", key=None): if key and dialogstates.has_key(key): self.directory, pattern = dialogstates[key] else: dir_or_file = os.path.expanduser(dir_or_file) if os.path.isdir(dir_or_file): self.directory = dir_or_file else: self.directory, default = os.path.split(dir_or_file) self.set_filter(self.directory, pattern) self.set_selection(default) self.filter_command() self.selection.focus_set() self.top.wait_visibility() # window needs to be visible for the grab self.top.grab_set() self.how = None self.master.mainloop() # Exited by self.quit(how) if key: directory, pattern = self.get_filter() if self.how: directory = os.path.dirname(self.how) dialogstates[key] = directory, pattern self.top.destroy() return self.how def quit(self, how=None): self.how = how self.master.quit() # Exit mainloop() def dirs_double_event(self, event): self.filter_command() def dirs_select_event(self, event): dir, pat = self.get_filter() subdir = 
self.dirs.get('active') dir = os.path.normpath(os.path.join(self.directory, subdir)) self.set_filter(dir, pat) def files_double_event(self, event): self.ok_command() def files_select_event(self, event): file = self.files.get('active') self.set_selection(file) def ok_event(self, event): self.ok_command() def ok_command(self): self.quit(self.get_selection()) def filter_command(self, event=None): dir, pat = self.get_filter() try: names = os.listdir(dir) except os.error: self.master.bell() return self.directory = dir self.set_filter(dir, pat) names.sort() subdirs = [os.pardir] matchingfiles = [] for name in names: fullname = os.path.join(dir, name) if os.path.isdir(fullname): subdirs.append(name) elif fnmatch.fnmatch(name, pat): matchingfiles.append(name) self.dirs.delete(0, END) for name in subdirs: self.dirs.insert(END, name) self.files.delete(0, END) for name in matchingfiles: self.files.insert(END, name) head, tail = os.path.split(self.get_selection()) if tail == os.curdir: tail = '' self.set_selection(tail) def get_filter(self): filter = self.filter.get() filter = os.path.expanduser(filter) if filter[-1:] == os.sep or os.path.isdir(filter): filter = os.path.join(filter, "*") return os.path.split(filter) def get_selection(self): file = self.selection.get() file = os.path.expanduser(file) return file def cancel_command(self, event=None): self.quit() def set_filter(self, dir, pat): if not os.path.isabs(dir): try: pwd = os.getcwd() except os.error: pwd = None if pwd: dir = os.path.join(pwd, dir) dir = os.path.normpath(dir) self.filter.delete(0, END) self.filter.insert(END, os.path.join(dir or os.curdir, pat or "*")) def set_selection(self, file): self.selection.delete(0, END) self.selection.insert(END, os.path.join(self.directory, file)) class LoadFileDialog(FileDialog): """File selection dialog which checks that the file exists.""" title = "Load File Selection Dialog" def ok_command(self): file = self.get_selection() if not os.path.isfile(file): self.master.bell() 
else: self.quit(file) class SaveFileDialog(FileDialog): """File selection dialog which checks that the file may be created.""" title = "Save File Selection Dialog" def ok_command(self): file = self.get_selection() if os.path.exists(file): if os.path.isdir(file): self.master.bell() return d = Dialog(self.top, title="Overwrite Existing File Question", text="Overwrite existing file %r?" % (file,), bitmap='questhead', default=1, strings=("Yes", "Cancel")) if d.num != 0: return else: head, tail = os.path.split(file) if not os.path.isdir(head): self.master.bell() return self.quit(file) def test(): """Simple test program.""" root = Tk() root.withdraw() fd = LoadFileDialog(root) loadfile = fd.go(key="test") fd = SaveFileDialog(root) savefile = fd.go(key="test") print loadfile, savefile if __name__ == '__main__': test()
jmschrei/scikit-learn
refs/heads/master
sklearn/datasets/tests/test_lfw.py
230
"""This test for the LFW require medium-size data dowloading and processing If the data has not been already downloaded by running the examples, the tests won't run (skipped). If the test are run, the first execution will be long (typically a bit more than a couple of minutes) but as the dataset loader is leveraging joblib, successive runs will be fast (less than 200ms). """ import random import os import shutil import tempfile import numpy as np from sklearn.externals import six try: try: from scipy.misc import imsave except ImportError: from scipy.misc.pilutil import imsave except ImportError: imsave = None from sklearn.datasets import load_lfw_pairs from sklearn.datasets import load_lfw_people from sklearn.datasets import fetch_lfw_pairs from sklearn.datasets import fetch_lfw_people from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import SkipTest from sklearn.utils.testing import raises SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_") SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_") LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home') FAKE_NAMES = [ 'Abdelatif_Smith', 'Abhati_Kepler', 'Camara_Alvaro', 'Chen_Dupont', 'John_Lee', 'Lin_Bauman', 'Onur_Lopez', ] def setup_module(): """Test fixture run once and common to all tests of this module""" if imsave is None: raise SkipTest("PIL not installed.") if not os.path.exists(LFW_HOME): os.makedirs(LFW_HOME) random_state = random.Random(42) np_rng = np.random.RandomState(42) # generate some random jpeg files for each person counts = {} for name in FAKE_NAMES: folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name) if not os.path.exists(folder_name): os.makedirs(folder_name) n_faces = np_rng.randint(1, 5) counts[name] = n_faces for i in range(n_faces): file_path = os.path.join(folder_name, name + '_%04d.jpg' % i) uniface = 
np_rng.randint(0, 255, size=(250, 250, 3)) try: imsave(file_path, uniface) except ImportError: raise SkipTest("PIL not installed") # add some random file pollution to test robustness with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f: f.write(six.b('Text file to be ignored by the dataset loader.')) # generate some pairing metadata files using the same format as LFW with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f: f.write(six.b("10\n")) more_than_two = [name for name, count in six.iteritems(counts) if count >= 2] for i in range(5): name = random_state.choice(more_than_two) first, second = random_state.sample(range(counts[name]), 2) f.write(six.b('%s\t%d\t%d\n' % (name, first, second))) for i in range(5): first_name, second_name = random_state.sample(FAKE_NAMES, 2) first_index = random_state.choice(np.arange(counts[first_name])) second_index = random_state.choice(np.arange(counts[second_name])) f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index, second_name, second_index))) with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f: f.write(six.b("Fake place holder that won't be tested")) with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f: f.write(six.b("Fake place holder that won't be tested")) def teardown_module(): """Test fixture (clean up) run once after all tests of this module""" if os.path.isdir(SCIKIT_LEARN_DATA): shutil.rmtree(SCIKIT_LEARN_DATA) if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA): shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA) @raises(IOError) def test_load_empty_lfw_people(): fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False) def test_load_lfw_people_deprecation(): msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be " "removed in 0.19." 
"Use fetch_lfw_people(download_if_missing=False) instead.") assert_warns_message(DeprecationWarning, msg, load_lfw_people, data_home=SCIKIT_LEARN_DATA) def test_load_fake_lfw_people(): lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=3, download_if_missing=False) # The data is croped around the center as a rectangular bounding box # arounthe the face. Colors are converted to gray levels: assert_equal(lfw_people.images.shape, (10, 62, 47)) assert_equal(lfw_people.data.shape, (10, 2914)) # the target is array of person integer ids assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2]) # names of the persons can be found using the target_names array expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez'] assert_array_equal(lfw_people.target_names, expected_classes) # It is possible to ask for the original data without any croping or color # conversion and not limit on the number of picture per person lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, resize=None, slice_=None, color=True, download_if_missing=False) assert_equal(lfw_people.images.shape, (17, 250, 250, 3)) # the ids and class names are the same as previously assert_array_equal(lfw_people.target, [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2]) assert_array_equal(lfw_people.target_names, ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro', 'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez']) @raises(ValueError) def test_load_fake_lfw_people_too_restrictive(): fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False) @raises(IOError) def test_load_empty_lfw_pairs(): fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False) def test_load_lfw_pairs_deprecation(): msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be " "removed in 0.19." 
"Use fetch_lfw_pairs(download_if_missing=False) instead.") assert_warns_message(DeprecationWarning, msg, load_lfw_pairs, data_home=SCIKIT_LEARN_DATA) def test_load_fake_lfw_pairs(): lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False) # The data is croped around the center as a rectangular bounding box # arounthe the face. Colors are converted to gray levels: assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47)) # the target is whether the person is the same or not assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) # names of the persons can be found using the target_names array expected_classes = ['Different persons', 'Same person'] assert_array_equal(lfw_pairs_train.target_names, expected_classes) # It is possible to ask for the original data without any croping or color # conversion lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, resize=None, slice_=None, color=True, download_if_missing=False) assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3)) # the ids and class names are the same as previously assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]) assert_array_equal(lfw_pairs_train.target_names, expected_classes)
xxd3vin/spp-sdk
refs/heads/master
opt/Python27/Lib/test/badsyntax_future8.py
202
"""This is a test""" from __future__ import * def f(x): def g(y): return x + y return g print f(2)(4)
flower-pot/xf-indicator
refs/heads/master
xf_indicator_lib/add_project_window.py
1
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Frederic Branczyk fbranczyk@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### END LICENSE

### DO NOT EDIT THIS FILE ###

"""this window adjusts settings """

from gi.repository import Gtk # pylint: disable=E0611

import logging
logger = logging.getLogger('xf_indicator_lib')

from . helpers import get_builder, show_uri, get_help_uri


class AddProjectWindow(Gtk.Window):
    __gtype_name__ = "AddProjectWindow"

    def __new__(cls):
        """Special static method that's automatically called by Python when
        constructing a new instance of this class.

        Returns a fully instantiated AddProjectWindow object.
        """
        # The instance is created by GtkBuilder from the .ui definition,
        # NOT by the usual type.__call__ path -- __new__ deliberately
        # returns that builder-made object instead of allocating one.
        builder = get_builder('AddProjectXfIndicatorWindow')
        new_object = builder.get_object("add_project_xf_indicator_window")
        # Because __new__ returns a pre-built object, __init__ is never
        # usable for setup; finish_initializing() plays that role.
        new_object.finish_initializing(builder)
        return new_object

    def finish_initializing(self, builder):
        """Called while initializing this instance in __new__

        finish_initalizing should be called after parsing the ui definition
        and creating a PreferencesWindow object with it in order to
        finish initializing the start of the new PerferencesXfIndicatorWindow
        instance.

        Put your initialization code in here and leave __init__ undefined.
        """
        # Get a reference to the builder and set up the signals.
        self.builder = builder
        # get_ui(self, True) connects signal handlers defined on this
        # object to the widgets described in the .ui file.
        self.ui = builder.get_ui(self, True)

        # code for other initialization actions should be added here
ianyh/heroku-buildpack-python-opencv
refs/heads/master
vendor/setuptools-2.1/setuptools/tests/test_dist_info.py
452
"""Test .dist-info style distributions. """ import os import shutil import tempfile import unittest import textwrap try: import ast except: pass import pkg_resources from setuptools.tests.py26compat import skipIf def DALS(s): "dedent and left-strip" return textwrap.dedent(s).lstrip() class TestDistInfo(unittest.TestCase): def test_distinfo(self): dists = {} for d in pkg_resources.find_distributions(self.tmpdir): dists[d.project_name] = d assert len(dists) == 2, dists unversioned = dists['UnversionedDistribution'] versioned = dists['VersionedDistribution'] assert versioned.version == '2.718' # from filename assert unversioned.version == '0.3' # from METADATA @skipIf('ast' not in globals(), "ast is used to test conditional dependencies (Python >= 2.6)") def test_conditional_dependencies(self): requires = [pkg_resources.Requirement.parse('splort==4'), pkg_resources.Requirement.parse('quux>=1.1')] for d in pkg_resources.find_distributions(self.tmpdir): self.assertEqual(d.requires(), requires[:1]) self.assertEqual(d.requires(extras=('baz',)), requires) self.assertEqual(d.extras, ['baz']) def setUp(self): self.tmpdir = tempfile.mkdtemp() versioned = os.path.join(self.tmpdir, 'VersionedDistribution-2.718.dist-info') os.mkdir(versioned) metadata_file = open(os.path.join(versioned, 'METADATA'), 'w+') try: metadata_file.write(DALS( """ Metadata-Version: 1.2 Name: VersionedDistribution Requires-Dist: splort (4) Provides-Extra: baz Requires-Dist: quux (>=1.1); extra == 'baz' """)) finally: metadata_file.close() unversioned = os.path.join(self.tmpdir, 'UnversionedDistribution.dist-info') os.mkdir(unversioned) metadata_file = open(os.path.join(unversioned, 'METADATA'), 'w+') try: metadata_file.write(DALS( """ Metadata-Version: 1.2 Name: UnversionedDistribution Version: 0.3 Requires-Dist: splort (==4) Provides-Extra: baz Requires-Dist: quux (>=1.1); extra == 'baz' """)) finally: metadata_file.close() def tearDown(self): shutil.rmtree(self.tmpdir)
OwnROM-Devices/OwnKernel-sprout
refs/heads/own-mm
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
12527
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

import errno
import os

# futex(2) operation codes and flags, mirrored from <linux/futex.h>.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000

def avg(total, n):
    """Return the mean of a running total over n samples."""
    return total / n

def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) pair into a single nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    """Whole seconds in a nanosecond count.

    Uses // so the result stays an integer on Python 3 as well
    (identical to the original Python 2 behaviour for int inputs).
    """
    return nsecs // NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    """Sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    """Format a nanosecond count as 'seconds.nanoseconds'.

    BUG FIX: the original assignment ended with a stray trailing comma,
    which silently turned the returned value into a one-element tuple
    instead of a string.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))

def add_stats(dict, key, value):
    """Fold value into dict[key] kept as a (min, max, avg, count) tuple.

    Note: avg is a smoothed value ((avg + value) / 2), not a true
    arithmetic mean over all samples -- preserved from the original.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)

def clear_term():
    # ANSI: cursor home + erase display.
    print("\x1b[H\x1b[2J")

audit_package_warned = False

try:
    import audit
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except AttributeError:
        # Older audit bindings lack MACH_ARMEB; that is fine.
        pass
    machine_id = machine_to_id[os.uname()[4]]
except (ImportError, KeyError):
    # Either audit-libs-python is missing or the host architecture is
    # not in the table; syscall_name() degrades to raw numbers below.
    if not audit_package_warned:
        audit_package_warned = True
        print("Install the audit-libs-python package to get syscall names")

def syscall_name(id):
    """Best-effort translation of a syscall number to its name."""
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # audit / machine_id may be undefined if the import above failed.
        return str(id)

def strerror(nr):
    """Symbolic errno name for nr (sign-insensitive)."""
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        return "Unknown %d errno" % nr
resmo/ansible
refs/heads/devel
lib/ansible/modules/network/onyx/onyx_ptp_global.py
59
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: onyx_ptp_global
version_added: "2.8"
author: "Anas Badaha (@anasb)"
short_description: Configures PTP Global parameters
description:
  - This module provides declarative management of PTP Global configuration
    on Mellanox ONYX network devices.
notes:
  - Tested on ONYX 3.6.8130
    ptp and ntp protocols cannot be enabled at the same time
options:
  ptp_state:
    description:
      - PTP state.
    choices: ['enabled', 'disabled']
    default: enabled
  ntp_state:
    description:
      - NTP state.
    choices: ['enabled', 'disabled']
  domain:
    description:
      - "set PTP domain number Range 0-127"
  primary_priority:
    description:
      - "set PTP primary priority Range 0-255"
  secondary_priority:
    description:
      - "set PTP secondary priority Range 0-255"
"""

EXAMPLES = """
- name: configure PTP
  onyx_ptp_global:
    ntp_state: enabled
    ptp_state: disabled
    domain: 127
    primary_priority: 128
    secondary_priority: 128
"""

RETURN = """
commands:
  description: The list of configuration mode commands to send to the device.
  returned: always
  type: list
  sample:
    - no ntp enable
    - protocol ptp
    - ptp domain 127
"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import show_cmd
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule


class OnyxPtpGlobalModule(BaseOnyxModule):
    """Declaratively manage global PTP/NTP settings on Mellanox ONYX.

    FIXES vs. the original:
    - _set_ntp_config indexed ntp_config[0] *before* checking for
      emptiness, so an empty/None reply from the device raised
      IndexError/TypeError instead of being ignored.
    - DOCUMENTATION said priorities range 0-225 while the validators
      enforce 0-255 (the PTP priority1/priority2 range); the doc text
      now matches the code.
    """

    def init_module(self):
        """ initialize module """
        # NOTE(review): type=int passes the builtin as a callable
        # validator; Ansible accepts callables here, though the string
        # 'int' is the documented form -- confirm before changing.
        element_spec = dict(
            ntp_state=dict(choices=['enabled', 'disabled']),
            ptp_state=dict(choices=['enabled', 'disabled'],
                           default='enabled'),
            domain=dict(type=int),
            primary_priority=dict(type=int),
            secondary_priority=dict(type=int)
        )
        argument_spec = dict()
        argument_spec.update(element_spec)
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            supports_check_mode=True)

    def get_required_config(self):
        """Snapshot the requested (desired) configuration from params."""
        module_params = self._module.params
        self._required_config = dict(module_params)
        self._validate_param_values(self._required_config)

    def _validate_param_values(self, obj, param=None):
        super(OnyxPtpGlobalModule, self).validate_param_values(obj, param)
        # The device cannot run both time protocols at once.
        if obj['ntp_state'] == 'enabled' and obj['ptp_state'] == 'enabled':
            self._module.fail_json(msg='PTP State and NTP State Can not be enabled at the same time')

    def validate_domain(self, value):
        """PTP domain number is limited to 0-127."""
        if value and not 0 <= int(value) <= 127:
            self._module.fail_json(msg='domain must be between 0 and 127')

    def validate_primary_priority(self, value):
        """PTP priority1 is an 8-bit value (0-255)."""
        if value and not 0 <= int(value) <= 255:
            self._module.fail_json(msg='Primary Priority must be between 0 and 255')

    def validate_secondary_priority(self, value):
        """PTP priority2 is an 8-bit value (0-255)."""
        if value and not 0 <= int(value) <= 255:
            self._module.fail_json(msg='Secondary Priority must be between 0 and 255')

    def _set_ntp_config(self, ntp_config):
        """Record the device's current NTP state in _current_config."""
        # BUG FIX: check the container for emptiness BEFORE indexing it.
        if not ntp_config:
            return
        ntp_config = ntp_config[0]
        if not ntp_config:
            return
        ntp_state = ntp_config.get('NTP enabled')
        if ntp_state == "yes":
            self._current_config['ntp_state'] = "enabled"
        else:
            self._current_config['ntp_state'] = "disabled"

    def _set_ptp_config(self, ptp_config):
        """Record the device's current PTP state in _current_config."""
        if ptp_config is None:
            self._current_config['ptp_state'] = 'disabled'
        else:
            self._current_config['ptp_state'] = 'enabled'
            self._current_config['domain'] = int(ptp_config['Domain'])
            self._current_config['primary_priority'] = int(ptp_config['Priority1'])
            self._current_config['secondary_priority'] = int(ptp_config['Priority2'])

    def _show_ntp_config(self):
        cmd = "show ntp configured"
        # fail_on_error=False: an absent NTP config is not an error.
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)

    def _show_ptp_config(self):
        cmd = "show ptp clock"
        return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)

    def load_current_config(self):
        """Query the device and build the current-state dict."""
        self._current_config = dict()
        ntp_config = self._show_ntp_config()
        self._set_ntp_config(ntp_config)
        ptp_config = self._show_ptp_config()
        self._set_ptp_config(ptp_config)

    def generate_commands(self):
        """Diff required vs. current config and emit CLI commands."""
        ntp_state = self._required_config.get("ntp_state")
        if ntp_state == "enabled":
            self._enable_ntp()
        elif ntp_state == "disabled":
            self._disable_ntp()

        ptp_state = self._required_config.get("ptp_state", "enabled")
        if ptp_state == "enabled":
            self._enable_ptp()
        else:
            self._disable_ptp()

        domain = self._required_config.get("domain")
        if domain is not None:
            curr_domain = self._current_config.get("domain")
            if domain != curr_domain:
                self._commands.append('ptp domain %d' % domain)

        primary_priority = self._required_config.get("primary_priority")
        if primary_priority is not None:
            curr_primary_priority = self._current_config.get("primary_priority")
            if primary_priority != curr_primary_priority:
                self._commands.append('ptp priority1 %d' % primary_priority)

        secondary_priority = self._required_config.get("secondary_priority")
        if secondary_priority is not None:
            curr_secondary_priority = self._current_config.get("secondary_priority")
            if secondary_priority != curr_secondary_priority:
                self._commands.append('ptp priority2 %d' % secondary_priority)

    def _enable_ptp(self):
        curr_ptp_state = self._current_config['ptp_state']
        if curr_ptp_state == 'disabled':
            self._commands.append('protocol ptp')

    def _disable_ptp(self):
        curr_ptp_state = self._current_config['ptp_state']
        if curr_ptp_state == 'enabled':
            self._commands.append('no protocol ptp')

    def _enable_ntp(self):
        curr_ntp_state = self._current_config.get('ntp_state')
        if curr_ntp_state == 'disabled':
            self._commands.append('ntp enable')

    def _disable_ntp(self):
        curr_ntp_state = self._current_config['ntp_state']
        if curr_ntp_state == 'enabled':
            self._commands.append('no ntp enable')


def main():
    """ main entry point for module execution
    """
    OnyxPtpGlobalModule.main()


if __name__ == '__main__':
    main()
pp-mo/iris
refs/heads/master
lib/iris/fileformats/_pp_lbproc_pairs.py
5
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.

import itertools

# LBPROC codes and their English equivalents
LBPROC_PAIRS = (
    (1, "Difference from another experiment"),
    (2, "Difference from zonal (or other spatial) mean"),
    (4, "Difference from time mean"),
    (8, "X-derivative (d/dx)"),
    (16, "Y-derivative (d/dy)"),
    (32, "Time derivative (d/dt)"),
    (64, "Zonal mean field"),
    (128, "Time mean field"),
    (256, "Product of two fields"),
    (512, "Square root of a field"),
    (1024, "Difference between fields at levels BLEV and BRLEV"),
    (2048, "Mean over layer between levels BLEV and BRLEV"),
    (4096, "Minimum value of field during time period"),
    (8192, "Maximum value of field during time period"),
    (16384, "Magnitude of a vector, not specifically wind speed"),
    (32768, "Log10 of a field"),
    (65536, "Variance of a field"),
    (131072, "Mean over an ensemble of parallel runs"),
)

# LBPROC_MAP is a bidirectional lookup: each numeric code maps to its
# English description and each description maps back to its code.
LBPROC_MAP = dict(
    itertools.chain(
        LBPROC_PAIRS,
        ((description, code) for code, description in LBPROC_PAIRS),
    )
)
diamond-org/flask-diamond
refs/heads/master
setup.py
1
# -*- coding: utf-8 -*-
# Flask-Diamond (c) Ian Dennis Miller

import re
import os
import codecs
from setuptools import setup, find_packages


def read(*rnames):
    """Return the UTF-8 text of a file located relative to this setup.py.

    FIX: the original used ``codecs.open(...).read()`` and never closed
    the handle; the context manager guarantees it is closed.
    """
    path = os.path.join(os.path.dirname(__file__), *rnames)
    with codecs.open(path, 'r', 'utf-8') as handle:
        return handle.read()


def grep(attrname):
    """Extract a single-quoted attribute value from __meta__.py.

    The single-element unpack raises ValueError unless exactly one match
    is found, guarding against packaging stale or duplicated metadata.
    """
    pattern = r"{0}\W*=\W*'([^']+)'".format(attrname)
    strval, = re.findall(pattern, read('flask_diamond/__meta__.py'))
    return strval


setup(
    version=grep('__version__'),
    name='Flask-Diamond',
    description=(
        "Flask-Diamond is a batteries-included Flask framework, "
        "sortof like Django but radically decomposable. "
        "Flask-Diamond offers some opinions about "
        "data-centric Internet applications and systems."
    ),
    packages=find_packages(),
    scripts=[
        "bin/flask-diamond",
    ],
    long_description=read('Readme.rst'),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Framework :: Flask",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Operating System :: MacOS :: MacOS X",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Topic :: Internet :: WWW/HTTP",
    ],
    include_package_data=True,
    keywords='',
    author=grep('__author__'),
    author_email=grep('__email__'),
    url=grep('__url__'),
    # NOTE(review): this passes the raw text of requirements.txt;
    # setuptools tolerates a string, but a list of requirement lines
    # (read(...).splitlines()) is the documented form -- confirm that
    # requirements.txt contains no comments/options before changing.
    install_requires=read('requirements.txt'),
    license='MIT',
    zip_safe=False,
)
pi19404/mbed
refs/heads/master
workspace_tools/host_tests/echo_flow_control.py
125
""" mbed SDK Copyright (c) 2011-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from host_test import Test class EchoTest(Test): def __init__(self): Test.__init__(self) self.mbed.init_serial() self.mbed.extra_serial.rtscts = True self.mbed.reset() def test(self): self.mbed.flush() self.notify("Starting the ECHO test") TEST="longer serial test" check = True for i in range(1, 100): self.mbed.extra_serial.write(TEST + "\n") l = self.mbed.extra_serial.readline().strip() if not l: continue if l != TEST: check = False self.notify('"%s" != "%s"' % (l, TEST)) else: if (i % 10) == 0: self.notify('.') return check if __name__ == '__main__': EchoTest().run()
dominicgs/GreatFET-experimental
refs/heads/master
host/greatfet/gnuradio/gladiolus.py
2
#
# Gladiolus source/sink for Software Defined IR
#

import array

import numpy as np
from gnuradio import gr

from greatfet import GreatFETSingleton
from .block import GreatFETStreamingSource


class GladiolusSource(GreatFETStreamingSource):
    """GNU Radio source block backed by a GreatFET Gladiolus SDIR neighbor."""

    def set_up_streaming(self, gain, dc_coupled):
        """Configure gain and coupling, then start the SDIR receive stream.

        FIX: removed a leftover ``print(dc_coupled)`` debug statement that
        spammed stdout every time the flowgraph started.
        """
        self.coupling = dc_coupled
        self.gf.sdir.set_gain(gain)
        # The flag is inverted before being handed to the driver --
        # presumably set_coupling() expects "AC coupled?"; confirm
        # against the greatfet SDIR API before relying on this.
        self.gf.sdir.set_coupling(not dc_coupled)
        return self.gf.sdir.start_receive()

    def tear_down_streaming(self):
        """Stop the SDIR stream when the flowgraph shuts down."""
        self.gf.sdir.stop()
beni55/sentry
refs/heads/master
src/sentry/plugins/bases/tag.py
2
""" sentry.plugins.bases.tag ~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from sentry.models import Group from sentry.plugins import Plugin class TagPlugin(Plugin): tag = None tag_label = None project_default_enabled = True def get_tag_values(self, event, **kwargs): """ Must return a list of values. >>> get_tag_pairs(event) [tag1, tag2, tag3] """ raise NotImplementedError def get_tags(self, event, **kwargs): return [(self.tag, v) for v in self.get_tag_values(event)] def post_process(self, group, event, is_new, is_sample, **kwargs): # legacy compatibility for older plugins if not hasattr(Plugin, 'get_tags'): Group.objects.add_tags(group, self.get_tags(event))
googlei18n/ufo2ft
refs/heads/master
Lib/ufo2ft/featureWriters/baseFeatureWriter.py
2
from __future__ import (
    print_function,
    division,
    absolute_import,
    unicode_literals,
)
from fontTools.misc.py23 import SimpleNamespace
from collections import OrderedDict
import logging
from ufo2ft.featureWriters import ast


class BaseFeatureWriter(object):
    """Abstract features writer.

    The `tableTag` class attribute (str) states the tag of the OpenType
    Layout table which the generated features are intended for.
    For example: "GPOS", "GSUB", "BASE", etc.

    The `features` class attribute defines the set of all the features
    that this writer supports. If you want to only write some of the
    available features you can provide a smaller sequence to 'features'
    constructor argument. By the default all the features supported by
    this writer will be outputted.

    Two writing modes are defined here:
    1) "skip" (default) will not write features if already present;
    2) "append" will add additional lookups to an existing feature,
       if present, or it will add a new one at the end of all features.

    Subclasses can set a different default mode or define a different
    set of `_SUPPORTED_MODES`.

    The `options` class attribute contains a mapping of option names
    with their default values. These can be overridden on an instance
    by passing keyword arguments to the constructor.
    """

    tableTag = None
    features = frozenset()
    mode = "skip"
    options = {}

    _SUPPORTED_MODES = frozenset(["skip", "append"])

    def __init__(self, features=None, mode=None, **kwargs):
        if features is not None:
            features = frozenset(features)
            assert features, "features cannot be empty"
            # Narrowing only: an instance may support fewer features than
            # its class declares, never more.
            unsupported = features.difference(self.__class__.features)
            if unsupported:
                raise ValueError("unsupported: %s" % ", ".join(unsupported))
            self.features = features

        if mode is not None:
            self.mode = mode
        if self.mode not in self._SUPPORTED_MODES:
            raise ValueError(self.mode)

        # Unknown option names fail loudly instead of being silently
        # ignored; known ones override the class-level defaults.
        options = dict(self.__class__.options)
        for k in kwargs:
            if k not in options:
                raise TypeError("unsupported keyword argument: %r" % k)
            options[k] = kwargs[k]
        self.options = SimpleNamespace(**options)

        logger = ".".join([self.__class__.__module__, self.__class__.__name__])
        self.log = logging.getLogger(logger)

    def setContext(self, font, feaFile, compiler=None):
        """ Populate a temporary `self.context` namespace, which is
        reset after each new call to `_write` method.
        Subclasses can override this to provide contextual information
        which depends on other data, or set any temporary attributes.

        The default implementation sets:
        - the current font;
        - the current FeatureFile object;
        - the current compiler instance (only present when this writer
          was instantiated from a FeatureCompiler);
        - a set of features (tags) to be generated. If self.mode is
          "skip", these are all the features which are _not_ already
          present.

        Returns the context namespace instance.
        """
        todo = set(self.features)
        if self.mode == "skip":
            # Don't regenerate features the .fea file already defines.
            existing = ast.findFeatureTags(feaFile)
            todo.difference_update(existing)

        self.context = SimpleNamespace(
            font=font, feaFile=feaFile, compiler=compiler, todo=todo
        )

        return self.context

    def shouldContinue(self):
        """ Decide whether to start generating features or return early.
        Returns a boolean: True to proceed, False to skip.

        Sublcasses may override this to skip generation based on the
        presence or lack of other required pieces of font data.
        """
        if not self.context.todo:
            self.log.debug("No features to be generated; skipped")
            return False
        return True

    def write(self, font, feaFile, compiler=None):
        """Write features and class definitions for this font to a feaLib
        FeatureFile object.
        Returns True if feature file was modified, False if no new features
        were generated.
        """
        self.setContext(font, feaFile, compiler=compiler)
        try:
            if self.shouldContinue():
                return self._write()
            else:
                return False
        finally:
            # The context is per-write scratch state; always drop it so a
            # stale context can never leak into the next write() call.
            del self.context

    def _write(self):
        """Subclasses must override this."""
        raise NotImplementedError

    def makeUnicodeToGlyphNameMapping(self):
        """Return the Unicode to glyph name mapping for the current font.
        """
        # Try to get the "best" Unicode cmap subtable if this writer is running
        # in the context of a FeatureCompiler, else create a new mapping from
        # the UFO glyphs
        compiler = self.context.compiler
        cmap = None
        if compiler is not None:
            table = compiler.ttFont.get("cmap")
            if table is not None:
                cmap = table.getBestCmap()
        if cmap is None:
            from ufo2ft.util import makeUnicodeToGlyphNameMapping

            if compiler is not None:
                glyphSet = compiler.glyphSet
            else:
                glyphSet = self.context.font
            cmap = makeUnicodeToGlyphNameMapping(glyphSet)
        return cmap

    def getOrderedGlyphSet(self):
        """Return OrderedDict[glyphName, glyph] sorted by glyphOrder.
        """
        compiler = self.context.compiler
        if compiler is not None:
            # The compiler's glyphSet is already in official order.
            return compiler.glyphSet

        from ufo2ft.util import makeOfficialGlyphOrder

        glyphSet = self.context.font
        glyphOrder = makeOfficialGlyphOrder(self.context.font)
        return OrderedDict((gn, glyphSet[gn]) for gn in glyphOrder)

    def compileGSUB(self):
        """Compile a temporary GSUB table from the current feature file.
        """
        from ufo2ft.util import compileGSUB

        compiler = self.context.compiler
        if compiler is not None:
            # The result is cached in the compiler instance, so if another
            # writer requests one it is not compiled again.
            if hasattr(compiler, "_gsub"):
                return compiler._gsub
            glyphOrder = compiler.ttFont.getGlyphOrder()
        else:
            # the 'real' glyph order doesn't matter because the table is not
            # compiled to binary, only the glyph names are used
            glyphOrder = sorted(self.context.font.keys())
        gsub = compileGSUB(self.context.feaFile, glyphOrder)
        if compiler and not hasattr(compiler, "_gsub"):
            compiler._gsub = gsub
        return gsub
seem-sky/kbengine
refs/heads/master
kbe/src/lib/python/Lib/idlelib/SearchDialog.py
70
import re  # FIX: _search_dialog() below uses re.split; this import was missing.

from tkinter import *

from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase


def _setup(text):
    """Return the per-root SearchDialog, creating and caching it on the
    shared SearchEngine instance the first time."""
    root = text._root()
    engine = SearchEngine.get(root)
    if not hasattr(engine, "_searchdialog"):
        engine._searchdialog = SearchDialog(root, engine)
    return engine._searchdialog


def find(text):
    """Open the dialog preloaded with the current selection as pattern."""
    pat = text.get("sel.first", "sel.last")
    return _setup(text).open(text, pat)


def find_again(text):
    """Repeat the previous search in this text widget."""
    return _setup(text).find_again(text)


def find_selection(text):
    """Search for the currently selected text."""
    return _setup(text).find_selection(text)


class SearchDialog(SearchDialogBase):
    """Modeless dialog that finds and selects the next pattern match."""

    def create_widgets(self):
        f = SearchDialogBase.create_widgets(self)
        self.make_button("Find Next", self.default_command, 1)

    def default_command(self, event=None):
        """Handler for the 'Find Next' button / Return key."""
        if not self.engine.getprog():
            # Invalid or empty regex -- getprog() already reported it.
            return
        self.find_again(self.text)

    def find_again(self, text):
        """Find the next occurrence; select it and return True, or ring
        the bell and return False when there is no (new) match."""
        if not self.engine.getpat():
            self.open(text)
            return False
        if not self.engine.getprog():
            return False
        res = self.engine.search_text(text)
        if res:
            line, m = res
            i, j = m.span()
            first = "%d.%d" % (line, i)
            last = "%d.%d" % (line, j)
            try:
                # If the hit is already selected, signal "nothing new".
                selfirst = text.index("sel.first")
                sellast = text.index("sel.last")
                if selfirst == first and sellast == last:
                    text.bell()
                    return False
            except TclError:
                # No current selection -- fine, just select the hit.
                pass
            text.tag_remove("sel", "1.0", "end")
            text.tag_add("sel", first, last)
            # Leave the insert cursor at the leading edge of the match
            # relative to the search direction.
            text.mark_set("insert", self.engine.isback() and first or last)
            text.see("insert")
            return True
        else:
            text.bell()
            return False

    def find_selection(self, text):
        """Use the selection as the pattern, then find its next occurrence."""
        pat = text.get("sel.first", "sel.last")
        if pat:
            self.engine.setcookedpat(pat)
        return self.find_again(text)


def _search_dialog(parent):
    """Human-driven test (htest): open a window with a searchable Text."""
    root = Tk()
    root.title("Test SearchDialog")
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d" % (x, y + 150))
    text = Text(root)
    text.pack()
    text.insert("insert", "This is a sample string.\n" * 10)

    def show_find():
        text.tag_add(SEL, "1.0", END)
        s = _setup(text)
        s.open(text)
        text.tag_remove(SEL, "1.0", END)

    button = Button(root, text="Search", command=show_find)
    button.pack()


if __name__ == '__main__':
    from idlelib.idle_test.htest import run
    run(_search_dialog)
ProfessorX/Config
refs/heads/master
.PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/QTextDocumentWriter.py
1
# encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc

# NOTE: this is a machine-generated IDE stub for a C extension class.
# It exists only to give the IDE signatures/docs; the bodies are empty
# on purpose. Hand edits will be lost when the stubs are regenerated.

# imports
import PyQt4.QtCore as __PyQt4_QtCore


class QTextDocumentWriter():  # skipped bases: <class 'sip.simplewrapper'>
    """
    QTextDocumentWriter()
    QTextDocumentWriter(QIODevice, QByteArray)
    QTextDocumentWriter(str, QByteArray format=QByteArray())
    """
    def codec(self): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.codec() -> QTextCodec """
        pass

    def device(self): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.device() -> QIODevice """
        pass

    def fileName(self): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.fileName() -> str """
        return ""

    def format(self): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.format() -> QByteArray """
        pass

    def setCodec(self, QTextCodec): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.setCodec(QTextCodec) """
        pass

    def setDevice(self, QIODevice): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.setDevice(QIODevice) """
        pass

    def setFileName(self, p_str): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.setFileName(str) """
        pass

    def setFormat(self, QByteArray): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.setFormat(QByteArray) """
        pass

    def supportedDocumentFormats(self): # real signature unknown; restored from __doc__
        """ QTextDocumentWriter.supportedDocumentFormats() -> list-of-QByteArray """
        pass

    def write(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        """
        QTextDocumentWriter.write(QTextDocument) -> bool
        QTextDocumentWriter.write(QTextDocumentFragment) -> bool
        """
        return False

    def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""
bsmith3541/rapcollab
refs/heads/master
venv/lib/python2.7/site-packages/flask/testsuite/test_apps/flaskext/oldext_simple.py
629
# Test fixture for the Flask extension-import test suite: a minimal
# old-style extension module whose only job is to expose this constant
# so the tests can verify it was imported from the right place.
ext_id = 'oldext_simple'
DavidLP/home-assistant
refs/heads/dev
homeassistant/components/ihc/binary_sensor.py
7
"""Support for IHC binary sensors.""" from homeassistant.components.binary_sensor import BinarySensorDevice from homeassistant.const import CONF_TYPE from . import IHC_CONTROLLER, IHC_DATA, IHC_INFO from .const import CONF_INVERTING from .ihcdevice import IHCDevice def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the IHC binary sensor platform.""" if discovery_info is None: return devices = [] for name, device in discovery_info.items(): ihc_id = device['ihc_id'] product_cfg = device['product_cfg'] product = device['product'] # Find controller that corresponds with device id ctrl_id = device['ctrl_id'] ihc_key = IHC_DATA.format(ctrl_id) info = hass.data[ihc_key][IHC_INFO] ihc_controller = hass.data[ihc_key][IHC_CONTROLLER] sensor = IHCBinarySensor( ihc_controller, name, ihc_id, info, product_cfg.get(CONF_TYPE), product_cfg[CONF_INVERTING], product) devices.append(sensor) add_entities(devices) class IHCBinarySensor(IHCDevice, BinarySensorDevice): """IHC Binary Sensor. The associated IHC resource can be any in or output from a IHC product or function block, but it must be a boolean ON/OFF resources. """ def __init__(self, ihc_controller, name, ihc_id: int, info: bool, sensor_type: str, inverting: bool, product=None) -> None: """Initialize the IHC binary sensor.""" super().__init__(ihc_controller, name, ihc_id, info, product) self._state = None self._sensor_type = sensor_type self.inverting = inverting @property def device_class(self): """Return the class of this sensor.""" return self._sensor_type @property def is_on(self): """Return true if the binary sensor is on/open.""" return self._state def on_ihc_change(self, ihc_id, value): """IHC resource has changed.""" if self.inverting: self._state = not value else: self._state = value self.schedule_update_ha_state()
sdcooke/django
refs/heads/master
django/utils/archive.py
562
""" Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/ Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import os import shutil import tarfile import zipfile from django.utils import six class ArchiveException(Exception): """ Base exception class for all archive errors. """ class UnrecognizedArchiveFormat(ArchiveException): """ Error raised when passed file is not a recognized archive format. """ def extract(path, to_path=''): """ Unpack the tar or zip file at the specified path to the directory specified by to_path. """ with Archive(path) as archive: archive.extract(to_path) class Archive(object): """ The external API class that encapsulates an archive implementation. 
""" def __init__(self, file): self._archive = self._archive_cls(file)(file) @staticmethod def _archive_cls(file): cls = None if isinstance(file, six.string_types): filename = file else: try: filename = file.name except AttributeError: raise UnrecognizedArchiveFormat( "File object not a recognized archive format.") base, tail_ext = os.path.splitext(filename.lower()) cls = extension_map.get(tail_ext) if not cls: base, ext = os.path.splitext(base) cls = extension_map.get(ext) if not cls: raise UnrecognizedArchiveFormat( "Path not a recognized archive format: %s" % filename) return cls def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def extract(self, to_path=''): self._archive.extract(to_path) def list(self): self._archive.list() def close(self): self._archive.close() class BaseArchive(object): """ Base Archive class. Implementations should inherit this class. """ def split_leading_dir(self, path): path = str(path) path = path.lstrip('/').lstrip('\\') if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or '\\' not in path): return path.split('/', 1) elif '\\' in path: return path.split('\\', 1) else: return path, '' def has_leading_dir(self, paths): """ Returns true if all the paths have the same leading path name (i.e., everything is in one subdirectory in an archive) """ common_prefix = None for path in paths: prefix, rest = self.split_leading_dir(path) if not prefix: return False elif common_prefix is None: common_prefix = prefix elif prefix != common_prefix: return False return True def extract(self): raise NotImplementedError('subclasses of BaseArchive must provide an extract() method') def list(self): raise NotImplementedError('subclasses of BaseArchive must provide a list() method') class TarArchive(BaseArchive): def __init__(self, file): self._archive = tarfile.open(file) def list(self, *args, **kwargs): self._archive.list(*args, **kwargs) def extract(self, to_path): # note: python<=2.5 
doesn't seem to know about pax headers, filter them members = [member for member in self._archive.getmembers() if member.name != 'pax_global_header'] leading = self.has_leading_dir(x.name for x in members) for member in members: name = member.name if leading: name = self.split_leading_dir(name)[1] filename = os.path.join(to_path, name) if member.isdir(): if filename and not os.path.exists(filename): os.makedirs(filename) else: try: extracted = self._archive.extractfile(member) except (KeyError, AttributeError) as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) print("In the tar file %s the member %s is invalid: %s" % (name, member.name, exc)) else: dirname = os.path.dirname(filename) if dirname and not os.path.exists(dirname): os.makedirs(dirname) with open(filename, 'wb') as outfile: shutil.copyfileobj(extracted, outfile) finally: if extracted: extracted.close() def close(self): self._archive.close() class ZipArchive(BaseArchive): def __init__(self, file): self._archive = zipfile.ZipFile(file) def list(self, *args, **kwargs): self._archive.printdir(*args, **kwargs) def extract(self, to_path): namelist = self._archive.namelist() leading = self.has_leading_dir(namelist) for name in namelist: data = self._archive.read(name) if leading: name = self.split_leading_dir(name)[1] filename = os.path.join(to_path, name) dirname = os.path.dirname(filename) if dirname and not os.path.exists(dirname): os.makedirs(dirname) if filename.endswith(('/', '\\')): # A directory if not os.path.exists(filename): os.makedirs(filename) else: with open(filename, 'wb') as outfile: outfile.write(data) def close(self): self._archive.close() extension_map = { '.tar': TarArchive, '.tar.bz2': TarArchive, '.tar.gz': TarArchive, '.tgz': TarArchive, '.tz2': TarArchive, '.zip': ZipArchive, }
VitalPet/c2c-rd-addons
refs/heads/8.0
c2c_sequence_fy/account_move.py
4
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#    Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging


class account_bank_statement(osv.osv):
    _inherit = "account.bank.statement"
    _logger = logging.getLogger(__name__)

    # We have to copy the upstream method because we need to pass period_id
    # and journal_id to next_by_id; the rest is identical to the original.
    def button_confirm_bank(self, cr, uid, ids, context=None):
        """Confirm the bank statements in `ids`.

        Draws the statement number from the journal's sequence (or the
        generic 'account.bank.statement' sequence) with fiscalyear_id,
        period_id and journal_id placed in the sequence context so that
        per-fiscal-year sequences resolve correctly, then creates the
        journal items for every statement line.
        """
        obj_seq = self.pool.get('ir.sequence')
        if context is None:
            context = {}
        for st in self.browse(cr, uid, ids, context=context):
            j_type = st.journal_id.type
            company_currency_id = st.journal_id.company_id.currency_id.id
            if not self.check_status_condition(cr, uid, st.state, journal_type=j_type):
                continue

            self.balance_check(cr, uid, st.id, journal_type=j_type, context=context)
            if (not st.journal_id.default_credit_account_id) \
                    or (not st.journal_id.default_debit_account_id):
                raise osv.except_osv(_('Configuration Error !'),
                        _('Please verify that an account is defined in the journal.'))

            if not st.name == '/':
                # The statement was already numbered manually; keep it.
                st_number = st.name
            else:
                # This context is the whole point of the override: the
                # sequence implementation may interpolate these ids.
                c = {'fiscalyear_id': st.period_id.fiscalyear_id.id,
                     'period_id': st.period_id.id,
                     'journal_id': st.journal_id.id}
                if st.journal_id.sequence_id:
                    st_number = obj_seq.next_by_id(cr, uid, st.journal_id.sequence_id.id, context=c)
                else:
                    st_number = obj_seq.next_by_code(cr, uid, 'account.bank.statement', context=c)

            for line in st.move_line_ids:
                # FIX: replaced the Python-2-only `<>` operator with `!=`.
                if line.state != 'valid':
                    raise osv.except_osv(_('Error !'),
                            _('The account entries lines are not in valid state.'))
            for st_line in st.line_ids:
                if st_line.analytic_account_id:
                    if not st.journal_id.analytic_journal_id:
                        raise osv.except_osv(_('No Analytic Journal !'),
                                _("You have to assign an analytic journal on the '%s' journal!") % (st.journal_id.name,))
                if not st_line.amount:
                    continue
                st_line_number = self.get_next_st_line_number(cr, uid, st_number, st_line, context)
                self.create_move_from_st_line(cr, uid, st_line.id, company_currency_id, st_line_number, context)

            self.write(cr, uid, [st.id], {
                'name': st_number,
                'balance_end_real': st.balance_end
            }, context=context)
            self.log(cr, uid, st.id, _('Statement %s is confirmed, journal items are created.') % (st_number,))
        return self.write(cr, uid, ids, {'state': 'confirm'}, context=context)

account_bank_statement()


class account_move(osv.osv):
    _inherit = "account.move"
    _logger = logging.getLogger(__name__)

    # We have to copy the upstream method because we need to pass period_id
    # and journal_id to next_by_id; the rest is identical to the original.
    def post(self, cr, uid, ids, context=None):
        """Validate and post the moves in `ids`.

        Moves still named '/' get their name from the journal sequence,
        with fiscalyear_id/period_id/journal_id in the sequence context
        (the reason for this override), or from the invoice's internal
        number when posting an invoice.
        """
        if context is None:
            context = {}
        invoice = context.get('invoice', False)
        valid_moves = self.validate(cr, uid, ids, context)

        if not valid_moves:
            raise osv.except_osv(_('Integrity Error !'),
                    _('You can not validate a non-balanced entry !\nMake sure you have configured payment terms properly !\nThe latest payment term line should be of the type "Balance" !'))
        obj_sequence = self.pool.get('ir.sequence')
        for move in self.browse(cr, uid, valid_moves, context=context):
            if move.name == '/':
                new_name = False
                journal = move.journal_id

                if invoice and invoice.internal_number:
                    new_name = invoice.internal_number
                else:
                    if journal.sequence_id:
                        c = {'fiscalyear_id': move.period_id.fiscalyear_id.id,
                             'period_id': move.period_id.id,
                             'journal_id': move.journal_id.id}
                        new_name = obj_sequence.next_by_id(cr, uid, journal.sequence_id.id, c)
                    else:
                        raise osv.except_osv(_('Error'),
                                _('No sequence defined on the journal !'))

                if new_name:
                    self.write(cr, uid, [move.id], {'name': new_name})

        cr.execute('UPDATE account_move '
                   'SET state=%s '
                   'WHERE id IN %s', ('posted', tuple(valid_moves),))
        return True

    # 20121010 Fgf NOT USED ANY MORE
    def post_incompatible(self, cr, uid, ids, context=None):
        """Legacy variant of post() that auto-creates journal sequences
        (optionally one per fiscal year) before delegating to super().

        Kept for reference; marked unused above.
        NOTE(review): if the journal has no sequence_id and create_sequence
        is not 'create'/'create_fy', main_seq_id is referenced unbound
        below; likewise invoice_obj may be None when period_id is missing
        from the context — confirm before reviving this code path.
        """
        self._logger.debug('post move context `%s`', context)
        if not context:
            context = {}
        journal_id = context.get('journal_id')
        period_id = []
        if 'period_id' in context:
            period_id = [context.get('period_id')]
        self._logger.debug('post move period_id `%s`', period_id)
        invoice_obj = context.get('invoice')
        if invoice_obj and not journal_id:
            journal_id = invoice_obj.journal_id.id
        self._logger.debug('post move journal `%s`', journal_id)
        jour_obj = self.pool.get('account.journal')
        seq_obj = self.pool.get('ir.sequence')
        if journal_id:
            for jour in jour_obj.browse(cr, uid, [journal_id], context=context):
                self._logger.debug('post jour `%s` `%s`', jour, jour.sequence_id)
                if jour.sequence_id:
                    main_seq_id = jour.sequence_id.id
                elif jour.create_sequence in ['create', 'create_fy']:
                    # Default prefix: initials of the (translated) journal name.
                    prefix = jour.prefix_pattern or "".join(w[0] for w in _(jour.name).split(' '))
                    values = {
                        'name': jour.name,
                        'prefix': prefix,
                        'padding': 3,
                        'implementation': 'no_gap',
                    }
                    main_seq_id = seq_obj.create(cr, uid, values)
                    # FIX: was `jou_obj.write(...)` — a NameError, the
                    # journal pool variable is `jour_obj`.
                    jour_obj.write(cr, uid, [journal_id], {'sequence_id': main_seq_id})
                if jour.create_sequence == 'create_fy':
                    fy_seq_obj = self.pool.get('account.sequence.fiscalyear')
                    period_obj = self.pool.get('account.period')
                    if not period_id:
                        self._logger.debug('per_id A')
                        period_id = invoice_obj.period_id.id
                    self._logger.debug('per_id B `%s`', period_id)
                    if not period_id:
                        self._logger.debug('per_id C `%s`', period_id)
                        period_id = period_obj.find(cr, uid, invoice_obj.date_invoice, context)
                    self._logger.debug('per_id D `%s`', period_id)
                    if not isinstance(period_id, list):
                        period_id = [period_id]
                    for period in period_obj.browse(cr, uid, period_id):
                        self._logger.debug('fy_id `%s`', period)
                        fy_id = period.fiscalyear_id.id
                        fy_code = period.fiscalyear_id.code
                        self._logger.debug('fy_id a `%s`', fy_id)
                        fy_seq = fy_seq_obj.search(cr, uid, [('fiscalyear_id', '=', fy_id), ('sequence_main_id', '=', main_seq_id)])
                        self._logger.debug('fy_seq_id `%s`', fy_seq)
                        if not fy_seq:
                            prefix = jour.prefix_pattern or "".join(w[0] for w in _(jour.name).split(' ')) + '-%(fy)s-'
                            values = {
                                'name': jour.name + ' ' + fy_code,
                                'prefix': prefix,
                                'padding': 3,
                                'implementation': 'no_gap',
                            }
                            fy_seq_id = seq_obj.create(cr, uid, values)
                            fy_rel = {
                                'sequence_id': fy_seq_id,
                                'sequence_main_id': main_seq_id,
                                'fiscalyear_id': fy_id,
                            }
                            self._logger.debug('fy_rel `%s``%s`', fy_rel, prefix)
                            fy_seq_obj.create(cr, uid, fy_rel)
        #return True
        return super(account_move, self).post(cr, uid, ids, context)

account_move()
mayankcu/Django-social
refs/heads/master
venv/Lib/site-packages/psycopg2/tests/__init__.py
9
#!/usr/bin/env python

# psycopg2 test suite
#
# Copyright (C) 2007-2011 Federico Di Gregorio  <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
# License for more details.

# NOTE: this module uses Python 2 syntax (`except Exception, e`, print
# statements) and is not importable under Python 3.

import sys
from testconfig import dsn
from testutils import unittest

# One import per test module; each module exposes its own test_suite().
import test_async
import test_bugX000
import test_bug_gc
import test_cancel
import test_connection
import test_copy
import test_cursor
import test_dates
import test_extras_dictcursor
import test_green
import test_lobject
import test_module
import test_notify
import test_psycopg2_dbapi20
import test_quote
import test_transaction
import test_types_basic
import test_types_extras


def test_suite():
    """Aggregate every sub-suite into one unittest.TestSuite.

    Exits the process (status 1) if the configured test database
    cannot be reached, since every suite needs the connection.
    """
    # If connection to test db fails, bail out early.
    import psycopg2
    try:
        cnn = psycopg2.connect(dsn)
    except Exception, e:
        print "Failed connection to test db:", e.__class__.__name__, e
        print "Please set env vars 'PSYCOPG2_TESTDB*' to valid values."
        sys.exit(1)
    else:
        cnn.close()

    suite = unittest.TestSuite()
    suite.addTest(test_async.test_suite())
    suite.addTest(test_bugX000.test_suite())
    suite.addTest(test_bug_gc.test_suite())
    suite.addTest(test_cancel.test_suite())
    suite.addTest(test_connection.test_suite())
    suite.addTest(test_copy.test_suite())
    suite.addTest(test_cursor.test_suite())
    suite.addTest(test_dates.test_suite())
    suite.addTest(test_extras_dictcursor.test_suite())
    suite.addTest(test_green.test_suite())
    suite.addTest(test_lobject.test_suite())
    suite.addTest(test_module.test_suite())
    suite.addTest(test_notify.test_suite())
    suite.addTest(test_psycopg2_dbapi20.test_suite())
    suite.addTest(test_quote.test_suite())
    suite.addTest(test_transaction.test_suite())
    suite.addTest(test_types_basic.test_suite())
    suite.addTest(test_types_extras.test_suite())
    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
adaur/SickRage
refs/heads/master
lib/dogpile/cache/compat.py
60
import sys

# Interpreter / platform feature flags used to pick the right shims below.
py2k = sys.version_info < (3, 0)
py3k = sys.version_info >= (3, 0)
py32 = sys.version_info >= (3, 2)
py27 = sys.version_info >= (2, 7)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')

try:
    import threading
except ImportError:
    # Python built without thread support falls back to the no-op module.
    import dummy_threading as threading  # noqa


if py3k:  # pragma: no cover
    # Python 3 names.
    string_types = str,
    text_type = str
    string_type = str

    if py32:
        # callable() was removed in 3.0 and reinstated in 3.2.
        callable = callable
    else:
        def callable(fn):
            return hasattr(fn, '__call__')

    def u(s):
        # Already text on Python 3.
        return s

    def ue(s):
        return s

    import configparser
    import io
    import _thread as thread
else:
    # Python 2 names (basestring/unicode only exist here).
    string_types = basestring,
    text_type = unicode
    string_type = str

    def u(s):
        return unicode(s, "utf-8")

    def ue(s):
        return unicode(s, "unicode_escape")

    import ConfigParser as configparser  # noqa
    import StringIO as io  # noqa

    callable = callable  # noqa
    import thread  # noqa


if py3k or jython:
    import pickle
else:
    # cPickle is the C-accelerated pickle on CPython 2.
    import cPickle as pickle  # noqa


def timedelta_total_seconds(td):
    """Return td's duration in (float) seconds.

    Uses timedelta.total_seconds() where available (2.7+), otherwise
    computes it from the component fields.
    """
    if py27:
        return td.total_seconds()
    else:
        return (td.microseconds + (
            td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
consciousnesss/learn_theano
refs/heads/master
learn_theano/deeplearning_tutorials/test_5_rbm.py
1
#!/usr/bin/env python
# Restricted Boltzmann Machine on MNIST: training via (persistent)
# contrastive divergence, plus sampling from the trained model.
# NOTE: written for Python 2 (xrange, print statement, int division).
import theano
import theano.tensor as T
import numpy as np
from learn_theano.utils.display_filters import tile_raster_images
from learn_theano.utils.download_all_datasets import get_dataset
from theano.tensor.shared_randomstreams import RandomStreams
import pickle
import time
import cv2


def one_zero_loss(prediction_labels, labels):
    # Fraction of misclassified examples.
    return T.mean(T.neq(prediction_labels, labels))


def negative_log_likelihood_loss(prediction_probailities, labels):
    # Mean negative log-probability of the correct class.
    return -T.mean(T.log(prediction_probailities)[T.arange(labels.shape[0]), labels])


def mean_cross_entropy(prediction_probailities, labels):
    # Mean binary cross-entropy, summed over units, averaged over examples.
    return -T.mean(T.sum(labels*T.log(prediction_probailities) + (1-labels)*T.log(1-prediction_probailities), axis=1))


def load_dataset(dataset):
    # Wrap a (data, labels) pair into theano shared variables; labels are
    # stored as floatX (GPU-friendly) and re-cast to int32 symbolically.
    set_x = theano.shared(np.asarray(dataset[0], dtype=theano.config.floatX), borrow=True)
    set_y = theano.shared(np.asarray(dataset[1], dtype=theano.config.floatX), borrow=True)
    return set_x, T.cast(set_y, 'int32')


class RBM(object):
    # Restricted Boltzmann Machine: weights W plus hidden/visible biases.
    def __init__(self, w_init, b_hidden_init, b_visible_init):
        self.W = theano.shared(
            np.asarray(w_init, dtype=theano.config.floatX), name='W', borrow=True)
        self.b_hidden = theano.shared(
            np.asarray(b_hidden_init, dtype=theano.config.floatX), name='b_hidden', borrow=True
        )
        self.b_visible = theano.shared(
            np.asarray(b_visible_init, dtype=theano.config.floatX), name='b_visible', borrow=True
        )

    @staticmethod
    def create_with_random_weights(n_visible, n_hidden, rng):
        """Alternate constructor: uniform weight init, zero biases."""
        w_init = rng.uniform(
            low=-4*np.sqrt(6./(n_visible + n_hidden)),
            high=4*np.sqrt(6./(n_visible + n_hidden)),
            size=(n_visible, n_hidden))
        return RBM(w_init,
                   np.zeros((n_hidden,), dtype=theano.config.floatX),
                   np.zeros((n_visible,), dtype=theano.config.floatX))

    def sample_hidden_given_visible(self, visible, theano_rng):
        # Returns (binary sample, sigmoid activation, pre-sigmoid activation).
        hidden_linear_activations = T.dot(visible, self.W)+self.b_hidden
        hidden_activation = T.nnet.sigmoid(hidden_linear_activations)
        hidden_sample = theano_rng.binomial(
            size=hidden_activation.shape, n=1, p=hidden_activation,
            dtype=theano.config.floatX)
        return hidden_sample, hidden_activation, hidden_linear_activations

    def sample_visible_give_hidden(self, hidden, theano_rng):
        # Symmetric to sample_hidden_given_visible, using W transposed.
        linear_visible_activations = T.dot(hidden, self.W.T)+self.b_visible
        visible_activation = T.nnet.sigmoid(linear_visible_activations)
        visible_sample = theano_rng.binomial(
            size=visible_activation.shape, n=1, p=visible_activation,
            dtype=theano.config.floatX)
        return visible_sample, visible_activation, linear_visible_activations

    def gibbs_update_hidden_visible_hidden(self, hidden, theano_rng):
        # One Gibbs step starting from a hidden sample (used in training).
        visible_sample, visible_activation, linear_visible_activation = self.sample_visible_give_hidden(hidden, theano_rng)
        hidden_sample, hidden_activation, hidden_linear_activation = self.sample_hidden_given_visible(visible_sample, theano_rng)
        return [visible_sample, visible_activation, linear_visible_activation,
                hidden_sample, hidden_activation, hidden_linear_activation]

    def gibbs_update_visible_hidden_visible(self, visible, theano_rng):
        # One Gibbs step starting from a visible sample (used when sampling).
        hidden_sample, hidden_activation, hidden_linear_activation = self.sample_hidden_given_visible(visible, theano_rng)
        visible_sample, visible_activation, linear_visible_activation = self.sample_visible_give_hidden(hidden_sample, theano_rng)
        return [hidden_sample, hidden_activation, hidden_linear_activation,
                visible_sample, visible_activation, linear_visible_activation]

    def free_energy(self, visible_sample):
        # F(v) = -mean over batch of (b_v.v + sum_j log(1 + exp(W.v + b_h)_j))
        visible_bias_term = T.dot(visible_sample, self.b_visible)
        wx_b = T.dot(visible_sample, self.W) + self.b_hidden
        hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
        return T.mean(-hidden_term - visible_bias_term)

    def get_cost_updates(self, input, learning_rate, number_of_gibbs_steps, theano_rng, persistent_state):
        """Build the CD/PCD training cost and parameter updates.

        persistent_state None => plain CD (chain restarts from the data);
        otherwise PCD (chain continues from the shared persistent state).
        Returns (monitoring_cost, updates) for theano.function.
        """
        if persistent_state is None:
            chain_start, _, _ = self.sample_hidden_given_visible(input, theano_rng)
        else:
            chain_start = persistent_state

        (visible_samples, visible_activations, linear_visible_activations,
         hidden_samples, hidden_activations, hidden_linear_activations), updates = theano.scan(
            fn=lambda x: self.gibbs_update_hidden_visible_hidden(x, theano_rng),
            outputs_info=[None, None, None, chain_start, None, None],
            n_steps=number_of_gibbs_steps
        )
        visible_chain_end = visible_samples[-1]
        # CD gradient: difference of free energies, with the chain end
        # treated as a constant w.r.t. the parameters.
        cost = self.free_energy(input) - self.free_energy(visible_chain_end)
        parameters = [self.W, self.b_hidden, self.b_visible]
        for p in parameters:
            gradient = T.grad(cost, p, consider_constant=[visible_chain_end])
            updates[p] = p - T.cast(learning_rate, theano.config.floatX)*gradient
        if persistent_state is None:
            # here we use linear_visible_activations because theano can not make log(scan(sigm(..))) stable,
            # but it can make log(sigm(..)) stable.
            monitoring_cost = mean_cross_entropy(T.nnet.sigmoid(linear_visible_activations[-1]), input)
        else:
            updates[persistent_state] = hidden_samples[-1]
            monitoring_cost = self.get_pseudo_likelihood_cost(input, updates)
        return monitoring_cost, updates

    def get_pseudo_likelihood_cost(self, input, updates):
        """Stochastic approximation to the pseudo-likelihood"""
        # index of bit i in expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')

        # binarize the input image by rounding to nearest integer
        xi = T.round(input)

        # calculate free energy for the given bit configuration
        fe_xi = self.free_energy(xi)

        # flip bit x_i of matrix xi and preserve all other bits x_{\i}
        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
        # the result to xi_flip, instead of working in place on xi.
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

        # calculate free energy with bit flipped
        fe_xi_flip = self.free_energy(xi_flip)

        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        n_visible = self.b_visible.get_value(borrow=True).shape[0]
        cost = T.mean(n_visible * T.log(T.nnet.sigmoid(fe_xi_flip - fe_xi)))

        # increment bit_i_idx % number as part of updates
        updates[bit_i_idx] = (bit_i_idx + 1) % n_visible
        return cost

    def get_parameter_values(self):
        # Numpy copies of (W, b_hidden, b_visible) for pickling.
        return (self.W.get_value(borrow=True),
                self.b_hidden.get_value(borrow=True),
                self.b_visible.get_value(borrow=True))


def train_rbm():
    """Train an RBM on MNIST with PCD and return its parameter values."""
    batch_size = 20
    learning_rate = 0.1
    n_training_epochs = 15
    n_visible=28*28
    n_hidden=500
    n_contrastive_divergence_steps=15
    persistent_contrastive_divergence=True

    rng = np.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    # NOTE(review): here get_dataset('mnist') is unpacked as the dataset
    # tuple, while sample_from_trained_rbm() treats its return value as a
    # pickle path — one of the two usages looks stale; confirm.
    train_set, valid_set, test_set = get_dataset('mnist')
    train_set_x, _ = load_dataset(train_set)
    test_set_x, _ = load_dataset(test_set)
    # Python 2 integer division: number of whole minibatches.
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]/batch_size

    x = T.matrix('x')

    if persistent_contrastive_divergence:
        persistent_chain = theano.shared(
            np.zeros((batch_size, n_hidden), dtype=theano.config.floatX),
            borrow=True)
    else:
        persistent_chain = None

    rbm = RBM.create_with_random_weights(n_visible, n_hidden, rng)

    # persistent contrastive divergence with n_contrastive_divergence_steps steps
    cost, updates = rbm.get_cost_updates(
        x, learning_rate,
        number_of_gibbs_steps=n_contrastive_divergence_steps,
        theano_rng=theano_rng,
        persistent_state=persistent_chain)

    minibatch_index = T.iscalar('minibatch_index')
    # Shadows this function's name locally; compiled trainer for one batch.
    train_rbm = theano.function(
        inputs=[minibatch_index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[minibatch_index*batch_size:(minibatch_index+1)*batch_size],
        }
    )

    start_time = time.time()
    for epoch in range(n_training_epochs):
        epoch_start_time = time.time()
        costs = []
        for batch_index in range(n_train_batches):
            costs.append(train_rbm(batch_index))
        print('Training epoch %d of %d, cost is %f, took %.1fs' % (
            epoch, n_training_epochs, np.mean(costs), time.time() - epoch_start_time))

        # Visualize the learned filters after every epoch (blocks on a key).
        filters = tile_raster_images(X=rbm.W.get_value(borrow=True).T, img_shape=(28, 28))
        cv2.imshow('filter', filters)
        cv2.waitKey(-1)
    cv2.destroyWindow('filter')
    print ('Training took %d minutes' % ((time.time()-start_time)/60.))
    return rbm.get_parameter_values()


def sample_from_trained_rbm(w_init, b_hidden_init, b_visible_init):
    """Draw and display Gibbs samples from an RBM with the given parameters."""
    # for sampling from trained model
    n_chains = 20
    n_samples = 10

    # NOTE(review): get_dataset('mnist') is used as a pickle path here but
    # as the dataset tuple in train_rbm() — confirm which is correct.
    mnist_pkl = get_dataset('mnist')
    with open(mnist_pkl) as f:
        train_set, valid_set, test_set = pickle.load(f)
    test_set_x, _ = load_dataset(test_set)

    # sample from trained RBM
    number_of_test_samples = test_set_x.get_value(borrow=True).shape[0]

    # pick random test examples, with which to initialize the persistent chain
    rng = np.random.RandomState(123)
    test_idx = rng.randint(number_of_test_samples - n_chains)
    persistent_vis_chain = theano.shared(
        np.asarray(
            test_set_x.get_value(borrow=True)[test_idx:test_idx + n_chains],
            dtype=theano.config.floatX
        )
    )
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    plot_every = 1000
    rbm = RBM(w_init, b_hidden_init, b_visible_init)

    (hidden_samples, hidden_activations, hidden_linear_activations,
     visible_samples, visible_activations, linear_visible_activations), sampling_updates = theano.scan(
        fn=lambda x: rbm.gibbs_update_visible_hidden_visible(x, theano_rng),
        outputs_info=[None, None, None, persistent_vis_chain, None, None],
        n_steps=plot_every
    )
    sampling_updates[persistent_vis_chain] = visible_samples[-1]

    sample_fn = theano.function(
        [],
        [
            visible_activations[-1],
            visible_samples[-1]
        ],
        updates=sampling_updates
    )

    # One row of 28x28 tiles (plus 1px spacing) per displayed sample step.
    image_data = np.zeros(
        (29 * n_samples + 1, 29 * n_chains - 1),
        dtype='uint8'
    )
    for idx in xrange(n_samples):
        # generate `plot_every` intermediate samples that we discard,
        # because successive samples in the chain are too correlated
        vis_activations, vis_sample = sample_fn()
        print ' ... plotting sample ', idx
        image_data[29 * idx:29 * idx + 28, :] = tile_raster_images(
            X=vis_activations,
            img_shape=(28, 28),
            tile_shape=(1, n_chains),
            tile_spacing=(1, 1)
        )

    image_data = cv2.resize(image_data, dsize=None, fx=2., fy=2.)
    cv2.imshow('sampling', image_data)
    cv2.waitKey(-1)


if __name__ == "__main__":
    train = True
    if train:
        w_init, b_hidden_init, b_visible_init = train_rbm()
        # Text-mode pickle file: Python-2 behavior.
        with open('trained_rbm.pkl', 'w') as f:
            pickle.dump((w_init, b_hidden_init, b_visible_init), f, protocol=pickle.HIGHEST_PROTOCOL)

    with open('trained_rbm.pkl') as f:
        w_init, b_hidden_init, b_visible_init = pickle.load(f)
    sample_from_trained_rbm(w_init, b_hidden_init, b_visible_init)

'''
Some profiling results:

PCD-15, cpu: Training epoch 0 of 1, cost is -32.710491, took 173.8s
PCD-1, cpu: Training epoch 0 of 1, cost is -7.949985, took 10.5s
CD-15 cpu: Training epoch 0 of 1, cost is 220.957275, took 174.5s
CD-1, cpu: Training epoch 0 of 1, cost is 94.514465, took 9.8s

CD-1 for 17 iterations comparing to PCD-15 1 iteration(the same time) leads to better receptive fields but,
really bad samples. PCD-1 for 17 iterations is comparable with PCD-15 for 1 iteration.
CD-15 for 1 iterations generates way worse samples than PCD-15

Result 1: Persistent CD seems always better taking the same time.
Result 2: Cost function values seems have different scales for different number of iterations

PCD-15 for 5 iterations, 15 min, cost -14.856175
PCD-1 for 15 minutes, 15 min, -6.587661, took 10.6s

Result 3: If the same amount of time is spent, number of gibbs samples doesn't seem to change performance much

PCD-15 for 15 iterations - 80 minutes - nice results
PCD-15 for 50 iterations - worse results. Networks seems to dream only about 8s and zeros.
Result 4: It seems it is possible to overtrain the network.
'''
axelkennedal/dissen
refs/heads/master
dissenEnv/lib/python3.5/site-packages/django/contrib/gis/db/models/sql/__init__.py
476
"""Re-export the GeoDjango SQL conversion field classes as this package's
public API."""
from django.contrib.gis.db.models.sql.conversion import (
    AreaField, DistanceField, GeomField, GMLField,
)

__all__ = [
    'AreaField', 'DistanceField', 'GeomField', 'GMLField'
]
jettisonjoe/openhtf
refs/heads/master
openhtf/plugs/device_wrapping.py
1
# Copyright 2017 Google Inc. All Rights Reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""OpenHTF base plugs for thinly wrapping existing device abstractions.

Sometimes you already have a Python interface to a device or instrument; you
just need to put that interface in plug form to get it into your test phase.
Device-wrapping plugs are your friends in such times.
"""

import functools

import openhtf
import six


def short_repr(obj, max_len=40):
  """Returns a short, term-friendly string representation of the object.

  Args:
    obj: An object for which to return a string representation.
    max_len: Maximum length of the returned string. Longer reprs will be
        turned into a brief descriptive string giving the type and length of
        obj.
  """
  obj_repr = repr(obj)
  if len(obj_repr) <= max_len:
    return obj_repr
  return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr))


class DeviceWrappingPlug(openhtf.plugs.BasePlug):
  """A base plug for wrapping existing device abstractions.

  Attribute access is delegated to the _device attribute, which is normally
  set by passing some device instance to the constructor of this base class.
  Subclasses can use the @conf.inject_positional_args decorator on their
  constructors to get any configuration needed to construct the inner device
  instance.

  Example:
    class BleSnifferPlug(DeviceWrappingPlug):
      ...
      @conf.inject_positional_args
      def __init__(self, ble_sniffer_host, ble_sniffer_port):
        super(BleSnifferPlug, self).__init__(
            ble_sniffer.BleSniffer(ble_sniffer_host, ble_sniffer_port))
      ...

  Because not all third-party device and instrument control libraries can be
  counted on to do sufficient logging, some debug logging is provided here in
  the plug layer to show which attributes were called and with what arguments.

  Args:
    device: The device to wrap; must not be None.

  Raises:
    openhtf.plugs.InvalidPlugError: The _device attribute has the value None
        when attribute access is attempted.
  """

  verbose = True  # overwrite on subclass to disable logging_wrapper.

  def __init__(self, device):
    super(DeviceWrappingPlug, self).__init__()
    self._device = device
    # The wrapped device's own tearDown is never called by the plug
    # framework; warn loudly so the omission is not silent.
    if hasattr(self._device, 'tearDown') and self.uses_base_tear_down():
      self.logger.warning('Wrapped device %s implements a tearDown method, '
                          'but using the no-op BasePlug tearDown method.',
                          type(self._device))

  def __getattr__(self, attr):
    """Delegates attribute access to the wrapped device.

    Callable attributes are wrapped so each invocation is debug-logged with
    its (shortened) arguments, unless `verbose` is False.
    """
    if self._device is None:
      raise openhtf.plugs.InvalidPlugError(
          'DeviceWrappingPlug instances must set the _device attribute.')

    if attr == 'as_base_types':
      # Never delegate serialization to the device; use the plug's own.
      return super(DeviceWrappingPlug, self).__getattr__(attr)

    attribute = getattr(self._device, attr)

    if not self.verbose or not callable(attribute):
      return attribute

    # Attribute callable; return a wrapper that logs calls with args and
    # kwargs.
    # BUG FIX: functools.wraps was previously invoked as a bare statement,
    # discarding the returned decorator, so logging_wrapper never received
    # the wrapped callable's __name__/__doc__. It must be applied as a
    # decorator.
    @functools.wraps(attribute, assigned=('__name__', '__doc__'))
    def logging_wrapper(*args, **kwargs):
      """Wraps a callable with a logging statement."""
      args_strings = tuple(short_repr(arg) for arg in args)
      kwargs_strings = tuple(
          ('%s=%s' % (key, short_repr(val))
           for key, val in six.iteritems(kwargs))
      )
      log_line = '%s calling "%s" on device.' % (type(self).__name__, attr)
      if args_strings or kwargs_strings:
        log_line += ' Args: \n  %s' % (', '.join(
            args_strings + kwargs_strings))
      self.logger.debug(log_line)
      return attribute(*args, **kwargs)

    return logging_wrapper
GrandmasterK/XScheduler
refs/heads/master
venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
762
import hashlib
import os

from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile

from ..cache import BaseCache
from ..controller import CacheController


def _secure_open_write(filename, fmode):
    """Atomically create `filename` with mode `fmode` and return a binary
    write handle, refusing to follow symlinks or reuse an existing file.
    """
    # We only want to write to this file, so open it in write only mode
    flags = os.O_WRONLY

    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
    # will open *new* files.
    # We specify this because we want to ensure that the mode we pass is the
    # mode of the file.
    flags |= os.O_CREAT | os.O_EXCL

    # Do not follow symlinks to prevent someone from making a symlink that
    # we follow and insecurely open a cache file.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW

    # On Windows we'll mark this file as binary
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY

    # Before we open our file, we want to delete any existing file that is
    # there
    try:
        os.remove(filename)
    except (IOError, OSError):
        # The file must not exist already, so we can just skip ahead to
        # opening
        pass

    # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
    # race condition happens between the os.remove and this line, that an
    # error will be raised. Because we utilize a lockfile this should only
    # happen if someone is attempting to attack us.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")
    except:
        # An error occurred wrapping our FD in a file object; close the raw
        # descriptor before re-raising so it does not leak.
        os.close(fd)
        raise


class FileCache(BaseCache):
    """CacheControl cache backend that stores each entry in its own file
    under `directory`, sharded by the first characters of the hashed key.
    """

    def __init__(self, directory, forever=False, filemode=0o0600,
                 dirmode=0o0700, use_dir_lock=None, lock_class=None):
        """
        Args:
            directory: Root directory for the cache files.
            forever: If True, delete() becomes a no-op.
            filemode/dirmode: Permissions for created files/directories.
            use_dir_lock: Convenience flag selecting MkdirLockFile; mutually
                exclusive with lock_class.
            lock_class: Explicit lockfile class (defaults to LockFile).
        """
        if use_dir_lock is not None and lock_class is not None:
            raise ValueError("Cannot use use_dir_lock and lock_class together")

        if use_dir_lock:
            lock_class = MkdirLockFile

        if lock_class is None:
            lock_class = LockFile

        self.directory = directory
        self.forever = forever
        self.filemode = filemode
        self.dirmode = dirmode
        self.lock_class = lock_class

    @staticmethod
    def encode(x):
        # Stable, filesystem-safe key digest.
        return hashlib.sha224(x.encode()).hexdigest()

    def _fn(self, name):
        # NOTE: This method should not change as some may depend on it.
        # See: https://github.com/ionrock/cachecontrol/issues/63
        hashed = self.encode(name)
        parts = list(hashed[:5]) + [hashed]
        return os.path.join(self.directory, *parts)

    def get(self, key):
        """Return the cached bytes for `key`, or None if absent."""
        name = self._fn(key)
        if not os.path.exists(name):
            return None

        with open(name, 'rb') as fh:
            return fh.read()

    def set(self, key, value):
        """Store bytes `value` under `key`, creating directories as needed."""
        name = self._fn(key)

        # Make sure the directory exists
        try:
            os.makedirs(os.path.dirname(name), self.dirmode)
        except (IOError, OSError):
            pass

        with self.lock_class(name) as lock:
            # Write our actual file
            with _secure_open_write(lock.path, self.filemode) as fh:
                fh.write(value)

    def delete(self, key):
        """Remove the entry for `key`; a no-op when forever=True."""
        name = self._fn(key)
        if not self.forever:
            # FIX: deleting a key that was never cached (or was removed by a
            # concurrent process) previously raised OSError from os.remove;
            # tolerate a missing file, mirroring set()'s error tolerance.
            try:
                os.remove(name)
            except (IOError, OSError):
                pass


def url_to_file_path(url, filecache):
    """Return the file cache path based on the URL.

    This does not ensure the file exists!
    """
    key = CacheController.cache_url(url)
    return filecache._fn(key)
modulexcite/catapult
refs/heads/master
tracing/PRESUBMIT.py
4
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit checks for the tracing project."""

import sys


def RunChecks(input_api, output_api):  # pylint: disable=unused-argument
  """Runs the gypi consistency check and wraps failures as presubmit errors.

  Assumes CheckChange() has already put the presubmit directory on sys.path
  so that tracing_build is importable.
  """
  results = []

  from tracing_build import check_gypi
  err = check_gypi.GypiCheck()
  if err:
    results += [err]

  # List comprehension instead of map() so the return type is a list under
  # both Python 2 and Python 3.
  return [output_api.PresubmitError(message) for message in results]


def CheckChange(input_api, output_api):
  """Temporarily extends sys.path with the presubmit directory, runs the
  checks, and restores sys.path afterwards.
  """
  original_sys_path = sys.path
  try:
    # BUG FIX: this previously used `sys.path += [...]`, which extends the
    # very list object that original_sys_path aliases, so the finally-clause
    # "restore" left the extra entry in place. Rebinding sys.path to a new
    # list leaves the saved list untouched.
    sys.path = sys.path + [input_api.PresubmitLocalPath()]
    return RunChecks(input_api, output_api)
  finally:
    sys.path = original_sys_path


def CheckChangeOnUpload(input_api, output_api):
  return CheckChange(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  return CheckChange(input_api, output_api)
40123103/2015cd_40123103
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/encodings/aliases.py
726
""" Encoding Aliases Support This module is used by the encodings package search function to map encodings names to module names. Note that the search function normalizes the encoding names before doing the lookup, so the mapping will have to map normalized encoding names to module names. Contents: The following aliases dictionary contains mappings of all IANA character set names for which the Python core library provides codecs. In addition to these, a few Python specific codec aliases have also been added. """ aliases = { # Please keep this list sorted alphabetically by value ! # ascii codec '646' : 'ascii', 'ansi_x3.4_1968' : 'ascii', 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name 'ansi_x3.4_1986' : 'ascii', 'cp367' : 'ascii', 'csascii' : 'ascii', 'ibm367' : 'ascii', 'iso646_us' : 'ascii', 'iso_646.irv_1991' : 'ascii', 'iso_ir_6' : 'ascii', 'us' : 'ascii', 'us_ascii' : 'ascii', # base64_codec codec 'base64' : 'base64_codec', 'base_64' : 'base64_codec', # big5 codec 'big5_tw' : 'big5', 'csbig5' : 'big5', # big5hkscs codec 'big5_hkscs' : 'big5hkscs', 'hkscs' : 'big5hkscs', # bz2_codec codec 'bz2' : 'bz2_codec', # cp037 codec '037' : 'cp037', 'csibm037' : 'cp037', 'ebcdic_cp_ca' : 'cp037', 'ebcdic_cp_nl' : 'cp037', 'ebcdic_cp_us' : 'cp037', 'ebcdic_cp_wt' : 'cp037', 'ibm037' : 'cp037', 'ibm039' : 'cp037', # cp1026 codec '1026' : 'cp1026', 'csibm1026' : 'cp1026', 'ibm1026' : 'cp1026', # cp1125 codec '1125' : 'cp1125', 'ibm1125' : 'cp1125', 'cp866u' : 'cp1125', 'ruscii' : 'cp1125', # cp1140 codec '1140' : 'cp1140', 'ibm1140' : 'cp1140', # cp1250 codec '1250' : 'cp1250', 'windows_1250' : 'cp1250', # cp1251 codec '1251' : 'cp1251', 'windows_1251' : 'cp1251', # cp1252 codec '1252' : 'cp1252', 'windows_1252' : 'cp1252', # cp1253 codec '1253' : 'cp1253', 'windows_1253' : 'cp1253', # cp1254 codec '1254' : 'cp1254', 'windows_1254' : 'cp1254', # cp1255 codec '1255' : 'cp1255', 'windows_1255' : 'cp1255', # cp1256 codec '1256' : 'cp1256', 
'windows_1256' : 'cp1256', # cp1257 codec '1257' : 'cp1257', 'windows_1257' : 'cp1257', # cp1258 codec '1258' : 'cp1258', 'windows_1258' : 'cp1258', # cp273 codec '273' : 'cp273', 'ibm273' : 'cp273', 'csibm273' : 'cp273', # cp424 codec '424' : 'cp424', 'csibm424' : 'cp424', 'ebcdic_cp_he' : 'cp424', 'ibm424' : 'cp424', # cp437 codec '437' : 'cp437', 'cspc8codepage437' : 'cp437', 'ibm437' : 'cp437', # cp500 codec '500' : 'cp500', 'csibm500' : 'cp500', 'ebcdic_cp_be' : 'cp500', 'ebcdic_cp_ch' : 'cp500', 'ibm500' : 'cp500', # cp775 codec '775' : 'cp775', 'cspc775baltic' : 'cp775', 'ibm775' : 'cp775', # cp850 codec '850' : 'cp850', 'cspc850multilingual' : 'cp850', 'ibm850' : 'cp850', # cp852 codec '852' : 'cp852', 'cspcp852' : 'cp852', 'ibm852' : 'cp852', # cp855 codec '855' : 'cp855', 'csibm855' : 'cp855', 'ibm855' : 'cp855', # cp857 codec '857' : 'cp857', 'csibm857' : 'cp857', 'ibm857' : 'cp857', # cp858 codec '858' : 'cp858', 'csibm858' : 'cp858', 'ibm858' : 'cp858', # cp860 codec '860' : 'cp860', 'csibm860' : 'cp860', 'ibm860' : 'cp860', # cp861 codec '861' : 'cp861', 'cp_is' : 'cp861', 'csibm861' : 'cp861', 'ibm861' : 'cp861', # cp862 codec '862' : 'cp862', 'cspc862latinhebrew' : 'cp862', 'ibm862' : 'cp862', # cp863 codec '863' : 'cp863', 'csibm863' : 'cp863', 'ibm863' : 'cp863', # cp864 codec '864' : 'cp864', 'csibm864' : 'cp864', 'ibm864' : 'cp864', # cp865 codec '865' : 'cp865', 'csibm865' : 'cp865', 'ibm865' : 'cp865', # cp866 codec '866' : 'cp866', 'csibm866' : 'cp866', 'ibm866' : 'cp866', # cp869 codec '869' : 'cp869', 'cp_gr' : 'cp869', 'csibm869' : 'cp869', 'ibm869' : 'cp869', # cp932 codec '932' : 'cp932', 'ms932' : 'cp932', 'mskanji' : 'cp932', 'ms_kanji' : 'cp932', # cp949 codec '949' : 'cp949', 'ms949' : 'cp949', 'uhc' : 'cp949', # cp950 codec '950' : 'cp950', 'ms950' : 'cp950', # euc_jis_2004 codec 'jisx0213' : 'euc_jis_2004', 'eucjis2004' : 'euc_jis_2004', 'euc_jis2004' : 'euc_jis_2004', # euc_jisx0213 codec 'eucjisx0213' : 'euc_jisx0213', # euc_jp 
codec 'eucjp' : 'euc_jp', 'ujis' : 'euc_jp', 'u_jis' : 'euc_jp', # euc_kr codec 'euckr' : 'euc_kr', 'korean' : 'euc_kr', 'ksc5601' : 'euc_kr', 'ks_c_5601' : 'euc_kr', 'ks_c_5601_1987' : 'euc_kr', 'ksx1001' : 'euc_kr', 'ks_x_1001' : 'euc_kr', # gb18030 codec 'gb18030_2000' : 'gb18030', # gb2312 codec 'chinese' : 'gb2312', 'csiso58gb231280' : 'gb2312', 'euc_cn' : 'gb2312', 'euccn' : 'gb2312', 'eucgb2312_cn' : 'gb2312', 'gb2312_1980' : 'gb2312', 'gb2312_80' : 'gb2312', 'iso_ir_58' : 'gb2312', # gbk codec '936' : 'gbk', 'cp936' : 'gbk', 'ms936' : 'gbk', # hex_codec codec 'hex' : 'hex_codec', # hp_roman8 codec 'roman8' : 'hp_roman8', 'r8' : 'hp_roman8', 'csHPRoman8' : 'hp_roman8', # hz codec 'hzgb' : 'hz', 'hz_gb' : 'hz', 'hz_gb_2312' : 'hz', # iso2022_jp codec 'csiso2022jp' : 'iso2022_jp', 'iso2022jp' : 'iso2022_jp', 'iso_2022_jp' : 'iso2022_jp', # iso2022_jp_1 codec 'iso2022jp_1' : 'iso2022_jp_1', 'iso_2022_jp_1' : 'iso2022_jp_1', # iso2022_jp_2 codec 'iso2022jp_2' : 'iso2022_jp_2', 'iso_2022_jp_2' : 'iso2022_jp_2', # iso2022_jp_2004 codec 'iso_2022_jp_2004' : 'iso2022_jp_2004', 'iso2022jp_2004' : 'iso2022_jp_2004', # iso2022_jp_3 codec 'iso2022jp_3' : 'iso2022_jp_3', 'iso_2022_jp_3' : 'iso2022_jp_3', # iso2022_jp_ext codec 'iso2022jp_ext' : 'iso2022_jp_ext', 'iso_2022_jp_ext' : 'iso2022_jp_ext', # iso2022_kr codec 'csiso2022kr' : 'iso2022_kr', 'iso2022kr' : 'iso2022_kr', 'iso_2022_kr' : 'iso2022_kr', # iso8859_10 codec 'csisolatin6' : 'iso8859_10', 'iso_8859_10' : 'iso8859_10', 'iso_8859_10_1992' : 'iso8859_10', 'iso_ir_157' : 'iso8859_10', 'l6' : 'iso8859_10', 'latin6' : 'iso8859_10', # iso8859_11 codec 'thai' : 'iso8859_11', 'iso_8859_11' : 'iso8859_11', 'iso_8859_11_2001' : 'iso8859_11', # iso8859_13 codec 'iso_8859_13' : 'iso8859_13', 'l7' : 'iso8859_13', 'latin7' : 'iso8859_13', # iso8859_14 codec 'iso_8859_14' : 'iso8859_14', 'iso_8859_14_1998' : 'iso8859_14', 'iso_celtic' : 'iso8859_14', 'iso_ir_199' : 'iso8859_14', 'l8' : 'iso8859_14', 'latin8' : 
'iso8859_14', # iso8859_15 codec 'iso_8859_15' : 'iso8859_15', 'l9' : 'iso8859_15', 'latin9' : 'iso8859_15', # iso8859_16 codec 'iso_8859_16' : 'iso8859_16', 'iso_8859_16_2001' : 'iso8859_16', 'iso_ir_226' : 'iso8859_16', 'l10' : 'iso8859_16', 'latin10' : 'iso8859_16', # iso8859_2 codec 'csisolatin2' : 'iso8859_2', 'iso_8859_2' : 'iso8859_2', 'iso_8859_2_1987' : 'iso8859_2', 'iso_ir_101' : 'iso8859_2', 'l2' : 'iso8859_2', 'latin2' : 'iso8859_2', # iso8859_3 codec 'csisolatin3' : 'iso8859_3', 'iso_8859_3' : 'iso8859_3', 'iso_8859_3_1988' : 'iso8859_3', 'iso_ir_109' : 'iso8859_3', 'l3' : 'iso8859_3', 'latin3' : 'iso8859_3', # iso8859_4 codec 'csisolatin4' : 'iso8859_4', 'iso_8859_4' : 'iso8859_4', 'iso_8859_4_1988' : 'iso8859_4', 'iso_ir_110' : 'iso8859_4', 'l4' : 'iso8859_4', 'latin4' : 'iso8859_4', # iso8859_5 codec 'csisolatincyrillic' : 'iso8859_5', 'cyrillic' : 'iso8859_5', 'iso_8859_5' : 'iso8859_5', 'iso_8859_5_1988' : 'iso8859_5', 'iso_ir_144' : 'iso8859_5', # iso8859_6 codec 'arabic' : 'iso8859_6', 'asmo_708' : 'iso8859_6', 'csisolatinarabic' : 'iso8859_6', 'ecma_114' : 'iso8859_6', 'iso_8859_6' : 'iso8859_6', 'iso_8859_6_1987' : 'iso8859_6', 'iso_ir_127' : 'iso8859_6', # iso8859_7 codec 'csisolatingreek' : 'iso8859_7', 'ecma_118' : 'iso8859_7', 'elot_928' : 'iso8859_7', 'greek' : 'iso8859_7', 'greek8' : 'iso8859_7', 'iso_8859_7' : 'iso8859_7', 'iso_8859_7_1987' : 'iso8859_7', 'iso_ir_126' : 'iso8859_7', # iso8859_8 codec 'csisolatinhebrew' : 'iso8859_8', 'hebrew' : 'iso8859_8', 'iso_8859_8' : 'iso8859_8', 'iso_8859_8_1988' : 'iso8859_8', 'iso_ir_138' : 'iso8859_8', # iso8859_9 codec 'csisolatin5' : 'iso8859_9', 'iso_8859_9' : 'iso8859_9', 'iso_8859_9_1989' : 'iso8859_9', 'iso_ir_148' : 'iso8859_9', 'l5' : 'iso8859_9', 'latin5' : 'iso8859_9', # johab codec 'cp1361' : 'johab', 'ms1361' : 'johab', # koi8_r codec 'cskoi8r' : 'koi8_r', # latin_1 codec # # Note that the latin_1 codec is implemented internally in C and a # lot faster than the charmap codec 
iso8859_1 which uses the same # encoding. This is why we discourage the use of the iso8859_1 # codec and alias it to latin_1 instead. # '8859' : 'latin_1', 'cp819' : 'latin_1', 'csisolatin1' : 'latin_1', 'ibm819' : 'latin_1', 'iso8859' : 'latin_1', 'iso8859_1' : 'latin_1', 'iso_8859_1' : 'latin_1', 'iso_8859_1_1987' : 'latin_1', 'iso_ir_100' : 'latin_1', 'l1' : 'latin_1', 'latin' : 'latin_1', 'latin1' : 'latin_1', # mac_cyrillic codec 'maccyrillic' : 'mac_cyrillic', # mac_greek codec 'macgreek' : 'mac_greek', # mac_iceland codec 'maciceland' : 'mac_iceland', # mac_latin2 codec 'maccentraleurope' : 'mac_latin2', 'maclatin2' : 'mac_latin2', # mac_roman codec 'macintosh' : 'mac_roman', 'macroman' : 'mac_roman', # mac_turkish codec 'macturkish' : 'mac_turkish', # mbcs codec 'dbcs' : 'mbcs', # ptcp154 codec 'csptcp154' : 'ptcp154', 'pt154' : 'ptcp154', 'cp154' : 'ptcp154', 'cyrillic_asian' : 'ptcp154', # quopri_codec codec 'quopri' : 'quopri_codec', 'quoted_printable' : 'quopri_codec', 'quotedprintable' : 'quopri_codec', # rot_13 codec 'rot13' : 'rot_13', # shift_jis codec 'csshiftjis' : 'shift_jis', 'shiftjis' : 'shift_jis', 'sjis' : 'shift_jis', 's_jis' : 'shift_jis', # shift_jis_2004 codec 'shiftjis2004' : 'shift_jis_2004', 'sjis_2004' : 'shift_jis_2004', 's_jis_2004' : 'shift_jis_2004', # shift_jisx0213 codec 'shiftjisx0213' : 'shift_jisx0213', 'sjisx0213' : 'shift_jisx0213', 's_jisx0213' : 'shift_jisx0213', # tactis codec 'tis260' : 'tactis', # tis_620 codec 'tis620' : 'tis_620', 'tis_620_0' : 'tis_620', 'tis_620_2529_0' : 'tis_620', 'tis_620_2529_1' : 'tis_620', 'iso_ir_166' : 'tis_620', # utf_16 codec 'u16' : 'utf_16', 'utf16' : 'utf_16', # utf_16_be codec 'unicodebigunmarked' : 'utf_16_be', 'utf_16be' : 'utf_16_be', # utf_16_le codec 'unicodelittleunmarked' : 'utf_16_le', 'utf_16le' : 'utf_16_le', # utf_32 codec 'u32' : 'utf_32', 'utf32' : 'utf_32', # utf_32_be codec 'utf_32be' : 'utf_32_be', # utf_32_le codec 'utf_32le' : 'utf_32_le', # utf_7 codec 'u7' : 
'utf_7', 'utf7' : 'utf_7', 'unicode_1_1_utf_7' : 'utf_7', # utf_8 codec 'u8' : 'utf_8', 'utf' : 'utf_8', 'utf8' : 'utf_8', 'utf8_ucs2' : 'utf_8', 'utf8_ucs4' : 'utf_8', # uu_codec codec 'uu' : 'uu_codec', # zlib_codec codec 'zip' : 'zlib_codec', 'zlib' : 'zlib_codec', # temporary mac CJK aliases, will be replaced by proper codecs in 3.1 'x_mac_japanese' : 'shift_jis', 'x_mac_korean' : 'euc_kr', 'x_mac_simp_chinese' : 'gb2312', 'x_mac_trad_chinese' : 'big5', }
andreaso/ansible
refs/heads/devel
lib/ansible/modules/cloud/amazon/lambda_facts.py
78
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: lambda_facts short_description: Gathers AWS Lambda function details as Ansible facts description: - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and M(lambda_event) to manage lambda event source mappings. version_added: "2.2" options: query: description: - Specifies the resource type for which to gather facts. Leave blank to retrieve all facts. required: true choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ] default: "all" function_name: description: - The name of the lambda function for which facts are requested. required: false default: null aliases: [ "function", "name"] event_source_arn: description: - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. 
default: null required: false author: Pierre Jodouin (@pjodouin) requirements: - boto3 extends_documentation_fragment: - aws ''' EXAMPLES = ''' --- # Simple example of listing all info for a function - name: List all for a specific function lambda_facts: query: all function_name: myFunction register: my_function_details # List all versions of a function - name: List function versions lambda_facts: query: versions function_name: myFunction register: my_function_versions # List all lambda function versions - name: List all function lambda_facts: query: all max_items: 20 - name: show Lambda facts debug: var: lambda_facts ''' RETURN = ''' --- lambda_facts: description: lambda facts returned: success type: dict lambda_facts.function: description: lambda function list returned: success type: dict lambda_facts.function.TheName: description: lambda function information, including event, mapping, and version information returned: success type: dict ''' import datetime import sys try: import boto3 from botocore.exceptions import ClientError HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False def fix_return(node): """ fixup returned dictionary :param node: :return: """ if isinstance(node, datetime.datetime): node_value = str(node) elif isinstance(node, list): node_value = [fix_return(item) for item in node] elif isinstance(node, dict): node_value = dict([(item, fix_return(node[item])) for item in node.keys()]) else: node_value = node return node_value def alias_details(client, module): """ Returns list of aliases for a specified function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ lambda_facts = dict() function_name = module.params.get('function_name') if function_name: params = dict() if module.params.get('max_items'): params['MaxItems'] = module.params.get('max_items') if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') try: lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_facts.update(aliases=[]) else: module.fail_json(msg='Unable to get {0} aliases, error: {1}'.format(function_name, e)) else: module.fail_json(msg='Parameter function_name required for query=aliases.') return {function_name: camel_dict_to_snake_dict(lambda_facts)} def all_details(client, module): """ Returns all lambda related facts. :param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ if module.params.get('max_items') or module.params.get('next_marker'): module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.') lambda_facts = dict() function_name = module.params.get('function_name') if function_name: lambda_facts[function_name] = {} lambda_facts[function_name].update(config_details(client, module)[function_name]) lambda_facts[function_name].update(alias_details(client, module)[function_name]) lambda_facts[function_name].update(policy_details(client, module)[function_name]) lambda_facts[function_name].update(version_details(client, module)[function_name]) lambda_facts[function_name].update(mapping_details(client, module)[function_name]) else: lambda_facts.update(config_details(client, module)) return lambda_facts def config_details(client, module): """ Returns configuration details for one or all lambda functions. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ lambda_facts = dict() function_name = module.params.get('function_name') if function_name: try: lambda_facts.update(client.get_function_configuration(FunctionName=function_name)) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_facts.update(function={}) else: module.fail_json(msg='Unable to get {0} configuration, error: {1}'.format(function_name, e)) else: params = dict() if module.params.get('max_items'): params['MaxItems'] = module.params.get('max_items') if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') try: lambda_facts.update(function_list=client.list_functions(**params)['Functions']) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_facts.update(function_list=[]) else: module.fail_json(msg='Unable to get function list, error: {0}'.format(e)) functions = dict() for func in lambda_facts.pop('function_list', []): functions[func['FunctionName']] = camel_dict_to_snake_dict(func) return functions return {function_name: camel_dict_to_snake_dict(lambda_facts)} def mapping_details(client, module): """ Returns all lambda event source mappings. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ lambda_facts = dict() params = dict() function_name = module.params.get('function_name') if function_name: params['FunctionName'] = module.params.get('function_name') if module.params.get('event_source_arn'): params['EventSourceArn'] = module.params.get('event_source_arn') if module.params.get('max_items'): params['MaxItems'] = module.params.get('max_items') if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') try: lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_facts.update(mappings=[]) else: module.fail_json(msg='Unable to get source event mappings, error: {0}'.format(e)) if function_name: return {function_name: camel_dict_to_snake_dict(lambda_facts)} return camel_dict_to_snake_dict(lambda_facts) def policy_details(client, module): """ Returns policy attached to a lambda function. 
:param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ if module.params.get('max_items') or module.params.get('next_marker'): module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.') lambda_facts = dict() function_name = module.params.get('function_name') if function_name: try: # get_policy returns a JSON string so must convert to dict before reassigning to its key lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_facts.update(policy={}) else: module.fail_json(msg='Unable to get {0} policy, error: {1}'.format(function_name, e)) else: module.fail_json(msg='Parameter function_name required for query=policy.') return {function_name: camel_dict_to_snake_dict(lambda_facts)} def version_details(client, module): """ Returns all lambda function versions. :param client: AWS API client reference (boto3) :param module: Ansible module reference :return dict: """ lambda_facts = dict() function_name = module.params.get('function_name') if function_name: params = dict() if module.params.get('max_items'): params['MaxItems'] = module.params.get('max_items') if module.params.get('next_marker'): params['Marker'] = module.params.get('next_marker') try: lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions']) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': lambda_facts.update(versions=[]) else: module.fail_json(msg='Unable to get {0} versions, error: {1}'.format(function_name, e)) else: module.fail_json(msg='Parameter function_name required for query=versions.') return {function_name: camel_dict_to_snake_dict(lambda_facts)} def main(): """ Main entry point. 
:return dict: ansible facts """ argument_spec = ec2_argument_spec() argument_spec.update( dict( function_name=dict(required=False, default=None, aliases=['function', 'name']), query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'), event_source_arn=dict(required=False, default=None) ) ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[], required_together=[] ) # validate dependencies if not HAS_BOTO3: module.fail_json(msg='boto3 is required for this module.') # validate function_name if present function_name = module.params['function_name'] if function_name: if not re.search("^[\w\-:]+$", function_name): module.fail_json( msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) ) if len(function_name) > 64: module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) try: region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) aws_connect_kwargs.update(dict(region=region, endpoint=endpoint, conn_type='client', resource='lambda' )) client = boto3_conn(module, **aws_connect_kwargs) except ClientError as e: module.fail_json(msg="Can't authorize connection - {0}".format(e)) this_module = sys.modules[__name__] invocations = dict( aliases='alias_details', all='all_details', config='config_details', mappings='mapping_details', policy='policy_details', versions='version_details', ) this_module_function = getattr(this_module, invocations[module.params['query']]) all_facts = fix_return(this_module_function(client, module)) results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False) if module.check_mode: results['msg'] = 'Check mode set but ignored for fact gathering only.' 
module.exit_json(**results) # ansible import module(s) kept at ~eof as recommended from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * if __name__ == '__main__': main()
zulip/zulip
refs/heads/master
zerver/lib/notification_data.py
1
from dataclasses import dataclass
from typing import Collection, Optional, Set


@dataclass
class UserMessageNotificationsData:
    """Per-recipient notification-relevant state for a single message event."""

    user_id: int
    flags: Collection[str]
    mentioned: bool
    online_push_enabled: bool
    stream_push_notify: bool
    stream_email_notify: bool
    wildcard_mention_notify: bool
    sender_is_muted: bool

    def __post_init__(self) -> None:
        # Sanity-check that the derived booleans agree with the raw flag list.
        if self.mentioned:
            assert "mentioned" in self.flags
        if self.wildcard_mention_notify:
            assert "wildcard_mentioned" in self.flags

    @classmethod
    def from_user_id_sets(
        cls,
        user_id: int,
        flags: Collection[str],
        online_push_user_ids: Set[int],
        stream_push_user_ids: Set[int],
        stream_email_user_ids: Set[int],
        wildcard_mention_user_ids: Set[int],
        muted_sender_user_ids: Set[int],
    ) -> "UserMessageNotificationsData":
        """Build the per-user record via membership tests against the id sets."""
        return cls(
            user_id=user_id,
            flags=flags,
            mentioned="mentioned" in flags,
            online_push_enabled=user_id in online_push_user_ids,
            stream_push_notify=user_id in stream_push_user_ids,
            stream_email_notify=user_id in stream_email_user_ids,
            wildcard_mention_notify=(
                user_id in wildcard_mention_user_ids and "wildcard_mentioned" in flags
            ),
            sender_is_muted=user_id in muted_sender_user_ids,
        )

    # TODO: The following functions should also look at the `enable_offline_push_notifications` and
    # `enable_offline_email_notifications` settings (for PMs and mentions), but currently they
    # don't.

    # For these functions, acting_user_id is the user who sent (or edited) the
    # message triggering the event for which we determine notifiability.

    def is_notifiable(self, private_message: bool, acting_user_id: int, idle: bool) -> bool:
        # Notifiable if either channel (push or email) would fire.
        push = self.is_push_notifiable(private_message, acting_user_id, idle)
        email = self.is_email_notifiable(private_message, acting_user_id, idle)
        return push or email

    def is_push_notifiable(self, private_message: bool, acting_user_id: int, idle: bool) -> bool:
        trigger = self.get_push_notification_trigger(private_message, acting_user_id, idle)
        return trigger is not None

    def get_push_notification_trigger(
        self, private_message: bool, acting_user_id: int, idle: bool
    ) -> Optional[str]:
        """Return the reason a mobile push should fire, or None for no push."""
        # No push when the recipient is active (unless they opted into
        # push-while-online), is the acting user themselves, or muted the sender.
        if not (idle or self.online_push_enabled):
            return None
        if acting_user_id == self.user_id or self.sender_is_muted:
            return None

        # First matching reason wins; order encodes priority.
        for should_fire, trigger in (
            (private_message, "private_message"),
            (self.mentioned, "mentioned"),
            (self.wildcard_mention_notify, "wildcard_mentioned"),
            (self.stream_push_notify, "stream_push_notify"),
        ):
            if should_fire:
                return trigger
        return None

    def is_email_notifiable(self, private_message: bool, acting_user_id: int, idle: bool) -> bool:
        trigger = self.get_email_notification_trigger(private_message, acting_user_id, idle)
        return trigger is not None

    def get_email_notification_trigger(
        self, private_message: bool, acting_user_id: int, idle: bool
    ) -> Optional[str]:
        """Return the reason a notification email should be sent, or None."""
        # Emails go only to idle users, never to the acting user, and never
        # when the recipient has muted the sender.
        if not idle or acting_user_id == self.user_id or self.sender_is_muted:
            return None

        # First matching reason wins; order encodes priority.
        for should_fire, trigger in (
            (private_message, "private_message"),
            (self.mentioned, "mentioned"),
            (self.wildcard_mention_notify, "wildcard_mentioned"),
            (self.stream_email_notify, "stream_email_notify"),
        ):
            if should_fire:
                return trigger
        return None
erdc-cm/air-water-vv
refs/heads/master
3d/Directional_Wave_Current_interaction/45DEG_R1/tank_batch.py
5
simFlagsList[0]['storeQuantities']= ["q:'phi_solid'","q:'velocity_solid'"] #simFlagsList[0]['storeQuantities']= ["q:velocity_solid"] start quit
catapult-project/catapult
refs/heads/master
third_party/html5lib-python/html5lib/tests/test_whitespace_filter.py
453
from __future__ import absolute_import, division, unicode_literals

import unittest

from html5lib.filters.whitespace import Filter
from html5lib.constants import spaceCharacters
# Collapse the set of space characters into one string for building test data.
spaceCharacters = "".join(spaceCharacters)

# Compatibility shim: alias assertEqual to assertEquals on unittest versions
# that only provide the latter.
try:
    unittest.TestCase.assertEqual
except AttributeError:
    unittest.TestCase.assertEqual = unittest.TestCase.assertEquals


class TestCase(unittest.TestCase):
    """Tests for the html5lib whitespace-collapsing token filter."""

    def runTest(self, input, expected):
        # Run the token stream through Filter and compare against `expected`,
        # attaching an input/expected/received dump to any failure.
        output = list(Filter(input))
        errorMsg = "\n".join(["\n\nInput:", str(input),
                              "\nExpected:", str(expected),
                              "\nReceived:", str(output)])
        self.assertEqual(output, expected, errorMsg)

    def runTestUnmodifiedOutput(self, input):
        # Assert that the filter passes this token stream through unchanged.
        self.runTest(input, input)

    def testPhrasingElements(self):
        # Whitespace around phrasing content is already minimal; nothing changes.
        self.runTestUnmodifiedOutput(
            [{"type": "Characters", "data": "This is a "},
             {"type": "StartTag", "name": "span", "data": []},
             {"type": "Characters", "data": "phrase"},
             {"type": "EndTag", "name": "span", "data": []},
             {"type": "SpaceCharacters", "data": " "},
             {"type": "Characters", "data": "with"},
             {"type": "SpaceCharacters", "data": " "},
             {"type": "StartTag", "name": "em", "data": []},
             {"type": "Characters", "data": "emphasised text"},
             {"type": "EndTag", "name": "em", "data": []},
             {"type": "Characters", "data": " and an "},
             {"type": "StartTag", "name": "img", "data": [["alt", "image"]]},
             {"type": "Characters", "data": "."}])

    def testLeadingWhitespace(self):
        # A run of assorted space characters collapses to a single space.
        self.runTest(
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "SpaceCharacters", "data": spaceCharacters},
             {"type": "Characters", "data": "foo"},
             {"type": "EndTag", "name": "p", "data": []}],
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "SpaceCharacters", "data": " "},
             {"type": "Characters", "data": "foo"},
             {"type": "EndTag", "name": "p", "data": []}])

    def testLeadingWhitespaceAsCharacters(self):
        # Whitespace embedded inside a Characters token also collapses.
        self.runTest(
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "Characters", "data": spaceCharacters + "foo"},
             {"type": "EndTag", "name": "p", "data": []}],
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "Characters", "data": " foo"},
             {"type": "EndTag", "name": "p", "data": []}])

    def testTrailingWhitespace(self):
        self.runTest(
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "Characters", "data": "foo"},
             {"type": "SpaceCharacters", "data": spaceCharacters},
             {"type": "EndTag", "name": "p", "data": []}],
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "Characters", "data": "foo"},
             {"type": "SpaceCharacters", "data": " "},
             {"type": "EndTag", "name": "p", "data": []}])

    def testTrailingWhitespaceAsCharacters(self):
        self.runTest(
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "Characters", "data": "foo" + spaceCharacters},
             {"type": "EndTag", "name": "p", "data": []}],
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "Characters", "data": "foo "},
             {"type": "EndTag", "name": "p", "data": []}])

    def testWhitespace(self):
        # Internal whitespace runs collapse to one space as well.
        self.runTest(
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "Characters", "data": "foo" + spaceCharacters + "bar"},
             {"type": "EndTag", "name": "p", "data": []}],
            [{"type": "StartTag", "name": "p", "data": []},
             {"type": "Characters", "data": "foo bar"},
             {"type": "EndTag", "name": "p", "data": []}])

    def testLeadingWhitespaceInPre(self):
        # Inside <pre>, whitespace is significant and must be preserved verbatim.
        self.runTestUnmodifiedOutput(
            [{"type": "StartTag", "name": "pre", "data": []},
             {"type": "SpaceCharacters", "data": spaceCharacters},
             {"type": "Characters", "data": "foo"},
             {"type": "EndTag", "name": "pre", "data": []}])

    def testLeadingWhitespaceAsCharactersInPre(self):
        self.runTestUnmodifiedOutput(
            [{"type": "StartTag", "name": "pre", "data": []},
             {"type": "Characters", "data": spaceCharacters + "foo"},
             {"type": "EndTag", "name": "pre", "data": []}])

    def testTrailingWhitespaceInPre(self):
        self.runTestUnmodifiedOutput(
            [{"type": "StartTag", "name": "pre", "data": []},
             {"type": "Characters", "data": "foo"},
             {"type": "SpaceCharacters", "data": spaceCharacters},
             {"type": "EndTag", "name": "pre", "data": []}])

    def testTrailingWhitespaceAsCharactersInPre(self):
        self.runTestUnmodifiedOutput(
            [{"type": "StartTag", "name": "pre", "data": []},
             {"type": "Characters", "data": "foo" + spaceCharacters},
             {"type": "EndTag", "name": "pre", "data": []}])

    def testWhitespaceInPre(self):
        self.runTestUnmodifiedOutput(
            [{"type": "StartTag", "name": "pre", "data": []},
             {"type": "Characters", "data": "foo" + spaceCharacters + "bar"},
             {"type": "EndTag", "name": "pre", "data": []}])


def buildTestSuite():
    # Collect every test defined in this module into a suite.
    return unittest.defaultTestLoader.loadTestsFromName(__name__)


def main():
    buildTestSuite()
    unittest.main()


if __name__ == "__main__":
    main()
jonathan-beard/edx-platform
refs/heads/master
lms/djangoapps/verify_student/migrations/0006_auto__add_skippedreverification__add_unique_skippedreverification_user.py
92
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    # Auto-generated South schema migration: introduces the
    # 'SkippedReverification' model and its (user, course_id) unique
    # constraint. The frozen ORM snapshot in `models` below is South
    # boilerplate and should not be edited by hand.

    def forwards(self, orm):
        """Apply the migration: create the SkippedReverification table."""
        # Adding model 'SkippedReverification'
        db.create_table('verify_student_skippedreverification', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
            ('checkpoint', self.gf('django.db.models.fields.related.ForeignKey')(related_name='skipped_checkpoint', to=orm['verify_student.VerificationCheckpoint'])),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('verify_student', ['SkippedReverification'])

        # Adding unique constraint on 'SkippedReverification', fields ['user', 'course_id']
        db.create_unique('verify_student_skippedreverification', ['user_id', 'course_id'])

    def backwards(self, orm):
        """Revert the migration: drop the unique constraint first, then the table."""
        # Removing unique constraint on 'SkippedReverification', fields ['user', 'course_id']
        db.delete_unique('verify_student_skippedreverification', ['user_id', 'course_id'])

        # Deleting model 'SkippedReverification'
        db.delete_table('verify_student_skippedreverification')

    # Frozen ORM state captured by South at generation time; used to build
    # the `orm` object passed to forwards()/backwards().
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'reverification.midcoursereverificationwindow': {
            'Meta': {'object_name': 'MidcourseReverificationWindow'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        },
        'verify_student.incoursereverificationconfiguration': {
            'Meta': {'object_name': 'InCourseReverificationConfiguration'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'verify_student.skippedreverification': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'SkippedReverification'},
            'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skipped_checkpoint'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'verify_student.softwaresecurephotoverification': {
            'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
            'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'aaa24e01-3318-4707-a3ed-74d0f1c1ed15'", 'max_length': '255', 'db_index': 'True'}),
            'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'window': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reverification.MidcourseReverificationWindow']", 'null': 'True'})
        },
        'verify_student.verificationcheckpoint': {
            'Meta': {'unique_together': "(('course_id', 'checkpoint_name'),)", 'object_name': 'VerificationCheckpoint'},
            'checkpoint_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'photo_verification': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['verify_student.SoftwareSecurePhotoVerification']", 'symmetrical': 'False'})
        },
        'verify_student.verificationstatus': {
            'Meta': {'object_name': 'VerificationStatus'},
            'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'checkpoint_status'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
            'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['verify_student']
tiredtyrant/CloudBot
refs/heads/gonzobot
tests/plugin_tests/test_optout.py
1
from plugins.core.optout import get_conn_optouts, optout_cache


def test_conn_case():
    """Connection-name lookups must be case-insensitive and share one cache entry.

    Seeding the cache under either casing should hand back the very same
    list object for both casings, while a different connection name gets
    a different entry.
    """
    for seed_name in ('TestConnection', 'testconnection'):
        conn_list = optout_cache[seed_name]
        assert get_conn_optouts('TestConnection') is conn_list
        assert get_conn_optouts('testconnection') is conn_list
        assert get_conn_optouts('testconnection1') is not conn_list
zer0yu/ZEROScan
refs/heads/master
thirdparty/requests/packages/chardet/sbcsgroupprober.py
206
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
                                Latin5CyrillicModel, MacCyrillicModel,
                                Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber


class SBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every known single-byte charset model."""

    # Plain single-byte models, one prober each. Kept in the original
    # probing order, since the group prober consults them in sequence.
    _SINGLE_BYTE_MODELS = (
        Win1251CyrillicModel,
        Koi8rModel,
        Latin5CyrillicModel,
        MacCyrillicModel,
        Ibm866Model,
        Ibm855Model,
        Latin7GreekModel,
        Win1253GreekModel,
        Latin5BulgarianModel,
        Win1251BulgarianModel,
        Latin2HungarianModel,
        Win1250HungarianModel,
        TIS620ThaiModel,
    )

    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [SingleByteCharSetProber(model)
                          for model in self._SINGLE_BYTE_MODELS]

        # Hebrew needs three cooperating probers: a coordinating
        # HebrewProber plus logical/visual variants of the same model.
        hebrew_prober = HebrewProber()
        logical_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                 False, hebrew_prober)
        visual_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew, visual_hebrew)
        self._mProbers.extend([hebrew_prober, logical_hebrew, visual_hebrew])

        self.reset()
yury-s/v8-inspector
refs/heads/master
Source/chrome/tools/telemetry/telemetry/value/histogram_util.py
12
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This is a helper module to get and manipulate histogram data.

The histogram data is the same data as is visible from "chrome://histograms".
More information can be found at: chromium/src/base/metrics/histogram.h
"""

import collections
import json
import logging

BROWSER_HISTOGRAM = 'browser_histogram'
RENDERER_HISTOGRAM = 'renderer_histogram'


def GetHistogramBucketsFromJson(histogram_json):
  """Parse a histogram JSON string and return its list of buckets."""
  return GetHistogramBucketsFromRawValue(json.loads(histogram_json))


def GetHistogramBucketsFromRawValue(raw_value):
  """Return the bucket list of an already-parsed histogram dict.

  Each bucket is a dict with 'low', 'high' and 'count' keys. Returns an
  empty list if the histogram has no buckets.
  """
  buckets = raw_value.get('buckets', [])
  if buckets:
    # If there are values greater than the maximum allowable for the histogram,
    # the highest bucket will have a 'low': maxvalue entry in the dict but no
    # 'high' entry. Code often assumes the 'high' value will always be present,
    # and uses it to get bucket mean. So default it to the same value as low.
    buckets[-1].setdefault('high', buckets[-1]['low'])
  return buckets


def CustomizeBrowserOptions(options):
  """Allows histogram collection."""
  options.AppendExtraBrowserArgs(['--enable-stats-collection-bindings'])


def SubtractHistogram(histogram_json, start_histogram_json):
  """Subtracts a previous histogram from a histogram.

  Both parameters and the returned result are json serializations.
  Raises if the two histograms come from different processes (mismatched
  'pid' fields).
  """
  start_histogram = json.loads(start_histogram_json)
  start_histogram_buckets = GetHistogramBucketsFromRawValue(start_histogram)
  # It's ok if the start histogram is empty (we had no data, maybe even no
  # histogram at all, at the start of the test).
  if not start_histogram_buckets:
    return histogram_json

  histogram = json.loads(histogram_json)
  if ('pid' in start_histogram and 'pid' in histogram
      and start_histogram['pid'] != histogram['pid']):
    raise Exception(
        'Trying to compare histograms from different processes (%d and %d)'
        % (start_histogram['pid'], histogram['pid']))

  start_histogram_bucket_counts = dict()
  for b in start_histogram_buckets:
    start_histogram_bucket_counts[b['low']] = b['count']

  new_buckets = []
  for b in GetHistogramBucketsFromRawValue(histogram):
    new_bucket = b
    low = b['low']
    if low in start_histogram_bucket_counts:
      new_bucket['count'] = b['count'] - start_histogram_bucket_counts[low]
      if new_bucket['count'] < 0:
        logging.error('Histogram subtraction error, starting histogram most '
                      'probably invalid.')
    # Drop buckets whose count went to zero; they carry no information.
    if new_bucket['count']:
      new_buckets.append(new_bucket)
  histogram['buckets'] = new_buckets
  histogram['count'] -= start_histogram['count']

  return json.dumps(histogram)


def AddHistograms(histogram_jsons):
  """Adds histograms together. Used for aggregating data.

  The parameter is a list of json serializations and the returned result
  is a json serialization too.

  Note that the histograms to be added together are typically from
  different processes.
  """
  buckets = collections.defaultdict(int)
  for histogram_json in histogram_jsons:
    for b in GetHistogramBucketsFromJson(histogram_json):
      # Accumulate counts keyed by bucket range, so identical buckets from
      # different processes merge into one.
      key = (b['low'], b['high'])
      buckets[key] += b['count']

  # NOTE: .items() (not the Python-2-only .iteritems()) keeps this working
  # on both Python 2 and 3.
  buckets = [{'low': key[0], 'high': key[1], 'count': value}
             for key, value in buckets.items()]
  buckets.sort(key=lambda h: h['low'])

  result_histogram = {}
  result_histogram['buckets'] = buckets
  return json.dumps(result_histogram)


def GetHistogram(histogram_type, histogram_name, tab):
  """Get a json serialization of a histogram, or None if it is empty."""
  assert histogram_type in [BROWSER_HISTOGRAM, RENDERER_HISTOGRAM]
  function = 'getHistogram'
  if histogram_type == BROWSER_HISTOGRAM:
    function = 'getBrowserHistogram'
  histogram_json = tab.EvaluateJavaScript(
      'statsCollectionController.%s("%s")' % (function, histogram_name))
  if histogram_json:
    return histogram_json
  return None


def GetHistogramCount(histogram_type, histogram_name, tab):
  """Get the count of events for the given histograms."""
  histogram_json = GetHistogram(histogram_type, histogram_name, tab)
  if histogram_json is None:
    # The histogram may not exist yet (no samples recorded). Report zero
    # instead of crashing in json.loads(None).
    return 0
  histogram = json.loads(histogram_json)
  if 'count' in histogram:
    return histogram['count']
  else:
    return 0


def GetHistogramSum(histogram_type, histogram_name, tab):
  """Get the sum of events for the given histograms."""
  histogram_json = GetHistogram(histogram_type, histogram_name, tab)
  if histogram_json is None:
    # Same guard as GetHistogramCount: no histogram means a sum of zero.
    return 0
  histogram = json.loads(histogram_json)
  if 'sum' in histogram:
    return histogram['sum']
  else:
    return 0
HackerTool/pyelftools
refs/heads/master
elftools/elf/elffile.py
6
#-------------------------------------------------------------------------------
# elftools: elf/elffile.py
#
# ELFFile - main class for accessing ELF files
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
from ..common.py3compat import BytesIO
from ..common.exceptions import ELFError
from ..common.utils import struct_parse, elf_assert
from ..construct import ConstructError
from .structs import ELFStructs
from .sections import (
        Section, StringTableSection, SymbolTableSection,
        SUNWSyminfoTableSection, NullSection)
from .dynamic import DynamicSection, DynamicSegment
from .relocation import RelocationSection, RelocationHandler
from .gnuversions import (
        GNUVerNeedSection, GNUVerDefSection,
        GNUVerSymSection)
from .segments import Segment, InterpSegment, NoteSegment
from ..dwarf.dwarfinfo import DWARFInfo, DebugSectionDescriptor, DwarfConfig


class ELFFile(object):
    """ Creation: the constructor accepts a stream (file-like object) with the
        contents of an ELF file.

        Accessible attributes:

            stream:
                The stream holding the data of the file - must be a binary
                stream (bytes, not string).

            elfclass:
                32 or 64 - specifies the word size of the target machine

            little_endian:
                boolean - specifies the target machine's endianness

            header:
                the complete ELF file header

            e_ident_raw:
                the raw e_ident field of the header
    """
    def __init__(self, stream):
        self.stream = stream
        # Identify class/endianness first; the full header parse below
        # depends on knowing them to pick the right structs.
        self._identify_file()
        self.structs = ELFStructs(
            little_endian=self.little_endian,
            elfclass=self.elfclass)
        self.header = self._parse_elf_header()

        # Re-read the first 16 bytes to expose the raw e_ident field.
        self.stream.seek(0)
        self.e_ident_raw = self.stream.read(16)

        self._file_stringtable_section = self._get_file_stringtable()
        # Lazily built by get_section_by_name on first use.
        self._section_name_map = None

    def num_sections(self):
        """ Number of sections in the file
        """
        return self['e_shnum']

    def get_section(self, n):
        """ Get the section at index #n from the file (Section object or a
            subclass)
        """
        section_header = self._get_section_header(n)
        return self._make_section(section_header)

    def get_section_by_name(self, name):
        """ Get a section from the file, by name. Return None if no such
            section exists.
        """
        # The first time this method is called, construct a name to number
        # mapping
        #
        if self._section_name_map is None:
            self._section_name_map = {}
            for i, sec in enumerate(self.iter_sections()):
                self._section_name_map[sec.name] = i
        secnum = self._section_name_map.get(name, None)
        return None if secnum is None else self.get_section(secnum)

    def iter_sections(self):
        """ Yield all the sections in the file
        """
        for i in range(self.num_sections()):
            yield self.get_section(i)

    def num_segments(self):
        """ Number of segments in the file
        """
        return self['e_phnum']

    def get_segment(self, n):
        """ Get the segment at index #n from the file (Segment object)
        """
        segment_header = self._get_segment_header(n)
        return self._make_segment(segment_header)

    def iter_segments(self):
        """ Yield all the segments in the file
        """
        for i in range(self.num_segments()):
            yield self.get_segment(i)

    def address_offsets(self, start, size=1):
        """ Yield a file offset for each ELF segment containing a memory region.

            A memory region is defined by the range [start...start+size). The
            offset of the region is yielded.
        """
        end = start + size
        for seg in self.iter_segments():
            # Only fully-contained regions count; p_filesz (not p_memsz) is
            # used because we need bytes actually present in the file.
            if (start >= seg['p_vaddr'] and
                end <= seg['p_vaddr'] + seg['p_filesz']):
                yield start - seg['p_vaddr'] + seg['p_offset']

    def has_dwarf_info(self):
        """ Check whether this file appears to have debugging information.
            We assume that if it has the debug_info section, it has all the
            other required sections as well.
        """
        return bool(self.get_section_by_name('.debug_info'))

    def get_dwarf_info(self, relocate_dwarf_sections=True):
        """ Return a DWARFInfo object representing the debugging information in
            this file.

            If relocate_dwarf_sections is True, relocations for DWARF sections
            are looked up and applied.
        """
        # Expect that has_dwarf_info was called, so at least .debug_info is
        # present.
        # Sections that aren't found will be passed as None to DWARFInfo.
        #
        debug_sections = {}
        for secname in ('.debug_info', '.debug_abbrev', '.debug_str',
                        '.debug_line', '.debug_frame', '.debug_loc',
                        '.debug_ranges'):
            section = self.get_section_by_name(secname)
            if section is None:
                debug_sections[secname] = None
            else:
                debug_sections[secname] = self._read_dwarf_section(
                    section,
                    relocate_dwarf_sections)

        return DWARFInfo(
                config=DwarfConfig(
                    little_endian=self.little_endian,
                    default_address_size=self.elfclass // 8,
                    machine_arch=self.get_machine_arch()),
                debug_info_sec=debug_sections['.debug_info'],
                debug_abbrev_sec=debug_sections['.debug_abbrev'],
                debug_frame_sec=debug_sections['.debug_frame'],
                # TODO(eliben): reading of eh_frame is not hooked up yet
                eh_frame_sec=None,
                debug_str_sec=debug_sections['.debug_str'],
                debug_loc_sec=debug_sections['.debug_loc'],
                debug_ranges_sec=debug_sections['.debug_ranges'],
                debug_line_sec=debug_sections['.debug_line'])

    def get_machine_arch(self):
        """ Return the machine architecture, as detected from the ELF header.
            Not all architectures are supported at the moment.
        """
        if self['e_machine'] == 'EM_X86_64':
            return 'x64'
        elif self['e_machine'] in ('EM_386', 'EM_486'):
            return 'x86'
        elif self['e_machine'] == 'EM_ARM':
            return 'ARM'
        elif self['e_machine'] == 'EM_AARCH64':
            return 'AArch64'
        elif self['e_machine'] == 'EM_MIPS':
            return 'MIPS'
        else:
            return '<unknown>'

    #-------------------------------- PRIVATE --------------------------------#

    def __getitem__(self, name):
        """ Implement dict-like access to header entries
        """
        return self.header[name]

    def _identify_file(self):
        """ Verify the ELF file and identify its class and endianness.
        """
        # Note: this code reads the stream directly, without using ELFStructs,
        # since we don't yet know its exact format. ELF was designed to be
        # read like this - its e_ident field is word-size and endian agnostic.
        #
        self.stream.seek(0)
        magic = self.stream.read(4)
        elf_assert(magic == b'\x7fELF', 'Magic number does not match')

        ei_class = self.stream.read(1)
        if ei_class == b'\x01':
            self.elfclass = 32
        elif ei_class == b'\x02':
            self.elfclass = 64
        else:
            raise ELFError('Invalid EI_CLASS %s' % repr(ei_class))

        ei_data = self.stream.read(1)
        if ei_data == b'\x01':
            self.little_endian = True
        elif ei_data == b'\x02':
            self.little_endian = False
        else:
            raise ELFError('Invalid EI_DATA %s' % repr(ei_data))

    def _section_offset(self, n):
        """ Compute the offset of section #n in the file
        """
        return self['e_shoff'] + n * self['e_shentsize']

    def _segment_offset(self, n):
        """ Compute the offset of segment #n in the file
        """
        return self['e_phoff'] + n * self['e_phentsize']

    def _make_segment(self, segment_header):
        """ Create a Segment object of the appropriate type
        """
        segtype = segment_header['p_type']
        if segtype == 'PT_INTERP':
            return InterpSegment(segment_header, self.stream)
        elif segtype == 'PT_DYNAMIC':
            return DynamicSegment(segment_header, self.stream, self)
        elif segtype == 'PT_NOTE':
            return NoteSegment(segment_header, self.stream, self)
        else:
            return Segment(segment_header, self.stream)

    def _get_section_header(self, n):
        """ Find the header of section #n, parse it and return the struct
        """
        return struct_parse(
            self.structs.Elf_Shdr,
            self.stream,
            stream_pos=self._section_offset(n))

    def _get_section_name(self, section_header):
        """ Given a section header, find this section's name in the file's
            string table
        """
        name_offset = section_header['sh_name']
        return self._file_stringtable_section.get_string(name_offset)

    def _make_section(self, section_header):
        """ Create a section object of the appropriate type
        """
        name = self._get_section_name(section_header)
        sectype = section_header['sh_type']

        if sectype == 'SHT_STRTAB':
            return StringTableSection(section_header, name, self.stream)
        elif sectype == 'SHT_NULL':
            return NullSection(section_header, name, self.stream)
        elif sectype in ('SHT_SYMTAB', 'SHT_DYNSYM', 'SHT_SUNW_LDYNSYM'):
            return self._make_symbol_table_section(section_header, name)
        elif sectype == 'SHT_SUNW_syminfo':
            return self._make_sunwsyminfo_table_section(section_header, name)
        elif sectype == 'SHT_GNU_verneed':
            return self._make_gnu_verneed_section(section_header, name)
        elif sectype == 'SHT_GNU_verdef':
            return self._make_gnu_verdef_section(section_header, name)
        elif sectype == 'SHT_GNU_versym':
            return self._make_gnu_versym_section(section_header, name)
        elif sectype in ('SHT_REL', 'SHT_RELA'):
            return RelocationSection(
                section_header, name, self.stream, self)
        elif sectype == 'SHT_DYNAMIC':
            return DynamicSection(section_header, name, self.stream, self)
        else:
            return Section(section_header, name, self.stream)

    def _make_symbol_table_section(self, section_header, name):
        """ Create a SymbolTableSection
        """
        # sh_link points at the string table holding the symbol names.
        linked_strtab_index = section_header['sh_link']
        strtab_section = self.get_section(linked_strtab_index)
        return SymbolTableSection(
            section_header, name, self.stream,
            elffile=self,
            stringtable=strtab_section)

    def _make_sunwsyminfo_table_section(self, section_header, name):
        """ Create a SUNWSyminfoTableSection
        """
        linked_strtab_index = section_header['sh_link']
        strtab_section = self.get_section(linked_strtab_index)
        return SUNWSyminfoTableSection(
            section_header, name, self.stream,
            elffile=self,
            symboltable=strtab_section)

    def _make_gnu_verneed_section(self, section_header, name):
        """ Create a GNUVerNeedSection
        """
        linked_strtab_index = section_header['sh_link']
        strtab_section = self.get_section(linked_strtab_index)
        return GNUVerNeedSection(
            section_header, name, self.stream,
            elffile=self,
            stringtable=strtab_section)

    def _make_gnu_verdef_section(self, section_header, name):
        """ Create a GNUVerDefSection
        """
        linked_strtab_index = section_header['sh_link']
        strtab_section = self.get_section(linked_strtab_index)
        return GNUVerDefSection(
            section_header, name, self.stream,
            elffile=self,
            stringtable=strtab_section)

    def _make_gnu_versym_section(self, section_header, name):
        """ Create a GNUVerSymSection
        """
        linked_strtab_index = section_header['sh_link']
        strtab_section = self.get_section(linked_strtab_index)
        return GNUVerSymSection(
            section_header, name, self.stream,
            elffile=self,
            symboltable=strtab_section)

    def _get_segment_header(self, n):
        """ Find the header of segment #n, parse it and return the struct
        """
        return struct_parse(
            self.structs.Elf_Phdr,
            self.stream,
            stream_pos=self._segment_offset(n))

    def _get_file_stringtable(self):
        """ Find the file's string table section
        """
        # e_shstrndx is the index of the section-header string table.
        stringtable_section_num = self['e_shstrndx']
        return StringTableSection(
                header=self._get_section_header(stringtable_section_num),
                name='',
                stream=self.stream)

    def _parse_elf_header(self):
        """ Parses the ELF file header and assigns the result to attributes
            of this object.
        """
        return struct_parse(self.structs.Elf_Ehdr, self.stream, stream_pos=0)

    def _read_dwarf_section(self, section, relocate_dwarf_sections):
        """ Read the contents of a DWARF section from the stream and return a
            DebugSectionDescriptor. Apply relocations if asked to.
        """
        self.stream.seek(section['sh_offset'])
        # The section data is read into a new stream, for processing
        section_stream = BytesIO()
        section_stream.write(self.stream.read(section['sh_size']))

        if relocate_dwarf_sections:
            reloc_handler = RelocationHandler(self)
            reloc_section = reloc_handler.find_relocations_for_section(section)
            if reloc_section is not None:
                reloc_handler.apply_section_relocations(
                        section_stream, reloc_section)

        return DebugSectionDescriptor(
                stream=section_stream,
                name=section.name,
                global_offset=section['sh_offset'],
                size=section['sh_size'])
mmaker/bridgedb
refs/heads/master
lib/bridgedb/test/test_HTTPServer.py
2
# -*- encoding: utf-8 -*- #_____________________________________________________________________________ # # This file is part of BridgeDB, a Tor bridge distribution system. # # :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org> # :copyright: (c) 2014, Isis Lovecruft # (c) 2014, The Tor Project, Inc. # :license: see LICENSE for licensing information #_____________________________________________________________________________ """Unittests for :mod:`bridgedb.HTTPServer`.""" from __future__ import print_function import logging import os import random import shutil import ipaddr from BeautifulSoup import BeautifulSoup from twisted.internet import reactor from twisted.internet import task from twisted.trial import unittest from twisted.web.resource import Resource from twisted.web.test import requesthelper from bridgedb import HTTPServer from bridgedb.schedule import ScheduledInterval # For additional logger output for debugging, comment out the following: logging.disable(50) # and then uncomment the following line: #HTTPServer.logging.getLogger().setLevel(10) class ReplaceErrorPageTests(unittest.TestCase): """Tests for :func:`bridgedb.HTTPServer.replaceErrorPage`.""" def test_replaceErrorPage(self): """``replaceErrorPage`` should return the expected html.""" exc = Exception("vegan gümmibären") errorPage = HTTPServer.replaceErrorPage(exc) self.assertSubstring("Something went wrong", errorPage) self.assertNotSubstring("vegan gümmibären", errorPage) class CaptchaProtectedResourceTests(unittest.TestCase): """Tests for :mod:`bridgedb.HTTPServer.CaptchaProtectedResource`.""" def setUp(self): self.dist = None self.sched = None self.pagename = b'bridges.html' self.root = Resource() self.protectedResource = HTTPServer.WebResourceBridges(self.dist, self.sched) self.captchaResource = HTTPServer.CaptchaProtectedResource( useForwardedHeader=True, protectedResource=self.protectedResource) self.root.putChild(self.pagename, self.captchaResource) def 
test_render_GET_noCaptcha(self): """render_GET() should return a page without a CAPTCHA, which has the image alt text. """ request = DummyRequest([self.pagename]) request.method = b'GET' page = self.captchaResource.render_GET(request) self.assertSubstring( "Your browser is not displaying images properly", page) def test_render_GET_missingTemplate(self): """render_GET() with a missing template should raise an error and return the result of replaceErrorPage(). """ oldLookup = HTTPServer.lookup try: HTTPServer.lookup = None request = DummyRequest([self.pagename]) request.method = b'GET' page = self.captchaResource.render_GET(request) errorPage = HTTPServer.replaceErrorPage(Exception('kablam')) self.assertEqual(page, errorPage) finally: HTTPServer.lookup = oldLookup def createRequestWithIPs(self): """Set the IP address returned from ``request.getClientIP()`` to '3.3.3.3', and the IP address reported in the 'X-Forwarded-For' header to '2.2.2.2'. """ request = DummyRequest([self.pagename]) # Since we do not set ``request.getClientIP`` here like we do in some # of the other unittests, an exception would be raised here if # ``getBridgesForRequest()`` is unable to get the IP address from this # 'X-Forwarded-For' header (because ``ip`` would get set to ``None``). request.headers.update({'x-forwarded-for': '2.2.2.2'}) # See :api:`twisted.test.requesthelper.DummyRequest.getClientIP` request.client = requesthelper.IPv4Address('TCP', '3.3.3.3', 443) request.method = b'GET' return request def test_getClientIP_XForwardedFor(self): """CaptchaProtectedResource.getClientIP() should return the IP address from the 'X-Forwarded-For' header when ``useForwardedHeader=True``. 
""" self.captchaResource.useForwardedHeader = True request = self.createRequestWithIPs() clientIP = self.captchaResource.getClientIP(request) self.assertEqual(clientIP, '2.2.2.2') def test_getClientIP_fromRequest(self): """CaptchaProtectedResource.getClientIP() should return the IP address from the request instance when ``useForwardedHeader=False``. """ self.captchaResource.useForwardedHeader = False request = self.createRequestWithIPs() clientIP = self.captchaResource.getClientIP(request) self.assertEqual(clientIP, '3.3.3.3') def test_render_POST(self): """render_POST() with a wrong 'captcha_response_field' should return a redirect to the CaptchaProtectedResource page. """ request = DummyRequest([self.pagename]) request.method = b'POST' page = self.captchaResource.render_POST(request) self.assertEqual(BeautifulSoup(page).find('meta')['http-equiv'], 'refresh') class GimpCaptchaProtectedResourceTests(unittest.TestCase): """Tests for :mod:`bridgedb.HTTPServer.GimpCaptchaProtectedResource`.""" def setUp(self): """Create a :class:`HTTPServer.WebResourceBridges` and protect it with a :class:`GimpCaptchaProtectedResource`. 
""" # Create our cached CAPTCHA directory: self.captchaDir = 'captchas' if not os.path.isdir(self.captchaDir): os.makedirs(self.captchaDir) # Set up our resources to fake a minimal HTTP(S) server: self.pagename = b'captcha.html' self.root = Resource() # (None, None) is the (distributor, scheduleInterval): self.protectedResource = HTTPServer.WebResourceBridges(None, None) self.captchaResource = HTTPServer.GimpCaptchaProtectedResource( secretKey='42', publicKey='23', hmacKey='abcdefghijklmnopqrstuvwxyz012345', captchaDir='captchas', useForwardedHeader=True, protectedResource=self.protectedResource) self.root.putChild(self.pagename, self.captchaResource) # Set up the basic parts of our faked request: self.request = DummyRequest([self.pagename]) def tearDown(self): """Delete the cached CAPTCHA directory if it still exists.""" if os.path.isdir(self.captchaDir): shutil.rmtree(self.captchaDir) def test_extractClientSolution(self): """A (challenge, sollution) pair extracted from a request resulting from a POST should have the same unmodified (challenge, sollution) as the client originally POSTed. 
""" expectedChallenge = '23232323232323232323' expectedResponse = 'awefawefaefawefaewf' self.request.method = b'POST' self.request.addArg('captcha_challenge_field', expectedChallenge) self.request.addArg('captcha_response_field', expectedResponse) response = self.captchaResource.extractClientSolution(self.request) (challenge, response) = response self.assertEqual(challenge, expectedChallenge) self.assertEqual(response, expectedResponse) def test_checkSolution(self): """checkSolution() should return False is the solution is invalid.""" expectedChallenge = '23232323232323232323' expectedResponse = 'awefawefaefawefaewf' self.request.method = b'POST' self.request.addArg('captcha_challenge_field', expectedChallenge) self.request.addArg('captcha_response_field', expectedResponse) valid = self.captchaResource.checkSolution(self.request) self.assertFalse(valid) def test_getCaptchaImage(self): """Retrieving a (captcha, challenge) pair with an empty captchaDir should return None for both of the (captcha, challenge) strings. """ self.request.method = b'GET' response = self.captchaResource.getCaptchaImage(self.request) (image, challenge) = response # Because we created the directory, there weren't any CAPTCHAs to # retrieve from it: self.assertIs(image, None) self.assertIs(challenge, None) def test_getCaptchaImage_noCaptchaDir(self): """Retrieving a (captcha, challenge) with an missing captchaDir should raise a bridgedb.captcha.GimpCaptchaError. """ shutil.rmtree(self.captchaDir) self.request.method = b'GET' self.assertRaises(HTTPServer.captcha.GimpCaptchaError, self.captchaResource.getCaptchaImage, self.request) def test_render_GET_missingTemplate(self): """render_GET() with a missing template should raise an error and return the result of replaceErrorPage(). 
""" oldLookup = HTTPServer.lookup try: HTTPServer.lookup = None self.request.method = b'GET' page = self.captchaResource.render_GET(self.request) errorPage = HTTPServer.replaceErrorPage(Exception('kablam')) self.assertEqual(page, errorPage) finally: HTTPServer.lookup = oldLookup def test_render_POST_blankFields(self): """render_POST() with a blank 'captcha_response_field' should return a redirect to the CaptchaProtectedResource page. """ self.request.method = b'POST' self.request.addArg('captcha_challenge_field', '') self.request.addArg('captcha_response_field', '') page = self.captchaResource.render_POST(self.request) self.assertEqual(BeautifulSoup(page).find('meta')['http-equiv'], 'refresh') def test_render_POST_wrongSolution(self): """render_POST() with a wrong 'captcha_response_field' should return a redirect to the CaptchaProtectedResource page. """ expectedChallenge = '23232323232323232323' expectedResponse = 'awefawefaefawefaewf' self.request.method = b'POST' self.request.addArg('captcha_challenge_field', expectedChallenge) self.request.addArg('captcha_response_field', expectedResponse) page = self.captchaResource.render_POST(self.request) self.assertEqual(BeautifulSoup(page).find('meta')['http-equiv'], 'refresh') class ReCaptchaProtectedResourceTests(unittest.TestCase): """Tests for :mod:`bridgedb.HTTPServer.ReCaptchaProtectedResource`.""" def setUp(self): """Create a :class:`HTTPServer.WebResourceBridges` and protect it with a :class:`ReCaptchaProtectedResource`. """ self.timeout = 10.0 # Can't take longer than that, right? 
# Set up our resources to fake a minimal HTTP(S) server: self.pagename = b'captcha.html' self.root = Resource() # (None, None) is the (distributor, scheduleInterval): self.protectedResource = HTTPServer.WebResourceBridges(None, None) self.captchaResource = HTTPServer.ReCaptchaProtectedResource( publicKey='23', secretKey='42', remoteIP='111.111.111.111', useForwardedHeader=True, protectedResource=self.protectedResource) self.root.putChild(self.pagename, self.captchaResource) # Set up the basic parts of our faked request: self.request = DummyRequest([self.pagename]) def tearDown(self): """Cleanup method for removing timed out connections on the reactor. This seems to be the solution for the dirty reactor due to ``DelayedCall``s which is mentioned at the beginning of this file. There doesn't seem to be any documentation anywhere which proposes this solution, although this seems to solve the problem. """ for delay in reactor.getDelayedCalls(): try: delay.cancel() except (AlreadyCalled, AlreadyCancelled): pass def test_renderDeferred_invalid(self): """:meth:`_renderDeferred` should redirect a ``Request`` (after the CAPTCHA was NOT xsuccessfully solved) which results from a ``Deferred``'s callback. """ self.request.method = b'POST' def testCB(request): """Check the ``Request`` returned from ``_renderDeferred``.""" self.assertIsInstance(request, DummyRequest) soup = BeautifulSoup(b''.join(request.written)).find('meta')['http-equiv'] self.assertEqual(soup, 'refresh') d = task.deferLater(reactor, 0, lambda x: x, (False, self.request)) d.addCallback(self.captchaResource._renderDeferred) d.addCallback(testCB) return d def test_renderDeferred_valid(self): """:meth:`_renderDeferred` should correctly render a ``Request`` (after the CAPTCHA has been successfully solved) which results from a ``Deferred``'s callback. 
""" self.request.method = b'POST' def testCB(request): """Check the ``Request`` returned from ``_renderDeferred``.""" self.assertIsInstance(request, DummyRequest) html = b''.join(request.written) self.assertSubstring('Uh oh, spaghettios!', html) d = task.deferLater(reactor, 0, lambda x: x, (True, self.request)) d.addCallback(self.captchaResource._renderDeferred) d.addCallback(testCB) return d def test_renderDeferred_nontuple(self): """:meth:`_renderDeferred` should correctly render a ``Request`` (after the CAPTCHA has been successfully solved) which results from a ``Deferred``'s callback. """ self.request.method = b'POST' def testCB(request): """Check the ``Request`` returned from ``_renderDeferred``.""" self.assertIs(request, None) d = task.deferLater(reactor, 0, lambda x: x, (self.request)) d.addCallback(self.captchaResource._renderDeferred) d.addCallback(testCB) return d def test_checkSolution_blankFields(self): """:meth:`HTTPServer.ReCaptchaProtectedResource.checkSolution` should return a redirect if is the solution field is blank. 
""" self.request.method = b'POST' self.request.addArg('captcha_challenge_field', '') self.request.addArg('captcha_response_field', '') self.assertEqual((False, self.request), self.successResultOf( self.captchaResource.checkSolution(self.request))) def test_getRemoteIP_useRandomIP(self): """Check that removing our remoteip setting produces a random IP.""" self.captchaResource.remoteIP = None ip = self.captchaResource.getRemoteIP() realishIP = ipaddr.IPv4Address(ip).compressed self.assertTrue(realishIP) self.assertNotEquals(realishIP, '111.111.111.111') def test_getRemoteIP_useConfiguredIP(self): """Check that our remoteip setting is used if configured.""" ip = self.captchaResource.getRemoteIP() realishIP = ipaddr.IPv4Address(ip).compressed self.assertTrue(realishIP) self.assertEquals(realishIP, '111.111.111.111') def test_render_GET_missingTemplate(self): """render_GET() with a missing template should raise an error and return the result of replaceErrorPage(). """ oldLookup = HTTPServer.lookup try: HTTPServer.lookup = None self.request.method = b'GET' page = self.captchaResource.render_GET(self.request) errorPage = HTTPServer.replaceErrorPage(Exception('kablam')) self.assertEqual(page, errorPage) finally: HTTPServer.lookup = oldLookup def test_render_POST_blankFields(self): """render_POST() with a blank 'captcha_response_field' should return a redirect to the CaptchaProtectedResource page. """ self.request.method = b'POST' self.request.addArg('captcha_challenge_field', '') self.request.addArg('captcha_response_field', '') page = self.captchaResource.render_POST(self.request) self.assertEqual(page, HTTPServer.server.NOT_DONE_YET) def test_render_POST_wrongSolution(self): """render_POST() with a wrong 'captcha_response_field' should return a redirect to the CaptchaProtectedResource page. 
""" expectedChallenge = '23232323232323232323' expectedResponse = 'awefawefaefawefaewf' self.request.method = b'POST' self.request.addArg('captcha_challenge_field', expectedChallenge) self.request.addArg('captcha_response_field', expectedResponse) page = self.captchaResource.render_POST(self.request) self.assertEqual(page, HTTPServer.server.NOT_DONE_YET) class DummyBridge(object): """A mock :class:`bridgedb.Bridges.Bridge` which only supports a mocked ``getConfigLine`` method.""" def _randORPort(self): return random.randint(9001, 9999) def _randPTPort(self): return random.randint(6001, 6666) def _returnFour(self): return random.randint(2**24, 2**32-1) def _returnSix(self): return random.randint(2**24, 2**128-1) def __init__(self, transports=[]): """Create a mocked bridge suitable for testing distributors and web resource rendering. """ self.nickname = "bridge-{0}".format(self._returnFour()) self.ip = ipaddr.IPv4Address(self._returnFour()) self.orport = self._randORPort() self.transports = transports self.running = True self.stable = True self.blockingCountries = {} self.desc_digest = None self.ei_digest = None self.verified = False self.fingerprint = "".join(random.choice('abcdef0123456789') for _ in xrange(40)) self.or_addresses = {ipaddr.IPv6Address(self._returnSix()): self._randORPort()} def getConfigLine(self, includeFingerprint=True, addressClass=ipaddr.IPv4Address, transport=None, request=None): """Get a "torrc" bridge config line to give to a client.""" line = [] if transport is not None: #print("getConfigLine got transport=%r" % transport) line.append(str(transport)) line.append("%s:%s" % (self.ip, self.orport)) if includeFingerprint is True: line.append(self.fingerprint) bridgeLine = " ".join([item for item in line]) #print "Created config line: %r" % bridgeLine return bridgeLine class DummyIPBasedDistributor(object): """A mocked :class:`bridgedb.Dist.IPBasedDistributor` which is used to test :class:`bridgedb.HTTPServer.WebResourceBridges. 
""" def _dumbAreaMapper(ip): return ip def __init__(self, areaMapper=None, nClusters=None, key=None, ipCategories=None, answerParameters=None): """None of the parameters are really used, they are just there to retain an identical method signature. """ self.areaMapper = self._dumbAreaMapper self.nClusters = 3 self.nBridgesToGive = 3 self.key = self.__class__.__name__ self.ipCategories = ipCategories self.answerParameters = answerParameters def getBridgesForIP(self, ip=None, epoch=None, N=1, countyCode=None, bridgeFilterRules=None): """Needed because it's called in :meth:`WebResourceBridges.getBridgesForIP`. """ return [DummyBridge() for _ in xrange(N)] class DummyRequest(requesthelper.DummyRequest): """Wrapper for :api:`twisted.test.requesthelper.DummyRequest` to add redirect support. """ def __init__(self, *args, **kwargs): requesthelper.DummyRequest.__init__(self, *args, **kwargs) self.redirect = self._redirect(self) def URLPath(self): """Fake the missing Request.URLPath too.""" return self.uri def _redirect(self, request): """Stub method to add a redirect() method to DummyResponse.""" newRequest = type(request) newRequest.uri = request.uri return newRequest class WebResourceBridgesTests(unittest.TestCase): """Tests for :class:`HTTPServer.WebResourceBridges`.""" def setUp(self): """Set up our resources to fake a minimal HTTP(S) server.""" self.pagename = b'bridges.html' self.root = Resource() self.dist = DummyIPBasedDistributor() self.sched = ScheduledInterval('hour', 1) self.nBridgesPerRequest = 2 self.bridgesResource = HTTPServer.WebResourceBridges( self.dist, self.sched, N=2, #useForwardedHeader=True, includeFingerprints=True) self.root.putChild(self.pagename, self.bridgesResource) def parseBridgesFromHTMLPage(self, page): """Utility to pull the bridge lines out of an HTML response page. :param str page: A rendered HTML page, as a string. :raises: Any error which might occur. :rtype: list :returns: A list of the bridge lines contained on the **page**. 
""" # The bridge lines are contained in a <div class='bridges'> tag: soup = BeautifulSoup(page) well = soup.find('div', {'class': 'bridge-lines'}) content = well.renderContents().strip() bridges = [b.strip() for b in content.splitlines()] return bridges def test_render_GET_vanilla(self): """Test rendering a request for normal, vanilla bridges.""" request = DummyRequest([self.pagename]) request.method = b'GET' request.getClientIP = lambda: '1.1.1.1' page = self.bridgesResource.render(request) # The response should explain how to use the bridge lines: self.assertTrue("To enter bridges into Tor Browser" in str(page)) for b in self.parseBridgesFromHTMLPage(page): # Check that each bridge line had the expected number of fields: fields = b.split(' ') self.assertEqual(len(fields), 2) # Check that the IP and port seem okay: ip, port = fields[0].rsplit(':') self.assertIsInstance(ipaddr.IPv4Address(ip), ipaddr.IPv4Address) self.assertIsInstance(int(port), int) self.assertGreater(int(port), 0) self.assertLessEqual(int(port), 65535) def test_render_GET_XForwardedFor(self): """The client's IP address should be obtainable from the 'X-Forwarded-For' header in the request. """ self.bridgesResource.useForwardedHeader = True request = DummyRequest([self.pagename]) request.method = b'GET' # Since we do not set ``request.getClientIP`` here like we do in some # of the other unittests, an exception would be raised here if # ``getBridgesForRequest()`` is unable to get the IP address from this # 'X-Forwarded-For' header (because ``ip`` would get set to ``None``). 
request.headers.update({'x-forwarded-for': '2.2.2.2'}) page = self.bridgesResource.render(request) self.bridgesResource.useForwardedHeader = False # Reset it # The response should explain how to use the bridge lines: self.assertTrue("To enter bridges into Tor Browser" in str(page)) def test_render_GET_RTLlang(self): """Test rendering a request for plain bridges in Arabic.""" request = DummyRequest([b"bridges?transport=obfs3"]) request.method = b'GET' request.getClientIP = lambda: '3.3.3.3' # For some strange reason, the 'Accept-Language' value *should not* be # a list, unlike all the other headers and args… request.headers.update({'accept-language': 'ar,en,en_US,'}) page = self.bridgesResource.render(request) self.assertSubstring("direction: rtl", page) self.assertSubstring( # "I need an alternative way to get bridges!" "انا بحاجة إلى وسيلة بديلة للحصول على الجسور!", page) for bridgeLine in self.parseBridgesFromHTMLPage(page): # Check that each bridge line had the expected number of fields: bridgeLine = bridgeLine.split(' ') self.assertEqual(len(bridgeLine), 2) def test_render_GET_RTLlang_obfs3(self): """Test rendering a request for obfs3 bridges in Farsi.""" request = DummyRequest([b"bridges?transport=obfs3"]) request.method = b'GET' request.getClientIP = lambda: '3.3.3.3' request.headers.update({'accept-language': 'fa,en,en_US,'}) # We actually have to set the request args manually when using a # DummyRequest: request.args.update({'transport': ['obfs3']}) page = self.bridgesResource.render(request) self.assertSubstring("direction: rtl", page) self.assertSubstring( # "I need an alternative way to get bridges!" 
"به یک راه دیگر برای دریافت پل‌ها احتیاج دارم!", page) for bridgeLine in self.parseBridgesFromHTMLPage(page): # Check that each bridge line had the expected number of fields: bridgeLine = bridgeLine.split(' ') self.assertEqual(len(bridgeLine), 3) self.assertEqual(bridgeLine[0], 'obfs3') # Check that the IP and port seem okay: ip, port = bridgeLine[1].rsplit(':') self.assertIsInstance(ipaddr.IPv4Address(ip), ipaddr.IPv4Address) self.assertIsInstance(int(port), int) self.assertGreater(int(port), 0) self.assertLessEqual(int(port), 65535) def test_renderAnswer_textplain(self): """If the request format specifies 'plain', we should return content with mimetype 'text/plain'. """ request = DummyRequest([self.pagename]) request.args.update({'format': ['plain']}) request.getClientIP = lambda: '4.4.4.4' request.method = b'GET' page = self.bridgesResource.render(request) self.assertTrue("html" not in str(page)) # We just need to strip and split it because it looks like: # # 94.235.85.233:9492 0d9d0547c3471cddc473f7288a6abfb54562dc06 # 255.225.204.145:9511 1fb89d618b3a12afe3529fd072127ea08fb50466 # # (Yes, there are two leading spaces at the beginning of each line) # bridgeLines = [line.strip() for line in page.strip().split('\n')] for bridgeLine in bridgeLines: bridgeLine = bridgeLine.split(' ') self.assertEqual(len(bridgeLine), 2) # Check that the IP and port seem okay: ip, port = bridgeLine[0].rsplit(':') self.assertIsInstance(ipaddr.IPv4Address(ip), ipaddr.IPv4Address) self.assertIsInstance(int(port), int) self.assertGreater(int(port), 0) self.assertLessEqual(int(port), 65535) class WebResourceOptionsTests(unittest.TestCase): """Tests for :class:`bridgedb.HTTPServer.WebResourceOptions`.""" def setUp(self): """Create a :class:`HTTPServer.WebResourceOptions`.""" # Set up our resources to fake a minimal HTTP(S) server: self.pagename = b'options.html' self.root = Resource() self.optionsResource = HTTPServer.WebResourceOptions() self.root.putChild(self.pagename, 
self.optionsResource) def test_render_GET_RTLlang(self): """Test rendering a request for obfs3 bridges in Arabic.""" request = DummyRequest(["bridges?transport=obfs3"]) request.method = b'GET' request.getClientIP = lambda: '3.3.3.3' request.headers.update({'accept-language': 'he'}) # We actually have to set the request args manually when using a # DummyRequest: request.args.update({'transport': ['obfs2']}) page = self.optionsResource.render(request) self.assertSubstring("direction: rtl", page) self.assertSubstring("מהם גשרים?", page)
demonshreder/pirate
refs/heads/master
user/migrations/0005_communities_code.py
1
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-03-27 14:11 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('user', '0004_friendrequests'), ] operations = [ migrations.AddField( model_name='communities', name='code', field=models.CharField(max_length=200, null=True), ), ]
ahmetabdi/SickRage
refs/heads/master
lib/unidecode/x05b.py
252
# Unidecode transliteration table for Unicode code points U+5B00..U+5BFF.
# Entry N holds the ASCII transliteration of code point 0x5B00 + N; '[?] '
# marks a code point with no known transliteration. Machine-generated —
# do not edit individual entries by hand.
data = (
'Gui ', # 0x00
'Deng ', # 0x01
'Zhi ', # 0x02
'Xu ', # 0x03
'Yi ', # 0x04
'Hua ', # 0x05
'Xi ', # 0x06
'Hui ', # 0x07
'Rao ', # 0x08
'Xi ', # 0x09
'Yan ', # 0x0a
'Chan ', # 0x0b
'Jiao ', # 0x0c
'Mei ', # 0x0d
'Fan ', # 0x0e
'Fan ', # 0x0f
'Xian ', # 0x10
'Yi ', # 0x11
'Wei ', # 0x12
'Jiao ', # 0x13
'Fu ', # 0x14
'Shi ', # 0x15
'Bi ', # 0x16
'Shan ', # 0x17
'Sui ', # 0x18
'Qiang ', # 0x19
'Lian ', # 0x1a
'Huan ', # 0x1b
'Xin ', # 0x1c
'Niao ', # 0x1d
'Dong ', # 0x1e
'Yi ', # 0x1f
'Can ', # 0x20
'Ai ', # 0x21
'Niang ', # 0x22
'Neng ', # 0x23
'Ma ', # 0x24
'Tiao ', # 0x25
'Chou ', # 0x26
'Jin ', # 0x27
'Ci ', # 0x28
'Yu ', # 0x29
'Pin ', # 0x2a
'Yong ', # 0x2b
'Xu ', # 0x2c
'Nai ', # 0x2d
'Yan ', # 0x2e
'Tai ', # 0x2f
'Ying ', # 0x30
'Can ', # 0x31
'Niao ', # 0x32
'Wo ', # 0x33
'Ying ', # 0x34
'Mian ', # 0x35
'Kaka ', # 0x36
'Ma ', # 0x37
'Shen ', # 0x38
'Xing ', # 0x39
'Ni ', # 0x3a
'Du ', # 0x3b
'Liu ', # 0x3c
'Yuan ', # 0x3d
'Lan ', # 0x3e
'Yan ', # 0x3f
'Shuang ', # 0x40
'Ling ', # 0x41
'Jiao ', # 0x42
'Niang ', # 0x43
'Lan ', # 0x44
'Xian ', # 0x45
'Ying ', # 0x46
'Shuang ', # 0x47
'Shuai ', # 0x48
'Quan ', # 0x49
'Mi ', # 0x4a
'Li ', # 0x4b
'Luan ', # 0x4c
'Yan ', # 0x4d
'Zhu ', # 0x4e
'Lan ', # 0x4f
'Zi ', # 0x50
'Jie ', # 0x51
'Jue ', # 0x52
'Jue ', # 0x53
'Kong ', # 0x54
'Yun ', # 0x55
'Zi ', # 0x56
'Zi ', # 0x57
'Cun ', # 0x58
'Sun ', # 0x59
'Fu ', # 0x5a
'Bei ', # 0x5b
'Zi ', # 0x5c
'Xiao ', # 0x5d
'Xin ', # 0x5e
'Meng ', # 0x5f
'Si ', # 0x60
'Tai ', # 0x61
'Bao ', # 0x62
'Ji ', # 0x63
'Gu ', # 0x64
'Nu ', # 0x65
'Xue ', # 0x66
'[?] ', # 0x67
'Zhuan ', # 0x68
'Hai ', # 0x69
'Luan ', # 0x6a
'Sun ', # 0x6b
'Huai ', # 0x6c
'Mie ', # 0x6d
'Cong ', # 0x6e
'Qian ', # 0x6f
'Shu ', # 0x70
'Chan ', # 0x71
'Ya ', # 0x72
'Zi ', # 0x73
'Ni ', # 0x74
'Fu ', # 0x75
'Zi ', # 0x76
'Li ', # 0x77
'Xue ', # 0x78
'Bo ', # 0x79
'Ru ', # 0x7a
'Lai ', # 0x7b
'Nie ', # 0x7c
'Nie ', # 0x7d
'Ying ', # 0x7e
'Luan ', # 0x7f
'Mian ', # 0x80
'Zhu ', # 0x81
'Rong ', # 0x82
'Ta ', # 0x83
'Gui ', # 0x84
'Zhai ', # 0x85
'Qiong ', # 0x86
'Yu ', # 0x87
'Shou ', # 0x88
'An ', # 0x89
'Tu ', # 0x8a
'Song ', # 0x8b
'Wan ', # 0x8c
'Rou ', # 0x8d
'Yao ', # 0x8e
'Hong ', # 0x8f
'Yi ', # 0x90
'Jing ', # 0x91
'Zhun ', # 0x92
'Mi ', # 0x93
'Zhu ', # 0x94
'Dang ', # 0x95
'Hong ', # 0x96
'Zong ', # 0x97
'Guan ', # 0x98
'Zhou ', # 0x99
'Ding ', # 0x9a
'Wan ', # 0x9b
'Yi ', # 0x9c
'Bao ', # 0x9d
'Shi ', # 0x9e
'Shi ', # 0x9f
'Chong ', # 0xa0
'Shen ', # 0xa1
'Ke ', # 0xa2
'Xuan ', # 0xa3
'Shi ', # 0xa4
'You ', # 0xa5
'Huan ', # 0xa6
'Yi ', # 0xa7
'Tiao ', # 0xa8
'Shi ', # 0xa9
'Xian ', # 0xaa
'Gong ', # 0xab
'Cheng ', # 0xac
'Qun ', # 0xad
'Gong ', # 0xae
'Xiao ', # 0xaf
'Zai ', # 0xb0
'Zha ', # 0xb1
'Bao ', # 0xb2
'Hai ', # 0xb3
'Yan ', # 0xb4
'Xiao ', # 0xb5
'Jia ', # 0xb6
'Shen ', # 0xb7
'Chen ', # 0xb8
'Rong ', # 0xb9
'Huang ', # 0xba
'Mi ', # 0xbb
'Kou ', # 0xbc
'Kuan ', # 0xbd
'Bin ', # 0xbe
'Su ', # 0xbf
'Cai ', # 0xc0
'Zan ', # 0xc1
'Ji ', # 0xc2
'Yuan ', # 0xc3
'Ji ', # 0xc4
'Yin ', # 0xc5
'Mi ', # 0xc6
'Kou ', # 0xc7
'Qing ', # 0xc8
'Que ', # 0xc9
'Zhen ', # 0xca
'Jian ', # 0xcb
'Fu ', # 0xcc
'Ning ', # 0xcd
'Bing ', # 0xce
'Huan ', # 0xcf
'Mei ', # 0xd0
'Qin ', # 0xd1
'Han ', # 0xd2
'Yu ', # 0xd3
'Shi ', # 0xd4
'Ning ', # 0xd5
'Qin ', # 0xd6
'Ning ', # 0xd7
'Zhi ', # 0xd8
'Yu ', # 0xd9
'Bao ', # 0xda
'Kuan ', # 0xdb
'Ning ', # 0xdc
'Qin ', # 0xdd
'Mo ', # 0xde
'Cha ', # 0xdf
'Ju ', # 0xe0
'Gua ', # 0xe1
'Qin ', # 0xe2
'Hu ', # 0xe3
'Wu ', # 0xe4
'Liao ', # 0xe5
'Shi ', # 0xe6
'Zhu ', # 0xe7
'Zhai ', # 0xe8
'Shen ', # 0xe9
'Wei ', # 0xea
'Xie ', # 0xeb
'Kuan ', # 0xec
'Hui ', # 0xed
'Liao ', # 0xee
'Jun ', # 0xef
'Huan ', # 0xf0
'Yi ', # 0xf1
'Yi ', # 0xf2
'Bao ', # 0xf3
'Qin ', # 0xf4
'Chong ', # 0xf5
'Bao ', # 0xf6
'Feng ', # 0xf7
'Cun ', # 0xf8
'Dui ', # 0xf9
'Si ', # 0xfa
'Xun ', # 0xfb
'Dao ', # 0xfc
'Lu ', # 0xfd
'Dui ', # 0xfe
'Shou ', # 0xff
)
ronaflx/cpp-utility
refs/heads/master
.ycm_extra_conf.py
1
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>

import os
import ycm_core

# Compilation flags used whenever no compilation database is configured.
# A '-std=<something>' flag is mandatory — without it clang guesses the
# language of headers (badly) — and '-x' pins the language of the files
# themselves. Extra include paths that don't exist on a given machine are
# harmless.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# Only the YCM source tree itself actually needs -DUSE_CLANG_COMPLETER.
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x',
'c++',
'-isystem',
# OS X only; harmless elsewhere.
'/System/Library/Frameworks/Python.framework/Headers',
'-I',
'.',
'-isystem',
'./build/third_party/gmock/include',
'-isystem',
'./build/third_party/gtest/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../include/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]

# Absolute path of the folder (NOT the file!) containing compile_commands.json;
# leave empty to fall back to the 'flags' list above. CMake can generate the
# file via: set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
compilation_database_folder = ''

if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None

SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]


def DirectoryOfThisScript():
  """Return the absolute directory containing this configuration file."""
  return os.path.dirname( os.path.abspath( __file__ ) )


def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* with relative include paths rooted at
  *working_directory*; with no working directory, pass flags through as-is."""
  if not working_directory:
    return list( flags )

  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  next_is_path = False
  for original_flag in flags:
    rewritten = original_flag

    if next_is_path:
      # The previous flag was a bare path option; this flag is its argument.
      next_is_path = False
      if not original_flag.startswith( '/' ):
        rewritten = os.path.join( working_directory, original_flag )

    for option in path_flags:
      if original_flag == option:
        next_is_path = True
        break
      if original_flag.startswith( option ):
        # Path fused onto the option, e.g. '-Ifoo' -> '-I/abs/foo'.
        fused_path = original_flag[ len( option ): ]
        rewritten = option + os.path.join( working_directory, fused_path )
        break

    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags


def IsHeaderFile( filename ):
  """Return True when *filename* has a C/C++ header extension."""
  return os.path.splitext( filename )[ 1 ] in ( '.h', '.hxx', '.hpp', '.hh' )


def GetCompilationInfoForFile( filename ):
  """Look up compilation info for *filename* in the compilation database.

  compile_commands.json generated by CMake has no entries for headers, so
  for a header file we borrow the flags of a sibling source file with the
  same basename, if any; returns None when nothing suitable is found.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )

  basename = os.path.splitext( filename )[ 0 ]
  for extension in SOURCE_EXTENSIONS:
    sibling = basename + extension
    if not os.path.exists( sibling ):
      continue
    compilation_info = database.GetCompilationInfoForFile( sibling )
    if compilation_info.compiler_flags_:
      return compilation_info
  return None


def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the compilation flags dict for *filename*."""
  if database:
    # compiler_flags_ is a "list-like" StringVec, not a real Python list.
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
    # YCM-specific tweak: most projects do NOT need to strip this flag.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    final_flags = MakeRelativePathsInFlagsAbsolute( flags,
                                                    DirectoryOfThisScript() )

  return {
    'flags': final_flags,
    'do_cache': True
  }
sujeet4github/MyLangUtils
refs/heads/master
LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/pip/_vendor/distlib/wheel.py
412
# -*- coding: utf-8 -*- # # Copyright (C) 2013-2016 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import unicode_literals import base64 import codecs import datetime import distutils.util from email import message_from_file import hashlib import imp import json import logging import os import posixpath import re import shutil import sys import tempfile import zipfile from . import __version__, DistlibException from .compat import sysconfig, ZipFile, fsdecode, text_type, filter from .database import InstalledDistribution from .metadata import Metadata, METADATA_FILENAME from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, cached_property, get_cache_base, read_exports, tempdir) from .version import NormalizedVersion, UnsupportedVersionError logger = logging.getLogger(__name__) cache = None # created when needed if hasattr(sys, 'pypy_version_info'): IMP_PREFIX = 'pp' elif sys.platform.startswith('java'): IMP_PREFIX = 'jy' elif sys.platform == 'cli': IMP_PREFIX = 'ip' else: IMP_PREFIX = 'cp' VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') if not VER_SUFFIX: # pragma: no cover VER_SUFFIX = '%s%s' % sys.version_info[:2] PYVER = 'py' + VER_SUFFIX IMPVER = IMP_PREFIX + VER_SUFFIX ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_') ABI = sysconfig.get_config_var('SOABI') if ABI and ABI.startswith('cpython-'): ABI = ABI.replace('cpython-', 'cp') else: def _derive_abi(): parts = ['cp', VER_SUFFIX] if sysconfig.get_config_var('Py_DEBUG'): parts.append('d') if sysconfig.get_config_var('WITH_PYMALLOC'): parts.append('m') if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4: parts.append('u') return ''.join(parts) ABI = _derive_abi() del _derive_abi FILENAME_RE = re.compile(r''' (?P<nm>[^-]+) -(?P<vn>\d+[^-]*) (-(?P<bn>\d+[^-]*))? 
-(?P<py>\w+\d+(\.\w+\d+)*) -(?P<bi>\w+) -(?P<ar>\w+(\.\w+)*) \.whl$ ''', re.IGNORECASE | re.VERBOSE) NAME_VERSION_RE = re.compile(r''' (?P<nm>[^-]+) -(?P<vn>\d+[^-]*) (-(?P<bn>\d+[^-]*))?$ ''', re.IGNORECASE | re.VERBOSE) SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$') SHEBANG_PYTHON = b'#!python' SHEBANG_PYTHONW = b'#!pythonw' if os.sep == '/': to_posix = lambda o: o else: to_posix = lambda o: o.replace(os.sep, '/') class Mounter(object): def __init__(self): self.impure_wheels = {} self.libs = {} def add(self, pathname, extensions): self.impure_wheels[pathname] = extensions self.libs.update(extensions) def remove(self, pathname): extensions = self.impure_wheels.pop(pathname) for k, v in extensions: if k in self.libs: del self.libs[k] def find_module(self, fullname, path=None): if fullname in self.libs: result = self else: result = None return result def load_module(self, fullname): if fullname in sys.modules: result = sys.modules[fullname] else: if fullname not in self.libs: raise ImportError('unable to find extension for %s' % fullname) result = imp.load_dynamic(fullname, self.libs[fullname]) result.__loader__ = self parts = fullname.rsplit('.', 1) if len(parts) > 1: result.__package__ = parts[0] return result _hook = Mounter() class Wheel(object): """ Class to build and install from Wheel files (PEP 427). """ wheel_version = (1, 1) hash_kind = 'sha256' def __init__(self, filename=None, sign=False, verify=False): """ Initialise an instance using a (valid) filename. 
""" self.sign = sign self.should_verify = verify self.buildver = '' self.pyver = [PYVER] self.abi = ['none'] self.arch = ['any'] self.dirname = os.getcwd() if filename is None: self.name = 'dummy' self.version = '0.1' self._filename = self.filename else: m = NAME_VERSION_RE.match(filename) if m: info = m.groupdict('') self.name = info['nm'] # Reinstate the local version separator self.version = info['vn'].replace('_', '-') self.buildver = info['bn'] self._filename = self.filename else: dirname, filename = os.path.split(filename) m = FILENAME_RE.match(filename) if not m: raise DistlibException('Invalid name or ' 'filename: %r' % filename) if dirname: self.dirname = os.path.abspath(dirname) self._filename = filename info = m.groupdict('') self.name = info['nm'] self.version = info['vn'] self.buildver = info['bn'] self.pyver = info['py'].split('.') self.abi = info['bi'].split('.') self.arch = info['ar'].split('.') @property def filename(self): """ Build and return a filename from the various components. 
""" if self.buildver: buildver = '-' + self.buildver else: buildver = '' pyver = '.'.join(self.pyver) abi = '.'.join(self.abi) arch = '.'.join(self.arch) # replace - with _ as a local version separator version = self.version.replace('-', '_') return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, pyver, abi, arch) @property def exists(self): path = os.path.join(self.dirname, self.filename) return os.path.isfile(path) @property def tags(self): for pyver in self.pyver: for abi in self.abi: for arch in self.arch: yield pyver, abi, arch @cached_property def metadata(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: wheel_metadata = self.get_wheel_metadata(zf) wv = wheel_metadata['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) if file_version < (1, 1): fn = 'METADATA' else: fn = METADATA_FILENAME try: metadata_filename = posixpath.join(info_dir, fn) with zf.open(metadata_filename) as bf: wf = wrapper(bf) result = Metadata(fileobj=wf) except KeyError: raise ValueError('Invalid wheel, because %s is ' 'missing' % fn) return result def get_wheel_metadata(self, zf): name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver metadata_filename = posixpath.join(info_dir, 'WHEEL') with zf.open(metadata_filename) as bf: wf = codecs.getreader('utf-8')(bf) message = message_from_file(wf) return dict(message) @cached_property def info(self): pathname = os.path.join(self.dirname, self.filename) with ZipFile(pathname, 'r') as zf: result = self.get_wheel_metadata(zf) return result def process_shebang(self, data): m = SHEBANG_RE.match(data) if m: end = m.end() shebang, data_after_shebang = data[:end], data[end:] # Preserve any arguments after the interpreter if b'pythonw' in shebang.lower(): shebang_python = SHEBANG_PYTHONW else: shebang_python = SHEBANG_PYTHON m = 
SHEBANG_DETAIL_RE.match(shebang) if m: args = b' ' + m.groups()[-1] else: args = b'' shebang = shebang_python + args data = shebang + data_after_shebang else: cr = data.find(b'\r') lf = data.find(b'\n') if cr < 0 or cr > lf: term = b'\n' else: if data[cr:cr + 2] == b'\r\n': term = b'\r\n' else: term = b'\r' data = SHEBANG_PYTHON + term + data return data def get_hash(self, data, hash_kind=None): if hash_kind is None: hash_kind = self.hash_kind try: hasher = getattr(hashlib, hash_kind) except AttributeError: raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) result = hasher(data).digest() result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') return hash_kind, result def write_record(self, records, record_path, base): records = list(records) # make a copy for sorting p = to_posix(os.path.relpath(record_path, base)) records.append((p, '', '')) records.sort() with CSVWriter(record_path) as writer: for row in records: writer.writerow(row) def write_records(self, info, libdir, archive_paths): records = [] distinfo, info_dir = info hasher = getattr(hashlib, self.hash_kind) for ap, p in archive_paths: with open(p, 'rb') as f: data = f.read() digest = '%s=%s' % self.get_hash(data) size = os.path.getsize(p) records.append((ap, digest, size)) p = os.path.join(distinfo, 'RECORD') self.write_record(records, p, libdir) ap = to_posix(os.path.join(info_dir, 'RECORD')) archive_paths.append((ap, p)) def build_zip(self, pathname, archive_paths): with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: for ap, p in archive_paths: logger.debug('Wrote %s to %s in wheel', p, ap) zf.write(p, ap) def build(self, paths, tags=None, wheel_version=None): """ Build a wheel from files in specified paths, and use any specified tags when determining the name of the wheel. 
""" if tags is None: tags = {} libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] if libkey == 'platlib': is_pure = 'false' default_pyver = [IMPVER] default_abi = [ABI] default_arch = [ARCH] else: is_pure = 'true' default_pyver = [PYVER] default_abi = ['none'] default_arch = ['any'] self.pyver = tags.get('pyver', default_pyver) self.abi = tags.get('abi', default_abi) self.arch = tags.get('arch', default_arch) libdir = paths[libkey] name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver archive_paths = [] # First, stuff which is not in site-packages for key in ('data', 'headers', 'scripts'): if key not in paths: continue path = paths[key] if os.path.isdir(path): for root, dirs, files in os.walk(path): for fn in files: p = fsdecode(os.path.join(root, fn)) rp = os.path.relpath(p, path) ap = to_posix(os.path.join(data_dir, key, rp)) archive_paths.append((ap, p)) if key == 'scripts' and not p.endswith('.exe'): with open(p, 'rb') as f: data = f.read() data = self.process_shebang(data) with open(p, 'wb') as f: f.write(data) # Now, stuff which is in site-packages, other than the # distinfo stuff. path = libdir distinfo = None for root, dirs, files in os.walk(path): if root == path: # At the top level only, save distinfo for later # and skip it for now for i, dn in enumerate(dirs): dn = fsdecode(dn) if dn.endswith('.dist-info'): distinfo = os.path.join(root, dn) del dirs[i] break assert distinfo, '.dist-info directory expected, not found' for fn in files: # comment out next suite to leave .pyc files in if fsdecode(fn).endswith(('.pyc', '.pyo')): continue p = os.path.join(root, fn) rp = to_posix(os.path.relpath(p, path)) archive_paths.append((rp, p)) # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. 
files = os.listdir(distinfo) for fn in files: if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'): p = fsdecode(os.path.join(distinfo, fn)) ap = to_posix(os.path.join(info_dir, fn)) archive_paths.append((ap, p)) wheel_metadata = [ 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version), 'Generator: distlib %s' % __version__, 'Root-Is-Purelib: %s' % is_pure, ] for pyver, abi, arch in self.tags: wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) p = os.path.join(distinfo, 'WHEEL') with open(p, 'w') as f: f.write('\n'.join(wheel_metadata)) ap = to_posix(os.path.join(info_dir, 'WHEEL')) archive_paths.append((ap, p)) # Now, at last, RECORD. # Paths in here are archive paths - nothing else makes sense. self.write_records((distinfo, info_dir), libdir, archive_paths) # Now, ready to build the zip file pathname = os.path.join(self.dirname, self.filename) self.build_zip(pathname, archive_paths) return pathname def install(self, paths, maker, **kwargs): """ Install a wheel to the specified paths. If kwarg ``warner`` is specified, it should be a callable, which will be called with two tuples indicating the wheel version of this software and the wheel version in the file, if there is a discrepancy in the versions. This can be used to issue any warnings to raise any exceptions. If kwarg ``lib_only`` is True, only the purelib/platlib files are installed, and the headers, scripts, data and dist-info metadata are not written. The return value is a :class:`InstalledDistribution` instance unless ``options.lib_only`` is True, in which case the return value is ``None``. 
""" dry_run = maker.dry_run warner = kwargs.get('warner') lib_only = kwargs.get('lib_only', False) pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver metadata_name = posixpath.join(info_dir, METADATA_FILENAME) wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') record_name = posixpath.join(info_dir, 'RECORD') wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: with zf.open(wheel_metadata_name) as bwf: wf = wrapper(bwf) message = message_from_file(wf) wv = message['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) if (file_version != self.wheel_version) and warner: warner(self.wheel_version, file_version) if message['Root-Is-Purelib'] == 'true': libdir = paths['purelib'] else: libdir = paths['platlib'] records = {} with zf.open(record_name) as bf: with CSVReader(stream=bf) as reader: for row in reader: p = row[0] records[p] = row data_pfx = posixpath.join(data_dir, '') info_pfx = posixpath.join(info_dir, '') script_pfx = posixpath.join(data_dir, 'scripts', '') # make a new instance rather than a copy of maker's, # as we mutate it fileop = FileOperator(dry_run=dry_run) fileop.record = True # so we can rollback if needed bc = not sys.dont_write_bytecode # Double negatives. Lovely! 
outfiles = [] # for RECORD writing # for script copying/shebang processing workdir = tempfile.mkdtemp() # set target dir later # we default add_launchers to False, as the # Python Launcher should be used instead maker.source_dir = workdir maker.target_dir = None try: for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') # The signature file won't be in RECORD, # and we don't currently don't do anything with it if u_arcname.endswith('/RECORD.jws'): continue row = records[u_arcname] if row[2] and str(zinfo.file_size) != row[2]: raise DistlibException('size mismatch for ' '%s' % u_arcname) if row[1]: kind, value = row[1].split('=', 1) with zf.open(arcname) as bf: data = bf.read() _, digest = self.get_hash(data, kind) if digest != value: raise DistlibException('digest mismatch for ' '%s' % arcname) if lib_only and u_arcname.startswith((info_pfx, data_pfx)): logger.debug('lib_only: skipping %s', u_arcname) continue is_script = (u_arcname.startswith(script_pfx) and not u_arcname.endswith('.exe')) if u_arcname.startswith(data_pfx): _, where, rp = u_arcname.split('/', 2) outfile = os.path.join(paths[where], convert_path(rp)) else: # meant for site-packages. 
if u_arcname in (wheel_metadata_name, record_name): continue outfile = os.path.join(libdir, convert_path(u_arcname)) if not is_script: with zf.open(arcname) as bf: fileop.copy_stream(bf, outfile) outfiles.append(outfile) # Double check the digest of the written file if not dry_run and row[1]: with open(outfile, 'rb') as bf: data = bf.read() _, newdigest = self.get_hash(data, kind) if newdigest != digest: raise DistlibException('digest mismatch ' 'on write for ' '%s' % outfile) if bc and outfile.endswith('.py'): try: pyc = fileop.byte_compile(outfile) outfiles.append(pyc) except Exception: # Don't give up if byte-compilation fails, # but log it and perhaps warn the user logger.warning('Byte-compilation failed', exc_info=True) else: fn = os.path.basename(convert_path(arcname)) workname = os.path.join(workdir, fn) with zf.open(arcname) as bf: fileop.copy_stream(bf, workname) dn, fn = os.path.split(outfile) maker.target_dir = dn filenames = maker.make(fn) fileop.set_executable_mode(filenames) outfiles.extend(filenames) if lib_only: logger.debug('lib_only: returning None') dist = None else: # Generate scripts # Try to get pydist.json so we can see if there are # any commands to generate. If this fails (e.g. because # of a legacy wheel), log a warning but don't give up. 
commands = None file_version = self.info['Wheel-Version'] if file_version == '1.0': # Use legacy info ep = posixpath.join(info_dir, 'entry_points.txt') try: with zf.open(ep) as bwf: epdata = read_exports(bwf) commands = {} for key in ('console', 'gui'): k = '%s_scripts' % key if k in epdata: commands['wrap_%s' % key] = d = {} for v in epdata[k].values(): s = '%s:%s' % (v.prefix, v.suffix) if v.flags: s += ' %s' % v.flags d[v.name] = s except Exception: logger.warning('Unable to read legacy script ' 'metadata, so cannot generate ' 'scripts') else: try: with zf.open(metadata_name) as bwf: wf = wrapper(bwf) commands = json.load(wf).get('extensions') if commands: commands = commands.get('python.commands') except Exception: logger.warning('Unable to read JSON metadata, so ' 'cannot generate scripts') if commands: console_scripts = commands.get('wrap_console', {}) gui_scripts = commands.get('wrap_gui', {}) if console_scripts or gui_scripts: script_dir = paths.get('scripts', '') if not os.path.isdir(script_dir): raise ValueError('Valid script path not ' 'specified') maker.target_dir = script_dir for k, v in console_scripts.items(): script = '%s = %s' % (k, v) filenames = maker.make(script) fileop.set_executable_mode(filenames) if gui_scripts: options = {'gui': True } for k, v in gui_scripts.items(): script = '%s = %s' % (k, v) filenames = maker.make(script, options) fileop.set_executable_mode(filenames) p = os.path.join(libdir, info_dir) dist = InstalledDistribution(p) # Write SHARED paths = dict(paths) # don't change passed in dict del paths['purelib'] del paths['platlib'] paths['lib'] = libdir p = dist.write_shared_locations(paths, dry_run) if p: outfiles.append(p) # Write RECORD dist.write_installed_files(outfiles, paths['prefix'], dry_run) return dist except Exception: # pragma: no cover logger.exception('installation failed.') fileop.rollback() raise finally: shutil.rmtree(workdir) def _get_dylib_cache(self): global cache if cache is None: # Use native string to 
avoid issues on 2.x: see Python #20140. base = os.path.join(get_cache_base(), str('dylib-cache'), sys.version[:3]) cache = Cache(base) return cache def _get_extensions(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver arcname = posixpath.join(info_dir, 'EXTENSIONS') wrapper = codecs.getreader('utf-8') result = [] with ZipFile(pathname, 'r') as zf: try: with zf.open(arcname) as bf: wf = wrapper(bf) extensions = json.load(wf) cache = self._get_dylib_cache() prefix = cache.prefix_to_dir(pathname) cache_base = os.path.join(cache.base, prefix) if not os.path.isdir(cache_base): os.makedirs(cache_base) for name, relpath in extensions.items(): dest = os.path.join(cache_base, convert_path(relpath)) if not os.path.exists(dest): extract = True else: file_time = os.stat(dest).st_mtime file_time = datetime.datetime.fromtimestamp(file_time) info = zf.getinfo(relpath) wheel_time = datetime.datetime(*info.date_time) extract = wheel_time > file_time if extract: zf.extract(relpath, cache_base) result.append((name, dest)) except KeyError: pass return result def is_compatible(self): """ Determine if a wheel is compatible with the running system. """ return is_compatible(self) def is_mountable(self): """ Determine if a wheel is asserted as mountable by its metadata. """ return True # for now - metadata details TBD def mount(self, append=False): pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) if not self.is_compatible(): msg = 'Wheel %s not compatible with this Python.' % pathname raise DistlibException(msg) if not self.is_mountable(): msg = 'Wheel %s is marked as not mountable.' 
% pathname raise DistlibException(msg) if pathname in sys.path: logger.debug('%s already in path', pathname) else: if append: sys.path.append(pathname) else: sys.path.insert(0, pathname) extensions = self._get_extensions() if extensions: if _hook not in sys.meta_path: sys.meta_path.append(_hook) _hook.add(pathname, extensions) def unmount(self): pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) if pathname not in sys.path: logger.debug('%s not in path', pathname) else: sys.path.remove(pathname) if pathname in _hook.impure_wheels: _hook.remove(pathname) if not _hook.impure_wheels: if _hook in sys.meta_path: sys.meta_path.remove(_hook) def verify(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver metadata_name = posixpath.join(info_dir, METADATA_FILENAME) wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') record_name = posixpath.join(info_dir, 'RECORD') wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: with zf.open(wheel_metadata_name) as bwf: wf = wrapper(bwf) message = message_from_file(wf) wv = message['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) # TODO version verification records = {} with zf.open(record_name) as bf: with CSVReader(stream=bf) as reader: for row in reader: p = row[0] records[p] = row for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') if '..' 
in u_arcname: raise DistlibException('invalid entry in ' 'wheel: %r' % u_arcname) # The signature file won't be in RECORD, # and we don't currently don't do anything with it if u_arcname.endswith('/RECORD.jws'): continue row = records[u_arcname] if row[2] and str(zinfo.file_size) != row[2]: raise DistlibException('size mismatch for ' '%s' % u_arcname) if row[1]: kind, value = row[1].split('=', 1) with zf.open(arcname) as bf: data = bf.read() _, digest = self.get_hash(data, kind) if digest != value: raise DistlibException('digest mismatch for ' '%s' % arcname) def update(self, modifier, dest_dir=None, **kwargs): """ Update the contents of a wheel in a generic way. The modifier should be a callable which expects a dictionary argument: its keys are archive-entry paths, and its values are absolute filesystem paths where the contents the corresponding archive entries can be found. The modifier is free to change the contents of the files pointed to, add new entries and remove entries, before returning. This method will extract the entire contents of the wheel to a temporary location, call the modifier, and then use the passed (and possibly updated) dictionary to write a new wheel. If ``dest_dir`` is specified, the new wheel is written there -- otherwise, the original wheel is overwritten. The modifier should return True if it updated the wheel, else False. This method returns the same value the modifier returns. 
""" def get_version(path_map, info_dir): version = path = None key = '%s/%s' % (info_dir, METADATA_FILENAME) if key not in path_map: key = '%s/PKG-INFO' % info_dir if key in path_map: path = path_map[key] version = Metadata(path=path).version return version, path def update_version(version, path): updated = None try: v = NormalizedVersion(version) i = version.find('-') if i < 0: updated = '%s+1' % version else: parts = [int(s) for s in version[i + 1:].split('.')] parts[-1] += 1 updated = '%s+%s' % (version[:i], '.'.join(str(i) for i in parts)) except UnsupportedVersionError: logger.debug('Cannot update non-compliant (PEP-440) ' 'version %r', version) if updated: md = Metadata(path=path) md.version = updated legacy = not path.endswith(METADATA_FILENAME) md.write(path=path, legacy=legacy) logger.debug('Version updated from %r to %r', version, updated) pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver record_name = posixpath.join(info_dir, 'RECORD') with tempdir() as workdir: with ZipFile(pathname, 'r') as zf: path_map = {} for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') if u_arcname == record_name: continue if '..' in u_arcname: raise DistlibException('invalid entry in ' 'wheel: %r' % u_arcname) zf.extract(zinfo, workdir) path = os.path.join(workdir, convert_path(u_arcname)) path_map[u_arcname] = path # Remember the version. original_version, _ = get_version(path_map, info_dir) # Files extracted. Call the modifier. modified = modifier(path_map, **kwargs) if modified: # Something changed - need to build a new wheel. current_version, path = get_version(path_map, info_dir) if current_version and (current_version == original_version): # Add or update local version to signify changes. update_version(current_version, path) # Decide where the new wheel goes. 
if dest_dir is None: fd, newpath = tempfile.mkstemp(suffix='.whl', prefix='wheel-update-', dir=workdir) os.close(fd) else: if not os.path.isdir(dest_dir): raise DistlibException('Not a directory: %r' % dest_dir) newpath = os.path.join(dest_dir, self.filename) archive_paths = list(path_map.items()) distinfo = os.path.join(workdir, info_dir) info = distinfo, info_dir self.write_records(info, workdir, archive_paths) self.build_zip(newpath, archive_paths) if dest_dir is None: shutil.copyfile(newpath, pathname) return modified def compatible_tags(): """ Return (pyver, abi, arch) tuples compatible with this Python. """ versions = [VER_SUFFIX] major = VER_SUFFIX[0] for minor in range(sys.version_info[1] - 1, - 1, -1): versions.append(''.join([major, str(minor)])) abis = [] for suffix, _, _ in imp.get_suffixes(): if suffix.startswith('.abi'): abis.append(suffix.split('.', 2)[1]) abis.sort() if ABI != 'none': abis.insert(0, ABI) abis.append('none') result = [] arches = [ARCH] if sys.platform == 'darwin': m = re.match('(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) if m: name, major, minor, arch = m.groups() minor = int(minor) matches = [arch] if arch in ('i386', 'ppc'): matches.append('fat') if arch in ('i386', 'ppc', 'x86_64'): matches.append('fat3') if arch in ('ppc64', 'x86_64'): matches.append('fat64') if arch in ('i386', 'x86_64'): matches.append('intel') if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): matches.append('universal') while minor >= 0: for match in matches: s = '%s_%s_%s_%s' % (name, major, minor, match) if s != ARCH: # already there arches.append(s) minor -= 1 # Most specific - our Python version, ABI and arch for abi in abis: for arch in arches: result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) # where no ABI / arch dependency, but IMP_PREFIX dependency for i, version in enumerate(versions): result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) if i == 0: result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) # no IMP_PREFIX, 
ABI or arch dependency for i, version in enumerate(versions): result.append((''.join(('py', version)), 'none', 'any')) if i == 0: result.append((''.join(('py', version[0])), 'none', 'any')) return set(result) COMPATIBLE_TAGS = compatible_tags() del compatible_tags def is_compatible(wheel, tags=None): if not isinstance(wheel, Wheel): wheel = Wheel(wheel) # assume it's a filename result = False if tags is None: tags = COMPATIBLE_TAGS for ver, abi, arch in tags: if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: result = True break return result
nitzmahone/ansible
refs/heads/devel
lib/ansible/modules/network/aci/aci_bd.py
15
#!/usr/bin/python # -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: aci_bd short_description: Manage Bridge Domains (BD) objects (fv:BD) description: - Manages Bridge Domains (BD) on Cisco ACI fabrics. notes: - The C(tenant) used must exist before using this module in your playbook. The M(aci_tenant) module can be used for this. - More information about the internal APIC class B(fv:BD) from L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/). author: - Jacob McGill (@jmcgill298) version_added: '2.4' options: arp_flooding: description: - Determines if the Bridge Domain should flood ARP traffic. - The APIC defaults to C(no) when unset during creation. type: bool bd: description: - The name of the Bridge Domain. aliases: [ bd_name, name ] bd_type: description: - The type of traffic on the Bridge Domain. - The APIC defaults to C(ethernet) when unset during creation. choices: [ ethernet, fc ] description: description: - Description for the Bridge Domain. enable_multicast: description: - Determines if PIM is enabled. - The APIC defaults to C(no) when unset during creation. type: bool enable_routing: description: - Determines if IP forwarding should be allowed. - The APIC defaults to C(yes) when unset during creation. type: bool endpoint_clear: description: - Clears all End Points in all Leaves when C(yes). - The value is not reset to disabled once End Points have been cleared; that requires a second task. - The APIC defaults to C(no) when unset during creation. type: bool endpoint_move_detect: description: - Determines if GARP should be enabled to detect when End Points move. - The APIC defaults to C(garp) when unset during creation. 
choices: [ default, garp ] endpoint_retention_action: description: - Determines if the Bridge Domain should inherit or resolve the End Point Retention Policy. - The APIC defaults to C(resolve) when unset during creation. choices: [ inherit, resolve ] endpoint_retention_policy: description: - The name of the End Point Retention Policy the Bridge Domain should use when overriding the default End Point Retention Policy. igmp_snoop_policy: description: - The name of the IGMP Snooping Policy the Bridge Domain should use when overriding the default IGMP Snooping Policy. ip_learning: description: - Determines if the Bridge Domain should learn End Point IPs. - The APIC defaults to C(yes) when unset during creation. type: bool ipv6_nd_policy: description: - The name of the IPv6 Neighbor Discovery Policy the Bridge Domain should use when overridding the default IPV6 ND Policy. l2_unknown_unicast: description: - Determines what forwarding method to use for unknown l2 destinations. - The APIC defaults to C(proxy) when unset during creation. choices: [ proxy, flood ] l3_unknown_multicast: description: - Determines the forwarding method to use for unknown multicast destinations. - The APIC defaults to C(flood) when unset during creation. choices: [ flood, opt-flood ] limit_ip_learn: description: - Determines if the BD should limit IP learning to only subnets owned by the Bridge Domain. - The APIC defaults to C(yes) when unset during creation. type: bool mac_address: description: - The MAC Address to assign to the C(bd) instead of using the default. - The APIC defaults to C(00:22:BD:F8:19:FF) when unset during creation. aliases: [ mac ] version_added: '2.5' multi_dest: description: - Determines the forwarding method for L2 multicast, broadcast, and link layer traffic. - The APIC defaults to C(bd-flood) when unset during creation. choices: [ bd-flood, drop, encap-flood ] state: description: - Use C(present) or C(absent) for adding or removing. 
- Use C(query) for listing an object or multiple objects. choices: [ absent, present, query ] default: present tenant: description: - The name of the Tenant. aliases: [ tenant_name ] vrf: description: - The name of the VRF. aliases: [ vrf_name ] extends_documentation_fragment: aci ''' EXAMPLES = r''' - name: Add Bridge Domain aci_bd: host: "{{ inventory_hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: no tenant: prod bd: web_servers mac_address: 00:22:BD:F8:19:FE vrf: prod_vrf state: present delegate_to: localhost - name: Add an FC Bridge Domain aci_bd: host: "{{ inventory_hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: no tenant: prod bd: storage bd_type: fc vrf: fc_vrf enable_routing: no state: present delegate_to: localhost - name: Modify a Bridge Domain aci_bd: host: "{{ inventory_hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: yes tenant: prod bd: web_servers arp_flooding: yes l2_unknown_unicast: flood state: present delegate_to: localhost - name: Query All Bridge Domains aci_bd: host: "{{ inventory_hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: yes state: query delegate_to: localhost register: query_result - name: Query a Bridge Domain aci_bd: host: "{{ inventory_hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: yes tenant: prod bd: web_servers state: query delegate_to: localhost register: query_result - name: Delete a Bridge Domain aci_bd: host: "{{ inventory_hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: yes tenant: prod bd: web_servers state: absent delegate_to: localhost ''' RETURN = r''' current: description: The existing configuration from the APIC after the module has finished returned: success type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production environment", "dn": "uni/tn-production", "name": "production", "nameAlias": "", 
"ownerKey": "", "ownerTag": "" } } } ] error: description: The error information as returned from the APIC returned: failure type: dict sample: { "code": "122", "text": "unknown managed object class foo" } raw: description: The raw output returned by the APIC REST API (xml or json) returned: parse error type: string sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>' sent: description: The actual/minimal configuration pushed to the APIC returned: info type: list sample: { "fvTenant": { "attributes": { "descr": "Production environment" } } } previous: description: The original configuration from the APIC before the module has started returned: info type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] proposed: description: The assembled configuration from the user-provided parameters returned: info type: dict sample: { "fvTenant": { "attributes": { "descr": "Production environment", "name": "production" } } } filter_string: description: The filter string used for the request returned: failure or debug type: string sample: ?rsp-prop-include=config-only method: description: The HTTP method used for the request to the APIC returned: failure or debug type: string sample: POST response: description: The HTTP response from the APIC returned: failure or debug type: string sample: OK (30 bytes) status: description: The HTTP status from the APIC returned: failure or debug type: int sample: 200 url: description: The HTTP url used for the request to the APIC returned: failure or debug type: string sample: https://10.11.12.13/api/mo/uni/tn-production.json ''' from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec from ansible.module_utils.basic import AnsibleModule def main(): argument_spec = aci_argument_spec() argument_spec.update( 
arp_flooding=dict(type='bool'), bd=dict(type='str', aliases=['bd_name', 'name']), # Not required for querying all objects bd_type=dict(type='str', choices=['ethernet', 'fc']), description=dict(type='str'), enable_multicast=dict(type='bool'), enable_routing=dict(type='bool'), endpoint_clear=dict(type='bool'), endpoint_move_detect=dict(type='str', choices=['default', 'garp']), endpoint_retention_action=dict(type='str', choices=['inherit', 'resolve']), endpoint_retention_policy=dict(type='str'), igmp_snoop_policy=dict(type='str'), ip_learning=dict(type='bool'), ipv6_nd_policy=dict(type='str'), l2_unknown_unicast=dict(type='str', choices=['proxy', 'flood']), l3_unknown_multicast=dict(type='str', choices=['flood', 'opt-flood']), limit_ip_learn=dict(type='bool'), mac_address=dict(type='str', aliases=['mac']), multi_dest=dict(type='str', choices=['bd-flood', 'drop', 'encap-flood']), state=dict(type='str', default='present', choices=['absent', 'present', 'query']), tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects vrf=dict(type='str', aliases=['vrf_name']), gateway_ip=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4 scope=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4 subnet_mask=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4 ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'absent', ['bd', 'tenant']], ['state', 'present', ['bd', 'tenant']], ], ) aci = ACIModule(module) arp_flooding = aci.boolean(module.params['arp_flooding']) bd = module.params['bd'] bd_type = module.params['bd_type'] if bd_type == 'ethernet': # ethernet type is represented as regular, but that is not clear to the users bd_type = 'regular' description = module.params['description'] enable_multicast = aci.boolean(module.params['enable_multicast']) enable_routing = aci.boolean(module.params['enable_routing']) endpoint_clear = 
aci.boolean(module.params['endpoint_clear']) endpoint_move_detect = module.params['endpoint_move_detect'] if endpoint_move_detect == 'default': # the ACI default setting is an empty string, but that is not a good input value endpoint_move_detect = '' endpoint_retention_action = module.params['endpoint_retention_action'] endpoint_retention_policy = module.params['endpoint_retention_policy'] igmp_snoop_policy = module.params['igmp_snoop_policy'] ip_learning = aci.boolean(module.params['ip_learning']) ipv6_nd_policy = module.params['ipv6_nd_policy'] l2_unknown_unicast = module.params['l2_unknown_unicast'] l3_unknown_multicast = module.params['l3_unknown_multicast'] limit_ip_learn = aci.boolean(module.params['limit_ip_learn']) mac_address = module.params['mac_address'] multi_dest = module.params['multi_dest'] state = module.params['state'] tenant = module.params['tenant'] vrf = module.params['vrf'] # Give warning when fvSubnet parameters are passed as those have been moved to the aci_subnet module if module.params['gateway_ip'] or module.params['subnet_mask'] or module.params['scope']: module._warnings = ["The support for managing Subnets has been moved to its own module, aci_subnet. 
\ The new modules still supports 'gateway_ip' and 'subnet_mask' along with more features"] aci.construct_url( root_class=dict( aci_class='fvTenant', aci_rn='tn-{0}'.format(tenant), module_object=tenant, target_filter={'name': tenant}, ), subclass_1=dict( aci_class='fvBD', aci_rn='BD-{0}'.format(bd), module_object=bd, target_filter={'name': bd}, ), child_classes=['fvRsCtx', 'fvRsIgmpsn', 'fvRsBDToNdP', 'fvRsBdToEpRet'], ) aci.get_existing() if state == 'present': aci.payload( aci_class='fvBD', class_config=dict( arpFlood=arp_flooding, descr=description, epClear=endpoint_clear, epMoveDetectMode=endpoint_move_detect, ipLearning=ip_learning, limitIpLearnToSubnets=limit_ip_learn, mac=mac_address, mcastAllow=enable_multicast, multiDstPktAct=multi_dest, name=bd, type=bd_type, unicastRoute=enable_routing, unkMacUcastAct=l2_unknown_unicast, unkMcastAct=l3_unknown_multicast, ), child_configs=[ {'fvRsCtx': {'attributes': {'tnFvCtxName': vrf}}}, {'fvRsIgmpsn': {'attributes': {'tnIgmpSnoopPolName': igmp_snoop_policy}}}, {'fvRsBDToNdP': {'attributes': {'tnNdIfPolName': ipv6_nd_policy}}}, {'fvRsBdToEpRet': {'attributes': {'resolveAct': endpoint_retention_action, 'tnFvEpRetPolName': endpoint_retention_policy}}}, ], ) aci.get_diff(aci_class='fvBD') aci.post_config() elif state == 'absent': aci.delete_config() aci.exit_json() if __name__ == "__main__": main()
frouty/odoo_oph
refs/heads/dev_70
addons/stock/report/__init__.py
73
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import product_stock import picking import lot_overview_all import report_stock import report_stock_move import stock_inventory_move_report import lot_overview # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
indictranstech/fbd_frappe
refs/heads/develop
frappe/email/doctype/email_alert/test_email_alert.py
37
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # See license.txt from __future__ import unicode_literals import frappe, frappe.utils, frappe.utils.scheduler import unittest test_records = frappe.get_test_records('Email Alert') class TestEmailAlert(unittest.TestCase): def setUp(self): frappe.db.sql("""delete from `tabBulk Email`""") frappe.set_user("test1@example.com") def tearDown(self): frappe.set_user("Administrator") def test_new_and_save(self): comment = frappe.new_doc("Comment") comment.comment = "test" comment.insert(ignore_permissions=True) self.assertTrue(frappe.db.get_value("Bulk Email", {"reference_doctype": "Comment", "reference_name": comment.name, "status":"Not Sent"})) frappe.db.sql("""delete from `tabBulk Email`""") comment.description = "test" comment.save() self.assertTrue(frappe.db.get_value("Bulk Email", {"reference_doctype": "Comment", "reference_name": comment.name, "status":"Not Sent"})) def test_condition(self): event = frappe.new_doc("Event") event.subject = "test", event.event_type = "Private" event.starts_on = "2014-06-06 12:00:00" event.insert() self.assertFalse(frappe.db.get_value("Bulk Email", {"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"})) event.event_type = "Public" event.save() self.assertTrue(frappe.db.get_value("Bulk Email", {"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"})) def test_value_changed(self): event = frappe.new_doc("Event") event.subject = "test", event.event_type = "Private" event.starts_on = "2014-06-06 12:00:00" event.insert() self.assertFalse(frappe.db.get_value("Bulk Email", {"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"})) event.subject = "test 1" event.save() self.assertFalse(frappe.db.get_value("Bulk Email", {"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"})) event.description = "test" event.save() self.assertTrue(frappe.db.get_value("Bulk Email", 
{"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"})) def test_date_changed(self): event = frappe.new_doc("Event") event.subject = "test", event.event_type = "Private" event.starts_on = "2014-01-01 12:00:00" event.insert() self.assertFalse(frappe.db.get_value("Bulk Email", {"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"})) frappe.utils.scheduler.trigger(frappe.local.site, "daily", now=True) # not today, so no alert self.assertFalse(frappe.db.get_value("Bulk Email", {"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"})) event.starts_on = frappe.utils.add_days(frappe.utils.nowdate(), 2) + " 12:00:00" event.save() self.assertFalse(frappe.db.get_value("Bulk Email", {"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"})) frappe.utils.scheduler.trigger(frappe.local.site, "daily", now=True) # today so show alert self.assertTrue(frappe.db.get_value("Bulk Email", {"reference_doctype": "Event", "reference_name": event.name, "status":"Not Sent"}))
ContinuumIO/chaco
refs/heads/master
examples/demo/basic/cmap_image_select.py
3
#!/usr/bin/env python
"""
Draws a colormapped image plot
 - Left-drag pans the plot.
 - Mousewheel up and down zooms the plot in and out.
 - Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
   region to zoom.  If you use a sequence of zoom boxes, pressing alt-left-arrow
   and alt-right-arrow moves you forwards and backwards through the "zoom
   history".
"""

# Major library imports
from numpy import linspace, meshgrid, pi
from scipy.special import jn

# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View

# Chaco imports
from chaco.api import ArrayPlotData, ColorBar, HPlotContainer, jet, \
    LinearMapper, Plot
from chaco.tools.api import PanTool, RangeSelection, \
    RangeSelectionOverlay, ZoomTool

#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
    """Build the demo's component: a colormapped image plot of a Bessel-based
    scalar field, side by side with a colorbar whose range selection feeds
    back into the image plot."""
    # Create a scalar field to colormap
    xbounds = (-2*pi, 2*pi, 600)
    ybounds = (-1.5*pi, 1.5*pi, 300)
    xs = linspace(*xbounds)
    ys = linspace(*ybounds)
    x, y = meshgrid(xs,ys)
    z = jn(2, x)*y*x

    # Create a plot data object and give it this data
    pd = ArrayPlotData()
    pd.set_data("imagedata", z)

    # Create the plot
    plot = Plot(pd)
    plot.img_plot("imagedata",
                  name="my_plot",
                  xbounds=xbounds[:2],
                  ybounds=ybounds[:2],
                  colormap=jet)

    # Tweak some of the plot properties
    plot.title = "Selectable Image Plot"
    plot.padding = 50

    # Right now, some of the tools are a little invasive, and we need the
    # actual CMapImage object to give to them
    my_plot = plot.plots["my_plot"][0]

    # Attach some tools to the plot
    plot.tools.append(PanTool(plot))
    zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
    plot.overlays.append(zoom)

    # Create the colorbar, handing in the appropriate range and colormap
    colormap = my_plot.color_mapper
    colorbar = ColorBar(index_mapper=LinearMapper(range=colormap.range),
                        color_mapper=colormap,
                        plot=my_plot,
                        orientation='v',
                        resizable='v',
                        width=30,
                        padding=20)
    # Align the colorbar vertically with the plot area.
    colorbar.padding_top = plot.padding_top
    colorbar.padding_bottom = plot.padding_bottom

    # create a range selection for the colorbar
    range_selection = RangeSelection(component=colorbar)
    colorbar.tools.append(range_selection)
    colorbar.overlays.append(RangeSelectionOverlay(component=colorbar,
                                                   border_color="white",
                                                   alpha=0.8,
                                                   fill_color="lightgray"))

    # we also want the range selection to inform the cmap plot of
    # the selection, so set that up as well
    range_selection.listeners.append(my_plot)

    # Create a container to position the plot and the colorbar side-by-side
    container = HPlotContainer(use_backbuffer = True)
    container.add(plot)
    container.add(colorbar)
    container.bgcolor = "lightgray"

    #my_plot.set_value_selection((-1.3, 6.9))
    return container

#===============================================================================
# Attributes to use for the plot view.
size=(800,600)
title="Colormapped Image Plot"

#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
    # The top-level Enable component shown by the view.
    plot = Instance(Component)

    traits_view = View(
                    Group(
                        Item('plot', editor=ComponentEditor(size=size),
                             show_label=False),
                        orientation = "vertical"),
                    resizable=True, title=title
                    )

    def _plot_default(self):
        """Trait default initializer: build the plot lazily on first access."""
        return _create_plot_component()

demo = Demo()

if __name__ == "__main__":
    demo.configure_traits()
xcyan/models
refs/heads/master
cognitive_mapping_and_planning/datasets/nav_env.py
14
# Copyright 2016 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Navidation Environment. Includes the following classes along with some helper functions. Building: Loads buildings, computes traversibility, exposes functionality for rendering images. GridWorld: Base class which implements functionality for moving an agent on a grid world. NavigationEnv: Base class which generates navigation problems on a grid world. VisualNavigationEnv: Builds upon NavigationEnv and Building to provide interface that is used externally to train the agent. MeshMapper: Class used for distilling the model, testing the mapper. BuildingMultiplexer: Wrapper class that instantiates a VisualNavigationEnv for each building and multiplexes between them as needed. 
""" import numpy as np import os import re import matplotlib.pyplot as plt import graph_tool as gt import graph_tool.topology from tensorflow.python.platform import gfile import logging import src.file_utils as fu import src.utils as utils import src.graph_utils as gu import src.map_utils as mu import src.depth_utils as du import render.swiftshader_renderer as sru from render.swiftshader_renderer import SwiftshaderRenderer import cv2 label_nodes_with_class = gu.label_nodes_with_class label_nodes_with_class_geodesic = gu.label_nodes_with_class_geodesic get_distance_node_list = gu.get_distance_node_list convert_to_graph_tool = gu.convert_to_graph_tool generate_graph = gu.generate_graph get_hardness_distribution = gu.get_hardness_distribution rng_next_goal_rejection_sampling = gu.rng_next_goal_rejection_sampling rng_next_goal = gu.rng_next_goal rng_room_to_room = gu.rng_room_to_room rng_target_dist_field = gu.rng_target_dist_field compute_traversibility = mu.compute_traversibility make_map = mu.make_map resize_maps = mu.resize_maps pick_largest_cc = mu.pick_largest_cc get_graph_origin_loc = mu.get_graph_origin_loc generate_egocentric_maps = mu.generate_egocentric_maps generate_goal_images = mu.generate_goal_images get_map_to_predict = mu.get_map_to_predict bin_points = du.bin_points make_geocentric = du.make_geocentric get_point_cloud_from_z = du.get_point_cloud_from_z get_camera_matrix = du.get_camera_matrix def _get_semantic_maps(folder_name, building_name, map, flip): # Load file from the cache. 
file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl' file_name = file_name.format(building_name, map.size[0], map.size[1], map.origin[0], map.origin[1], map.resolution, flip) file_name = os.path.join(folder_name, file_name) logging.info('Loading semantic maps from %s.', file_name) if fu.exists(file_name): a = utils.load_variables(file_name) maps = a['maps'] #HxWx#C cats = a['cats'] else: logging.error('file_name: %s not found.', file_name) maps = None cats = None return maps, cats def _select_classes(all_maps, all_cats, cats_to_use): inds = [] for c in cats_to_use: ind = all_cats.index(c) inds.append(ind) out_maps = all_maps[:,:,inds] return out_maps def _get_room_dimensions(file_name, resolution, origin, flip=False): if fu.exists(file_name): a = utils.load_variables(file_name)['room_dimension'] names = a.keys() dims = np.concatenate(a.values(), axis=0).reshape((-1,6)) ind = np.argsort(names) dims = dims[ind,:] names = [names[x] for x in ind] if flip: dims_new = dims*1 dims_new[:,1] = -dims[:,4] dims_new[:,4] = -dims[:,1] dims = dims_new*1 dims = dims*100. dims[:,0] = dims[:,0] - origin[0] dims[:,1] = dims[:,1] - origin[1] dims[:,3] = dims[:,3] - origin[0] dims[:,4] = dims[:,4] - origin[1] dims = dims / resolution out = {'names': names, 'dims': dims} else: out = None return out def _filter_rooms(room_dims, room_regex): pattern = re.compile(room_regex) ind = [] for i, name in enumerate(room_dims['names']): if pattern.match(name): ind.append(i) new_room_dims = {} new_room_dims['names'] = [room_dims['names'][i] for i in ind] new_room_dims['dims'] = room_dims['dims'][ind,:]*1 return new_room_dims def _label_nodes_with_room_id(xyt, room_dims): # Label the room with the ID into things. 
node_room_id = -1*np.ones((xyt.shape[0], 1)) dims = room_dims['dims'] for x, name in enumerate(room_dims['names']): all_ = np.concatenate((xyt[:,[0]] >= dims[x,0], xyt[:,[0]] <= dims[x,3], xyt[:,[1]] >= dims[x,1], xyt[:,[1]] <= dims[x,4]), axis=1) node_room_id[np.all(all_, axis=1), 0] = x return node_room_id def get_path_ids(start_node_id, end_node_id, pred_map): id = start_node_id path = [id] while id != end_node_id: id = pred_map[id] path.append(id) return path def image_pre(images, modalities): # Assumes images are ...xHxWxC. # We always assume images are RGB followed by Depth. if 'depth' in modalities: d = images[...,-1][...,np.newaxis]*1. d[d < 0.01] = np.NaN; isnan = np.isnan(d); d = 100./d; d[isnan] = 0.; images = np.concatenate((images[...,:-1], d, isnan), axis=images.ndim-1) if 'rgb' in modalities: images[...,:3] = images[...,:3]*1. - 128 return images def _get_relative_goal_loc(goal_loc, loc, theta): r = np.sqrt(np.sum(np.square(goal_loc - loc), axis=1)) t = np.arctan2(goal_loc[:,1] - loc[:,1], goal_loc[:,0] - loc[:,0]) t = t-theta[:,0] + np.pi/2 return np.expand_dims(r,axis=1), np.expand_dims(t, axis=1) def _gen_perturbs(rng, batch_size, num_steps, lr_flip, delta_angle, delta_xy, structured): perturbs = [] for i in range(batch_size): # Doing things one by one for each episode in this batch. This way this # remains replicatable even when we change the batch size. p = np.zeros((num_steps+1, 4)) if lr_flip: # Flip the whole trajectory. 
p[:,3] = rng.rand(1)-0.5 if delta_angle > 0: if structured: p[:,2] = (rng.rand(1)-0.5)* delta_angle else: p[:,2] = (rng.rand(p.shape[0])-0.5)* delta_angle if delta_xy > 0: if structured: p[:,:2] = (rng.rand(1, 2)-0.5)*delta_xy else: p[:,:2] = (rng.rand(p.shape[0], 2)-0.5)*delta_xy perturbs.append(p) return perturbs def get_multiplexer_class(args, task_number): assert(args.task_params.base_class == 'Building') logging.info('Returning BuildingMultiplexer') R = BuildingMultiplexer(args, task_number) return R class GridWorld(): def __init__(self): """Class members that will be assigned by any class that actually uses this class.""" self.restrict_to_largest_cc = None self.robot = None self.env = None self.category_list = None self.traversible = None def get_loc_axis(self, node, delta_theta, perturb=None): """Based on the node orientation returns X, and Y axis. Used to sample the map in egocentric coordinate frame. """ if type(node) == tuple: node = np.array([node]) if perturb is None: perturb = np.zeros((node.shape[0], 4)) xyt = self.to_actual_xyt_vec(node) x = xyt[:,[0]] + perturb[:,[0]] y = xyt[:,[1]] + perturb[:,[1]] t = xyt[:,[2]] + perturb[:,[2]] theta = t*delta_theta loc = np.concatenate((x,y), axis=1) x_axis = np.concatenate((np.cos(theta), np.sin(theta)), axis=1) y_axis = np.concatenate((np.cos(theta+np.pi/2.), np.sin(theta+np.pi/2.)), axis=1) # Flip the sampled map where need be. y_axis[np.where(perturb[:,3] > 0)[0], :] *= -1. return loc, x_axis, y_axis, theta def to_actual_xyt(self, pqr): """Converts from node to location on the map.""" (p, q, r) = pqr if self.task.n_ori == 6: out = (p - q * 0.5 + self.task.origin_loc[0], q * np.sqrt(3.) / 2. 
+ self.task.origin_loc[1], r) elif self.task.n_ori == 4: out = (p + self.task.origin_loc[0], q + self.task.origin_loc[1], r) return out def to_actual_xyt_vec(self, pqr): """Converts from node array to location array on the map.""" p = pqr[:,0][:, np.newaxis] q = pqr[:,1][:, np.newaxis] r = pqr[:,2][:, np.newaxis] if self.task.n_ori == 6: out = np.concatenate((p - q * 0.5 + self.task.origin_loc[0], q * np.sqrt(3.) / 2. + self.task.origin_loc[1], r), axis=1) elif self.task.n_ori == 4: out = np.concatenate((p + self.task.origin_loc[0], q + self.task.origin_loc[1], r), axis=1) return out def raw_valid_fn_vec(self, xyt): """Returns if the given set of nodes is valid or not.""" height = self.traversible.shape[0] width = self.traversible.shape[1] x = np.round(xyt[:,[0]]).astype(np.int32) y = np.round(xyt[:,[1]]).astype(np.int32) is_inside = np.all(np.concatenate((x >= 0, y >= 0, x < width, y < height), axis=1), axis=1) x = np.minimum(np.maximum(x, 0), width-1) y = np.minimum(np.maximum(y, 0), height-1) ind = np.ravel_multi_index((y,x), self.traversible.shape) is_traversible = self.traversible.ravel()[ind] is_valid = np.all(np.concatenate((is_inside[:,np.newaxis], is_traversible), axis=1), axis=1) return is_valid def valid_fn_vec(self, pqr): """Returns if the given set of nodes is valid or not.""" xyt = self.to_actual_xyt_vec(np.array(pqr)) height = self.traversible.shape[0] width = self.traversible.shape[1] x = np.round(xyt[:,[0]]).astype(np.int32) y = np.round(xyt[:,[1]]).astype(np.int32) is_inside = np.all(np.concatenate((x >= 0, y >= 0, x < width, y < height), axis=1), axis=1) x = np.minimum(np.maximum(x, 0), width-1) y = np.minimum(np.maximum(y, 0), height-1) ind = np.ravel_multi_index((y,x), self.traversible.shape) is_traversible = self.traversible.ravel()[ind] is_valid = np.all(np.concatenate((is_inside[:,np.newaxis], is_traversible), axis=1), axis=1) return is_valid def get_feasible_actions(self, node_ids): """Returns the feasible set of actions from the current 
node.""" a = np.zeros((len(node_ids), self.task_params.num_actions), dtype=np.int32) gtG = self.task.gtG next_node = [] for i, c in enumerate(node_ids): neigh = gtG.vertex(c).out_neighbours() neigh_edge = gtG.vertex(c).out_edges() nn = {} for n, e in zip(neigh, neigh_edge): _ = gtG.ep['action'][e] a[i,_] = 1 nn[_] = int(n) next_node.append(nn) return a, next_node def take_action(self, current_node_ids, action): """Returns the new node after taking the action action. Stays at the current node if the action is invalid.""" actions, next_node_ids = self.get_feasible_actions(current_node_ids) new_node_ids = [] for i, (c,a) in enumerate(zip(current_node_ids, action)): if actions[i,a] == 1: new_node_ids.append(next_node_ids[i][a]) else: new_node_ids.append(c) return new_node_ids def set_r_obj(self, r_obj): """Sets the SwiftshaderRenderer object used for rendering.""" self.r_obj = r_obj class Building(GridWorld): def __init__(self, building_name, robot, env, category_list=None, small=False, flip=False, logdir=None, building_loader=None): self.restrict_to_largest_cc = True self.robot = robot self.env = env self.logdir = logdir # Load the building meta data. building = building_loader.load_building(building_name) if small: building['mesh_names'] = building['mesh_names'][:5] # New code. shapess = building_loader.load_building_meshes(building) if flip: for shapes in shapess: shapes.flip_shape() vs = [] for shapes in shapess: vs.append(shapes.get_vertices()[0]) vs = np.concatenate(vs, axis=0) map = make_map(env.padding, env.resolution, vertex=vs, sc=100.) 
map = compute_traversibility( map, robot.base, robot.height, robot.radius, env.valid_min, env.valid_max, env.num_point_threshold, shapess=shapess, sc=100., n_samples_per_face=env.n_samples_per_face) room_dims = _get_room_dimensions(building['room_dimension_file'], env.resolution, map.origin, flip=flip) class_maps, class_map_names = _get_semantic_maps( building['class_map_folder'], building_name, map, flip) self.class_maps = class_maps self.class_map_names = class_map_names self.building = building self.shapess = shapess self.map = map self.traversible = map.traversible*1 self.building_name = building_name self.room_dims = room_dims self.flipped = flip self.renderer_entitiy_ids = [] if self.restrict_to_largest_cc: self.traversible = pick_largest_cc(self.traversible) def load_building_into_scene(self): # Loads the scene. self.renderer_entitiy_ids += self.r_obj.load_shapes(self.shapess) # Free up memory, we dont need the mesh or the materials anymore. self.shapess = None def add_entity_at_nodes(self, nodes, height, shape): xyt = self.to_actual_xyt_vec(nodes) nxy = xyt[:,:2]*1. nxy = nxy * self.map.resolution nxy = nxy + self.map.origin Ts = np.concatenate((nxy, nxy[:,:1]), axis=1) Ts[:,2] = height; Ts = Ts / 100.; # Merge all the shapes into a single shape and add that shape. 
shape.replicate_shape(Ts) entity_ids = self.r_obj.load_shapes([shape]) self.renderer_entitiy_ids += entity_ids return entity_ids def add_shapes(self, shapes): scene = self.r_obj.viz.scene() for shape in shapes: scene.AddShape(shape) def add_materials(self, materials): scene = self.r_obj.viz.scene() for material in materials: scene.AddOrUpdateMaterial(material) def set_building_visibility(self, visibility): self.r_obj.set_entity_visible(self.renderer_entitiy_ids, visibility) def render_nodes(self, nodes, perturb=None, aux_delta_theta=0.): self.set_building_visibility(True) if perturb is None: perturb = np.zeros((len(nodes), 4)) imgs = [] r = 2 elevation_z = r * np.tan(np.deg2rad(self.robot.camera_elevation_degree)) for i in range(len(nodes)): xyt = self.to_actual_xyt(nodes[i]) lookat_theta = 3.0 * np.pi / 2.0 - (xyt[2]+perturb[i,2]+aux_delta_theta) * (self.task.delta_theta) nxy = np.array([xyt[0]+perturb[i,0], xyt[1]+perturb[i,1]]).reshape(1, -1) nxy = nxy * self.map.resolution nxy = nxy + self.map.origin camera_xyz = np.zeros((1, 3)) camera_xyz[...] = [nxy[0, 0], nxy[0, 1], self.robot.sensor_height] camera_xyz = camera_xyz / 100. 
lookat_xyz = np.array([-r * np.sin(lookat_theta), -r * np.cos(lookat_theta), elevation_z]) lookat_xyz = lookat_xyz + camera_xyz[0, :] self.r_obj.position_camera(camera_xyz[0, :].tolist(), lookat_xyz.tolist(), [0.0, 0.0, 1.0]) img = self.r_obj.render(take_screenshot=True, output_type=0) img = [x for x in img if x is not None] img = np.concatenate(img, axis=2).astype(np.float32) if perturb[i,3]>0: img = img[:,::-1,:] imgs.append(img) self.set_building_visibility(False) return imgs class MeshMapper(Building): def __init__(self, robot, env, task_params, building_name, category_list, flip, logdir=None, building_loader=None): Building.__init__(self, building_name, robot, env, category_list, small=task_params.toy_problem, flip=flip, logdir=logdir, building_loader=building_loader) self.task_params = task_params self.task = None self._preprocess_for_task(self.task_params.building_seed) def _preprocess_for_task(self, seed): if self.task is None or self.task.seed != seed: rng = np.random.RandomState(seed) origin_loc = get_graph_origin_loc(rng, self.traversible) self.task = utils.Foo(seed=seed, origin_loc=origin_loc, n_ori=self.task_params.n_ori) G = generate_graph(self.valid_fn_vec, self.task_params.step_size, self.task.n_ori, (0, 0, 0)) gtG, nodes, nodes_to_id = convert_to_graph_tool(G) self.task.gtG = gtG self.task.nodes = nodes self.task.delta_theta = 2.0*np.pi/(self.task.n_ori*1.) 
self.task.nodes_to_id = nodes_to_id logging.info('Building %s, #V=%d, #E=%d', self.building_name, self.task.nodes.shape[0], self.task.gtG.num_edges()) if self.logdir is not None: write_traversible = cv2.applyColorMap(self.traversible.astype(np.uint8)*255, cv2.COLORMAP_JET) img_path = os.path.join(self.logdir, '{:s}_{:d}_graph.png'.format(self.building_name, seed)) node_xyt = self.to_actual_xyt_vec(self.task.nodes) plt.set_cmap('jet'); fig, ax = utils.subplot(plt, (1,1), (12,12)) ax.plot(node_xyt[:,0], node_xyt[:,1], 'm.') ax.imshow(self.traversible, origin='lower'); ax.set_axis_off(); ax.axis('equal'); ax.set_title('{:s}, {:d}, {:d}'.format(self.building_name, self.task.nodes.shape[0], self.task.gtG.num_edges())) if self.room_dims is not None: for i, r in enumerate(self.room_dims['dims']*1): min_ = r[:3]*1 max_ = r[3:]*1 xmin, ymin, zmin = min_ xmax, ymax, zmax = max_ ax.plot([xmin, xmax, xmax, xmin, xmin], [ymin, ymin, ymax, ymax, ymin], 'g') with fu.fopen(img_path, 'w') as f: fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) plt.close(fig) def _gen_rng(self, rng): # instances is a list of list of node_ids. 
if self.task_params.move_type == 'circle': _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, self.task.gtG, rng, 0, 1, compute_path=True) instances_ = paths instances = [] for instance_ in instances_: instance = instance_ for i in range(self.task_params.num_steps): instance.append(self.take_action([instance[-1]], [1])[0]) instances.append(instance) elif self.task_params.move_type == 'shortest_path': _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, self.task.gtG, rng, self.task_params.num_steps, self.task_params.num_steps+1, compute_path=True) instances = paths elif self.task_params.move_type == 'circle+forward': _, _, _, _, paths = rng_target_dist_field(self.task_params.batch_size, self.task.gtG, rng, 0, 1, compute_path=True) instances_ = paths instances = [] for instance_ in instances_: instance = instance_ for i in range(self.task_params.n_ori-1): instance.append(self.take_action([instance[-1]], [1])[0]) while len(instance) <= self.task_params.num_steps: while self.take_action([instance[-1]], [3])[0] == instance[-1] and len(instance) <= self.task_params.num_steps: instance.append(self.take_action([instance[-1]], [2])[0]) if len(instance) <= self.task_params.num_steps: instance.append(self.take_action([instance[-1]], [3])[0]) instances.append(instance) # Do random perturbation if needed. perturbs = _gen_perturbs(rng, self.task_params.batch_size, self.task_params.num_steps, self.task_params.data_augment.lr_flip, self.task_params.data_augment.delta_angle, self.task_params.data_augment.delta_xy, self.task_params.data_augment.structured) return instances, perturbs def worker(self, instances, perturbs): # Output the images and the free space. # Make the instances be all the same length. 
for i in range(len(instances)): for j in range(self.task_params.num_steps - len(instances[i]) + 1): instances[i].append(instances[i][-1]) if perturbs[i].shape[0] < self.task_params.num_steps+1: p = np.zeros((self.task_params.num_steps+1, 4)) p[:perturbs[i].shape[0], :] = perturbs[i] p[perturbs[i].shape[0]:, :] = perturbs[i][-1,:] perturbs[i] = p instances_ = [] for instance in instances: instances_ = instances_ + instance perturbs_ = np.concatenate(perturbs, axis=0) instances_nodes = self.task.nodes[instances_,:] instances_nodes = [tuple(x) for x in instances_nodes] imgs_ = self.render_nodes(instances_nodes, perturbs_) imgs = []; next = 0; for instance in instances: img_i = [] for _ in instance: img_i.append(imgs_[next]) next = next+1 imgs.append(img_i) imgs = np.array(imgs) # Render out the maps in the egocentric view for all nodes and not just the # last node. all_nodes = [] for x in instances: all_nodes = all_nodes + x all_perturbs = np.concatenate(perturbs, axis=0) loc, x_axis, y_axis, theta = self.get_loc_axis( self.task.nodes[all_nodes, :]*1, delta_theta=self.task.delta_theta, perturb=all_perturbs) fss = None valids = None loc_on_map = None theta_on_map = None cum_fs = None cum_valid = None incremental_locs = None incremental_thetas = None if self.task_params.output_free_space: fss, valids = get_map_to_predict(loc, x_axis, y_axis, map=self.traversible*1., map_size=self.task_params.map_size) fss = np.array(fss) > 0.5 fss = np.reshape(fss, [self.task_params.batch_size, self.task_params.num_steps+1, self.task_params.map_size, self.task_params.map_size]) valids = np.reshape(np.array(valids), fss.shape) if self.task_params.output_transform_to_global_map: # Output the transform to the global map. loc_on_map = np.reshape(loc*1, [self.task_params.batch_size, self.task_params.num_steps+1, -1]) # Converting to location wrt to first location so that warping happens # properly. 
theta_on_map = np.reshape(theta*1, [self.task_params.batch_size, self.task_params.num_steps+1, -1]) if self.task_params.output_incremental_transform: # Output the transform to the global map. incremental_locs_ = np.reshape(loc*1, [self.task_params.batch_size, self.task_params.num_steps+1, -1]) incremental_locs_[:,1:,:] -= incremental_locs_[:,:-1,:] t0 = -np.pi/2+np.reshape(theta*1, [self.task_params.batch_size, self.task_params.num_steps+1, -1]) t = t0*1 incremental_locs = incremental_locs_*1 incremental_locs[:,:,0] = np.sum(incremental_locs_ * np.concatenate((np.cos(t), np.sin(t)), axis=-1), axis=-1) incremental_locs[:,:,1] = np.sum(incremental_locs_ * np.concatenate((np.cos(t+np.pi/2), np.sin(t+np.pi/2)), axis=-1), axis=-1) incremental_locs[:,0,:] = incremental_locs_[:,0,:] # print incremental_locs_[0,:,:], incremental_locs[0,:,:], t0[0,:,:] incremental_thetas = np.reshape(theta*1, [self.task_params.batch_size, self.task_params.num_steps+1, -1]) incremental_thetas[:,1:,:] += -incremental_thetas[:,:-1,:] if self.task_params.output_canonical_map: loc_ = loc[0::(self.task_params.num_steps+1), :] x_axis = np.zeros_like(loc_); x_axis[:,1] = 1 y_axis = np.zeros_like(loc_); y_axis[:,0] = -1 cum_fs, cum_valid = get_map_to_predict(loc_, x_axis, y_axis, map=self.traversible*1., map_size=self.task_params.map_size) cum_fs = np.array(cum_fs) > 0.5 cum_fs = np.reshape(cum_fs, [self.task_params.batch_size, 1, self.task_params.map_size, self.task_params.map_size]) cum_valid = np.reshape(np.array(cum_valid), cum_fs.shape) inputs = {'fs_maps': fss, 'valid_maps': valids, 'imgs': imgs, 'loc_on_map': loc_on_map, 'theta_on_map': theta_on_map, 'cum_fs_maps': cum_fs, 'cum_valid_maps': cum_valid, 'incremental_thetas': incremental_thetas, 'incremental_locs': incremental_locs} return inputs def pre(self, inputs): inputs['imgs'] = image_pre(inputs['imgs'], self.task_params.modalities) if inputs['loc_on_map'] is not None: inputs['loc_on_map'] = inputs['loc_on_map'] - 
inputs['loc_on_map'][:,[0],:] if inputs['theta_on_map'] is not None: inputs['theta_on_map'] = np.pi/2. - inputs['theta_on_map'] return inputs def _nav_env_reset_helper(type, rng, nodes, batch_size, gtG, max_dist, num_steps, num_goals, data_augment, **kwargs): """Generates and returns a new episode.""" max_compute = max_dist + 4*num_steps if type == 'general': start_node_ids, end_node_ids, dist, pred_map, paths = \ rng_target_dist_field(batch_size, gtG, rng, max_dist, max_compute, nodes=nodes, compute_path=False) target_class = None elif type == 'room_to_room_many': goal_node_ids = []; dists = []; node_room_ids = kwargs['node_room_ids'] # Sample the first one start_node_ids_, end_node_ids_, dist_, _, _ = rng_room_to_room( batch_size, gtG, rng, max_dist, max_compute, node_room_ids=node_room_ids, nodes=nodes) start_node_ids = start_node_ids_ goal_node_ids.append(end_node_ids_) dists.append(dist_) for n in range(num_goals-1): start_node_ids_, end_node_ids_, dist_, _, _ = rng_next_goal( goal_node_ids[n], batch_size, gtG, rng, max_dist, max_compute, node_room_ids=node_room_ids, nodes=nodes, dists_from_start_node=dists[n]) goal_node_ids.append(end_node_ids_) dists.append(dist_) target_class = None elif type == 'rng_rejection_sampling_many': num_goals = num_goals goal_node_ids = []; dists = []; n_ori = kwargs['n_ori'] step_size = kwargs['step_size'] min_dist = kwargs['min_dist'] sampling_distribution = kwargs['sampling_distribution'] target_distribution = kwargs['target_distribution'] rejection_sampling_M = kwargs['rejection_sampling_M'] distribution_bins = kwargs['distribution_bins'] for n in range(num_goals): if n == 0: input_nodes = None else: input_nodes = goal_node_ids[n-1] start_node_ids_, end_node_ids_, dist_, _, _, _, _ = rng_next_goal_rejection_sampling( input_nodes, batch_size, gtG, rng, max_dist, min_dist, max_compute, sampling_distribution, target_distribution, nodes, n_ori, step_size, distribution_bins, rejection_sampling_M) if n == 0: start_node_ids = 
start_node_ids_ goal_node_ids.append(end_node_ids_) dists.append(dist_) target_class = None elif type == 'room_to_room_back': num_goals = num_goals assert(num_goals == 2), 'num_goals must be 2.' goal_node_ids = []; dists = []; node_room_ids = kwargs['node_room_ids'] # Sample the first one. start_node_ids_, end_node_ids_, dist_, _, _ = rng_room_to_room( batch_size, gtG, rng, max_dist, max_compute, node_room_ids=node_room_ids, nodes=nodes) start_node_ids = start_node_ids_ goal_node_ids.append(end_node_ids_) dists.append(dist_) # Set second goal to be starting position, and compute distance to the start node. goal_node_ids.append(start_node_ids) dist = [] for i in range(batch_size): dist_ = gt.topology.shortest_distance( gt.GraphView(gtG, reversed=True), source=gtG.vertex(start_node_ids[i]), target=None) dist_ = np.array(dist_.get_array()) dist.append(dist_) dists.append(dist) target_class = None elif type[:14] == 'to_nearest_obj': # Generate an episode by sampling one of the target classes (with # probability proportional to the number of nodes in the world). # With the sampled class sample a node that is within some distance from # the sampled class. class_nodes = kwargs['class_nodes'] sampling = kwargs['sampling'] dist_to_class = kwargs['dist_to_class'] assert(num_goals == 1), 'Only supports a single goal.' ind = rng.choice(class_nodes.shape[0], size=batch_size) target_class = class_nodes[ind,1] start_node_ids = []; dists = []; goal_node_ids = []; for t in target_class: if sampling == 'uniform': max_dist = max_dist cnts = np.bincount(dist_to_class[t], minlength=max_dist+1)*1. cnts[max_dist+1:] = 0 p_each = 1./ cnts / (max_dist+1.) p_each[cnts == 0] = 0 p = p_each[dist_to_class[t]]*1.; p = p/np.sum(p) start_node_id = rng.choice(p.shape[0], size=1, p=p)[0] else: logging.fatal('Sampling not one of uniform.') start_node_ids.append(start_node_id) dists.append(dist_to_class[t]) # Dummy goal node, same as the start node, so that vis is better. 
goal_node_ids.append(start_node_id) dists = [dists] goal_node_ids = [goal_node_ids] return start_node_ids, goal_node_ids, dists, target_class class NavigationEnv(GridWorld, Building): """Wrapper around GridWorld which sets up navigation tasks. """ def _debug_save_hardness(self, seed): out_path = os.path.join(self.logdir, '{:s}_{:d}_hardness.png'.format(self.building_name, seed)) batch_size = 4000 rng = np.random.RandomState(0) start_node_ids, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists = \ rng_next_goal_rejection_sampling( None, batch_size, self.task.gtG, rng, self.task_params.max_dist, self.task_params.min_dist, self.task_params.max_dist, self.task.sampling_distribution, self.task.target_distribution, self.task.nodes, self.task_params.n_ori, self.task_params.step_size, self.task.distribution_bins, self.task.rejection_sampling_M) bins = self.task.distribution_bins n_bins = self.task.n_bins with plt.style.context('ggplot'): fig, axes = utils.subplot(plt, (1,2), (10,10)) ax = axes[0] _ = ax.hist(hardnesss, bins=bins, weights=np.ones_like(hardnesss)/len(hardnesss)) ax.plot(bins[:-1]+0.5/n_bins, self.task.target_distribution, 'g') ax.plot(bins[:-1]+0.5/n_bins, self.task.sampling_distribution, 'b') ax.grid('on') ax = axes[1] _ = ax.hist(gt_dists, bins=np.arange(self.task_params.max_dist+1)) ax.grid('on') ax.set_title('Mean: {:0.2f}, Median: {:0.2f}'.format(np.mean(gt_dists), np.median(gt_dists))) with fu.fopen(out_path, 'w') as f: fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) def _debug_save_map_nodes(self, seed): """Saves traversible space along with nodes generated on the graph. 
Takes the seed as input.""" img_path = os.path.join(self.logdir, '{:s}_{:d}_graph.png'.format(self.building_name, seed)) node_xyt = self.to_actual_xyt_vec(self.task.nodes) plt.set_cmap('jet'); fig, ax = utils.subplot(plt, (1,1), (12,12)) ax.plot(node_xyt[:,0], node_xyt[:,1], 'm.') ax.set_axis_off(); ax.axis('equal'); if self.room_dims is not None: for i, r in enumerate(self.room_dims['dims']*1): min_ = r[:3]*1 max_ = r[3:]*1 xmin, ymin, zmin = min_ xmax, ymax, zmax = max_ ax.plot([xmin, xmax, xmax, xmin, xmin], [ymin, ymin, ymax, ymax, ymin], 'g') ax.imshow(self.traversible, origin='lower'); with fu.fopen(img_path, 'w') as f: fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) def _debug_semantic_maps(self, seed): """Saves traversible space along with nodes generated on the graph. Takes the seed as input.""" for i, cls in enumerate(self.task_params.semantic_task.class_map_names): img_path = os.path.join(self.logdir, '{:s}_flip{:d}_{:s}_graph.png'.format(self.building_name, seed, cls)) maps = self.traversible*1. maps += 0.5*(self.task.class_maps_dilated[:,:,i]) write_traversible = (maps*1.+1.)/3.0 write_traversible = (write_traversible*255.).astype(np.uint8)[:,:,np.newaxis] write_traversible = write_traversible + np.zeros((1,1,3), dtype=np.uint8) fu.write_image(img_path, write_traversible[::-1,:,:]) def _preprocess_for_task(self, seed): """Sets up the task field for doing navigation on the grid world.""" if self.task is None or self.task.seed != seed: rng = np.random.RandomState(seed) origin_loc = get_graph_origin_loc(rng, self.traversible) self.task = utils.Foo(seed=seed, origin_loc=origin_loc, n_ori=self.task_params.n_ori) G = generate_graph(self.valid_fn_vec, self.task_params.step_size, self.task.n_ori, (0, 0, 0)) gtG, nodes, nodes_to_id = convert_to_graph_tool(G) self.task.gtG = gtG self.task.nodes = nodes self.task.delta_theta = 2.0*np.pi/(self.task.n_ori*1.) 
self.task.nodes_to_id = nodes_to_id logging.info('Building %s, #V=%d, #E=%d', self.building_name, self.task.nodes.shape[0], self.task.gtG.num_edges()) type = self.task_params.type if type == 'general': # Do nothing _ = None elif type == 'room_to_room_many' or type == 'room_to_room_back': if type == 'room_to_room_back': assert(self.task_params.num_goals == 2), 'num_goals must be 2.' self.room_dims = _filter_rooms(self.room_dims, self.task_params.room_regex) xyt = self.to_actual_xyt_vec(self.task.nodes) self.task.node_room_ids = _label_nodes_with_room_id(xyt, self.room_dims) self.task.reset_kwargs = {'node_room_ids': self.task.node_room_ids} elif type == 'rng_rejection_sampling_many': n_bins = 20 rejection_sampling_M = self.task_params.rejection_sampling_M min_dist = self.task_params.min_dist bins = np.arange(n_bins+1)/(n_bins*1.) target_d = np.zeros(n_bins); target_d[...] = 1./n_bins; sampling_d = get_hardness_distribution( self.task.gtG, self.task_params.max_dist, self.task_params.min_dist, np.random.RandomState(0), 4000, bins, self.task.nodes, self.task_params.n_ori, self.task_params.step_size) self.task.reset_kwargs = {'distribution_bins': bins, 'target_distribution': target_d, 'sampling_distribution': sampling_d, 'rejection_sampling_M': rejection_sampling_M, 'n_bins': n_bins, 'n_ori': self.task_params.n_ori, 'step_size': self.task_params.step_size, 'min_dist': self.task_params.min_dist} self.task.n_bins = n_bins self.task.distribution_bins = bins self.task.target_distribution = target_d self.task.sampling_distribution = sampling_d self.task.rejection_sampling_M = rejection_sampling_M if self.logdir is not None: self._debug_save_hardness(seed) elif type[:14] == 'to_nearest_obj': self.room_dims = _filter_rooms(self.room_dims, self.task_params.room_regex) xyt = self.to_actual_xyt_vec(self.task.nodes) self.class_maps = _select_classes(self.class_maps, self.class_map_names, self.task_params.semantic_task.class_map_names)*1 self.class_map_names = 
self.task_params.semantic_task.class_map_names nodes_xyt = self.to_actual_xyt_vec(np.array(self.task.nodes)) tt = utils.Timer(); tt.tic(); if self.task_params.type == 'to_nearest_obj_acc': self.task.class_maps_dilated, self.task.node_class_label = label_nodes_with_class_geodesic( nodes_xyt, self.class_maps, self.task_params.semantic_task.pix_distance+8, self.map.traversible, ff_cost=1., fo_cost=1., oo_cost=4., connectivity=8.) dists = [] for i in range(len(self.class_map_names)): class_nodes_ = np.where(self.task.node_class_label[:,i])[0] dists.append(get_distance_node_list(gtG, source_nodes=class_nodes_, direction='to')) self.task.dist_to_class = dists a_, b_ = np.where(self.task.node_class_label) self.task.class_nodes = np.concatenate((a_[:,np.newaxis], b_[:,np.newaxis]), axis=1) if self.logdir is not None: self._debug_semantic_maps(seed) self.task.reset_kwargs = {'sampling': self.task_params.semantic_task.sampling, 'class_nodes': self.task.class_nodes, 'dist_to_class': self.task.dist_to_class} if self.logdir is not None: self._debug_save_map_nodes(seed) def reset(self, rngs): rng = rngs[0]; rng_perturb = rngs[1]; nodes = self.task.nodes tp = self.task_params start_node_ids, goal_node_ids, dists, target_class = \ _nav_env_reset_helper(tp.type, rng, self.task.nodes, tp.batch_size, self.task.gtG, tp.max_dist, tp.num_steps, tp.num_goals, tp.data_augment, **(self.task.reset_kwargs)) start_nodes = [tuple(nodes[_,:]) for _ in start_node_ids] goal_nodes = [[tuple(nodes[_,:]) for _ in __] for __ in goal_node_ids] data_augment = tp.data_augment perturbs = _gen_perturbs(rng_perturb, tp.batch_size, (tp.num_steps+1)*tp.num_goals, data_augment.lr_flip, data_augment.delta_angle, data_augment.delta_xy, data_augment.structured) perturbs = np.array(perturbs) # batch x steps x 4 end_perturbs = perturbs[:,-(tp.num_goals):,:]*1 # fixed perturb for the goal. 
perturbs = perturbs[:,:-(tp.num_goals),:]*1 history = -np.ones((tp.batch_size, tp.num_steps*tp.num_goals), dtype=np.int32) self.episode = utils.Foo( start_nodes=start_nodes, start_node_ids=start_node_ids, goal_nodes=goal_nodes, goal_node_ids=goal_node_ids, dist_to_goal=dists, perturbs=perturbs, goal_perturbs=end_perturbs, history=history, target_class=target_class, history_frames=[]) return start_node_ids def take_action(self, current_node_ids, action, step_number): """In addition to returning the action, also returns the reward that the agent receives.""" goal_number = step_number / self.task_params.num_steps new_node_ids = GridWorld.take_action(self, current_node_ids, action) rewards = [] for i, n in enumerate(new_node_ids): reward = 0 if n == self.episode.goal_node_ids[goal_number][i]: reward = self.task_params.reward_at_goal reward = reward - self.task_params.reward_time_penalty rewards.append(reward) return new_node_ids, rewards def get_optimal_action(self, current_node_ids, step_number): """Returns the optimal action from the current node.""" goal_number = step_number / self.task_params.num_steps gtG = self.task.gtG a = np.zeros((len(current_node_ids), self.task_params.num_actions), dtype=np.int32) d_dict = self.episode.dist_to_goal[goal_number] for i, c in enumerate(current_node_ids): neigh = gtG.vertex(c).out_neighbours() neigh_edge = gtG.vertex(c).out_edges() ds = np.array([d_dict[i][int(x)] for x in neigh]) ds_min = np.min(ds) for i_, e in enumerate(neigh_edge): if ds[i_] == ds_min: _ = gtG.ep['action'][e] a[i, _] = 1 return a def get_targets(self, current_node_ids, step_number): """Returns the target actions from the current node.""" action = self.get_optimal_action(current_node_ids, step_number) action = np.expand_dims(action, axis=1) return vars(utils.Foo(action=action)) def get_targets_name(self): """Returns the list of names of the targets.""" return ['action'] def cleanup(self): self.episode = None class VisualNavigationEnv(NavigationEnv): """Class 
for doing visual navigation in environments. Functions for computing features on states, etc. """ def __init__(self, robot, env, task_params, category_list=None, building_name=None, flip=False, logdir=None, building_loader=None, r_obj=None): tt = utils.Timer() tt.tic() Building.__init__(self, building_name, robot, env, category_list, small=task_params.toy_problem, flip=flip, logdir=logdir, building_loader=building_loader) self.set_r_obj(r_obj) self.task_params = task_params self.task = None self.episode = None self._preprocess_for_task(self.task_params.building_seed) if hasattr(self.task_params, 'map_scales'): self.task.scaled_maps = resize_maps( self.traversible.astype(np.float32)*1, self.task_params.map_scales, self.task_params.map_resize_method) else: logging.fatal('VisualNavigationEnv does not support scale_f anymore.') self.task.readout_maps_scaled = resize_maps( self.traversible.astype(np.float32)*1, self.task_params.readout_maps_scales, self.task_params.map_resize_method) tt.toc(log_at=1, log_str='VisualNavigationEnv __init__: ') def get_weight(self): return self.task.nodes.shape[0] def get_common_data(self): goal_nodes = self.episode.goal_nodes start_nodes = self.episode.start_nodes perturbs = self.episode.perturbs goal_perturbs = self.episode.goal_perturbs target_class = self.episode.target_class goal_locs = []; rel_goal_locs = []; for i in range(len(goal_nodes)): end_nodes = goal_nodes[i] goal_loc, _, _, goal_theta = self.get_loc_axis( np.array(end_nodes), delta_theta=self.task.delta_theta, perturb=goal_perturbs[:,i,:]) # Compute the relative location to all goals from the starting location. 
loc, _, _, theta = self.get_loc_axis(np.array(start_nodes), delta_theta=self.task.delta_theta, perturb=perturbs[:,0,:]) r_goal, t_goal = _get_relative_goal_loc(goal_loc*1., loc, theta) rel_goal_loc = np.concatenate((r_goal*np.cos(t_goal), r_goal*np.sin(t_goal), np.cos(goal_theta-theta), np.sin(goal_theta-theta)), axis=1) rel_goal_locs.append(np.expand_dims(rel_goal_loc, axis=1)) goal_locs.append(np.expand_dims(goal_loc, axis=1)) map = self.traversible*1. maps = np.repeat(np.expand_dims(np.expand_dims(map, axis=0), axis=0), self.task_params.batch_size, axis=0)*1 if self.task_params.type[:14] == 'to_nearest_obj': for i in range(self.task_params.batch_size): maps[i,0,:,:] += 0.5*(self.task.class_maps_dilated[:,:,target_class[i]]) rel_goal_locs = np.concatenate(rel_goal_locs, axis=1) goal_locs = np.concatenate(goal_locs, axis=1) maps = np.expand_dims(maps, axis=-1) if self.task_params.type[:14] == 'to_nearest_obj': rel_goal_locs = np.zeros((self.task_params.batch_size, 1, len(self.task_params.semantic_task.class_map_names)), dtype=np.float32) goal_locs = np.zeros((self.task_params.batch_size, 1, 2), dtype=np.float32) for i in range(self.task_params.batch_size): t = target_class[i] rel_goal_locs[i,0,t] = 1. goal_locs[i,0,0] = t goal_locs[i,0,1] = np.NaN return vars(utils.Foo(orig_maps=maps, goal_loc=goal_locs, rel_goal_loc_at_start=rel_goal_locs)) def pre_common_data(self, inputs): return inputs def get_features(self, current_node_ids, step_number): task_params = self.task_params goal_number = step_number / self.task_params.num_steps end_nodes = self.task.nodes[self.episode.goal_node_ids[goal_number],:]*1 current_nodes = self.task.nodes[current_node_ids,:]*1 end_perturbs = self.episode.goal_perturbs[:,goal_number,:][:,np.newaxis,:] perturbs = self.episode.perturbs target_class = self.episode.target_class # Append to history. self.episode.history[:,step_number] = np.array(current_node_ids) # Render out the images from current node. 
outs = {} if self.task_params.outputs.images: imgs_all = [] imgs = self.render_nodes([tuple(x) for x in current_nodes], perturb=perturbs[:,step_number,:]) imgs_all.append(imgs) aux_delta_thetas = self.task_params.aux_delta_thetas for i in range(len(aux_delta_thetas)): imgs = self.render_nodes([tuple(x) for x in current_nodes], perturb=perturbs[:,step_number,:], aux_delta_theta=aux_delta_thetas[i]) imgs_all.append(imgs) imgs_all = np.array(imgs_all) # A x B x H x W x C imgs_all = np.transpose(imgs_all, axes=[1,0,2,3,4]) imgs_all = np.expand_dims(imgs_all, axis=1) # B x N x A x H x W x C if task_params.num_history_frames > 0: if step_number == 0: # Append the same frame 4 times for i in range(task_params.num_history_frames+1): self.episode.history_frames.insert(0, imgs_all*1.) self.episode.history_frames.insert(0, imgs_all) self.episode.history_frames.pop() imgs_all_with_history = np.concatenate(self.episode.history_frames, axis=2) else: imgs_all_with_history = imgs_all outs['imgs'] = imgs_all_with_history # B x N x A x H x W x C if self.task_params.outputs.node_ids: outs['node_ids'] = np.array(current_node_ids).reshape((-1,1,1)) outs['perturbs'] = np.expand_dims(perturbs[:,step_number, :]*1., axis=1) if self.task_params.outputs.analytical_counts: assert(self.task_params.modalities == ['depth']) d = image_pre(outs['imgs']*1., self.task_params.modalities) cm = get_camera_matrix(self.task_params.img_width, self.task_params.img_height, self.task_params.img_fov) XYZ = get_point_cloud_from_z(100./d[...,0], cm) XYZ = make_geocentric(XYZ*100., self.robot.sensor_height, self.robot.camera_elevation_degree) for i in range(len(self.task_params.analytical_counts.map_sizes)): non_linearity = self.task_params.analytical_counts.non_linearity[i] count, isvalid = bin_points(XYZ*1., map_size=self.task_params.analytical_counts.map_sizes[i], xy_resolution=self.task_params.analytical_counts.xy_resolution[i], z_bins=self.task_params.analytical_counts.z_bins[i]) assert(count.shape[2] == 
1), 'only works for n_views equal to 1.' count = count[:,:,0,:,:,:] isvalid = isvalid[:,:,0,:,:,:] if non_linearity == 'none': None elif non_linearity == 'min10': count = np.minimum(count, 10.) elif non_linearity == 'sqrt': count = np.sqrt(count) else: logging.fatal('Undefined non_linearity.') outs['analytical_counts_{:d}'.format(i)] = count # Compute the goal location in the cordinate frame of the robot. if self.task_params.outputs.rel_goal_loc: if self.task_params.type[:14] != 'to_nearest_obj': loc, _, _, theta = self.get_loc_axis(current_nodes, delta_theta=self.task.delta_theta, perturb=perturbs[:,step_number,:]) goal_loc, _, _, goal_theta = self.get_loc_axis(end_nodes, delta_theta=self.task.delta_theta, perturb=end_perturbs[:,0,:]) r_goal, t_goal = _get_relative_goal_loc(goal_loc, loc, theta) rel_goal_loc = np.concatenate((r_goal*np.cos(t_goal), r_goal*np.sin(t_goal), np.cos(goal_theta-theta), np.sin(goal_theta-theta)), axis=1) outs['rel_goal_loc'] = np.expand_dims(rel_goal_loc, axis=1) elif self.task_params.type[:14] == 'to_nearest_obj': rel_goal_loc = np.zeros((self.task_params.batch_size, 1, len(self.task_params.semantic_task.class_map_names)), dtype=np.float32) for i in range(self.task_params.batch_size): t = target_class[i] rel_goal_loc[i,0,t] = 1. outs['rel_goal_loc'] = rel_goal_loc # Location on map to plot the trajectory during validation. if self.task_params.outputs.loc_on_map: loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, delta_theta=self.task.delta_theta, perturb=perturbs[:,step_number,:]) outs['loc_on_map'] = np.expand_dims(loc, axis=1) # Compute gt_dist to goal if self.task_params.outputs.gt_dist_to_goal: gt_dist_to_goal = np.zeros((len(current_node_ids), 1), dtype=np.float32) for i, n in enumerate(current_node_ids): gt_dist_to_goal[i,0] = self.episode.dist_to_goal[goal_number][i][n] outs['gt_dist_to_goal'] = np.expand_dims(gt_dist_to_goal, axis=1) # Free space in front of you, map and goal as images. 
if self.task_params.outputs.ego_maps: loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, delta_theta=self.task.delta_theta, perturb=perturbs[:,step_number,:]) maps = generate_egocentric_maps(self.task.scaled_maps, self.task_params.map_scales, self.task_params.map_crop_sizes, loc, x_axis, y_axis, theta) for i in range(len(self.task_params.map_scales)): outs['ego_maps_{:d}'.format(i)] = \ np.expand_dims(np.expand_dims(maps[i], axis=1), axis=-1) if self.task_params.outputs.readout_maps: loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, delta_theta=self.task.delta_theta, perturb=perturbs[:,step_number,:]) maps = generate_egocentric_maps(self.task.readout_maps_scaled, self.task_params.readout_maps_scales, self.task_params.readout_maps_crop_sizes, loc, x_axis, y_axis, theta) for i in range(len(self.task_params.readout_maps_scales)): outs['readout_maps_{:d}'.format(i)] = \ np.expand_dims(np.expand_dims(maps[i], axis=1), axis=-1) # Images for the goal. if self.task_params.outputs.ego_goal_imgs: if self.task_params.type[:14] != 'to_nearest_obj': loc, x_axis, y_axis, theta = self.get_loc_axis(current_nodes, delta_theta=self.task.delta_theta, perturb=perturbs[:,step_number,:]) goal_loc, _, _, _ = self.get_loc_axis(end_nodes, delta_theta=self.task.delta_theta, perturb=end_perturbs[:,0,:]) rel_goal_orientation = np.mod( np.int32(current_nodes[:,2:] - end_nodes[:,2:]), self.task_params.n_ori) goal_dist, goal_theta = _get_relative_goal_loc(goal_loc, loc, theta) goals = generate_goal_images(self.task_params.map_scales, self.task_params.map_crop_sizes, self.task_params.n_ori, goal_dist, goal_theta, rel_goal_orientation) for i in range(len(self.task_params.map_scales)): outs['ego_goal_imgs_{:d}'.format(i)] = np.expand_dims(goals[i], axis=1) elif self.task_params.type[:14] == 'to_nearest_obj': for i in range(len(self.task_params.map_scales)): num_classes = len(self.task_params.semantic_task.class_map_names) outs['ego_goal_imgs_{:d}'.format(i)] = 
np.zeros((self.task_params.batch_size, 1, self.task_params.map_crop_sizes[i], self.task_params.map_crop_sizes[i], self.task_params.goal_channels)) for i in range(self.task_params.batch_size): t = target_class[i] for j in range(len(self.task_params.map_scales)): outs['ego_goal_imgs_{:d}'.format(j)][i,:,:,:,t] = 1. # Incremental locs and theta (for map warping), always in the original scale # of the map, the subequent steps in the tf code scale appropriately. # Scaling is done by just multiplying incremental_locs appropriately. if self.task_params.outputs.egomotion: if step_number == 0: # Zero Ego Motion incremental_locs = np.zeros((self.task_params.batch_size, 1, 2), dtype=np.float32) incremental_thetas = np.zeros((self.task_params.batch_size, 1, 1), dtype=np.float32) else: previous_nodes = self.task.nodes[self.episode.history[:,step_number-1], :]*1 loc, _, _, theta = self.get_loc_axis(current_nodes, delta_theta=self.task.delta_theta, perturb=perturbs[:,step_number,:]) previous_loc, _, _, previous_theta = self.get_loc_axis( previous_nodes, delta_theta=self.task.delta_theta, perturb=perturbs[:,step_number-1,:]) incremental_locs_ = np.reshape(loc-previous_loc, [self.task_params.batch_size, 1, -1]) t = -np.pi/2+np.reshape(theta*1, [self.task_params.batch_size, 1, -1]) incremental_locs = incremental_locs_*1 incremental_locs[:,:,0] = np.sum(incremental_locs_ * np.concatenate((np.cos(t), np.sin(t)), axis=-1), axis=-1) incremental_locs[:,:,1] = np.sum(incremental_locs_ * np.concatenate((np.cos(t+np.pi/2), np.sin(t+np.pi/2)), axis=-1), axis=-1) incremental_thetas = np.reshape(theta-previous_theta, [self.task_params.batch_size, 1, -1]) outs['incremental_locs'] = incremental_locs outs['incremental_thetas'] = incremental_thetas if self.task_params.outputs.visit_count: # Output the visit count for this state, how many times has the current # state been visited, and how far in the history was the last visit # (except this one) visit_count = np.zeros((self.task_params.batch_size, 
1), dtype=np.int32) last_visit = -np.ones((self.task_params.batch_size, 1), dtype=np.int32) if step_number >= 1: h = self.episode.history[:,:(step_number)] visit_count[:,0] = np.sum(h == np.array(current_node_ids).reshape([-1,1]), axis=1) last_visit[:,0] = np.argmax(h[:,::-1] == np.array(current_node_ids).reshape([-1,1]), axis=1) + 1 last_visit[visit_count == 0] = -1 # -1 if not visited. outs['visit_count'] = np.expand_dims(visit_count, axis=1) outs['last_visit'] = np.expand_dims(last_visit, axis=1) return outs def get_features_name(self): f = [] if self.task_params.outputs.images: f.append('imgs') if self.task_params.outputs.rel_goal_loc: f.append('rel_goal_loc') if self.task_params.outputs.loc_on_map: f.append('loc_on_map') if self.task_params.outputs.gt_dist_to_goal: f.append('gt_dist_to_goal') if self.task_params.outputs.ego_maps: for i in range(len(self.task_params.map_scales)): f.append('ego_maps_{:d}'.format(i)) if self.task_params.outputs.readout_maps: for i in range(len(self.task_params.readout_maps_scales)): f.append('readout_maps_{:d}'.format(i)) if self.task_params.outputs.ego_goal_imgs: for i in range(len(self.task_params.map_scales)): f.append('ego_goal_imgs_{:d}'.format(i)) if self.task_params.outputs.egomotion: f.append('incremental_locs') f.append('incremental_thetas') if self.task_params.outputs.visit_count: f.append('visit_count') f.append('last_visit') if self.task_params.outputs.analytical_counts: for i in range(len(self.task_params.analytical_counts.map_sizes)): f.append('analytical_counts_{:d}'.format(i)) if self.task_params.outputs.node_ids: f.append('node_ids') f.append('perturbs') return f def pre_features(self, inputs): if self.task_params.outputs.images: inputs['imgs'] = image_pre(inputs['imgs'], self.task_params.modalities) return inputs class BuildingMultiplexer(): def __init__(self, args, task_number): params = vars(args) for k in params.keys(): setattr(self, k, params[k]) self.task_number = task_number self._pick_data(task_number) 
logging.info('Env Class: %s.', self.env_class) if self.task_params.task == 'planning': self._setup_planner() elif self.task_params.task == 'mapping': self._setup_mapper() elif self.task_params.task == 'map+plan': self._setup_mapper() else: logging.error('Undefined task: %s'.format(self.task_params.task)) def _pick_data(self, task_number): logging.error('Input Building Names: %s', self.building_names) self.flip = [np.mod(task_number / len(self.building_names), 2) == 1] id = np.mod(task_number, len(self.building_names)) self.building_names = [self.building_names[id]] self.task_params.building_seed = task_number logging.error('BuildingMultiplexer: Picked Building Name: %s', self.building_names) self.building_names = self.building_names[0].split('+') self.flip = [self.flip[0] for _ in self.building_names] logging.error('BuildingMultiplexer: Picked Building Name: %s', self.building_names) logging.error('BuildingMultiplexer: Flipping Buildings: %s', self.flip) logging.error('BuildingMultiplexer: Set building_seed: %d', self.task_params.building_seed) self.num_buildings = len(self.building_names) logging.error('BuildingMultiplexer: Num buildings: %d', self.num_buildings) def _setup_planner(self): # Load building env class. self.buildings = [] for i, building_name in enumerate(self.building_names): b = self.env_class(robot=self.robot, env=self.env, task_params=self.task_params, building_name=building_name, flip=self.flip[i], logdir=self.logdir, building_loader=self.dataset) self.buildings.append(b) def _setup_mapper(self): # Set up the renderer. cp = self.camera_param rgb_shader, d_shader = sru.get_shaders(cp.modalities) r_obj = SwiftshaderRenderer() r_obj.init_display(width=cp.width, height=cp.height, fov=cp.fov, z_near=cp.z_near, z_far=cp.z_far, rgb_shader=rgb_shader, d_shader=d_shader) self.r_obj = r_obj r_obj.clear_scene() # Load building env class. 
self.buildings = [] wt = [] for i, building_name in enumerate(self.building_names): b = self.env_class(robot=self.robot, env=self.env, task_params=self.task_params, building_name=building_name, flip=self.flip[i], logdir=self.logdir, building_loader=self.dataset, r_obj=r_obj) wt.append(b.get_weight()) b.load_building_into_scene() b.set_building_visibility(False) self.buildings.append(b) wt = np.array(wt).astype(np.float32) wt = wt / np.sum(wt+0.0001) self.building_sampling_weights = wt def sample_building(self, rng): if self.num_buildings == 1: building_id = rng.choice(range(len(self.building_names))) else: building_id = rng.choice(self.num_buildings, p=self.building_sampling_weights) b = self.buildings[building_id] instances = b._gen_rng(rng) self._building_id = building_id return self.buildings[building_id], instances def sample_env(self, rngs): rng = rngs[0]; if self.num_buildings == 1: building_id = rng.choice(range(len(self.building_names))) else: building_id = rng.choice(self.num_buildings, p=self.building_sampling_weights) return self.buildings[building_id] def pre(self, inputs): return self.buildings[self._building_id].pre(inputs) def __del__(self): self.r_obj.clear_scene() logging.error('Clearing scene.')
Fireblend/chromium-crosswalk
refs/heads/master
tools/usb_gadget/echo_gadget.py
48
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """USB echo gadget module. This gadget has pairs of IN/OUT endpoints that echo packets back to the host. """ import math import struct import uuid import gadget import usb_constants import usb_descriptors class EchoGadget(gadget.Gadget): """Echo gadget. """ def __init__(self): """Create an echo gadget. """ device_desc = usb_descriptors.DeviceDescriptor( idVendor=usb_constants.VendorID.GOOGLE, idProduct=usb_constants.ProductID.GOOGLE_ECHO_GADGET, bcdUSB=0x0200, iManufacturer=1, iProduct=2, iSerialNumber=3, bcdDevice=0x0100) fs_config_desc = usb_descriptors.ConfigurationDescriptor( bmAttributes=0x80, MaxPower=50) fs_intr_interface_desc = usb_descriptors.InterfaceDescriptor( bInterfaceNumber=0, bInterfaceClass=usb_constants.DeviceClass.VENDOR, bInterfaceSubClass=0, bInterfaceProtocol=0, iInterface=4, ) fs_intr_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x01, bmAttributes=usb_constants.TransferType.INTERRUPT, wMaxPacketSize=64, bInterval=1 # 1ms )) fs_intr_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x81, bmAttributes=usb_constants.TransferType.INTERRUPT, wMaxPacketSize=64, bInterval=1 # 1ms )) fs_config_desc.AddInterface(fs_intr_interface_desc) fs_bulk_interface_desc = usb_descriptors.InterfaceDescriptor( bInterfaceNumber=1, bInterfaceClass=usb_constants.DeviceClass.VENDOR, bInterfaceSubClass=0, bInterfaceProtocol=0, iInterface=5 ) fs_bulk_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x02, bmAttributes=usb_constants.TransferType.BULK, wMaxPacketSize=64, bInterval=0 )) fs_bulk_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x82, bmAttributes=usb_constants.TransferType.BULK, wMaxPacketSize=64, bInterval=0 )) fs_config_desc.AddInterface(fs_bulk_interface_desc) 
fs_config_desc.AddInterface(usb_descriptors.InterfaceDescriptor( bInterfaceNumber=2, bInterfaceClass=usb_constants.DeviceClass.VENDOR, bInterfaceSubClass=0, bInterfaceProtocol=0, iInterface=6 )) fs_isoc_interface_desc = usb_descriptors.InterfaceDescriptor( bInterfaceNumber=2, bAlternateSetting=1, bInterfaceClass=usb_constants.DeviceClass.VENDOR, bInterfaceSubClass=0, bInterfaceProtocol=0, iInterface=6 ) fs_isoc_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x03, bmAttributes=usb_constants.TransferType.ISOCHRONOUS, wMaxPacketSize=1023, bInterval=1 # 1ms )) fs_isoc_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x83, bmAttributes=usb_constants.TransferType.ISOCHRONOUS, wMaxPacketSize=1023, bInterval=1 # 1ms )) fs_config_desc.AddInterface(fs_isoc_interface_desc) hs_config_desc = usb_descriptors.ConfigurationDescriptor( bmAttributes=0x80, MaxPower=50) hs_intr_interface_desc = usb_descriptors.InterfaceDescriptor( bInterfaceNumber=0, bInterfaceClass=usb_constants.DeviceClass.VENDOR, bInterfaceSubClass=0, bInterfaceProtocol=0, iInterface=4 ) hs_intr_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x01, bmAttributes=usb_constants.TransferType.INTERRUPT, wMaxPacketSize=64, bInterval=4 # 1ms )) hs_intr_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x81, bmAttributes=usb_constants.TransferType.INTERRUPT, wMaxPacketSize=64, bInterval=4 # 1ms )) hs_config_desc.AddInterface(hs_intr_interface_desc) hs_bulk_interface_desc = usb_descriptors.InterfaceDescriptor( bInterfaceNumber=1, bInterfaceClass=usb_constants.DeviceClass.VENDOR, bInterfaceSubClass=0, bInterfaceProtocol=0, iInterface=5 ) hs_bulk_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x02, bmAttributes=usb_constants.TransferType.BULK, wMaxPacketSize=512, bInterval=0 )) hs_bulk_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x82, 
bmAttributes=usb_constants.TransferType.BULK, wMaxPacketSize=512, bInterval=0 )) hs_config_desc.AddInterface(hs_bulk_interface_desc) hs_config_desc.AddInterface(usb_descriptors.InterfaceDescriptor( bInterfaceNumber=2, bInterfaceClass=usb_constants.DeviceClass.VENDOR, bInterfaceSubClass=0, bInterfaceProtocol=0, iInterface=6 )) hs_isoc_interface_desc = usb_descriptors.InterfaceDescriptor( bInterfaceNumber=2, bAlternateSetting=1, bInterfaceClass=usb_constants.DeviceClass.VENDOR, bInterfaceSubClass=0, bInterfaceProtocol=0, iInterface=6 ) hs_isoc_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x03, bmAttributes=usb_constants.TransferType.ISOCHRONOUS, wMaxPacketSize=1024, bInterval=4 # 1ms )) hs_isoc_interface_desc.AddEndpoint(usb_descriptors.EndpointDescriptor( bEndpointAddress=0x83, bmAttributes=usb_constants.TransferType.ISOCHRONOUS, wMaxPacketSize=1024, bInterval=4 # 1ms )) hs_config_desc.AddInterface(hs_isoc_interface_desc) super(EchoGadget, self).__init__( device_desc, fs_config_desc, hs_config_desc) self.AddStringDescriptor(1, 'Google Inc.') self.AddStringDescriptor(2, 'Echo Gadget') self.AddStringDescriptor(3, '{:06X}'.format(uuid.getnode())) self.AddStringDescriptor(4, 'Interrupt Echo') self.AddStringDescriptor(5, 'Bulk Echo') self.AddStringDescriptor(6, 'Isochronous Echo') def ReceivePacket(self, endpoint, data): """Echo a packet back to the host. Args: endpoint: Incoming endpoint (must be an OUT pipe). data: Packet data. """ assert endpoint & usb_constants.Dir.IN == 0 self.SendPacket(endpoint | usb_constants.Dir.IN, data) def RegisterHandlers(): """Registers web request handlers with the application server.""" import server from tornado import web class WebConfigureHandler(web.RequestHandler): def post(self): server.SwitchGadget(EchoGadget()) server.app.add_handlers('.*$', [ (r'/echo/configure', WebConfigureHandler), ])
NanaLich/shadowsocks
refs/heads/master
tests/coverage_server.py
1072
#!/usr/bin/env python # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. if __name__ == '__main__': import tornado.ioloop import tornado.web import urllib class MainHandler(tornado.web.RequestHandler): def get(self, project): try: with open('/tmp/%s-coverage' % project, 'rb') as f: coverage = f.read().strip() n = int(coverage.strip('%')) if n >= 80: color = 'brightgreen' else: color = 'yellow' self.redirect(('https://img.shields.io/badge/' 'coverage-%s-%s.svg' '?style=flat') % (urllib.quote(coverage), color)) except IOError: raise tornado.web.HTTPError(404) application = tornado.web.Application([ (r"/([a-zA-Z0-9\-_]+)", MainHandler), ]) if __name__ == "__main__": application.listen(8888, address='127.0.0.1') tornado.ioloop.IOLoop.instance().start()
helinwang/Paddle
refs/heads/develop
python/paddle/trainer_config_helpers/layers.py
3
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import collections from paddle.trainer.config_parser import * from .activations import LinearActivation, SigmoidActivation, TanhActivation, \ ReluActivation, IdentityActivation, SoftmaxActivation from .evaluators import * from .poolings import MaxPooling, AvgPooling, BasePoolingType from .attrs import * from .default_decorators import * try: import cPickle as pickle except ImportError: import pickle import copy __all__ = [ "full_matrix_projection", "AggregateLevel", "ExpandLevel", "identity_projection", "dotmul_projection", "dotmul_operator", "repeat_layer", "table_projection", "mixed_layer", "data_layer", "embedding_layer", "fc_layer", "grumemory", "pooling_layer", "lstmemory", "last_seq", "first_seq", "cos_sim", "hsigmoid", "conv_projection", "regression_cost", 'classification_cost', "LayerOutput", 'img_conv_layer', 'img_pool_layer', 'batch_norm_layer', 'img_cmrnorm_layer', 'addto_layer', 'concat_layer', 'lstm_step_layer', 'recurrent_group', 'memory', 'StaticInput', 'expand_layer', 'scaling_layer', 'scaling_projection', 'power_layer', 'interpolation_layer', 'bilinear_interp_layer', 'trans_layer', 'sum_to_one_norm_layer', 'get_output_layer', 'LayerType', 'context_projection', 'beam_search', 'maxid_layer', 'GeneratedInput', 'SubsequenceInput', 'gru_step_layer', 'recurrent_layer', 'BaseGeneratedInput', 'conv_operator', 'conv_shift_layer', 'tensor_layer', 
'selective_fc_layer', 'sampling_id_layer', 'slope_intercept_layer', 'trans_full_matrix_projection', 'linear_comb_layer', 'convex_comb_layer', 'ctc_layer', 'warp_ctc_layer', 'crf_layer', 'crf_decoding_layer', 'nce_layer', 'cross_entropy_with_selfnorm', 'cross_entropy', 'multi_binary_label_cross_entropy', 'sum_cost', 'rank_cost', 'lambda_cost', 'huber_cost', 'block_expand_layer', 'maxout_layer', 'out_prod_layer', 'print_layer', 'spp_layer', ] class LayerType(object): """ Layer type enumerations. """ DATA = "data" MIXED_LAYER = "mixed" LSTMEMORY = "lstmemory" GRUMEMORY = "gated_recurrent" SEQUENCE_LAST_INSTANCE = "seqlastins" SEQUENCE_FIRST_INSTANCE = "seqfirstins" POOLING_MAX = "max" POOLING_AVG = 'average' FC_LAYER = "fc" COST = 'cost' COSINE_SIM_VEC = 'cos_vm' COSINE_SIM = 'cos' HSIGMOID = 'hsigmoid' CONV_LAYER = "conv" CONVTRANS_LAYER = "convt" EXCONV_LAYER = "exconv" EXCONVTRANS_LAYER = "exconvt" CUDNNCONV_LAYER = "cudnn_conv" POOL_LAYER = "pool" BATCH_NORM_LAYER = 'batch_norm' NORM_LAYER = 'norm' SUM_TO_ONE_NORM_LAYER = 'sum_to_one_norm' ADDTO_LAYER = 'addto' CONCAT_LAYER = 'concat' CONCAT_PROJ_LAYER = 'concat2' LSTM_STEP_LAYER = 'lstm_step' GRU_STEP_LAYER = 'gru_step' GET_OUTPUT_LAYER = 'get_output' EXPAND_LAYER = 'expand' INTERPOLATION_LAYER = 'interpolation' BILINEAR_INTERP_LAYER = 'bilinear_interp' POWER_LAYER = 'power' SCALING_LAYER = 'scaling' TRANS_LAYER = 'trans' OUT_PROD_LAYER = 'out_prod' FEATURE_MAP_EXPAND_LAYER = 'featmap_expand' MEMORY = 'memory' MAXID_LAYER = 'maxid' EOSID_LAYER = 'eos_id' RECURRENT_LAYER = 'recurrent' CONV_SHIFT_LAYER = "conv_shift" TENSOR_LAYER = "tensor" SEL_FC_LAYER = "selective_fc" SAMPLING_ID_LAYER = "sampling_id" SLOPE_INTERCEPT_LAYER = "slope_intercept" LINEAR_COMBINATION_LAYER = "convex_comb" BLOCK_EXPAND = "blockexpand" MAXOUT = "maxout" SPP_LAYER = "spp" PRINT_LAYER = "print" CTC_LAYER = "ctc" WARP_CTC_LAYER = "warp_ctc" CRF_LAYER = "crf" CRF_DECODING_LAYER = "crf_decoding" NCE_LAYER = 'nce' RANK_COST = "rank-cost" 
LAMBDA_COST = "lambda_cost" HUBER = "huber" CROSS_ENTROPY = "multi-class-cross-entropy" CROSS_ENTROPY_WITH_SELFNORM = "multi_class_cross_entropy_with_selfnorm" SOFT_BIN_CLASS_CROSS_ENTROPY = "soft_binary_class_cross_entropy" MULTI_BIN_LABEL_CROSS_ENTROPY = "multi_binary_label_cross_entropy" SUM_COST = "sum_cost" @staticmethod def is_layer_type(type_name): """ If type_name is a layer type. :param type_name: layer type name. Because layer type enumerations are strings. :type type_name: basestring :return: True if is a layer_type :rtype: bool """ for key in dir(LayerType): if key.isupper(): att = getattr(LayerType, key) if isinstance(att, basestring) and type_name == att: return True return False class AggregateLevel(object): EACH_TIMESTEP = 'non-seq' EACH_SEQUENCE = 'seq' class LayerOutput(object): """ LayerOutput is output for layer function. It is used internally by several reasons. - Check layer connection make sense. - FC(Softmax) => Cost(MSE Error) is not good for example. - Tracking layer connection. - Pass to layer methods as input. :param name: Layer output name. :type name: basestring :param layer_type: Current Layer Type. One of LayerType enumeration. :type layer_type: basestring :param activation: Layer Activation. :type activation: BaseActivation. :param parents: Layer's parents. 
:type parents: list|tuple|collections.Sequence """ def __init__(self, name, layer_type, parents=None, activation=None, num_filters=None, img_norm_type=None, size=None, outputs=None, reverse=None): assert isinstance(name, basestring) assert isinstance(layer_type, basestring) assert size is not None assert LayerType.is_layer_type(layer_type) self.name = name self.layer_type = layer_type if parents is not None and type(parents) != list: parents = [parents] self.parents = [] if parents is None else parents self.activation = activation self.num_filters = num_filters self.img_norm_type = img_norm_type self.size = size if outputs is None: outputs = ['default'] self.outputs = outputs self.reverse = reverse def __repr__(self): """ Disable __repr__ for debug reason. Will be implemented when release """ assert False, "this method should not be invoked" def __str__(self): """ Disable __str__ for debug reason. Will be implemented when release """ assert False, "this method should not be invoked" ERROR_CLIPPING = 'error_clipping_threshold' DROPOUT = 'drop_rate' DEVICE = 'device' def layer_support(*attrs): attrs_list = list(attrs) attrs_list.append(DEVICE) def decorator(method): @functools.wraps(method) def wrapper(*args, **kwargs): for attr in attrs_list: for each in args: if isinstance(each, ExtraLayerAttribute): setattr(each, '_'.join(['can', attr]), True) for key in kwargs: val = kwargs[key] if isinstance(val, ExtraLayerAttribute): setattr(val, '_'.join(['can', attr]), True) for each in args: if isinstance(each, ExtraLayerAttribute): each.check(method.__name__) for key in kwargs: val = kwargs[key] if isinstance(val, ExtraLayerAttribute): val.check(method.__name__) return method(*args, **kwargs) return wrapper return decorator @wrap_param_attr_default() def full_matrix_projection(input, size=0, param_attr=None): """ Full Matrix Projection. It performs full matrix multiplication. .. math:: out.row[i] += in.row[i] * weight There are two styles of usage. 1. 
When used in mixed_layer like this, you can only set the input: .. code-block:: python with mixed_layer(size=100) as m: m += full_matrix_projection(input=layer) 2. When used as an independant object like this, you must set the size: .. code-block:: python proj = full_matrix_projection(input=layer, size=100, param_attr=ParamAttr(name='_proj')) :param input: input layer :type input: LayerOutput :param size: The parameter size. Means the width of parameter. :type size: int :param param_attr: Parameter config, None if use default. :type param_attr: ParameterAttribute :return: A FullMatrixProjection Object. :rtype: FullMatrixProjection """ proj = FullMatrixProjection( input_layer_name=input.name, size=size, **param_attr.attr) proj.origin = input return proj @wrap_param_attr_default() def trans_full_matrix_projection(input, size=0, param_attr=None): """ Different from full_matrix_projection, this projection performs matrix multiplication, using transpose of weight. .. math:: out.row[i] += in.row[i] * w^\mathrm{T} :math:`w^\mathrm{T}` means transpose of weight. The simply usage is: .. code-block:: python proj = trans_full_matrix_projection(input=layer, size=100, param_attr=ParamAttr( name='_proj', initial_mean=0.0, initial_std=0.01)) :param input: input layer :type input: LayerOutput :param size: The parameter size. Means the width of parameter. :type size: int :param param_attr: Parameter config, None if use default. :type param_attr: ParameterAttribute :return: A TransposedFullMatrixProjection Object. :rtype: TransposedFullMatrixProjection """ proj = TransposedFullMatrixProjection( input_layer_name=input.name, size=size, **param_attr.attr) proj.origin = input return proj @wrap_param_attr_default() def table_projection(input, size=0, param_attr=None): """ Table Projection. It selects rows from parameter where row\_id is in input\_ids. .. 
math:: out.row[i] += table.row[ids[i]] where :math:`out` is output, :math:`table` is parameter, :math:`ids` is input\_ids, and :math:`i` is row\_id. There are two styles of usage. 1. When used in mixed_layer like this, you can only set the input: .. code-block:: python with mixed_layer(size=100) as m: m += table_projection(input=layer) 2. When used as an independant object like this, you must set the size: .. code-block:: python proj = table_projection(input=layer, size=100, param_attr=ParamAttr(name='_proj')) :param input: Input layer, which must contains id fields. :type input: LayerOutput :param size: The parameter size. Means the width of parameter. :type size: int :param param_attr: Parameter config, None if use default. :type param_attr: ParameterAttribute :return: A TableProjection Object. :rtype: TableProjection """ proj = TableProjection( input_layer_name=input.name, size=size, **param_attr.attr) proj.origin = input return proj def identity_projection(input, offset=None): """ 1. IdentityProjection if offset=None. It performs: .. math:: out.row[i] += in.row[i] The example usage is: .. code-block:: python proj = identity_projection(input=layer) 2. IdentityOffsetProjection if offset!=None. It likes IdentityProjection, but layer size may be smaller than input size. It select dimesions [offset, offset+layer_size) from input: .. math:: out.row[i] += in.row[i + \\textrm{offset}] The example usage is: .. code-block:: python proj = identity_projection(input=layer, offset=10) Note that both of two projections should not have any parameter. :param input: Input Layer. :type input: LayerOutput :param offset: Offset, None if use default. 
:type offset: int :return: A IdentityProjection or IdentityOffsetProjection object :rtype: IdentityProjection or IdentityOffsetProjection """ if offset is None: proj = IdentityProjection(input_layer_name=input.name) proj.origin = input else: proj = IdentityOffsetProjection( input_layer_name=input.name, offset=offset) proj.origin = input return proj @wrap_param_attr_default() def scaling_projection(input, param_attr=None): """ scaling_projection multiplies the input with a scalar parameter and add to the output. .. math:: out += w * in The example usage is: .. code-block:: python proj = scaling_projection(input=layer) :param input: Input Layer. :type input: LayerOutput :param param_attr: Parameter config, None if use default. :type param_attr: ParameterAttribute :return: A ScalingProjection object :rtype: ScalingProjection """ proj = ScalingProjection(input_layer_name=input.name, **param_attr.attr) proj.origin = input return proj @wrap_param_attr_default() def dotmul_projection(input, param_attr=None): """ DotMulProjection with a layer as input. It performs element-wise multiplication with weight. .. math:: out.row[i] += in.row[i] .* weight where :math:`.*` means element-wise multiplication. The example usage is: .. code-block:: python proj = dotmul_projection(input=layer) :param input: Input layer. :type input: LayerOutput :param param_attr: Parameter config, None if use default. :type param_attr: ParameterAttribute :return: A DotMulProjection Object. :rtype: DotMulProjection """ proj = DotMulProjection( input_layer_name=input.name, size=input.size, **param_attr.attr) proj.origin = input return proj def dotmul_operator(a=None, b=None, scale=1, **kwargs): """ DotMulOperator takes two inputs and performs element-wise multiplication: .. math:: out.row[i] += scale * (x.row[i] .* y.row[i]) where :math:`.*` means element-wise multiplication, and scale is a config scalar, its default value is one. The example usage is: .. 
code-block:: python op = dotmul_operator(x=layer1, y=layer2, scale=0.5) :param a: Input layer1 :type a: LayerOutput :param b: Input layer2 :type b: LayerOutput :param scale: config scalar, default value is one. :type scale: float :return: A DotMulOperator Object. :rtype: DotMulOperator """ if 'x' in kwargs or 'y' in kwargs: logger.warning('x and y arguments for dotmul_operator is deprecated. ' 'Please use a and b as parameter.') a = kwargs.get('x', a) # For Backward capacity. b = kwargs.get('y', b) assert isinstance(a, LayerOutput) assert isinstance(b, LayerOutput) if a.size is not None and b.size is not None: assert a.size == b.size op = DotMulOperator(input_layer_names=[a.name, b.name], scale=scale) op.origin = [a, b] return op @wrap_bias_attr_default(['padding_attr']) def context_projection(input, context_len, context_start=None, padding_attr=False): """ Context Projection. It just simply reorganizes input sequence, combines "context_len" sequence to one context from context_start. "context_start" will be set to -(context_len - 1) / 2 by default. If context position out of sequence length, padding will be filled as zero if padding_attr = False, otherwise it is trainable. For example, origin sequence is [A B C D E F G], context len is 3, then after context projection and not set padding_attr, sequence will be [ 0AB ABC BCD CDE DEF EFG FG0 ]. :param input: Input Sequence. :type input: LayerOutput :param context_len: context length. :type context_len: int :param context_start: context start position. Default is -(context_len - 1)/2 :type context_start: int :param padding_attr: Padding Parameter Attribute. If false, it means padding always be zero. Otherwise Padding is learnable, and parameter attribute is set by this parameter. 
:type padding_attr: bool|ParameterAttribute :return: Projection :rtype: Projection """ context_start = -( context_len - 1) / 2 if context_start is None else context_start extra_dict = dict() trainable = isinstance(padding_attr, ParameterAttribute) if trainable: extra_dict = padding_attr.attr proj = ContextProjection( input_layer_name=input.name, context_length=context_len, context_start=context_start, trainable_padding=trainable, **extra_dict) proj.origin = input return proj class MixedLayerType(LayerOutput): """ The internal object for trainer_helpers. """ class AddToSealedMixedLayerException(Exception): def __init__(self): Exception.__init__(self) def __init__(self, name, size, act, bias_attr, layer_attr, parents=None): """ Ctor. :param name: layer name. :type name: basestring :param size: layer size. :type size: int :param act: activation type. :type act: BaseActivation :param bias_attr: The Bias Attribute. If no bias, then pass False or something not type of ParameterAttribute. None will get a default Bias. :type bias_attr: ParameterAttribute or None means has bias. Any other type means no bias. :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute or None """ LayerOutput.__init__( self, name, LayerType.MIXED_LAYER, parents, size=size, activation=act) self.bias_attr = bias_attr self.layer_attr = layer_attr self.inputs = [] self.finalized = False def __iadd__(self, other): """ + += operator :param other: Other projection. :type other: Projection :return: self. 
:rtype: MixedLayerType """ if not self.finalized: assert isinstance(other, Projection) or isinstance(other, Operator) self.inputs.append(other) if isinstance(other, Projection): self.parents.append(other.origin) else: self.parents.extend(other.origin) return self else: raise MixedLayerType.AddToSealedMixedLayerException() def __enter__(self): assert len(self.inputs) == 0 return self def __exit__(self, *args, **kwargs): del args, kwargs # unused parameter to suppress warning assert len(self.inputs) != 0 ml = MixedLayer( name=self.name, size=self.size, active_type=self.activation.name, bias=ParamAttr.to_bias(self.bias_attr), inputs=self.inputs, **ExtraLayerAttribute.to_kwargs(self.layer_attr)) # update the size which might be computed inside MixedLayer # according to the operator's output size self.size = ml.config.size @wrap_name_default("mixed") @wrap_act_default(act=LinearActivation()) @wrap_bias_attr_default(has_bias=False) @layer_support(ERROR_CLIPPING, DROPOUT) def mixed_layer(size=0, input=None, name=None, act=None, bias_attr=False, layer_attr=None): """ Mixed Layer. A mixed layer will add all inputs together, then activate. Each inputs is a projection or operator. There are two styles of usages. 1. When not set inputs parameter, use mixed_layer like this: .. code-block:: python with mixed_layer(size=256) as m: m += full_matrix_projection(input=layer1) m += identity_projection(input=layer2) 2. You can also set all inputs when invoke mixed_layer as follows: .. code-block:: python m = mixed_layer(size=256, input=[full_matrix_projection(input=layer1), full_matrix_projection(input=layer2)]) :param name: mixed layer name. Can be referenced by other layer. :type name: basestring :param size: layer size. :type size: int :param input: inputs layer. It is an optional parameter. If set, then this function will just return layer's name. :param act: Activation Type. :type act: BaseActivation :param bias_attr: The Bias Attribute. 
If no bias, then pass False or something not type of ParameterAttribute. None will get a default Bias. :type bias_attr: ParameterAttribute or None or bool :param layer_attr: The extra layer config. Default is None. :type layer_attr: ExtraLayerAttribute :return: MixedLayerType object can add inputs or layer name. :rtype: MixedLayerType """ if input is None: return MixedLayerType(name, size, act, bias_attr, layer_attr) else: with mixed_layer( name=name, size=size, act=act, bias_attr=bias_attr, layer_attr=layer_attr) as m: if isinstance(input, collections.Sequence): for each in input: m += each else: m += input return m @layer_support() def data_layer(name, size, height=None, width=None, layer_attr=None): """ Define DataLayer For NeuralNetwork. The example usage is: .. code-block:: python data = data_layer(name="input", size=1000) :param name: Name of this data layer. :type name: basestring :param size: Size of this data layer. :type size: int :param height: Height of this data layer, used for image :type size: int|None :param width: Width of this data layer, used for image :type size: int|None :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute. :return: LayerOutput object. :rtype: LayerOutput """ Layer( type=LayerType.DATA, name=name, size=size, height=height, width=width, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.DATA, size=size) @wrap_name_default("embedding") @wrap_param_attr_default() @layer_support(ERROR_CLIPPING) def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None): """ Define a embedding Layer. :param name: Name of this embedding layer. :type name: basestring :param input: The input layer for this embedding. NOTE: must be Index Data. :type input: LayerOutput :param size: The embedding dimension. :type size: int :param param_attr: The embedding parameter attribute. See ParameterAttribute for details. 
:type param_attr: ParameterAttribute|None :param layer_attr: Extra layer Config. Default is None. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ with mixed_layer( name=name, size=size, act=LinearActivation(), bias_attr=False, layer_attr=layer_attr) as mix: mix += table_projection(input=input, size=size, param_attr=param_attr) return mix @wrap_name_default() @wrap_param_attr_default() @wrap_bias_attr_default() @wrap_act_default() @layer_support(ERROR_CLIPPING, DROPOUT) def fc_layer(input, size, act=None, name=None, param_attr=None, bias_attr=None, layer_attr=None): """ Helper for declare fully connected layer. The example usage is: .. code-block:: python fc = fc_layer(input=layer, size=1024, act=LinearActivation(), bias_attr=False) which is equal to: .. code-block:: python with mixed_layer(size=1024) as fc: fc += full_matrix_projection(input=layer) :param name: The Layer Name. :type name: basestring :param input: The input layer. Could be a list/tuple of input layer. :type input: LayerOutput|list|tuple :param size: The layer dimension. :type size: int :param act: Activation Type. Default is tanh. :type act: BaseActivation :param param_attr: The Parameter Attribute|list. :type param_attr: ParameterAttribute :param bias_attr: The Bias Attribute. If no bias, then pass False or something not type of ParameterAttribute. None will get a default Bias. :type bias_attr: ParameterAttribute|None|Any :param layer_attr: Extra Layer config. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. 
:rtype: LayerOutput """ if isinstance(input, LayerOutput): input = [input] assert not isinstance(param_attr, collections.Sequence) param_attr = [param_attr] else: if isinstance(param_attr, collections.Sequence): assert len(input) == len(param_attr) else: param_attr = [copy.deepcopy(param_attr) for _ in range(len(input))] assert isinstance(input, collections.Sequence) Layer( inputs=[ Input(ipt.name, **attr.attr) for ipt, attr in zip(input, param_attr) ], name=name, type=LayerType.FC_LAYER, size=size, bias=ParamAttr.to_bias(bias_attr), active_type=act.name, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.FC_LAYER, input, activation=act, size=size) @wrap_name_default("print") def print_layer(input, name=None): """ Print the output value of input layers. This layer is useful for debugging. :param name: The Layer Name. :type name: basestring :param input: The input layer. Could be a list/tuple of input layer. :type input: LayerOutput|list|tuple :return: LayerOutput """ if isinstance(input, LayerOutput): input = [input] assert isinstance(input, collections.Sequence) # list or tuple for each in input: assert isinstance(each, LayerOutput) Layer( name=name, type=LayerType.PRINT_LAYER, inputs=[l.name for l in input], ) # this layer don't return anything, can not be input of other layer. @wrap_name_default("seq_pooling") @wrap_bias_attr_default(has_bias=False) @wrap_param_default(['pooling_type'], default_factory=lambda _: MaxPooling()) @layer_support() def pooling_layer(input, pooling_type=None, name=None, bias_attr=None, agg_level=AggregateLevel.EACH_TIMESTEP, layer_attr=None): """ Pooling layer for sequence inputs, not used for Image. The example usage is: .. code-block:: python seq_pool = pooling_layer(input=layer, pooling_type=AvgPooling(), agg_level=AggregateLevel.EACH_SEQUENCE) :param agg_level: AggregateLevel.EACH_TIMESTEP or AggregateLevel.EACH_SEQUENCE :type agg_level: AggregateLevel :param name: layer name. 
:type name: basestring :param input: input layer name. :type input: LayerOutput :param pooling_type: Type of pooling, MaxPooling(default), AvgPooling, SumPooling, SquareRootNPooling. :type pooling_type: BasePoolingType|None :param bias_attr: Bias parameter attribute. False if no bias. :type bias_attr: ParameterAttribute|None|False :param layer_attr: The Extra Attributes for layer, such as dropout. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerType """ extra_dict = dict() # noinspection PyUnresolvedReferences if isinstance(pooling_type, AvgPooling): extra_dict['average_strategy'] = pooling_type.strategy elif isinstance(pooling_type, MaxPooling) and \ pooling_type.output_max_index is not None: assert isinstance(pooling_type.output_max_index, bool) extra_dict['output_max_index'] = pooling_type.output_max_index extra_dict.update(ExtraLayerAttribute.to_kwargs(layer_attr)) Layer( name=name, type=pooling_type.name, inputs=[Input(input.name)], bias=ParamAttr.to_bias(bias_attr), trans_type=agg_level, **extra_dict) return LayerOutput( name, pooling_type.name, parents=[input], size=input.size) @wrap_bias_attr_default() @wrap_param_attr_default() @wrap_act_default(param_names=['gate_act'], act=SigmoidActivation()) @wrap_act_default(param_names=["act", 'state_act'], act=TanhActivation()) @wrap_name_default("lstmemory") @layer_support(DROPOUT) def lstmemory(input, name=None, reverse=False, act=None, gate_act=None, size=None, state_act=None, bias_attr=None, param_attr=None, layer_attr=None): """ Long Short-term Memory Cell. The memory cell was implemented as follow equations. .. 
math:: i_t & = \\sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i) f_t & = \\sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f) c_t & = f_tc_{t-1} + i_t tanh (W_{xc}x_t+W_{hc}h_{t-1} + b_c) o_t & = \\sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o) h_t & = o_t tanh(c_t) NOTE: In PaddlePaddle's implementation, the multiplications :math:`W_{xi}x_{t}` , :math:`W_{xf}x_{t}`, :math:`W_{xc}x_t`, :math:`W_{xo}x_{t}` are not done in the lstmemory layer, so an additional mixed_layer with full_matrix_projection or a fc_layer must be included in the configuration file to complete the input-to-hidden mappings before lstmemory is called. NOTE: This is a low level user interface. You can use network.simple_lstm to config a simple plain lstm layer. Please refer to **Generating Sequences With Recurrent Neural Networks** for more details about LSTM. Link_ goes as below. .. _Link: http://arxiv.org/abs/1308.0850 :param name: The lstmemory layer name. :type name: basestring :param input: input layer name. :type input: LayerOutput :param reverse: is sequence process reversed or not. :type reverse: bool :param act: activation type, TanhActivation by default. :math:`h_t` :type act: BaseActivation :param gate_act: gate activation type, SigmoidActivation by default. :type gate_act: BaseActivation :param state_act: state activation type, TanhActivation by default. :type state_act: BaseActivation :param bias_attr: Bias attribute. None means default bias. False means no bias. :type bias_attr: ParameterAttribute|None|False :param param_attr: Parameter Attribute. :type param_attr: ParameterAttribute|None|False :param layer_attr: Extra Layer attribute :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. 
:rtype: LayerOutput """ assert gate_act.support_hppl assert state_act.support_hppl assert act.support_hppl assert input.size is not None and input.size % 4 == 0 if size is not None: if input.size / 4 == size: plog = logger.warning else: plog = logger.fatal plog("NOTE: The lstmemory layer[%s]'s size is set by previous input " "layer. The lstm size should be equal with input layer size/4. The" " size which is set explicitly will be ignored." % name) Layer( name=name, type=LayerType.LSTMEMORY, active_type=act.name, active_state_type=state_act.name, active_gate_type=gate_act.name, reversed=reverse, bias=ParamAttr.to_bias(bias_attr), inputs=[Input(input.name, **param_attr.attr)], **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.LSTMEMORY, [input], size=input.size / 4, reverse=reverse) @wrap_bias_attr_default() @wrap_param_attr_default() @wrap_act_default(param_names=['gate_act'], act=SigmoidActivation()) @wrap_act_default(param_names=["act"], act=TanhActivation()) @wrap_name_default("gru") @layer_support(DROPOUT) def grumemory(input, name=None, reverse=False, act=None, gate_act=None, size=None, bias_attr=None, param_attr=None, layer_attr=None): """ Gate Recurrent Unit Layer. The memory cell was implemented as follow equations. 1. update gate :math:`z`: defines how much of the previous memory to keep around or the unit updates its activations. The update gate is computed by: .. math:: z_t = \\sigma(W_{z}x_{t} + U_{z}h_{t-1} + b_z) 2. reset gate :math:`r`: determines how to combine the new input with the previous memory. The reset gate is computed similarly to the update gate: .. math:: r_t = \\sigma(W_{r}x_{t} + U_{r}h_{t-1} + b_r) 3. The candidate activation :math:`\\tilde{h_t}` is computed similarly to that of the traditional recurrent unit: .. math:: {\\tilde{h_t}} = tanh(W x_{t} + U (r_{t} \odot h_{t-1}) + b) 4. 
The hidden activation :math:`h_t` of the GRU at time t is a linear
    interpolation between the previous activation :math:`h_{t-1}` and the
    candidate activation :math:`\\tilde{h_t}`:

    ..  math::

        h_t = (1 - z_t) h_{t-1} + z_t {\\tilde{h_t}}

    NOTE: In PaddlePaddle's implementation, the multiplication operations
    :math:`W_{r}x_{t}`, :math:`W_{z}x_{t}` and :math:`W x_t` are not computed
    in gate_recurrent layer. Consequently, an additional mixed_layer with
    full_matrix_projection or a fc_layer must be included before grumemory
    is called.

    More details can be found by referring to `Empirical Evaluation of Gated
    Recurrent Neural Networks on Sequence Modeling.
    <https://arxiv.org/abs/1412.3555>`_

    The simple usage is:

    .. code-block:: python

       gru = grumemory(input)

    :param name: The gru layer name.
    :type name: None|basestring
    :param input: input layer.
    :type input: LayerOutput.
    :param reverse: Whether sequence process is reversed or not.
    :type reverse: bool
    :param act: activation type, TanhActivation by default. This activation
                affects the :math:`{\\tilde{h_t}}`.
    :type act: BaseActivation
    :param gate_act: gate activation type, SigmoidActivation by default.
                     This activation affects the :math:`z_t` and :math:`r_t`.
                     It is the :math:`\\sigma` in the above formula.
    :type gate_act: BaseActivation
    :param bias_attr: Bias attribute. None means default bias. False means no
                      bias.
    :type bias_attr: ParameterAttribute|None|False
    :param param_attr: Parameter Attribute.
    :type param_attr: ParameterAttribute|None|False
    :param layer_attr: Extra Layer attribute
    :type layer_attr: ExtraLayerAttribute|None
    :param size: Stub parameter of size, but actually not used. If set this
                 size will get a warning.
    :type size: None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert act.support_hppl
    assert gate_act.support_hppl
    # GRU input carries 3 interleaved slices (update gate, reset gate,
    # candidate), so the projected input must be divisible by 3.
    assert input.size is not None and input.size % 3 == 0
    if size is not None:
        # `size` is derived from the input; an explicit value is only
        # accepted (with a warning) when it matches input.size / 3.
        if input.size / 3 == size:
            plog = logger.warning
        else:
            plog = logger.fatal
        plog("NOTE: the gru memory layer's size is set by previous input layer,"
             " and should be input size / 3. Set size explicitly will be "
             "ignored.")

    Layer(
        name=name,
        type=LayerType.GRUMEMORY,
        active_type=act.name,
        active_gate_type=gate_act.name,
        reversed=reverse,
        bias=ParamAttr.to_bias(bias_attr),
        inputs=[Input(input.name, **param_attr.attr)],
        **ExtraLayerAttribute.to_kwargs(layer_attr))

    return LayerOutput(
        name,
        LayerType.GRUMEMORY, [input],
        size=input.size / 3,
        reverse=reverse)


@wrap_name_default()
@layer_support()
def last_seq(input,
             name=None,
             agg_level=AggregateLevel.EACH_TIMESTEP,
             layer_attr=None):
    """
    Get Last Timestamp Activation of a sequence.

    :param agg_level: Aggregated level
    :param name: Layer name.
    :type name: basestring
    :param input: Input layer name.
    :type input: LayerOutput
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if input.reverse is not None and input.reverse:
        # Taking the last step of a reversed sequence is usually a mistake;
        # warn but do not fail.
        logger.warning("You are getting the last instance of a sequence that"
                       " is a output of a REVERSED layer. There is no time"
                       " series information at all. Maybe you want to use"
                       " first_seq instead.")

    Layer(
        name=name,
        type=LayerType.SEQUENCE_LAST_INSTANCE,
        inputs=[input.name],
        trans_type=agg_level,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.SEQUENCE_LAST_INSTANCE,
        parents=[input],
        size=input.size)


@wrap_name_default()
@layer_support()
def first_seq(input,
              name=None,
              agg_level=AggregateLevel.EACH_TIMESTEP,
              layer_attr=None):
    """
    Get First Timestamp Activation of a sequence.

    :param agg_level: aggregation level
    :param name: Layer name.
    :type name: basestring
    :param input: Input layer name.
:type input: LayerOutput
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if input.reverse is not None and not input.reverse:
        # Taking the first step of a non-reversed sequence is usually a
        # mistake; warn but do not fail.
        logger.warning('You are getting the first instance for a time series,'
                       ' and it is a normal recurrent layer output. There is no'
                       ' time series information at all. Maybe you want to use'
                       ' last_seq instead.')

    Layer(
        name=name,
        type=LayerType.SEQUENCE_FIRST_INSTANCE,
        inputs=[input.name],
        trans_type=agg_level,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.SEQUENCE_FIRST_INSTANCE,
        parents=[input],
        size=input.size)


class ExpandLevel(object):
    # Aliases of AggregateLevel constants describing whether the expanded
    # input is a single timestep or a whole sequence.
    FROM_TIMESTEP = AggregateLevel.EACH_TIMESTEP
    FROM_SEQUENCE = AggregateLevel.EACH_SEQUENCE


@wrap_name_default()
@layer_support()
def expand_layer(input,
                 expand_as,
                 name=None,
                 bias_attr=False,
                 expand_level=ExpandLevel.FROM_TIMESTEP,
                 layer_attr=None):
    """
    A layer for "Expand Dense data or (sequence data where the length of each
    sequence is one) to sequence data."

    The example usage is:

    .. code-block:: python

       expand = expand_layer(input=layer1,
                             expand_as=layer2,
                             expand_level=ExpandLevel.FROM_TIMESTEP)

    :param input: Input layer
    :type input: LayerOutput
    :param expand_as: Expand as this layer's sequence info.
    :type expand_as: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param bias_attr: Bias attribute. None means default bias. False means no
                      bias.
    :type bias_attr: ParameterAttribute|None|False
    :param expand_level: whether input layer is timestep(default) or sequence.
    :type expand_level: ExpandLevel
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        inputs=[input.name, expand_as.name],
        name=name,
        bias=ParamAttr.to_bias(bias_attr=bias_attr),
        type=LayerType.EXPAND_LAYER,
        trans_type=expand_level,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        size=input.size,
        layer_type=LayerType.EXPAND_LAYER,
        parents=[input, expand_as])


@wrap_name_default()
@layer_support()
def repeat_layer(input, num_repeats, name=None, layer_attr=None):
    """
    A layer for repeating the input for num_repeats times. This is equivalent
    to apply concat_layer() with num_repeats same input.

    .. math::
       y  = [x, x, \cdots, x]

    The example usage is:

    .. code-block:: python

       expand = repeat_layer(layer, 4)

    :param input: Input layer
    :type input: LayerOutput
    :param num_repeats: Repeat the input so many times
    :type num_repeats: int
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # num_repeats is passed through the num_filters field of the underlying
    # FEATURE_MAP_EXPAND layer config.
    l = Layer(
        inputs=[input.name],
        name=name,
        num_filters=num_repeats,
        type=LayerType.FEATURE_MAP_EXPAND_LAYER,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        size=l.config.size,
        layer_type=LayerType.FEATURE_MAP_EXPAND_LAYER,
        parents=[input])


@wrap_name_default()
@layer_support()
def interpolation_layer(input, weight, name=None, layer_attr=None):
    """
    This layer is for linear interpolation with two inputs, which is used in
    NEURAL TURING MACHINE.

    .. math::
       y.row[i] = w[i] * x_1.row[i] + (1 - w[i]) * x_2.row[i]

    where :math:`x_1` and :math:`x_2` are two (batchSize x dataDim) inputs,
    :math:`w` is (batchSize x 1) weight vector, and :math:`y` is
    (batchSize x dataDim) output.

    The example usage is:

    .. code-block:: python

       interpolation = interpolation_layer(input=[layer1, layer2], weight=layer3)

    :param input: Input layer.
    :type input: list|tuple
    :param weight: Weight layer.
    :type weight: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
:type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, collections.Sequence)
    assert len(input) == 2
    assert isinstance(input[0], LayerOutput) and isinstance(input[1],
                                                            LayerOutput)
    if input[0].size is not None and input[1].size is not None:
        assert input[0].size == input[1].size
    assert isinstance(weight, LayerOutput)
    if weight.size is not None:
        # The weight is a per-sample scalar.
        assert weight.size == 1
    Layer(
        name=name,
        type=LayerType.INTERPOLATION_LAYER,
        inputs=[weight.name, input[0].name, input[1].name],
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.INTERPOLATION_LAYER,
        parents=[weight, input[0], input[1]],
        size=input[0].size)


@wrap_name_default()
@layer_support()
def bilinear_interp_layer(input,
                          out_size_x=None,
                          out_size_y=None,
                          name=None,
                          layer_attr=None):
    """
    This layer is to implement bilinear interpolation on conv layer output.

    Please refer to Wikipedia: https://en.wikipedia.org/wiki/Bilinear_interpolation

    The simple usage is:

    .. code-block:: python

       bilinear = bilinear_interp_layer(input=layer1, out_size_x=64, out_size_y=64)

    :param input: A input layer.
    :type input: LayerOutput.
    :param out_size_x: bilinear interpolation output width.
    :type out_size_x: int|None
    :param out_size_y: bilinear interpolation output height.
    :type out_size_y: int|None
    :param name: The layer's name, which can not be specified.
    :type name: None|basestring
    :param layer_attr: Extra Layer attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Only a linear-activation conv layer output is accepted as input.
    assert input.layer_type == LayerType.CONV_LAYER
    assert isinstance(input.activation, LinearActivation)
    assert out_size_x > 0 and out_size_y > 0
    assert input.num_filters is not None
    num_channels = input.num_filters
    l = Layer(
        name=name,
        inputs=Input(
            input.name,
            bilinear_interp=BilinearInterp(
                out_size_x=out_size_x,
                out_size_y=out_size_y,
                channels=num_channels)),
        type=LayerType.BILINEAR_INTERP_LAYER,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.BILINEAR_INTERP_LAYER,
        parents=[input],
        num_filters=num_channels,
        size=l.config.size)


@wrap_name_default()
@layer_support()
def power_layer(input, weight, name=None, layer_attr=None):
    """
    This layer applies a power function to a vector element-wise,
    which is used in NEURAL TURING MACHINE.

    .. math::
       y = x^w

    where :math:`x` is a input vector, :math:`w` is scalar weight,
    and :math:`y` is a output vector.

    The example usage is:

    .. code-block:: python

       power = power_layer(input=layer1, weight=layer2)

    :param input: Input layer.
    :type input: LayerOutput
    :param weight: Weight layer.
    :type weight: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput) and isinstance(weight, LayerOutput)
    if weight.size is not None:
        # The exponent is a per-sample scalar.
        assert weight.size == 1
    Layer(
        name=name,
        type=LayerType.POWER_LAYER,
        inputs=[weight.name, input.name],
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.POWER_LAYER, parents=[input, weight], size=input.size)


@wrap_name_default()
@layer_support()
def scaling_layer(input, weight, name=None, layer_attr=None):
    """
    A layer for multiplying input vector by weight scalar.

    .. math::
       y  = w x

    where :math:`x` is size=dataDim input, :math:`w` is size=1 weight,
    and :math:`y` is size=dataDim output.

    Note that the above computation is for one sample.
Multiple samples are processed in one batch.

    The example usage is:

    .. code-block:: python

       scale = scaling_layer(input=layer1, weight=layer2)

    :param input: Input layer.
    :type input: LayerOutput
    :param weight: Weight layer.
    :type weight: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(weight, LayerOutput) and isinstance(input, LayerOutput)
    if weight.size is not None:
        # The scale is a per-sample scalar.
        assert weight.size == 1
    Layer(
        name=name,
        type=LayerType.SCALING_LAYER,
        inputs=[weight.name, input.name],
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.SCALING_LAYER, parents=[weight, input], size=input.size)


@wrap_name_default()
@layer_support()
def trans_layer(input, name=None, layer_attr=None):
    """
    A layer for transposition.

    .. math::
       y = x^\mathrm{T}

    where :math:`x` is (M x N) input, and :math:`y` is (N x M) output.

    The example usage is:

    .. code-block:: python

       trans = trans_layer(input=layer)

    :param input: Input layer.
    :type input: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        name=name,
        type=LayerType.TRANS_LAYER,
        inputs=[input.name],
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.TRANS_LAYER, parents=[input], size=input.size)


@wrap_name_default()
@layer_support()
def cos_sim(a, b, scale=5, size=1, name=None, layer_attr=None):
    """
    Cosine Similarity Layer. The cosine similarity equation is here.

    ..  math::
        similarity = cos(\\theta) = {\\mathbf{a} \\cdot \\mathbf{b}
        \\over \\|\\mathbf{a}\\| \\|\\mathbf{b}\\|}

    The size of a is M, size of b is M*N,
    Similarity will be calculated N times by step M. The output size is
    N. The scale will be multiplied to similarity.

    Note that the above computation is for one sample. Multiple samples are
    processed in one batch.

    :param name: layer name
    :type name: basestring
    :param a: input layer a
    :type a: LayerOutput
    :param b: input layer b
    :type b: LayerOutput
    :param scale: scale for cosine value. default is 5.
    :type scale: float
    :param size: layer size. NOTE size_a * size should equal size_b.
    :type size: int
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(a, LayerOutput) and isinstance(b, LayerOutput)
    if size == 1:
        # Plain cosine similarity between two equally-sized vectors.
        Layer(
            name=name,
            type=LayerType.COSINE_SIM,
            cos_scale=scale,
            inputs=[a.name, b.name],
            **ExtraLayerAttribute.to_kwargs(layer_attr))
    else:
        # Vectorized variant: b holds `size` blocks, each compared against a.
        if a.size is not None and b.size is not None:
            assert size == b.size / a.size
        Layer(
            name=name,
            type=LayerType.COSINE_SIM_VEC,
            size=size,
            cos_scale=scale,
            inputs=[a.name, b.name],
            **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(name, LayerType.COSINE_SIM, parents=[a, b], size=size)


@wrap_name_default()
@wrap_bias_attr_default(has_bias=True)
@wrap_param_attr_default()
@layer_support()
def hsigmoid(input,
             label,
             num_classes,
             name=None,
             bias_attr=None,
             param_attr=None,
             layer_attr=None):
    """
    Organize the classes into a binary tree. At each node, a sigmoid function
    is used to calculate the probability of belonging to the right branch.
    This idea is from "F. Morin, Y. Bengio (AISTATS 05):
    Hierarchical Probabilistic Neural Network Language Model."

    The example usage is:

    ..  code-block:: python

        cost = hsigmoid(input=[layer1, layer2],
                        label=data_layer,
                        num_classes=3)

    :param input: Input layers. It could be a LayerOutput or list/tuple of
                 LayerOutput.
    :type input: LayerOutput|list|tuple
    :param label: Label layer.
    :type label: LayerOutput
    :param num_classes: number of classes.
    :type num_classes: int
    :param name: layer name
    :type name: basestring
    :param bias_attr: Bias attribute. None means default bias. False means no
                      bias.
:type bias_attr: ParameterAttribute|False
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Normalize `input` to a list and broadcast/validate param_attr so that
    # there is exactly one attribute per input layer.
    if isinstance(input, LayerOutput):
        input = [input]
        if not isinstance(param_attr, collections.Sequence):
            param_attr = [param_attr]
    else:
        if not isinstance(param_attr, collections.Sequence):
            param_attr = [param_attr] * len(input)
        else:
            assert len(param_attr) == len(input)

    assert isinstance(input, collections.Sequence)
    assert isinstance(label, LayerOutput)
    assert label.layer_type == LayerType.DATA

    ipts_for_layer = []
    parents = []
    for each_input, each_param_attr in zip(input, param_attr):
        assert isinstance(each_input, LayerOutput)
        ipts_for_layer.append(Input(each_input.name, **each_param_attr.attr))
        parents.append(each_input)
    # The label is appended as the last input of the underlying layer.
    ipts_for_layer.append(label.name)
    parents.append(label)

    l = Layer(
        name=name,
        type=LayerType.HSIGMOID,
        num_classes=num_classes,
        bias=ParamAttr.to_bias(bias_attr),
        inputs=ipts_for_layer,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.HSIGMOID, parents=parents, size=l.config.size)


@wrap_name_default("conv")
@wrap_param_attr_default()
@wrap_bias_attr_default()
@wrap_act_default(act=ReluActivation())
@layer_support(DROPOUT)
def img_conv_layer(input,
                   filter_size,
                   num_filters,
                   name=None,
                   num_channels=None,
                   act=None,
                   groups=1,
                   stride=1,
                   padding=0,
                   bias_attr=None,
                   param_attr=None,
                   shared_biases=True,
                   layer_attr=None,
                   filter_size_y=None,
                   stride_y=None,
                   padding_y=None,
                   trans=False,
                   layer_type=None):
    """
    Convolution layer for image. Paddle only support square input currently
    and thus input image's width equals height.

    The details of convolution layer, please refer UFLDL's `convolution
    <http://ufldl.stanford.edu/tutorial/supervised/
    FeatureExtractionUsingConvolution/>`_ .

    Convolution Transpose (deconv) layer for image. Paddle only support square
    input currently and thus input image's width equals height.

    The details of convolution transpose layer,
    please refer to the following explanation and references therein
    <http://datascience.stackexchange.com/questions/6107/
    what-are-deconvolutional-layers/>`_ .

    The num_channel means input image's channel number. It may be 1 or 3 when
    input is raw pixels of image(mono or RGB), or it may be the previous
    layer's num_filters * num_group.

    There are several group of filter in PaddlePaddle implementation.
    Each group will process some channel of the inputs. For example, if an
    input num_channel = 256, group = 4, num_filter=32, the PaddlePaddle will
    create 32*4 = 128 filters to process inputs. The channels will be split
    into 4 pieces. First 256/4 = 64 channels will process by first 32 filters.
    The rest channels will be processed by rest group of filters.

    :param name: Layer name.
    :type name: basestring
    :param input: Layer Input.
    :type input: LayerOutput
    :param filter_size: The x dimension of a filter kernel. Or input a tuple
                        for two image dimension.
    :type filter_size: int|tuple|list
    :param filter_size_y: The y dimension of a filter kernel. Since
                          PaddlePaddle currently supports rectangular filters,
                          the filter's shape will be
                          (filter_size, filter_size_y).
    :type filter_size_y: int|None
    :param num_filters: Each filter group's number of filter
    :param act: Activation type. Default is tanh
    :type act: BaseActivation
    :param groups: Group size of filters.
    :type groups: int
    :param stride: The x dimension of the stride. Or input a tuple for two
                   image dimension.
    :type stride: int|tuple|list
    :param stride_y: The y dimension of the stride.
    :type stride_y: int
    :param padding: The x dimension of the padding. Or input a tuple for two
                    image dimension
    :type padding: int|tuple|list
    :param padding_y: The y dimension of the padding.
    :type padding_y: int
    :param bias_attr: Convolution bias attribute. None means default bias.
                      False means no bias.
    :type bias_attr: ParameterAttribute|False
    :param num_channels: number of input channels. If None will be set
                         automatically from previous output.
    :type num_channels: int
    :param param_attr: Convolution param attribute. None means default
                       attribute
    :type param_attr: ParameterAttribute
    :param shared_biases: Is biases will be shared between filters or not.
    :type shared_biases: bool
    :param layer_attr: Layer Extra Attribute.
    :type layer_attr: ExtraLayerAttribute
    :param trans: true if it is a convTransLayer, false if it is a convLayer
    :type trans: bool
    :param layer_type: specify the layer_type, default is None. If trans=True,
                       layer_type has to be "exconvt", otherwise layer_type
                       has to be either "exconv" or "cudnn_conv"
    :type layer_type: String
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if num_channels is None:
        assert input.num_filters is not None
        num_channels = input.num_filters

    # Accept either an int or a (x, y) pair for filter size / stride / padding;
    # a scalar is broadcast to both dimensions.
    if filter_size_y is None:
        if isinstance(filter_size, collections.Sequence):
            assert len(filter_size) == 2
            filter_size, filter_size_y = filter_size
        else:
            filter_size_y = filter_size

    if stride_y is None:
        if isinstance(stride, collections.Sequence):
            assert len(stride) == 2
            stride, stride_y = stride
        else:
            stride_y = stride

    if padding_y is None:
        if isinstance(padding, collections.Sequence):
            assert len(padding) == 2
            padding, padding_y = padding
        else:
            padding_y = padding

    if param_attr.attr.get('initial_smart'):
        # special initial for conv layers: std = sqrt(2 / fan_in), with
        # fan_in = filter_size^2 * num_channels.
        init_w = (2.0 / (filter_size**2 * num_channels))**0.5
        param_attr.attr["initial_mean"] = 0.0
        param_attr.attr["initial_std"] = init_w
        param_attr.attr["initial_strategy"] = 0
        param_attr.attr["initial_smart"] = False

    if layer_type:
        # An explicit layer_type must be consistent with the trans flag.
        if trans:
            assert layer_type in ["exconvt"]
        else:
            assert layer_type in ["exconv", "cudnn_conv"]
        lt = layer_type
    else:
        lt = LayerType.CONVTRANS_LAYER if trans else LayerType.CONV_LAYER

    l = Layer(
        name=name,
        inputs=Input(
            input.name,
            conv=Conv(
                filter_size=filter_size,
                padding=padding,
                stride=stride,
                channels=num_channels,
                groups=groups,
                filter_size_y=filter_size_y,
                padding_y=padding_y,
                stride_y=stride_y),
            **param_attr.attr),
        active_type=act.name,
        num_filters=num_filters,
        bias=ParamAttr.to_bias(bias_attr),
        shared_biases=shared_biases,
        type=lt,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        lt,
        parents=[input],
        activation=act,
        num_filters=num_filters,
        size=l.config.size)


@wrap_name_default("pool")
@layer_support()
def img_pool_layer(input,
                   pool_size,
                   name=None,
                   num_channels=None,
                   pool_type=None,
                   stride=1,
                   padding=0,
                   layer_attr=None,
                   pool_size_y=None,
                   stride_y=None,
                   padding_y=None):
    """
    Image pooling Layer.

    The details of pooling layer, please refer ufldl's pooling_ .

    .. _pooling: http://ufldl.stanford.edu/tutorial/supervised/Pooling/

    :param padding: pooling padding width.
    :type padding: int
    :param padding_y: pooling padding height. It's equal to padding by default.
    :type padding_y: int|None
    :param name: name of pooling layer
    :type name: basestring.
    :param input: layer's input
    :type input: LayerOutput
    :param pool_size: pooling window width
    :type pool_size: int
    :param pool_size_y: pooling window height. It's equal to pool_size by
                        default.
    :type pool_size_y: int|None
    :param num_channels: number of input channel.
    :type num_channels: int
    :param pool_type: pooling type. MaxPooling or AvgPooling. Default is
                      MaxPooling.
    :type pool_type: BasePoolingType
    :param stride: stride width of pooling.
:type stride: int
    :param stride_y: stride height of pooling. It is equal to stride by
                     default.
    :type stride_y: int|None
    :param layer_attr: Extra Layer attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if num_channels is None:
        assert input.num_filters is not None
        num_channels = input.num_filters

    if pool_type is None:
        pool_type = MaxPooling()
    elif isinstance(pool_type, AvgPooling):
        pool_type.name = 'avg'

    # Max/Avg pooling use the "-projection" variant of the pool type name.
    type_name = pool_type.name + '-projection' \
        if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)) \
        else pool_type.name

    # The y-dimension parameters default to their x-dimension counterparts.
    pool_size_y = pool_size if pool_size_y is None else pool_size_y
    stride_y = stride if stride_y is None else stride_y
    padding_y = padding if padding_y is None else padding_y

    l = Layer(
        name=name,
        type=LayerType.POOL_LAYER,
        inputs=[
            Input(
                input.name,
                pool=Pool(
                    pool_type=type_name,
                    channels=num_channels,
                    size_x=pool_size,
                    start=None,
                    stride=stride,
                    padding=padding,
                    size_y=pool_size_y,
                    stride_y=stride_y,
                    padding_y=padding_y))
        ],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.POOL_LAYER,
        parents=[input],
        num_filters=num_channels,
        size=l.config.size)


@wrap_name_default("spp")
@layer_support()
def spp_layer(input,
              name=None,
              num_channels=None,
              pool_type=None,
              pyramid_height=None,
              layer_attr=None):
    """
    Spatial Pyramid Pooling in Deep Convolutional Networks for Visual
    Recognition.
    The details please refer to
    `Kaiming He's paper <https://arxiv.org/abs/1406.4729>`_.

    :param name: layer name.
    :type name: basestring
    :param input: layer's input.
    :type input: LayerOutput
    :param num_channels: number of input channel.
    :type num_channels: int
    :param pool_type: Pooling type. MaxPooling or AveragePooling. Default is
                      MaxPooling.
    :type scale: BasePoolingType
    :param pyramid_height: pyramid height.
    :type pyramid_height: int
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if num_channels is None:
        assert input.num_filters is not None
        num_channels = input.num_filters

    if pool_type is None:
        pool_type = MaxPooling()
    elif isinstance(pool_type, AvgPooling):
        pool_type.name = 'avg'

    # Max/Avg pooling use the "-projection" variant of the pool type name.
    type_name = pool_type.name
    if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)):
        type_name += '-projection'

    l = Layer(
        name=name,
        type=LayerType.SPP_LAYER,
        inputs=Input(
            input.name,
            spp=SpatialPyramidPool(
                pool_type=type_name,
                channels=num_channels,
                pyramid_height=pyramid_height)),
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        layer_type=LayerType.SPP_LAYER,
        parents=[input],
        num_filters=num_channels,
        size=l.config.size)


def __img_norm_layer__(name, input, size, norm_type, scale, power, num_channels,
                       blocked, layer_attr):
    # Shared helper that builds an image response-normalization layer config.
    if num_channels is None:
        assert input.num_filters is not None
        num_channels = input.num_filters

    l = Layer(
        name=name,
        type=LayerType.NORM_LAYER,
        inputs=Input(
            input.name,
            norm=Norm(
                norm_type=norm_type,
                channels=num_channels,
                size=size,
                scale=scale,
                pow=power,
                blocked=blocked)),
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        layer_type=LayerType.NORM_LAYER,
        parents=[input],
        num_filters=num_channels,
        img_norm_type=norm_type,
        size=l.config.size)


@wrap_name_default("crmnorm")
@layer_support()
def img_cmrnorm_layer(input,
                      size,
                      scale=0.0128,
                      power=0.75,
                      name=None,
                      num_channels=None,
                      layer_attr=None):
    """
    Response normalization across feature maps.
    The details please refer to
    `Alex's paper <http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_.

    :param name: layer name.
    :type name: None|basestring
    :param input: layer's input.
    :type input: LayerOutput
    :param size: Normalize in number of :math:`size` feature maps.
    :type size: int
    :param scale: The hyper-parameter.
    :type scale: float
    :param power: The hyper-parameter.
    :type power: float
    :param num_channels: input layer's filters number or channels. If
                         num_channels is None, it will be set automatically.
:param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # blocked=0: cross-map (not within-map) normalization.
    return __img_norm_layer__(name, input, size, "cmrnorm-projection", scale,
                              power, num_channels, 0, layer_attr)


@wrap_bias_attr_default()
@wrap_param_attr_default(default_factory=lambda _: ParamAttr(initial_mean=1.0,
                                                             initial_std=0.))
@wrap_act_default(act=ReluActivation())
@wrap_name_default("batch_norm")
@layer_support(DROPOUT)
def batch_norm_layer(input,
                     act=None,
                     name=None,
                     num_channels=None,
                     bias_attr=None,
                     param_attr=None,
                     layer_attr=None,
                     batch_norm_type=None,
                     moving_average_fraction=0.9,
                     use_global_stats=None):
    """
    Batch Normalization Layer. The notation of this layer as follow.

    :math:`x` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
        \ mini-batch\ mean \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

    The details of batch normalization please refer to this
    `paper <http://arxiv.org/abs/1502.03167>`_.

    :param name: layer name.
    :type name: basestring
    :param input: batch normalization input. Better be linear activation.
                Because there is an activation inside batch_normalization.
    :type input: LayerOutput
    :param batch_norm_type: We have batch_norm and cudnn_batch_norm. batch_norm
                            supports both CPU and GPU. cudnn_batch_norm
                            requires cuDNN version greater or equal to v4
                            (>=v4). But cudnn_batch_norm is faster and needs
                            less memory than batch_norm. By default (None), we
                            will automatically select cudnn_batch_norm for GPU
                            and batch_norm for CPU. Otherwise, select batch
                            norm type based on the specified type. If you use
                            cudnn_batch_norm, we suggested you use latest
                            version, such as v5.1.
    :type batch_norm_type: None|string, None or "batch_norm" or
                           "cudnn_batch_norm"
    :param act: Activation Type. Better be relu. Because batch
                normalization will normalize input near zero.
    :type act: BaseActivation
    :param num_channels: num of image channels or previous layer's number of
                         filters. None will automatically get from layer's
                         input.
    :type num_channels: int
    :param bias_attr: :math:`\\beta`, better be zero when initialize. So the
                      initial_std=0, initial_mean=1 is best practice.
    :type bias_attr: ParameterAttribute
    :param param_attr: :math:`\\gamma`, better be one when initialize. So the
                       initial_std=0, initial_mean=1 is best practice.
    :type param_attr: ParameterAttribute
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :param use_global_stats: whether use moving mean/variance statistics during
                             testing period. If None or True, it will use
                             moving mean/variance statistics during testing. If
                             False, it will use the mean and variance of
                             current batch of test data for testing.
    :type use_global_stats: bool|None.
    :param moving_average_fraction: Factor used in the moving average
                                    computation, referred to as factor,
                                    :math:`runningMean = newMean*(1-factor)
                                    + runningMean*factor`
    :type moving_average_fraction: float.
    :return: LayerOutput object.
:rtype: LayerOutput
    """
    if not isinstance(act, ReluActivation):
        # Relu is the recommended activation for batch norm; only warn.
        logger.log(logging.WARN,
                   "%s is not recommend for batch normalization's activation, "
                   "maybe the relu is better" % act.name)

    if not isinstance(input.activation, LinearActivation):
        logger.log(logging.WARN,
                   "The activation should be inside batch normalization, the "
                   "previous layer's activation may be Linear")

    if num_channels is None:
        # Prefer the conv-style filter count; fall back to the flat size.
        if input.num_filters is not None:
            num_channels = input.num_filters
        else:
            num_channels = input.size
    assert (batch_norm_type is None) or (batch_norm_type == "batch_norm") or \
           (batch_norm_type == "cudnn_batch_norm")
    l = Layer(
        name=name,
        inputs=Input(
            input.name, image=Image(channels=num_channels), **param_attr.attr),
        active_type=act.name,
        type=LayerType.BATCH_NORM_LAYER,
        batch_norm_type=batch_norm_type,
        bias=ParamAttr.to_bias(bias_attr),
        moving_average_fraction=moving_average_fraction,
        use_global_stats=use_global_stats,
        **ExtraLayerAttribute.to_kwargs(layer_attr))

    return LayerOutput(
        name=name,
        layer_type=LayerType.BATCH_NORM_LAYER,
        parents=[input],
        activation=act,
        num_filters=num_channels,
        size=l.config.size)


@wrap_name_default()
@layer_support()
def sum_to_one_norm_layer(input, name=None, layer_attr=None):
    """
    A layer for sum-to-one normalization,
    which is used in NEURAL TURING MACHINE.

    .. math::
       out[i] = \\frac {in[i]} {\sum_{k=1}^N in[k]}

    where :math:`in` is a (batchSize x dataDim) input vector,
    and :math:`out` is a (batchSize x dataDim) output vector.

    The example usage is:

    .. code-block:: python

       sum_to_one_norm = sum_to_one_norm_layer(input=layer)

    :param input: Input layer.
    :type input: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        name=name,
        type=LayerType.SUM_TO_ONE_NORM_LAYER,
        inputs=[input.name],
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.SUM_TO_ONE_NORM_LAYER, parents=[input], size=input.size)


@wrap_name_default("addto")
@wrap_act_default(act=LinearActivation())
@wrap_bias_attr_default(has_bias=False)
@layer_support(DROPOUT)
def addto_layer(input, act=None, name=None, bias_attr=None, layer_attr=None):
    """
    AddtoLayer.

    ..  math::

        y = f(\\sum_{i} x_i + b)

    where :math:`y` is output, :math:`x` is input, :math:`b` is bias,
    and :math:`f` is activation function.

    The example usage is:

    ..  code-block:: python

        addto = addto_layer(input=[layer1, layer2],
                            act=ReluActivation(),
                            bias_attr=False)

    This layer just simply add all input layers together, then activate the
    sum inputs. Each input of this layer should be the same size, which is
    also the output size of this layer.

    There is no weight matrix for each input, because it just a simple add
    operation. If you want a complicated operation before add, please use
    mixed_layer.

    It is a very good way to set dropout outside the layers. Since not all
    PaddlePaddle layer support dropout, you can add an add_to layer, set
    dropout here.
    Please refer to dropout_layer for details.

    :param name: Layer name.
    :type name: basestring
    :param input: Input layers. It could be a LayerOutput or list/tuple of
                 LayerOutput.
    :type input: LayerOutput|list|tuple
    :param act: Activation Type, default is tanh.
    :type act: BaseActivation
    :param bias_attr: Bias attribute. If False, means no bias. None is default
                      bias.
    :type bias_attr: ParameterAttribute|bool
    :param layer_attr: Extra Layer attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
:rtype: LayerOutput """ num_filters = None if isinstance(input, LayerOutput): input = [input] assert isinstance(input, collections.Sequence) ipts_for_layer = [] for each_input in input: assert isinstance(each_input, LayerOutput) ipts_for_layer.append(Input(each_input.name)) if each_input.num_filters is not None: num_filters = each_input.num_filters l = Layer( name=name, type=LayerType.ADDTO_LAYER, inputs=ipts_for_layer, bias=ParamAttr.to_bias(bias_attr), active_type=act.name, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.ADDTO_LAYER, parents=input, activation=act, num_filters=num_filters, size=l.config.size) @wrap_act_default(act=IdentityActivation()) @wrap_name_default("concat") @layer_support() def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None): """ Concat all input vector into one huge vector. Inputs can be list of LayerOutput or list of projection. The example usage is: .. code-block:: python concat = concat_layer(input=[layer1, layer2]) :param name: Layer name. :type name: basestring :param input: input layers or projections :type input: list|tuple|collections.Sequence :param act: Activation type. :type act: BaseActivation :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. 
:rtype: LayerOutput """ if isinstance(input, LayerOutput): input = [input] elif isinstance(input, Projection): input = [input] else: assert isinstance(input, collections.Sequence) def __is_type__(o, tp): if not isinstance(o, collections.Sequence): if o == tp: return True elif len(o.__bases__) == 0: return False else: for bs in o.__bases__: if __is_type__(bs, tp): return True return False else: tmp = map(lambda _x: __is_type__(_x, tp), o) a = tmp[0] for b in tmp[1:]: assert a == b return a def __reduce_concat_type__(a, b): assert __is_type__([a, b], Projection) or __is_type__([a, b], LayerOutput) return a is_concat_layer = __is_type__( reduce(__reduce_concat_type__, map(type, input)), LayerOutput) layer_type = (LayerType.CONCAT_LAYER if is_concat_layer else LayerType.CONCAT_PROJ_LAYER) if layer_type == LayerType.CONCAT_LAYER: assert not bias_attr Layer( name=name, type=layer_type, inputs=[x.name for x in input] if is_concat_layer else input, active_type=act.name, bias=ParamAttr.to_bias(bias_attr), **ExtraLayerAttribute.to_kwargs(layer_attr)) sz = 0 for each_input in input: if each_input.size is not None: sz += each_input.size else: sz = None break return LayerOutput( name, layer_type=layer_type, parents=input if is_concat_layer else [x.origin for x in input], activation=act, size=sz) def memory(name, size, is_seq=False, boot_layer=None, boot_bias=None, boot_bias_active_type=None, boot_with_const_id=None): """ The memory layers is a layer cross each time step. Reference this output as previous time step layer :code:`name` 's output. The default memory is zero in first time step, previous time step's output in the rest time steps. If boot_bias, the first time step value is this bias and with activation. If boot_with_const_id, then the first time stop is a IndexSlot, the Arguments.ids()[0] is this :code:`cost_id`. If boot_layer is not null, the memory is just the boot_layer's output. Set :code:`is_seq` is true boot layer is sequence. 
    The same name layer in a recurrent group will set the memory on each
    time step.

    :param name: memory's name.
    :type name: basestring
    :param size: size of memory.
    :type size: int
    :param is_seq: is sequence for boot_layer
    :type is_seq: bool
    :param boot_layer: boot layer of memory.
    :type boot_layer: LayerOutput|None
    :param boot_bias: boot layer's bias
    :type boot_bias: ParameterAttribute|None
    :param boot_bias_active_type: boot layer's active type.
    :type boot_bias_active_type: BaseActivation
    :param boot_with_const_id: boot layer's id.
    :type boot_with_const_id: int
    :return: LayerOutput object which is a memory.
    :rtype: LayerOutput
    """
    if boot_bias_active_type is None:
        boot_bias_active_type = LinearActivation()

    assert boot_bias is None or isinstance(boot_bias, ParameterAttribute)
    if isinstance(boot_bias, ParameterAttribute):
        boot_bias = ParamAttr.to_bias(boot_bias)

    assert boot_layer is None or isinstance(boot_layer, LayerOutput)

    # Memory() registers the agent in the global config and returns the
    # agent's name, which is what downstream layers must reference.
    agent_name = Memory(name, size, is_seq, boot_layer.name
                        if boot_layer is not None else None, boot_bias,
                        boot_bias_active_type.name, boot_with_const_id)

    lout = LayerOutput(
        name=agent_name,
        size=size,
        layer_type=LayerType.MEMORY,
        parents=[boot_layer] if boot_layer is not None else None)
    return lout


@wrap_bias_attr_default()
@wrap_act_default(
    param_names=['gate_act', 'state_act'], act=SigmoidActivation())
@wrap_act_default(act=TanhActivation())
@wrap_name_default('lstm_step')
@layer_support()
def lstm_step_layer(input,
                    state,
                    size,
                    act=None,
                    name=None,
                    gate_act=None,
                    state_act=None,
                    bias_attr=None,
                    layer_attr=None):
    """
    LSTM Step Layer. It is used in recurrent_group. The lstm equations are
    shown as follows.

    .. math::

        i_t & = \\sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i)

        f_t & = \\sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f)

        c_t & = f_tc_{t-1} + i_t tanh (W_{xc}x_t+W_{hc}h_{t-1} + b_c)

        o_t & = \\sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o)

        h_t & = o_t tanh(c_t)

    The input of lstm step is :math:`Wx_t + Wh_{t-1}`, and the user should
    use :code:`mixed_layer` and :code:`full_matrix_projection` to calculate
    this input vector.

    The state of lstm step is :math:`c_{t-1}`. And lstm step layer will do

    .. math::

        i_t = \\sigma(input + W_{ci}c_{t-1} + b_i)

        ...

    This layer contains two outputs. Default output is :math:`h_t`. The other
    output is :math:`o_t`, whose name is 'state' and can be extracted with
    :code:`get_output_layer`.

    :param name: Layer's name.
    :type name: basestring
    :param size: Layer's size. NOTE: lstm layer's size should be equal to
                 :code:`input.size/4`, and should be equal to
                 :code:`state.size`.
    :type size: int
    :param input: input layer. :math:`Wx_t + Wh_{t-1}`
    :type input: LayerOutput
    :param state: State Layer. :math:`c_{t-1}`
    :type state: LayerOutput
    :param act: Activation type. Default is tanh.
    :type act: BaseActivation
    :param gate_act: Gate Activation Type. Default is sigmoid, and should
                     be sigmoid only.
    :type gate_act: BaseActivation
    :param state_act: State Activation Type. Default is sigmoid, and should
                      be sigmoid only.
    :type state_act: BaseActivation
    :param bias_attr: Bias Attribute.
    :type bias_attr: ParameterAttribute
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        name=name,
        type=LayerType.LSTM_STEP_LAYER,
        active_type=act.name,
        active_gate_type=gate_act.name,
        active_state_type=state_act.name,
        bias=ParamAttr.to_bias(bias_attr),
        size=size,
        inputs=[input.name, state.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))

    return LayerOutput(
        name=name,
        layer_type=LayerType.LSTM_STEP_LAYER,
        parents=[input, state],
        activation=act,
        size=size,
        outputs=['default', 'state'])


@wrap_bias_attr_default()
@wrap_act_default(param_names=['gate_act'], act=SigmoidActivation())
@wrap_act_default(act=TanhActivation())
@wrap_name_default('gru_step')
@layer_support()
def gru_step_layer(input,
                   output_mem,
                   size=None,
                   act=None,
                   name=None,
                   gate_act=None,
                   bias_attr=None,
                   layer_attr=None):
    """
    GRU Step Layer. It is used in recurrent_group.

    :param input: input layer; its width must be three times the step size
                  (asserted below).
    :type input: LayerOutput
    :param output_mem: memory of the previous step's output.
    :param size: step size; defaults to input.size / 3 when not given.
    :param act: activation type of the output. Default is tanh.
    :param name: layer name.
    :param gate_act: activation type of the gates. Default is sigmoid.
    :param bias_attr: bias attribute.
    :param layer_attr: extra layer attribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert input.size % 3 == 0
    if size is None:
        # NOTE: Python 2 integer division — input.size / 3 yields an int.
        size = input.size / 3
    Layer(
        name=name,
        type=LayerType.GRU_STEP_LAYER,
        inputs=[input.name, output_mem.name],
        bias=ParamAttr.to_bias(bias_attr),
        size=size,
        active_type=act.name,
        active_gate_type=gate_act.name,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.GRU_STEP_LAYER,
        parents=[input, output_mem],
        size=size,
        activation=act)


@wrap_name_default()
@layer_support()
def get_output_layer(input, arg_name, name=None, layer_attr=None):
    """
    Get a layer's output by name. In PaddlePaddle, a layer might return
    multiple values, but returns one layer's output by default. If the user
    wants to use another output besides the default one, please use
    get_output_layer first to get the output from input.

    :param name: Layer's name.
    :type name: basestring
    :param input: get output layer's input. And this layer should contain
                   multiple outputs.
    :type input: LayerOutput
    :param arg_name: Output name from input.
    :type arg_name: basestring
    :param layer_attr: Layer's extra attribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # GetOutputLayer
    assert arg_name in input.outputs, 'Get Output From an not existed input.' \
                                      ' The get output name is %s, which not' \
                                      ' in %s' % (
                                          arg_name, ",".join(input.outputs))
    Layer(
        name=name,
        type=LayerType.GET_OUTPUT_LAYER,
        inputs=[Input(
            input.name, input_layer_argument=arg_name)],
        size=input.size,
        **ExtraLayerAttribute.to_kwargs(layer_attr))

    return LayerOutput(
        name=name,
        layer_type=LayerType.GET_OUTPUT_LAYER,
        parents=[input],
        size=input.size)


@wrap_name_default()
@wrap_act_default()
@wrap_bias_attr_default()
@wrap_param_attr_default()
@layer_support()
def recurrent_layer(input,
                    act=None,
                    bias_attr=None,
                    param_attr=None,
                    name=None,
                    reverse=False,
                    layer_attr=None):
    """
    Simple recurrent unit layer. It is just a fully connected layer through
    both time and neural network.

    For each sequence [start, end] it performs the following computation\:

    .. math::

        out_{i} = act(in_{i})     \\      \\      \\text{for} \\ i = start \\\\
        out_{i} = act(in_{i} + out_{i-1} * W) \\ \\ \\text{for} \\ start < i <= end

    If reversed is true, the order is reversed\:

    .. math::

        out_{i} = act(in_{i})           \\    \\   \\text{for} \\ i = end  \\\\
        out_{i} = act(in_{i} + out_{i+1} * W) \\ \\ \\text{for} \\ start <= i < end

    :param input: Input Layer
    :type input: LayerOutput
    :param act: activation.
    :type act: BaseActivation
    :param bias_attr: bias attribute.
    :type bias_attr: ParameterAttribute
    :param param_attr: parameter attribute.
    :type param_attr: ParameterAttribute
    :param name: name of the layer
    :type name: basestring
    :param layer_attr: Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        name=name,
        type=LayerType.RECURRENT_LAYER,
        inputs=Input(input.name, **param_attr.attr),
        active_type=act.name,
        bias=ParamAttr.to_bias(bias_attr),
        reversed=reverse,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.RECURRENT_LAYER,
        parents=[input],
        size=input.size,
        activation=act,
        reverse=reverse)


class StaticInput(object):
    """
    StaticInput is only used in recurrent_group which defines a read-only
    memory that can be a sequence or non-sequence.
    """

    def __init__(self, input, is_seq=False, size=None):
        assert isinstance(input, LayerOutput)
        self.input = input
        self.is_seq = is_seq
        assert input.size is not None or size is not None
        if size is not None:
            # NOTE(review): this mutates the wrapped LayerOutput's size
            # in place — callers holding the same LayerOutput see the change.
            input.size = size


class SubsequenceInput(object):
    """
    Input sequence has sub-sequence, used in recurrent_group.

    The example usage is:

    .. code-block:: python

       input = SubsequenceInput(layer)
    """

    def __init__(self, input):
        assert isinstance(input, LayerOutput)
        assert input.size is not None
        self.input = input


@wrap_name_default("recurrent_group")
def recurrent_group(step,
                    input,
                    reverse=False,
                    name=None,
                    targetInlink=None,
                    is_generating=False):
    """
    Recurrent layer group is an extremely flexible recurrent unit in
    PaddlePaddle. As long as the user defines the calculation done within a
    time step, PaddlePaddle will iterate such a recurrent calculation over
    sequence input. This is extremely useful for attention-based models, or
    Neural Turing Machine like models.

    The basic usage (time steps) is:

    .. code-block:: python

       def step(input):
           output = fc_layer(input=layer,
                             size=1024,
                             act=LinearActivation(),
                             bias_attr=False)
           return output

       group = recurrent_group(input=layer, step=step)

    You can see following configs for further usages:

    - time steps: lstmemory_group, paddle/gserver/tests/sequence_layer_group.conf, \
                  demo/seqToseq/seqToseq_net.py
    - sequence steps: paddle/gserver/tests/sequence_nest_layer_group.conf

    :param step: recurrent one time step function. The input of this function
                 is the input of the group. The return of this function will
                 be the recurrent group's return value.

                 The recurrent group scatters a sequence into time steps. And
                 for each time step, it will invoke the step function, and
                 return a time step result. Then it gathers each time step of
                 output into the layer group's output.
    :type step: callable

    :param name: recurrent_group's name.
    :type name: basestring

    :param input: Input links array.

                  LayerOutput will be scattered into time steps.
                  SubsequenceInput will be scattered into sequence steps.
                  StaticInput will be imported to each time step, and doesn't
                  change through time. It's a mechanism to access layers
                  outside the step function.
    :type input: LayerOutput|StaticInput|SubsequenceInput|list|tuple

    :param reverse: If reverse is set true, the recurrent unit will process
                    the input sequence in a reverse order.
    :type reverse: bool

    :param targetInlink: the input layer which shares info with the layer
                         group's output.

                         Param input specifies multiple input layers. For
                         SubsequenceInput inputs, config should assign one
                         input layer that shares info (the number of sentences
                         and the number of words in each sentence) with all
                         layer group's outputs. targetInlink should be one of
                         the layer group's inputs.
    :type targetInlink: LayerOutput|SubsequenceInput

    :param is_generating: If generating, no input type should be LayerOutput;
                          else, for training or testing, one of the input
                          types must be LayerOutput.
    :type is_generating: bool

    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    model_type('recurrent_nn')

    def is_single_input(x):
        return isinstance(x, LayerOutput) or isinstance(x, StaticInput) \
               or isinstance(x, SubsequenceInput)

    if is_single_input(input):
        input = [input]
    assert isinstance(input, collections.Sequence)

    def is_in_links(x):
        # Only LayerOutput / SubsequenceInput act as scatter in-links;
        # StaticInput is excluded.
        return isinstance(x, LayerOutput) or isinstance(x, SubsequenceInput)

    in_links = filter(is_in_links, input)

    def targetInlink_in_inlinks():
        for inlink in in_links:
            if isinstance(inlink, SubsequenceInput):
                if targetInlink == inlink.input:
                    return True
            elif targetInlink == inlink:
                return True
        return False

    assert (targetInlink == None or targetInlink_in_inlinks())
    targetInlinkName = None if targetInlink == None \
        else targetInlink.name if isinstance(targetInlink, LayerOutput) \
        else targetInlink.input.name

    # One-element list so the nested map_in_links closure can set the flag
    # (Python 2 has no `nonlocal`).
    contains_sub_seq = [False]

    def map_in_links(x):
        if isinstance(x, SubsequenceInput):
            contains_sub_seq[0] = True
            return Link(name=x.input.name, has_subseq=True)
        else:
            return x.name

    RecurrentLayerGroupWithoutOutLinksBegin(
        name=name,
        in_links=map(map_in_links, in_links),
        seq_reversed=reverse,
        target_inlinkname=targetInlinkName)
    in_args = []
    has_LayerOutput = False
    for each_input in input:
        assert is_single_input(each_input)
        if isinstance(each_input, LayerOutput):
            in_args.append(each_input)
            has_LayerOutput = True
        elif isinstance(each_input, SubsequenceInput):
            in_args.append(each_input.input)
            has_LayerOutput = True
        else:
            # StaticInput: expose it to each time step via a boot-strapped
            # memory wrapped in an identity mixed_layer.
            mem_name = "__%s_memory__" % each_input.input.name
            mem = memory(
                name=mem_name,
                is_seq=each_input.is_seq,
                size=each_input.input.size,
                boot_layer=each_input.input)
            with mixed_layer(
                    name=mem_name,
                    size=each_input.input.size,
                    act=IdentityActivation()) as mix:
                mix += identity_projection(mem)
            in_args.append(mem)

    # Generation uses no LayerOutput inputs; training/testing needs at least
    # one.
    assert (is_generating != has_LayerOutput)

    layer_outs = step(*in_args)

    if isinstance(layer_outs, LayerOutput):
        layer_outs = [layer_outs]

    for ot in layer_outs:
        assert isinstance(ot, LayerOutput)
        ot.reverse = reverse
        if contains_sub_seq[0]:
            RecurrentLayerGroupSetOutLink(Link(ot.name, has_subseq=True))
        else:
            RecurrentLayerGroupSetOutLink(ot.name)

    RecurrentLayerGroupEnd(name=name)

    # Unwrap single-output groups for caller convenience.
    if len(layer_outs) == 1:
        return layer_outs[0]
    else:
        return layer_outs


class BaseGeneratedInput(object):
    """
    Base class of generated inputs for beam_search; subclasses supply the
    per-step feedback (before_real_step) and the prediction extraction
    (after_real_step). bos_id/eos_id are filled in by beam_search.
    """

    def __init__(self):
        self.bos_id = None
        self.eos_id = None

    def before_real_step(self):
        raise NotImplementedError()

    def after_real_step(self, *args):
        raise NotImplementedError()


class GeneratedInput(BaseGeneratedInput):
    """
    Generated input that feeds the previous step's argmax prediction back in
    through an embedding lookup.
    """

    def after_real_step(self, input):
        return maxid_layer(input=input, name='__beam_search_predict__')

    def before_real_step(self):
        # Memory over the predicted ids, boot-strapped with the <bos> id on
        # the first step.
        predict_id = memory(
            name='__beam_search_predict__',
            size=self.size,
            boot_with_const_id=self.bos_id)

        trg_emb = embedding_layer(
            input=predict_id,
            size=self.embedding_size,
            param_attr=ParamAttr(name=self.embedding_name))
        return trg_emb

    def __init__(self, size, embedding_name, embedding_size):
        super(GeneratedInput, self).__init__()
        self.size = size
        self.embedding_name = embedding_name
        self.embedding_size = embedding_size


@wrap_name_default()
def maxid_layer(input, name=None, layer_attr=None):
    """
    A layer for finding the id which has the maximal value for each sample.
    The result is stored in output.ids.

    The example usage is:

    .. code-block:: python

       maxid = maxid_layer(input=layer)

    :param input: Input layer name.
    :type input: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput)
    l = Layer(
        name=name,
        type='maxid',
        inputs=[input.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.MAXID_LAYER,
        parents=[input],
        size=l.config.size)


@wrap_name_default()
def out_prod_layer(input1, input2, name=None, layer_attr=None):
    """
    A layer for computing the outer product of two vectors.
    The result is a matrix of size(input1) x size(input2).

    The example usage is:

    .. code-block:: python

       out_prod = out_prod_layer(input1=vec1, input2=vec2)

    :param name: Layer name.
    :type name: basestring
    :param input1: The first input layer name.
    :type input: LayerOutput
    :param input2: The second input layer name.
    :type input2: LayerOutput
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input1, LayerOutput)
    assert isinstance(input2, LayerOutput)
    l = Layer(
        name=name,
        type=LayerType.OUT_PROD_LAYER,
        inputs=[input1.name, input2.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.OUT_PROD_LAYER,
        parents=[input1, input2],
        size=l.config.size)


@wrap_name_default()
def eos_layer(input, eos_id, name=None, layer_attr=None):
    """
    A layer for checking EOS for each sample:
    - output_id = (input_id == conf.eos_id)

    The result is stored in output\_.ids.
    It is used by recurrent layer group.

    The example usage is:

    .. code-block:: python

       eos = eos_layer(input=layer, eos_id=id)

    :param name: Layer name.
    :type name: basestring
    :param input: Input layer name.
    :type input: LayerOutput
    :param eos_id: end id of sequence
    :type eos_id: int
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    l = Layer(
        name=name,
        type=LayerType.EOSID_LAYER,
        eos_id=eos_id,
        inputs=[input.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.EOSID_LAYER,
        parents=[input],
        size=l.config.size)


@wrap_name_default()
def beam_search(step,
                input,
                bos_id,
                eos_id,
                beam_size,
                max_length=500,
                name=None,
                num_results_per_sample=None):
    """
    Beam search is a heuristic search algorithm used in sequence generation.
    It explores a graph by expanding the most promising nodes in a limited
    set to maintain tractability.

    The example usage is:

    .. code-block:: python

        def rnn_step(input):
            last_time_step_output = memory(name='rnn', size=512)
            with mixed_layer(size=512, name='rnn') as simple_rnn:
                simple_rnn += full_matrix_projection(input)
                simple_rnn += last_time_step_output
            return simple_rnn

        beam_gen = beam_search(name="decoder",
                               step=rnn_step,
                               input=[StaticInput(encoder_last)],
                               bos_id=0,
                               eos_id=1,
                               beam_size=5)

    Please see the following demo for more details:

    - machine translation : demo/seqToseq/translation/gen.conf \
                            demo/seqToseq/seqToseq_net.py

    :param name: Name of the recurrent unit that generates sequences.
    :type name: base string
    :param step: A callable function that defines the calculation in a time
                 step, and it is applied to sequences with arbitrary length
                 by sharing the same set of weights.

                 You can refer to the first parameter of recurrent_group, or
                 demo/seqToseq/seqToseq_net.py for more details.
    :type step: callable
    :param input: Input data for the recurrent unit
    :type input: list
    :param bos_id: Index of the start symbol in the dictionary. The start
                   symbol is a special token for NLP tasks, which indicates
                   the beginning of a sequence. In the generation task, the
                   start symbol is essential, since it is used to initialize
                   the RNN internal state.
    :type bos_id: int
    :param eos_id: Index of the end symbol in the dictionary. The end symbol
                   is a special token for NLP tasks, which indicates the end
                   of a sequence. The generation process will stop once the
                   end symbol is generated, or a pre-defined max iteration
                   number is exceeded.
    :type eos_id: int
    :param max_length: Max generated sequence length.
    :type max_length: int
    :param beam_size: Beam search for sequence generation is an iterative
                      search algorithm. To maintain tractability, every
                      iteration only stores a predetermined number, called
                      the beam_size, of the most promising next words. The
                      greater the beam size, the fewer candidate words are
                      pruned.
    :type beam_size: int
    :param num_results_per_sample: Number of the generated results per input
                                   sequence.
                                   This number must always be less than beam
                                   size.
    :type num_results_per_sample: int
    :return: The generated word index.
    :rtype: LayerOutput
    """
    if num_results_per_sample is None:
        num_results_per_sample = beam_size
    if num_results_per_sample > beam_size:
        # Only warns (does not clamp) — the config layer tolerates it.
        logger.warning("num_results_per_sample should be less than beam_size")

    if isinstance(input, StaticInput) or isinstance(input, BaseGeneratedInput):
        input = [input]

    generated_input_index = -1

    real_input = []
    for i, each_input in enumerate(input):
        assert isinstance(each_input, StaticInput) or isinstance(
            each_input, BaseGeneratedInput)
        if isinstance(each_input, BaseGeneratedInput):
            # Exactly one generated input is allowed.
            assert generated_input_index == -1
            generated_input_index = i
        else:
            real_input.append(each_input)

    assert generated_input_index != -1

    gipt = input[generated_input_index]
    assert isinstance(gipt, BaseGeneratedInput)

    gipt.bos_id = bos_id
    gipt.eos_id = eos_id

    def __real_step__(*args):
        # Wraps the user step: installs the generator config, splices the
        # fed-back prediction into the step arguments, and wires the EOS
        # check on the prediction.
        eos_name = "__%s_eos_layer__" % name
        RecurrentLayerGroupSetGenerator(
            Generator(
                eos_layer_name=eos_name,
                max_num_frames=max_length,
                beam_size=beam_size,
                num_results_per_sample=num_results_per_sample))

        args = list(args)
        args.insert(generated_input_index, gipt.before_real_step())

        predict = gipt.after_real_step(step(*args))

        eos_layer(input=predict, eos_id=eos_id, name=eos_name)

        return predict

    tmp = recurrent_group(
        step=__real_step__,
        input=real_input,
        reverse=False,
        name=name,
        is_generating=True)

    return tmp


def __cost_input__(input, label, weight=None):
    """
    Build the inputs and parents lists shared by cost layers, optionally
    appending a per-sample weight input.
    """
    ipts = [Input(input.name), Input(label.name)]
    parents = [input, label]
    if weight is not None:
        assert weight.layer_type == LayerType.DATA
        ipts.append(Input(weight.name))
        parents.append(weight)
    return ipts, parents


@wrap_name_default()
@layer_support()
def regression_cost(input, label, weight=None, name=None, layer_attr=None):
    """
    Regression Layer. Computes the square-error cost between prediction and
    label.

    TODO(yuyang18): Complete this method.

    :param name: layer name.
    :type name: basestring
    :param input: Network prediction.
    :type input: LayerOutput
    :param label: Data label.
    :type label: LayerOutput
    :param weight: The weight affects the cost, namely the scale of cost.
                   It is an optional argument.
    :type weight: LayerOutput
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    ipts, parents = __cost_input__(input, label, weight)

    Layer(
        inputs=ipts,
        type="square_error",
        name=name,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(name, LayerType.COST, parents=parents, size=1)


@wrap_name_default("cost")
@layer_support()
def classification_cost(input,
                        label,
                        weight=None,
                        name=None,
                        evaluator=classification_error_evaluator,
                        layer_attr=None):
    """
    classification cost Layer.

    :param name: layer name.
    :type name: basestring
    :param input: input layer name. network output.
    :type input: LayerOutput
    :param label: label layer name. data_layer often.
    :type label: LayerOutput
    :param weight: The weight affects the cost, namely the scale of cost.
                   It is an optional argument.
    :type weight: LayerOutput
    :param evaluator: Evaluator method. May also be a list/tuple of
                      evaluator methods.
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert input.layer_type != LayerType.DATA
    assert isinstance(input.activation, SoftmaxActivation)
    assert label.layer_type == LayerType.DATA

    ipts, parents = __cost_input__(input, label, weight)

    Layer(
        name=name,
        type="multi-class-cross-entropy",
        inputs=ipts,
        **ExtraLayerAttribute.to_kwargs(layer_attr))

    def __add_evaluator__(e):
        # Accept only decorated evaluator functions that declare themselves
        # as classification evaluators.
        assert callable(e)
        assert hasattr(e, 'is_evaluator')
        assert isinstance(e.is_evaluator, bool)
        assert e.is_evaluator
        assert hasattr(e, "for_classification")
        assert isinstance(e.for_classification, bool)
        assert e.for_classification

        e(name=e.__name__, input=input, label=label, weight=weight)

    if not isinstance(evaluator, collections.Sequence):
        evaluator = [evaluator]

    for each_evaluator in evaluator:
        __add_evaluator__(each_evaluator)

    return LayerOutput(name, LayerType.COST, parents=parents, size=1)


def conv_operator(img,
                  filter,
                  filter_size,
                  num_filters,
                  num_channels=None,
                  stride=1,
                  padding=0,
                  filter_size_y=None,
                  stride_y=None,
                  padding_y=None):
    """
    Different from img_conv_layer, conv_op is an Operator, which can be used
    in mixed_layer. And conv_op takes two inputs to perform convolution.
    The first input is the image and the second is the filter kernel. It only
    supports GPU mode.

    The example usage is:

    .. code-block:: python

       op = conv_operator(img=input1,
                          filter=input2,
                          filter_size=3,
                          num_filters=64,
                          num_channels=64)

    :param img: input image
    :type img: LayerOutput
    :param filter: input filter
    :type filter: LayerOutput
    :param filter_size: The x dimension of a filter kernel.
    :type filter_size: int
    :param filter_size_y: The y dimension of a filter kernel. Since
                          PaddlePaddle now supports rectangular filters, the
                          filter's shape can be (filter_size, filter_size_y).
    :type filter_size_y: int
    :param num_filters: channel of output data.
    :type num_filters: int
    :param num_channels: channel of input data.
    :type num_channels: int
    :param stride: The x dimension of the stride.
    :type stride: int
    :param stride_y: The y dimension of the stride.
    :type stride_y: int
    :param padding: The x dimension of padding.
    :type padding: int
    :param padding_y: The y dimension of padding.
    :type padding_y: int
    :return: A ConvOperator Object.
    :rtype: ConvOperator
    """
    # Square filter/stride/padding by default.
    if filter_size_y is None:
        filter_size_y = filter_size
    if stride_y is None:
        stride_y = stride
    if padding_y is None:
        padding_y = padding

    if num_channels is None:
        num_channels = img.num_filters

    assert isinstance(filter, LayerOutput)
    if filter.size is not None:
        # NOTE(review): overwrites the filter layer's size with the expected
        # kernel-parameter count rather than validating it — confirm intent.
        filter.size = filter_size * filter_size_y * num_filters * num_channels

    op = ConvOperator(
        input_layer_names=[img.name, filter.name],
        num_filters=num_filters,
        conv_conf=Conv(
            filter_size=filter_size,
            padding=padding,
            stride=stride,
            channels=num_channels,
            filter_size_y=filter_size_y,
            padding_y=padding_y,
            stride_y=stride_y,
            groups=1))
    op.origin = [img, filter]
    return op


@wrap_param_attr_default()
def conv_projection(input,
                    filter_size,
                    num_filters,
                    num_channels=None,
                    stride=1,
                    padding=0,
                    filter_size_y=None,
                    stride_y=None,
                    padding_y=None,
                    groups=1,
                    param_attr=None):
    """
    ConvProjection with a layer as input. It performs element-wise
    multiplication with weight.

    Different from img_conv_layer and conv_op, conv_projection is a
    Projection, which can be used in mixed_layer and concat_layer. It uses
    cudnn to implement conv and only supports GPU mode.

    The example usage is:

    .. code-block:: python

       proj = conv_projection(img=input1,
                              filter_size=3,
                              num_filters=64,
                              num_channels=64)

    :param input: input layer
    :type input: LayerOutput
    :param filter_size: The x dimension of a filter kernel.
    :type filter_size: int
    :param filter_size_y: The y dimension of a filter kernel. Since
                          PaddlePaddle now supports rectangular filters, the
                          filter's shape can be (filter_size, filter_size_y).
    :type filter_size_y: int
    :param num_filters: channel of output data.
    :type num_filters: int
    :param num_channels: channel of input data.
    :type num_channels: int
    :param stride: The x dimension of the stride.
    :type stride: int
    :param stride_y: The y dimension of the stride.
    :type stride_y: int
    :param padding: The x dimension of padding.
    :type padding: int
    :param padding_y: The y dimension of padding.
    :type padding_y: int
    :param groups: The group number.
    :type groups: int
    :param param_attr: Convolution param attribute. None means default
                       attribute
    :type param_attr: ParameterAttribute
    :return: A DotMulProjection Object.
    :rtype: DotMulProjection
    """
    if num_channels is None:
        assert input.num_filters is not None
        num_channels = input.num_filters

    # Each geometry argument may be a scalar (square) or a 2-element
    # sequence (x, y).
    if filter_size_y is None:
        if isinstance(filter_size, collections.Sequence):
            assert len(filter_size) == 2
            filter_size, filter_size_y = filter_size
        else:
            filter_size_y = filter_size

    if stride_y is None:
        if isinstance(stride, collections.Sequence):
            assert len(stride) == 2
            stride, stride_y = stride
        else:
            stride_y = stride

    if padding_y is None:
        if isinstance(padding, collections.Sequence):
            assert len(padding) == 2
            padding, padding_y = padding
        else:
            padding_y = padding

    if param_attr.attr.get('initial_smart'):
        # special initial for conv layers.
        init_w = (2.0 / (filter_size**2 * num_channels))**0.5
        param_attr.attr["initial_mean"] = 0.0
        param_attr.attr["initial_std"] = init_w
        param_attr.attr["initial_strategy"] = 0
        param_attr.attr["initial_smart"] = False

    proj = ConvProjection(
        input_layer_name=input.name,
        num_filters=num_filters,
        conv_conf=Conv(
            filter_size=filter_size,
            padding=padding,
            stride=stride,
            channels=num_channels,
            filter_size_y=filter_size_y,
            padding_y=padding_y,
            stride_y=stride_y,
            groups=groups),
        **param_attr.attr)

    proj.origin = input
    return proj


@wrap_name_default()
@layer_support()
def conv_shift_layer(a, b, name=None, layer_attr=None):
    """
    This layer performs cyclic convolution for two inputs. For example:
      - a[in]: contains M elements.
      - b[in]: contains N elements (N should be odd).
      - c[out]: contains M elements.

    .. math::

        c[i] = \sum_{j=-(N-1)/2}^{(N-1)/2}a_{i+j} * b_{j}

    In this formula:
     - a's index is computed modulo M. When it is negative, then get item
       from the right side (which is the end of array) to the left.
     - b's index is computed modulo N. When it is negative, then get item
       from the right side (which is the end of array) to the left.

    The example usage is:

    .. code-block:: python

       conv_shift = conv_shift_layer(input=[layer1, layer2])

    :param name: layer name
    :type name: basestring
    :param a: Input layer a.
    :type a: LayerOutput
    :param b: input layer b
    :type b: LayerOutput
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(a, LayerOutput) and isinstance(b, LayerOutput)
    assert b.size is None or b.size % 2 == 1  # size of b must be odd.
    Layer(
        name=name,
        type=LayerType.CONV_SHIFT_LAYER,
        inputs=[a.name, b.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))

    return LayerOutput(
        name, LayerType.CONV_SHIFT_LAYER, parents=[a, b], size=a.size)


@wrap_name_default()
@wrap_param_attr_default()
@wrap_bias_attr_default()
@wrap_act_default(act=LinearActivation())
@layer_support(ERROR_CLIPPING, DROPOUT)
def tensor_layer(a,
                 b,
                 size,
                 act=None,
                 name=None,
                 param_attr=None,
                 bias_attr=None,
                 layer_attr=None):
    """
    This layer performs tensor operation for two inputs. For example, each
    sample:

    .. math::

       y_{i} = a * W_{i} * {b^\mathrm{T}}, i=0,1,...,K-1

    In this formula:
      - :math:`a`: the first input contains M elements.
      - :math:`b`: the second input contains N elements.
      - :math:`y_{i}`: the i-th element of y.
      - :math:`W_{i}`: the i-th learned weight, shape if [M, N]
      - :math:`b^\mathrm{T}`: the transpose of :math:`b_{2}`.

    The simple usage is:

    .. code-block:: python

       tensor = tensor_layer(a=layer1, b=layer2, size=1000)

    :param name: layer name
    :type name: basestring
    :param a: Input layer a.
    :type a: LayerOutput
    :param b: input layer b.
    :type b: LayerOutput
    :param size: the layer dimension.
    :type size: int.
    :param act: Activation Type. Default is tanh.
    :type act: BaseActivation
    :param param_attr: The Parameter Attribute.
    :type param_attr: ParameterAttribute
    :param bias_attr: The Bias Attribute. If no bias, then pass False or
                      something not type of ParameterAttribute. None will
                      get a default Bias.
    :type bias_attr: ParameterAttribute|None|Any
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(a, LayerOutput) and isinstance(b, LayerOutput)
    Layer(
        name=name,
        size=size,
        type=LayerType.TENSOR_LAYER,
        active_type=act.name,
        bias=ParamAttr.to_bias(bias_attr),
        inputs=[Input(a.name, **param_attr.attr), Input(b.name)],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.TENSOR_LAYER, parents=[a, b], activation=act,
        size=size)


@wrap_name_default()
@wrap_param_attr_default()
@wrap_bias_attr_default()
@wrap_act_default()
@layer_support()
def selective_fc_layer(input,
                       select,
                       size,
                       act=None,
                       name=None,
                       pass_generation=False,
                       has_selected_colums=True,
                       mul_ratio=0.02,
                       param_attr=None,
                       bias_attr=None,
                       layer_attr=None):
    """
    Selective fully connected layer. Different from fc_layer, the output of
    this layer may be sparse. It requires an additional input to indicate
    several selected columns for output. If the selected columns are not
    specified, selective_fc_layer acts exactly like fc_layer.

    The simple usage is:

    .. code-block:: python

       sel_fc = selective_fc_layer(input=input, size=128, act=TanhActivation())

    :param name: The Layer Name.
    :type name: basestring
    :param input: The input layer.
    :type input: LayerOutput|list|tuple
    :param select: The select layer. The output of select layer should be a
                   sparse binary matrix, and is treated as the mask of
                   selective fc.
    :type select: LayerOutput
    :param size: The layer dimension.
    :type size: int
    :param act: Activation Type. Default is tanh.
    :type act: BaseActivation
    :param param_attr: The Parameter Attribute.
    :type param_attr: ParameterAttribute
    :param bias_attr: The Bias Attribute. If no bias, then pass False or
                      something not type of ParameterAttribute. None will
                      get a default Bias.
    :type bias_attr: ParameterAttribute|None|Any
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if isinstance(input, LayerOutput):
        input = [input]
        assert not isinstance(param_attr, collections.Sequence)
        param_attr = [param_attr]
    else:
        if isinstance(param_attr, collections.Sequence):
            assert len(input) == len(param_attr)
        else:
            # One independent copy of the attribute per input.
            param_attr = [copy.deepcopy(param_attr) for _ in range(len(input))]

    assert isinstance(input, collections.Sequence)
    assert isinstance(select, LayerOutput)
    if select.size is not None:
        assert select.size == size
    Layer(
        inputs=[
            Input(ipt.name, **attr.attr)
            for ipt, attr in zip(input, param_attr)
        ] + [select.name],
        name=name,
        type=LayerType.SEL_FC_LAYER,
        size=size,
        bias=ParameterAttribute.to_bias(bias_attr),
        active_type=act.name,
        selective_fc_pass_generation=pass_generation,
        has_selected_colums=has_selected_colums,
        selective_fc_full_mul_ratio=mul_ratio,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.SEL_FC_LAYER,
        list(input) + [select],
        activation=act,
        size=size)


@wrap_name_default()
@layer_support()
def sampling_id_layer(input, name=None, layer_attr=None):
    """
    A layer for sampling an id from a multinomial distribution given by the
    input layer. Samples one id per sample.

    The simple usage is:

    .. code-block:: python

       samping_id = sampling_id_layer(input=input)

    :param input: The input layer.
    :type input: LayerOutput
    :param name: The Layer Name.
    :type name: basestring
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Tail of sampling_id_layer: the output size is read back from the
    # parsed layer config (l.config.size), not computed here.
    l = Layer(
        name=name,
        type=LayerType.SAMPLING_ID_LAYER,
        inputs=[Input(input.name)],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.SAMPLING_ID_LAYER, input, size=l.config.size)


@wrap_name_default()
@layer_support()
def slope_intercept_layer(input,
                          name=None,
                          slope=1.0,
                          intercept=0.0,
                          layer_attr=None):
    """
    This layer applies a slope and an intercept to the input element-wise.
    There is no activation and no learnable weight.

    ..  math::
        y = slope * x + intercept

    The simple usage is:

    .. code-block:: python

       scale = slope_intercept_layer(input=input, slope=-1.0, intercept=1.0)

    :param input: The input layer.
    :type input: LayerOutput
    :param name: The Layer Name.
    :type name: basestring
    :param slope: the scale factor.
    :type slope: float.
    :param intercept: the offset.
    :type intercept: float.
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        name=name,
        type=LayerType.SLOPE_INTERCEPT_LAYER,
        slope=slope,
        intercept=intercept,
        inputs=[Input(input.name)],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    # Element-wise operation: output size equals the input size.
    return LayerOutput(
        name, LayerType.SLOPE_INTERCEPT_LAYER, input, size=input.size)


@wrap_name_default()
@layer_support()
def linear_comb_layer(weights, vectors, size=None, name=None, layer_attr=None):
    """
    A layer for weighted sum of vectors takes two inputs.
      - Input: size of weights is M
               size of vectors is M*N
      - Output: a vector of size=N

    .. math::

       z(i) = \sum_{j=0}^{M-1} x(j) y(i+Nj)

    where :math:`0 \le i \le N-1`

    Or in the matrix notation:

    .. math::

       z = x^\mathrm{T} Y

    In this formula:
      - :math:`x`: weights
      - :math:`y`: vectors.
      - :math:`z`: the output.

    Note that the above computation is for one sample. Multiple samples are
    processed in one batch.

    The simple usage is:

    .. code-block:: python

       linear_comb = linear_comb_layer(weights=weight, vectors=vectors,
                                       size=elem_dim)

    :param weights: The weight layer.
:type weights: LayerOutput :param vectors: The vector layer. :type vectors: LayerOutput :param size: the dimension of this layer. :type size: int :param name: The Layer Name. :type name: basestring :param layer_attr: Extra Layer config. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ assert isinstance(weights, LayerOutput) and isinstance(vectors, LayerOutput) if vectors.size is not None and weights.size is not None: assert vectors.size % weights.size == 0 if size is None: size = vectors.size / weights.size else: assert size == vectors.size / weights.size Layer( name=name, type=LayerType.LINEAR_COMBINATION_LAYER, size=size, inputs=[Input(weights.name), Input(vectors.name)], **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.LINEAR_COMBINATION_LAYER, [weights, vectors], size=size) convex_comb_layer = linear_comb_layer @wrap_name_default() @layer_support() def block_expand_layer(input, block_x=0, block_y=0, stride_x=0, stride_y=0, padding_x=0, padding_y=0, num_channels=None, name=None, layer_attr=None): """ Expand feature map to minibatch matrix. - matrix width is: block_y * block_x * num_channels - matirx height is: outputH * outputW .. math:: outputH = 1 + (2 * padding_y + imgSizeH - block_y + stride_y - 1) / stride_y outputW = 1 + (2 * padding_x + imgSizeW - block_x + stride_x - 1) / stride_x The expand method is the same with ExpandConvLayer, but saved the transposed value. After expanding, output.sequenceStartPositions will store timeline. The number of time steps are outputH * outputW and the dimension of each time step is block_y * block_x * num_channels. This layer can be used after convolution neural network, and before recurrent neural network. The simple usage is: .. code-block:: python block_expand = block_expand_layer(input, num_channels=128, stride_x=1, stride_y=1, block_x=1, block_x=3) :param input: The input layer. 
:type input: LayerOutput :param num_channels: The channel number of input layer. :type num_channels: int|None :param block_x: The width of sub block. :type block_x: int :param block_y: The width of sub block. :type block_y: int :param stride_x: The stride size in horizontal direction. :type stride_x: int :param stride_y: The stride size in vertical direction. :type stride_y: int :param padding_x: The padding size in horizontal direction. :type padding_x: int :param padding_y: The padding size in vertical direction. :type padding_y: int :param name: The name of this layer, which can not specify. :type name: None|basestring. :param layer_attr: Extra Layer config. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ if num_channels is None: assert input.num_filters is not None num_channels = input.num_filters l = Layer( name=name, inputs=Input( input.name, block_expand=BlockExpand( channels=num_channels, block_x=block_x, block_y=block_y, stride_x=stride_x, stride_y=stride_y, padding_x=padding_x, padding_y=padding_y)), type=LayerType.BLOCK_EXPAND, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.BLOCK_EXPAND, parents=[input], size=l.config.size) @wrap_name_default() @layer_support() def maxout_layer(input, groups, num_channels=None, size_x=None, size_y=None, name=None, layer_attr=None): """ A layer to do max out on conv layer output. - Input: output of a conv layer. - Output: feature map size same as input. Channel is (input channel) / groups. So groups should be larger than 1, and the num of channels should be able to devided by groups. Please refer to Paper: - Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf - Multi-digit Number Recognition from Street View \ Imagery using Deep Convolutional Neural Networks: \ https://arxiv.org/pdf/1312.6082v4.pdf The simple usage is: .. 
code-block:: python maxout = maxout_layer(input, num_channels=128, groups=4) :param input: The input layer. :type input: LayerOutput :param num_channels: The channel number of input layer. If None will be set automatically from previous output. :type num_channels: int|None :param groups: The group number of input layer. :type groups: int :param size_x: conv output width. If None will be set automatically from previous output. :type size_x: int|None :param size_y: conv output height. If None will be set automatically from previous output. :type size_y: int|None :param name: The name of this layer, which can not specify. :type name: None|basestring. :param layer_attr: Extra Layer attribute. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ assert input.layer_type == LayerType.CONV_LAYER assert isinstance(input.activation, LinearActivation) assert groups > 1 if num_channels is None: assert input.num_filters is not None num_channels = input.num_filters assert num_channels % groups == 0 l = Layer( name=name, inputs=Input( input.name, maxout=MaxOut( channels=num_channels, groups=groups)), type=LayerType.MAXOUT, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.MAXOUT, parents=[input], size=l.config.size) @wrap_name_default() @layer_support() def ctc_layer(input, label, size=None, name=None, norm_by_times=False, layer_attr=None): """ Connectionist Temporal Classification (CTC) is designed for temporal classication task. That is, for sequence labeling problems where the alignment between the inputs and the target labels is unknown. More details can be found by referring to `Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks <http://machinelearning.wustl.edu/mlpapers/paper_files/ icml2006_GravesFGS06.pdf>`_ Note: Considering the 'blank' label needed by CTC, you need to use (num_classes + 1) as the input size. num_classes is the category number. 
And the 'blank' is the last category index. So the size of 'input' layer, such as fc_layer with softmax activation, should be num_classes + 1. The size of ctc_layer should also be num_classes + 1. The simple usage: .. code-block:: python ctc = ctc_layer(input=input, label=label, size=9055, norm_by_times=True) :param input: The input layer. :type input: LayerOutput :param label: The data layer of label with variable length. :type label: LayerOutput :param size: category numbers + 1. :type size: int :param name: The name of this layer :type name: basestring|None :param norm_by_times: Whether to normalization by times. False by default. :type norm_by_times: bool :param layer_attr: Extra Layer config. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ assert isinstance(input, LayerOutput) assert isinstance(label, LayerOutput) if label.size is not None: if size is not None: assert size == label.size + 1 else: size = label.size + 1 Layer( name=name, type=LayerType.CTC_LAYER, size=size, norm_by_times=norm_by_times, inputs=[input.name, label.name], **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.CTC_LAYER, [input, label], size=size) @wrap_name_default() @layer_support() def warp_ctc_layer(input, label, size=None, name=None, blank=0, norm_by_times=False, layer_attr=None): """ A layer intergrating the open-source `warp-ctc <https://github.com/baidu-research/warp-ctc>` library, which is used in `Deep Speech 2: End-toEnd Speech Recognition in English and Mandarin <https://arxiv.org/pdf/1512.02595v1.pdf>`, to compute Connectionist Temporal Classification (CTC) loss. More details of CTC can be found by referring to `Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks <http://machinelearning.wustl.edu/mlpapers/paper_files/ icml2006_GravesFGS06.pdf>`_ Note: - Let num_classes represent the category number. 
Considering the 'blank' label needed by CTC, you need to use (num_classes + 1) as the input size. Thus, the size of both warp_ctc_layer and 'input' layer should be set to num_classes + 1. - You can set 'blank' to any value ranged in [0, num_classes], which should be consistent as that used in your labels. - As a native 'softmax' activation is interated to the warp-ctc library, 'linear' activation is expected instead in the 'input' layer. The simple usage: .. code-block:: python ctc = warp_ctc_layer(input=input, label=label, size=1001, blank=1000, norm_by_times=False) :param input: The input layer. :type input: LayerOutput :param label: The data layer of label with variable length. :type label: LayerOutput :param size: category numbers + 1. :type size: int :param name: The name of this layer, which can not specify. :type name: basestring|None :param blank: the 'blank' label used in ctc :type blank: int :param norm_by_times: Whether to normalization by times. False by default. :type norm_by_times: bool :param layer_attr: Extra Layer config. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ assert isinstance(input, LayerOutput) assert isinstance(label, LayerOutput) if label.size is not None: if size is not None: assert size == label.size + 1 else: size = label.size + 1 Layer( name=name, type=LayerType.WARP_CTC_LAYER, size=size, blank=blank, norm_by_times=norm_by_times, inputs=[input.name, label.name], **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.WARP_CTC_LAYER, parents=[input, label], size=size) @wrap_name_default() @wrap_param_attr_default() @layer_support() def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None, layer_attr=None): """ A layer for calculating the cost of sequential conditional random field model. The simple usage: .. code-block:: python crf = crf_layer(input=input, label=label, size=label_dim) :param input: The first input layer is the feature. 
:type input: LayerOutput :param label: The second input layer is label. :type label: LayerOutput :param size: The category number. :type size: int :param weight: The third layer is "weight" of each sample, which is an optional argument. :type weight: LayerOutput :param param_attr: Parameter attribute. None means default attribute :type param_attr: ParameterAttribute :param name: The name of this layers. It is not necessary. :type name: None|basestring :param layer_attr: Extra Layer config. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ assert isinstance(input, LayerOutput) assert isinstance(label, LayerOutput) assert weight is None or isinstance(weight, LayerOutput) if input.size is not None and label.size is not None: assert input.size == label.size if size is None: size = input.size else: assert size == input.size ipts = [Input(input.name, **param_attr.attr), Input(label.name)] if weight is not None: ipts.append(Input(weight.name)) Layer( name=name, type=LayerType.CRF_LAYER, size=size, inputs=ipts, **ExtraLayerAttribute.to_kwargs(layer_attr)) parents = [input, label] if weight is not None: parents.append(weight) # The size for LayerOutput means the dimension of the output. # It's different from the meaning of crf layer, which is the number of # classes. return LayerOutput(name, LayerType.CRF_LAYER, parents, size=1) @wrap_name_default() @wrap_param_attr_default() @layer_support() def crf_decoding_layer(input, size, label=None, param_attr=None, name=None, layer_attr=None): """ A layer for calculating the decoding sequence of sequential conditional random field model. The decoding sequence is stored in output.ids. If a second input is provided, it is treated as the ground-truth label, and this layer will also calculate error. output.value[i] is 1 for incorrect decoding or 0 for correct decoding. :param input: The first input layer. :type input: LayerOutput :param size: size of this layer. 
:type size: int :param label: None or ground-truth label. :type label: LayerOutput or None :param param_attr: Parameter attribute. None means default attribute :type param_attr: ParameterAttribute :param name: The name of this layers. It is not necessary. :type name: None|basestring :param layer_attr: Extra Layer config. :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ assert isinstance(input, LayerOutput) assert label is None or isinstance(label, LayerOutput) ipts = [Input(input.name, **param_attr.attr)] if label is not None: ipts.append(Input(label.name)) Layer( name=name, type=LayerType.CRF_DECODING_LAYER, size=size, inputs=ipts, **ExtraLayerAttribute.to_kwargs(layer_attr)) parents = [input] if label is not None: parents.append(label) # The size for LayerOutput means the dimension of the output. # It's different from the meaning of crf layer, which is the number of # classes. return LayerOutput(name, LayerType.CRF_DECODING_LAYER, parents, size=1) @wrap_bias_attr_default(has_bias=True) @wrap_name_default() @layer_support() def nce_layer(input, label, num_classes, weight=None, num_neg_samples=10, neg_distribution=None, name=None, bias_attr=None, layer_attr=None): """ Noise-contrastive estimation. Implements the method in the following paper: A fast and simple algorithm for training neural probabilistic language models. The example usage is: .. code-block:: python cost = nce_layer(input=layer1, label=layer2, weight=layer3, num_classes=3, neg_distribution=[0.1,0.3,0.6]) :param name: layer name :type name: basestring :param input: input layers. It could be a LayerOutput of list/tuple of LayerOutput. :type input: LayerOutput|list|tuple|collections.Sequence :param label: label layer :type label: LayerOutput :param weight: weight layer, can be None(default) :type weight: LayerOutput :param num_classes: number of classes. :type num_classes: int :param num_neg_samples: number of negative samples. Default is 10. 
:type num_neg_samples: int :param neg_distribution: The distribution for generating the random negative labels. A uniform distribution will be used if not provided. If not None, its length must be equal to num_classes. :type neg_distribution: list|tuple|collections.Sequence|None :param bias_attr: Bias parameter attribute. True if no bias. :type bias_attr: ParameterAttribute|None|False :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute :return: layer name. :rtype: LayerOutput """ if isinstance(input, LayerOutput): input = [input] assert isinstance(input, collections.Sequence) assert isinstance(label, LayerOutput) assert label.layer_type == LayerType.DATA if neg_distribution is not None: assert isinstance(neg_distribution, collections.Sequence) assert len(neg_distribution) == num_classes assert sum(neg_distribution) == 1 ipts_for_layer = [] parents = [] for each_input in input: assert isinstance(each_input, LayerOutput) ipts_for_layer.append(each_input.name) parents.append(each_input) ipts_for_layer.append(label.name) parents.append(label) if weight is not None: assert isinstance(weight, LayerOutput) assert weight.layer_type == LayerType.DATA ipts_for_layer.append(weight.name) parents.append(weight) l = Layer( name=name, type=LayerType.NCE_LAYER, num_classes=num_classes, neg_sampling_dist=neg_distribution, num_neg_samples=num_neg_samples, inputs=ipts_for_layer, bias=ParamAttr.to_bias(bias_attr), **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.NCE_LAYER, parents=parents, size=l.config.size) """ following are cost Layers. """ @wrap_name_default() @layer_support() def rank_cost(left, right, label, weight=None, name=None, coeff=1.0, layer_attr=None): """ A cost Layer for learning to rank using gradient descent. Details can refer to `papers <http://research.microsoft.com/en-us/um/people/cburges/papers/ ICML_ranking.pdf>`_. This layer contains at least three inputs. 
The weight is an optional argument, which affects the cost. .. math:: C_{i,j} & = -\\tilde{P_{ij}} * o_{i,j} + log(1 + e^{o_{i,j}}) o_{i,j} & = o_i - o_j \\tilde{P_{i,j}} & = \\{0, 0.5, 1\\} \ or \ \\{0, 1\\} In this formula: - :math:`C_{i,j}` is the cross entropy cost. - :math:`\\tilde{P_{i,j}}` is the label. 1 means positive order and 0 means reverse order. - :math:`o_i` and :math:`o_j`: the left output and right output. Their dimension is one. The simple usage: .. code-block:: python cost = rank_cost(left=out_left, right=out_right, label=label) :param left: The first input, the size of this layer is 1. :type left: LayerOutput :param right: The right input, the size of this layer is 1. :type right: LayerOutput :param label: Label is 1 or 0, means positive order and reverse order. :type label: LayerOutput :param weight: The weight affects the cost, namely the scale of cost. It is an optional argument. :type weight: LayerOutput :param name: The name of this layers. It is not necessary. :type name: None|basestring :param coeff: The coefficient affects the gradient in the backward. :type coeff: float :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ assert left.size == 1 assert right.size == 1 assert label.size == 1 ipts = [left.name, right.name, label.name] parents = [left, right, label] if weight is not None: ipts.append(weight.name) parents.append(weight) Layer( name=name, type=LayerType.RANK_COST, inputs=ipts, coeff=coeff, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.RANK_COST, parents=parents, size=1) @wrap_name_default() @layer_support() def lambda_cost(input, score, name, NDCG_num=5, max_sort_size=-1, layer_attr=None): """ lambdaCost for lambdaRank LTR approach. The simple usage: .. code-block:: python cost = lambda_cost(input=input, score=score, NDCG_num=8, max_sort_size=-1) :param input: Samples of the same query should be loaded as sequence. 
:type input: LayerOutput :param score: The 2nd input. Score of each sample. :type input: LayerOutput :param NDCG_num: The size of NDCG (Normalized Discounted Cumulative Gain), e.g., 5 for NDCG@5. It must be less than for equal to the minimum size of lists. :type NDCG_num: int :param max_sort_size: The size of partial sorting in calculating gradient. If max_sort_size = -1, then for each list, the algorithm will sort the entire list to get gradient. In other cases, max_sort_size must be greater than or equal to NDCG_num. And if max_sort_size is greater than the size of a list, the algorithm will sort the entire list of get gradient. :type max_sort_size: int :param name: The name of this layers. It is not necessary. :type name: None|basestring :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ assert isinstance(input, LayerOutput) and isinstance(score, LayerOutput) if score.size is not None: assert score.size == 1 Layer( name=name, type=LayerType.LAMBDA_COST, inputs=[input.name, score.name], NDCG_num=NDCG_num, max_sort_size=max_sort_size, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, LayerType.LAMBDA_COST, parents=[input, score], size=1) @wrap_name_default() @layer_support() def cross_entropy(input, label, name=None, coeff=1.0, layer_attr=None): """ A loss layer for multi class entropy. .. code-block:: python cost = cross_entropy(input=input_layer, label=label_layer) :param input: The first input layer. :type input: LayerOutput. :param label: The input label. :type input: LayerOutput. :param name: The name of this layers. It is not necessary. :type name: None|basestring. :param coeff: The coefficient affects the gradient in the backward. :type coeff: float. :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput. 
    """
    Layer(
        name=name,
        type=LayerType.CROSS_ENTROPY,
        inputs=[input.name, label.name],
        coeff=coeff,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    # Cost layers yield a scalar per sample, hence size=1.
    return LayerOutput(
        name, LayerType.CROSS_ENTROPY, parents=[input, label], size=1)


@wrap_name_default()
@layer_support()
def cross_entropy_with_selfnorm(input,
                                label,
                                name=None,
                                coeff=1.0,
                                softmax_selfnorm_alpha=0.1,
                                layer_attr=None):
    """
    A loss layer for multi class entropy with selfnorm.

    .. code-block:: python

       cost = cross_entropy_with_selfnorm(input=input_layer,
                                          label=label_layer)

    :param input: The first input layer.
    :type input: LayerOutput.
    :param label: The input label.
    :type label: LayerOutput.
    :param name: The name of this layers. It is not necessary.
    :type name: None|basestring.
    :param coeff: The coefficient affects the gradient in the backward.
    :type coeff: float.
    :param softmax_selfnorm_alpha: The scale factor affects the cost.
    :type softmax_selfnorm_alpha: float.
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput.
    """
    Layer(
        name=name,
        type=LayerType.CROSS_ENTROPY_WITH_SELFNORM,
        inputs=[input.name, label.name],
        coeff=coeff,
        softmax_selfnorm_alpha=softmax_selfnorm_alpha,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.CROSS_ENTROPY_WITH_SELFNORM,
        parents=[input, label],
        size=1)


@wrap_name_default()
@layer_support()
def sum_cost(input, name=None, layer_attr=None):
    """
    A loss layer which calculates the sum of the input as loss.

    .. code-block:: python

       cost = sum_cost(input=input_layer)

    :param input: The first input layer.
    :type input: LayerOutput.
    :param name: The name of this layers. It is not necessary.
    :type name: None|basestring.
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput.
    """
    assert isinstance(input, LayerOutput)
    Layer(
        name=name,
        type=LayerType.SUM_COST,
        inputs=[input.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(name, LayerType.SUM_COST, parents=[input], size=1)


@wrap_name_default()
@layer_support()
def huber_cost(input, label, name=None, coeff=1.0, layer_attr=None):
    """
    A loss layer for huber loss.

    .. code-block:: python

       cost = huber_cost(input=input_layer,
                         label=label_layer)

    :param input: The first input layer.
    :type input: LayerOutput.
    :param label: The input label.
    :type label: LayerOutput.
    :param name: The name of this layers. It is not necessary.
    :type name: None|basestring.
    :param coeff: The coefficient affects the gradient in the backward.
    :type coeff: float.
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput.
    """
    assert isinstance(input, LayerOutput)
    # Huber cost only accepts a one-dimensional input per sample.
    if input.size is not None:
        assert input.size == 1
    Layer(
        name=name,
        type=LayerType.HUBER,
        inputs=[input.name, label.name],
        coeff=coeff,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(name, LayerType.HUBER, parents=[input, label], size=1)


@wrap_name_default()
@layer_support()
def multi_binary_label_cross_entropy(input,
                                     label,
                                     name=None,
                                     coeff=1.0,
                                     layer_attr=None):
    """
    A loss layer for multi binary label cross entropy.

    .. code-block:: python

       cost = multi_binary_label_cross_entropy(input=input_layer,
                                               label=label_layer)

    :param input: The first input layer.
    :type input: LayerOutput
    :param label: The input label.
    :type label: LayerOutput
    :param type: The type of cost.
    :type type: basestring
    :param name: The name of this layers. It is not necessary.
    :type name: None|basestring
    :param coeff: The coefficient affects the gradient in the backward.
    :type coeff: float
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # NOTE(review): warns rather than fails on a non-sigmoid activation;
    # presumably a sigmoid-activated input is the intended pairing for
    # independent binary labels — confirm against the layer's C++ impl.
    if input.activation is None or \
            not isinstance(input.activation, SigmoidActivation):
        logger.log(
            logging.WARN,
            "%s is not recommend for multi_binary_label_cross_entropy's activation, "
            "maybe the sigmoid is better" % repr(input.activation))
    Layer(
        name=name,
        type=LayerType.MULTI_BIN_LABEL_CROSS_ENTROPY,
        inputs=[input.name, label.name],
        coeff=coeff,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.MULTI_BIN_LABEL_CROSS_ENTROPY,
        parents=[input, label],
        size=1)
opticron/asterisk-testsuite-temporary
refs/heads/master
tests/channels/pjsip/transfers/blind_transfer/callee_refer_only/transfer.py
2
#!/usr/bin/env python
'''
Copyright (C) 2014, Digium, Inc.
John Bigelow <jbigelow@digium.com>

This program is free software, distributed under the terms of
the GNU General Public License Version 2.
'''

import logging
import pjsua as pj
from twisted.internet import reactor

LOGGER = logging.getLogger(__name__)

# URIs dialed during the test: Alice calls URI[0] (iteration 0) or URI[1]
# (iteration 1); Bob blind-transfers the call to URI[2] (Charlie).
URI = ["sip:bob@127.0.0.1", "sip:bob_two@127.0.0.1", "sip:charlie@127.0.0.1"]
# Current test iteration (0 or 1); advanced in AMICallback.hangup_event_handler.
ITERATION = 0


class CharlieCallback(pj.AccountCallback):
    """Derived callback class for Charlie's account.

    Charlie is the blind-transfer target. On the second iteration the
    incoming INVITE is checked for the Referred-By header added by the
    transfer.
    """

    def __init__(self, controller, account=None):
        pj.AccountCallback.__init__(self, account)
        self.controller = controller
        self.charlie_call = None

    def on_incoming_call2(self, call, msg):
        """Answer the incoming call and schedule its hangup.

        On iterations after the first, first verify that the INVITE carries
        the expected Referred-By header; fail the test if it is missing.
        """
        self.charlie_call = call
        LOGGER.info("Incoming call for Charlie '%s' from '%s'." %
                    (call.info().uri, call.info().remote_uri))

        if ITERATION > 0:
            referred_by_hdr = "Referred-By: <sip:bob@127.0.0.1;ob>"
            if referred_by_hdr not in msg.msg_info_buffer:
                # warning() replaces the deprecated warn() alias.
                LOGGER.warning("Expected header not found: '%s'" %
                               referred_by_hdr)
                self.controller.test_object.set_passed(False)
                self.controller.test_object.stop_reactor()

        inbound_cb = CharliePhoneCallCallback(call)
        call.set_callback(inbound_cb)
        call.answer(200)
        # Let the call settle for a second before tearing it down.
        reactor.callLater(1, self.hangup_call)

    def hangup_call(self):
        """Hang up the call."""
        LOGGER.info("Hanging up Charlie")
        self.charlie_call.hangup(code=200, reason="Q.850;cause=16")


class BobCallback(pj.AccountCallback):
    """Derived callback class for Bob's account."""

    def __init__(self, account=None):
        pj.AccountCallback.__init__(self, account)
        self.bob_call = None

    def on_incoming_call(self, call):
        """Answer Alice's call; BobPhoneCallCallback performs the transfer."""
        self.bob_call = call
        LOGGER.info("Incoming call for Bob '%s' from '%s'."
                    % (call.info().uri, call.info().remote_uri))

        inbound_cb = BobPhoneCallCallback(call)
        call.set_callback(inbound_cb)
        call.answer(200)


class AlicePhoneCallCallback(pj.CallCallback):
    """Derived callback class for Alice's call."""

    def __init__(self, call=None):
        pj.CallCallback.__init__(self, call)

    def on_state(self):
        """Log state changes; nothing else to do on Alice's side."""
        log_call_info(self.call.info())
        if self.call.info().state == pj.CallState.DISCONNECTED:
            LOGGER.info("Call disconnected: '%s'" % self.call)


class BobPhoneCallCallback(pj.CallCallback):
    """Derived callback class for Bob's call.

    Once the call with Alice is confirmed, Bob blind-transfers it to
    Charlie, then hangs up after the transfer target answers.
    """

    def __init__(self, call=None):
        pj.CallCallback.__init__(self, call)

    def on_state(self):
        """Kick off the blind transfer as soon as the call is confirmed."""
        log_call_info(self.call.info())
        if self.call.info().state == pj.CallState.CONFIRMED:
            LOGGER.info("Call is up between Alice and Bob. Transferring call"
                        " to Charlie.")
            self.transfer_call()
        if self.call.info().state == pj.CallState.DISCONNECTED:
            LOGGER.info("Call disconnected: '%s'" % self.call)

    def transfer_call(self):
        """Blind transfer the call to Charlie, retrying until it succeeds."""
        try:
            LOGGER.info("Attempting to blind transfer the call.")
            self.call.transfer(URI[2])
            LOGGER.info("The call is %s" % self.call)
        except pj.Error:
            # Previously a bare 'except:'; narrowed to pj.Error so the retry
            # loop does not swallow unrelated failures (KeyboardInterrupt,
            # programming errors) silently.
            LOGGER.warning("Failed to transfer the call! Retrying...")
            reactor.callLater(.2, self.transfer_call)

    def on_transfer_status(self, code, reason, final, cont):
        """On final 200 OK for the transfer, hang up Bob's leg."""
        log_call_info(self.call.info())
        if code == 200 and reason == "OK" and final == 1 and cont == 0:
            LOGGER.info("Transfer target answered the call.")
            LOGGER.debug("Call uri: '%s'; remote uri: '%s'" %
                         (self.call.info().uri, self.call.info().remote_uri))
            LOGGER.info("Hanging up Bob")
            self.call.hangup(code=200, reason="Q.850;cause=16")
        return cont


class CharliePhoneCallCallback(pj.CallCallback):
    """Derived callback class for Charlie's call."""

    def __init__(self, call=None):
        pj.CallCallback.__init__(self, call)

    def on_state(self):
        """Log state changes; nothing else to do on Charlie's side."""
        log_call_info(self.call.info())
        if self.call.info().state == pj.CallState.DISCONNECTED:
            LOGGER.info("Call disconnected: '%s'" % self.call)


class AMICallback(object):
    """Class to set up callbacks and place calls.

    Watches AMI Hangup events to drive the two test iterations: after all
    three channels of the first call hang up, a second call is placed;
    after the second set hangs up, the reactor is stopped.
    """

    def __init__(self, test_object, accounts):
        self.test_object = test_object
        self.ami = self.test_object.ami[0]
        self.ami.registerEvent('Hangup', self.hangup_event_handler)
        self.alice = accounts.get('alice')
        bob = accounts.get('bob')
        charlie = accounts.get('charlie')
        self.bob_cb = BobCallback()
        self.charlie_cb = CharlieCallback(self)
        bob.account.set_callback(self.bob_cb)
        charlie.account.set_callback(self.charlie_cb)
        self.channels_hungup = 0

    def hangup_event_handler(self, ami, event):
        """AMI hang up event callback."""
        global ITERATION
        LOGGER.debug("Hangup detected for channel '%s'" % event['channel'])
        self.channels_hungup += 1
        # Each iteration involves three channels (Alice, Bob, Charlie).
        if self.channels_hungup == 3 and ITERATION == 0:
            LOGGER.info("Starting second iteration.")
            self.channels_hungup = 0
            ITERATION += 1
            lock = self.alice.pj_lib.auto_lock()
            self.make_call(self.alice.account, URI[1])
            del lock
        elif self.channels_hungup == 3 and ITERATION == 1:
            self.test_object.stop_reactor()

    def make_call(self, acc, uri):
        """Place a call.

        Keyword Arguments:
        acc The pjsua account to make the call from
        uri The URI to dial
        """
        try:
            LOGGER.info("Making call to '%s'" % uri)
            acc.make_call(uri, cb=AlicePhoneCallCallback())
        except pj.Error as err:
            # 'except pj.Error, err' (Python 2-only syntax) rewritten with
            # the 'as' form, valid on Python 2.6+ and Python 3.
            LOGGER.error("Exception: %s" % str(err))


def log_call_info(call_info):
    """Log URI and state details for a call."""
    LOGGER.debug("Call '%s' <-> '%s'" % (call_info.uri, call_info.remote_uri))
    LOGGER.debug("Call state: '%s'; last code: '%s'; last reason: '%s'" %
                 (call_info.state_text, call_info.last_code,
                  call_info.last_reason))


def transfer(test_object, accounts):
    """The test's callback method.

    Keyword Arguments:
    test_object The test object
    accounts Configured accounts
    """
    LOGGER.info("Starting first iteration.")
    alice = accounts.get('alice')
    obj = AMICallback(test_object, accounts)
    lock = alice.pj_lib.auto_lock()
    obj.make_call(accounts['alice'].account, URI[0])
    del lock

# vim:sw=4:ts=4:expandtab:textwidth=79
asadoughi/python-neutronclient
refs/heads/master
neutronclient/openstack/common/strutils.py
1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ System-level utilities and helper functions. """ import re import sys import unicodedata import six from neutronclient.openstack.common.gettextutils import _ # noqa # Used for looking up extensions of text # to their 'multiplied' byte amount BYTE_MULTIPLIERS = { '': 1, 't': 1024 ** 4, 'g': 1024 ** 3, 'm': 1024 ** 2, 'k': 1024, } BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)') TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") def int_from_bool_as_string(subject): """Interpret a string as a boolean and return either 1 or 0. Any string value in: ('True', 'true', 'On', 'on', '1') is interpreted as a boolean True. Useful for JSON-decoded stuff and config file parsing """ return bool_from_string(subject) and 1 or 0 def bool_from_string(subject, strict=False): """Interpret a string as a boolean. A case-insensitive match is performed such that strings matching 't', 'true', 'on', 'y', 'yes', or '1' are considered True and, when `strict=False`, anything else is considered False. Useful for JSON-decoded stuff and config file parsing. If `strict=True`, unrecognized values, including None, will raise a ValueError which is useful when parsing values passed in from an API call. 
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. """ if not isinstance(subject, six.string_types): subject = str(subject) lowered = subject.strip().lower() if lowered in TRUE_STRINGS: return True elif lowered in FALSE_STRINGS: return False elif strict: acceptable = ', '.join( "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) msg = _("Unrecognized value '%(val)s', acceptable values are:" " %(acceptable)s") % {'val': subject, 'acceptable': acceptable} raise ValueError(msg) else: return False def safe_decode(text, incoming=None, errors='strict'): """Decodes incoming str using `incoming` if they're not already unicode. :param incoming: Text's current encoding :param errors: Errors handling policy. See here for valid values http://docs.python.org/2/library/codecs.html :returns: text or a unicode `incoming` encoded representation of it. :raises TypeError: If text is not an isntance of str """ if not isinstance(text, six.string_types): raise TypeError("%s can't be decoded" % type(text)) if isinstance(text, six.text_type): return text if not incoming: incoming = (sys.stdin.encoding or sys.getdefaultencoding()) try: return text.decode(incoming, errors) except UnicodeDecodeError: # Note(flaper87) If we get here, it means that # sys.stdin.encoding / sys.getdefaultencoding # didn't return a suitable encoding to decode # text. This happens mostly when global LANG # var is not set correctly and there's no # default encoding. In this case, most likely # python will use ASCII or ANSI encoders as # default encodings but they won't be capable # of decoding non-ASCII characters. # # Also, UTF-8 is being used since it's an ASCII # extension. return text.decode('utf-8', errors) def safe_encode(text, incoming=None, encoding='utf-8', errors='strict'): """Encodes incoming str/unicode using `encoding`. If incoming is not specified, text is expected to be encoded with current python's default encoding. 
(`sys.getdefaultencoding`) :param incoming: Text's current encoding :param encoding: Expected encoding for text (Default UTF-8) :param errors: Errors handling policy. See here for valid values http://docs.python.org/2/library/codecs.html :returns: text or a bytestring `encoding` encoded representation of it. :raises TypeError: If text is not an isntance of str """ if not isinstance(text, six.string_types): raise TypeError(_("%s can't be encoded") % type(text).capitalize()) if not incoming: incoming = (sys.stdin.encoding or sys.getdefaultencoding()) if isinstance(text, six.text_type): return text.encode(encoding, errors) elif text and encoding != incoming: # Decode text before encoding it with `encoding` text = safe_decode(text, incoming, errors) return text.encode(encoding, errors) return text def to_bytes(text, default=0): """Converts a string into an integer of bytes. Looks at the last characters of the text to determine what conversion is needed to turn the input text into a byte number. Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive) :param text: String input for bytes size conversion. :param default: Default return value when text is blank. """ match = BYTE_REGEX.search(text) if match: magnitude = int(match.group(1)) mult_key_org = match.group(2) if not mult_key_org: return magnitude elif text: msg = _('Invalid string format: %s') % text raise TypeError(msg) else: return default mult_key = mult_key_org.lower().replace('b', '', 1) multiplier = BYTE_MULTIPLIERS.get(mult_key) if multiplier is None: msg = _('Unknown byte multiplier: %s') % mult_key_org raise TypeError(msg) return magnitude * multiplier def to_slug(value, incoming=None, errors="strict"): """Normalize string. Convert to lowercase, remove non-word characters, and convert spaces to hyphens. Inspired by Django's `slugify` filter. :param value: Text to slugify :param incoming: Text's current encoding :param errors: Errors handling policy. 
See here for valid values http://docs.python.org/2/library/codecs.html :returns: slugified unicode representation of `value` :raises TypeError: If text is not an instance of str """ value = safe_decode(value, incoming, errors) # NOTE(aababilov): no need to use safe_(encode|decode) here: # encodings are always "ascii", error handling is always "ignore" # and types are always known (first: unicode; second: str) value = unicodedata.normalize("NFKD", value).encode( "ascii", "ignore").decode("ascii") value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() return SLUGIFY_HYPHENATE_RE.sub("-", value)
Turlough/keyczar
refs/heads/master
cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/ifort.py
19
"""SCons.Tool.ifort Tool-specific initialization for newer versions of the Intel Fortran Compiler for Linux/Windows (and possibly Mac OS X). There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/ifort.py 4043 2009/02/23 09:06:45 scons" import string import SCons.Defaults from SCons.Scanner.Fortran import FortranScan from FortranCommon import add_all_to_env def generate(env): """Add Builders and construction variables for ifort to an Environment.""" # ifort supports Fortran 90 and Fortran 95 # Additionally, ifort recognizes more file extensions. 
fscan = FortranScan("FORTRANPATH") SCons.Tool.SourceFileScanner.add_scanner('.i', fscan) SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan) if not env.has_key('FORTRANFILESUFFIXES'): env['FORTRANFILESUFFIXES'] = ['.i'] else: env['FORTRANFILESUFFIXES'].append('.i') if not env.has_key('F90FILESUFFIXES'): env['F90FILESUFFIXES'] = ['.i90'] else: env['F90FILESUFFIXES'].append('.i90') add_all_to_env(env) fc = 'ifort' for dialect in ['F77', 'F90', 'FORTRAN', 'F95']: env['%s' % dialect] = fc env['SH%s' % dialect] = '$%s' % dialect env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect) if env['PLATFORM'] == 'win32': # On Windows, the ifort compiler specifies the object on the # command line with -object:, not -o. Massage the necessary # command-line construction variables. for dialect in ['F77', 'F90', 'FORTRAN', 'F95']: for var in ['%sCOM' % dialect, '%sPPCOM' % dialect, 'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]: env[var] = string.replace(env[var], '-o $TARGET', '-object:$TARGET') env['FORTRANMODDIRPREFIX'] = "/module:" else: env['FORTRANMODDIRPREFIX'] = "-module " def exists(env): return env.Detect('ifort') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
ParadropLabs/Paradrop
refs/heads/master
tests/paradrop/core/container/test_dockerfile.py
1
from paradrop.core.container.dockerfile import Dockerfile from paradrop.core.chute.service import Service def test_get_target_image(): from paradrop.core.container import dockerfile assert dockerfile.get_target_image("go").startswith("golang:") assert dockerfile.get_target_image("node").startswith("node:") assert dockerfile.get_target_image("python2").startswith("python:2") assert dockerfile.get_target_image("python3").startswith("python:3") assert dockerfile.get_target_image("unknown").startswith("unknown") def test_get_target_machine(): from paradrop.core.container import dockerfile result = dockerfile.get_target_machine() assert isinstance(result, basestring) and len(result) > 0 def test_getString(): service = Service(image="python2", command="python") dockerfile = Dockerfile(service) result = dockerfile.getString() assert "FROM" in result assert "CMD" in result def test_isValid(): # Missing required fields. service = Service() dockerfile = Dockerfile(service) valid, reason = dockerfile.isValid() assert valid is False assert reason is not None # Command is not a string or list. service.image = "python2" service.command = 42 dockerfile = Dockerfile(service) valid, reason = dockerfile.isValid() assert valid is False assert reason is not None # Valid service.command = "python" dockerfile = Dockerfile(service) valid, reason = dockerfile.isValid() assert valid is True assert reason is None # Packages is not a list. service.build['packages'] = 42 dockerfile = Dockerfile(service) valid, reason = dockerfile.isValid() assert valid is False assert reason is not None # Packages contains a weird value. service.build['packages'] = ["something\nfunny"] dockerfile = Dockerfile(service) valid, reason = dockerfile.isValid() assert valid is False assert reason is not None # Valid service.build['packages'] = ["a", "ab", "abc"] dockerfile = Dockerfile(service) valid, reason = dockerfile.isValid() assert valid is True assert reason is None
Weicong-Lin/pymo-global
refs/heads/master
android/pgs4a-0.9.6/python-install/lib/python2.7/distutils/tests/test_build.py
141
"""Tests for distutils.command.build.""" import unittest import os import sys from test.test_support import run_unittest from distutils.command.build import build from distutils.tests import support from sysconfig import get_platform class BuildTestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase): def test_finalize_options(self): pkg_dir, dist = self.create_dist() cmd = build(dist) cmd.finalize_options() # if not specified, plat_name gets the current platform self.assertEqual(cmd.plat_name, get_platform()) # build_purelib is build + lib wanted = os.path.join(cmd.build_base, 'lib') self.assertEqual(cmd.build_purelib, wanted) # build_platlib is 'build/lib.platform-x.x[-pydebug]' # examples: # build/lib.macosx-10.3-i386-2.7 plat_spec = '.%s-%s' % (cmd.plat_name, sys.version[0:3]) if hasattr(sys, 'gettotalrefcount'): self.assertTrue(cmd.build_platlib.endswith('-pydebug')) plat_spec += '-pydebug' wanted = os.path.join(cmd.build_base, 'lib' + plat_spec) self.assertEqual(cmd.build_platlib, wanted) # by default, build_lib = build_purelib self.assertEqual(cmd.build_lib, cmd.build_purelib) # build_temp is build/temp.<plat> wanted = os.path.join(cmd.build_base, 'temp' + plat_spec) self.assertEqual(cmd.build_temp, wanted) # build_scripts is build/scripts-x.x wanted = os.path.join(cmd.build_base, 'scripts-' + sys.version[0:3]) self.assertEqual(cmd.build_scripts, wanted) # executable is os.path.normpath(sys.executable) self.assertEqual(cmd.executable, os.path.normpath(sys.executable)) def test_suite(): return unittest.makeSuite(BuildTestCase) if __name__ == "__main__": run_unittest(test_suite())
atpohjal/or-tools
refs/heads/master
data/nonogram_regular/nonogram_gondola.py
74
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Gondola # From http://www.conceptispuzzles.com # rows = 30 row_rule_len = 8 row_rules = [ [0,0,0,0,0,0,5,6], [0,0,0,0,6,1,1,1], [0,0,0,0,0,3,11,3], [0,0,6,1,1,1,1,1], [0,7,1,1,1,2,1,3], [0,0,4,1,1,2,1,4], [0,7,1,1,1,2,3,1], [0,0,7,1,1,3,1,1], [0,0,4,1,1,1,1,9], [0,0,0,0,4,8,1,1], [0,0,0,4,1,4,1,3], [0,0,0,4,1,7,1,5], [4,1,1,2,1,4,1,1], [0,0,0,4,9,2,1,2], [0,0,4,1,3,1,2,1], [0,0,4,1,6,1,1,1], [0,0,0,0,4,8,3,1], [0,0,0,0,10,3,5,3], [0,0,4,1,2,3,5,2], [0,0,0,0,3,5,2,8], [0,0,0,2,6,3,1,1], [0,0,0,0,0,1,12,1], [0,0,0,0,0,20,1,1], [0,0,0,0,0,0,2,25], [0,0,0,0,0,2,3,20], [2,5,3,2,2,2,2,1], [0,0,0,0,0,1,2,22], [0,0,0,0,0,0,0,20], [0,0,0,0,0,0,3,18], [0,0,0,0,0,0,1,2] ] cols = 30 col_rule_len = 8 col_rules = [ [0,0,2,2,2,1,2,1], [0,0,0,2,2,2,1,2], [0,0,0,2,2,2,3,1], [0,0,0,0,0,18,2,1], [0,0,0,0,0,23,1,1], [0,0,0,0,0,20,2,1], [0,0,0,0,0,0,16,4], [0,0,0,0,0,0,2,6], [0,0,0,0,0,1,7,8], [0,0,3,1,1,8,2,1], [0,0,0,1,1,7,9,1], [0,0,0,0,7,1,1,15], [0,0,1,1,3,1,12,3], [0,1,1,1,1,3,2,8], [0,1,1,1,2,3,4,8], [0,1,1,1,1,3,1,14], [0,0,0,0,7,6,8,3], [0,0,0,0,0,1,4,9], [0,0,0,1,2,1,1,7], [0,0,0,0,5,1,3,3], [0,0,0,0,0,2,1,6], [0,0,0,0,0,5,2,6], [0,0,0,0,1,4,2,3], [0,0,0,0,0,1,7,8], [0,0,0,0,7,4,5,6], [2,1,1,1,2,3,3,3], [0,0,0,7,2,1,1,6], [0,1,1,2,1,1,1,6], [0,2,1,1,1,3,2,3], [0,0,0,0,1,1,9,6] ]
leo23/locust
refs/heads/master
locust/test/test_locust_class.py
32
import unittest from locust.core import HttpLocust, Locust, TaskSet, task, events from locust import ResponseError, InterruptTaskSet from locust.exception import CatchResponseError, RescheduleTask, RescheduleTaskImmediately, LocustError from testcases import LocustTestCase, WebserverTestCase class TestTaskSet(LocustTestCase): def setUp(self): super(TestTaskSet, self).setUp() class User(Locust): host = "127.0.0.1" self.locust = User() def test_task_ratio(self): t1 = lambda l: None t2 = lambda l: None class MyTasks(TaskSet): tasks = {t1:5, t2:2} l = MyTasks(self.locust) t1_count = len([t for t in l.tasks if t == t1]) t2_count = len([t for t in l.tasks if t == t2]) self.assertEqual(t1_count, 5) self.assertEqual(t2_count, 2) def test_task_decorator_ratio(self): t1 = lambda l: None t2 = lambda l: None class MyTasks(TaskSet): tasks = {t1:5, t2:2} host = "" @task(3) def t3(self): pass @task(13) def t4(self): pass l = MyTasks(self.locust) t1_count = len([t for t in l.tasks if t == t1]) t2_count = len([t for t in l.tasks if t == t2]) t3_count = len([t for t in l.tasks if t.__name__ == MyTasks.t3.__name__]) t4_count = len([t for t in l.tasks if t.__name__ == MyTasks.t4.__name__]) self.assertEqual(t1_count, 5) self.assertEqual(t2_count, 2) self.assertEqual(t3_count, 3) self.assertEqual(t4_count, 13) def test_on_start(self): class MyTasks(TaskSet): t1_executed = False t2_executed = False def on_start(self): self.t1() def t1(self): self.t1_executed = True @task def t2(self): self.t2_executed = True raise InterruptTaskSet(reschedule=False) l = MyTasks(self.locust) self.assertRaises(RescheduleTask, lambda: l.run()) self.assertTrue(l.t1_executed) self.assertTrue(l.t2_executed) def test_schedule_task(self): self.t1_executed = False self.t2_arg = None def t1(l): self.t1_executed = True def t2(l, arg): self.t2_arg = arg class MyTasks(TaskSet): tasks = [t1, t2] taskset = MyTasks(self.locust) taskset.schedule_task(t1) taskset.execute_next_task() self.assertTrue(self.t1_executed) 
taskset.schedule_task(t2, args=["argument to t2"]) taskset.execute_next_task() self.assertEqual("argument to t2", self.t2_arg) def test_schedule_task_with_kwargs(self): class MyTasks(TaskSet): @task def t1(self): self.t1_executed = True @task def t2(self, *args, **kwargs): self.t2_args = args self.t2_kwargs = kwargs loc = MyTasks(self.locust) loc.schedule_task(loc.t2, [42], {"test_kw":"hello"}) loc.execute_next_task() self.assertEqual((42, ), loc.t2_args) self.assertEqual({"test_kw":"hello"}, loc.t2_kwargs) loc.schedule_task(loc.t2, args=[10, 4], kwargs={"arg1":1, "arg2":2}) loc.execute_next_task() self.assertEqual((10, 4), loc.t2_args) self.assertEqual({"arg1":1, "arg2":2}, loc.t2_kwargs) def test_schedule_task_bound_method(self): class MyTasks(TaskSet): host = "" @task() def t1(self): self.t1_executed = True self.schedule_task(self.t2) def t2(self): self.t2_executed = True taskset = MyTasks(self.locust) taskset.schedule_task(taskset.get_next_task()) taskset.execute_next_task() self.assertTrue(taskset.t1_executed) taskset.execute_next_task() self.assertTrue(taskset.t2_executed) def test_taskset_inheritance(self): def t1(l): pass class MyBaseTaskSet(TaskSet): tasks = [t1] host = "" class MySubTaskSet(MyBaseTaskSet): @task def t2(self): pass l = MySubTaskSet(self.locust) self.assertEqual(2, len(l.tasks)) self.assertEqual([t1, MySubTaskSet.t2.__func__], l.tasks) def test_task_decorator_with_or_without_argument(self): class MyTaskSet(TaskSet): @task def t1(self): pass taskset = MyTaskSet(self.locust) self.assertEqual(len(taskset.tasks), 1) class MyTaskSet2(TaskSet): @task() def t1(self): pass taskset = MyTaskSet2(self.locust) self.assertEqual(len(taskset.tasks), 1) class MyTaskSet3(TaskSet): @task(3) def t1(self): pass taskset = MyTaskSet3(self.locust) self.assertEqual(len(taskset.tasks), 3) def test_sub_taskset(self): class MySubTaskSet(TaskSet): min_wait=1 max_wait=1 @task() def a_task(self): self.locust.sub_locust_task_executed = True self.interrupt() class 
MyTaskSet(TaskSet): tasks = [MySubTaskSet] self.sub_locust_task_executed = False loc = MyTaskSet(self.locust) loc.schedule_task(loc.get_next_task()) self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task()) self.assertTrue(self.locust.sub_locust_task_executed) def test_sub_taskset_tasks_decorator(self): class MyTaskSet(TaskSet): @task class MySubTaskSet(TaskSet): min_wait=1 max_wait=1 @task() def a_task(self): self.locust.sub_locust_task_executed = True self.interrupt() self.sub_locust_task_executed = False loc = MyTaskSet(self.locust) loc.schedule_task(loc.get_next_task()) self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task()) self.assertTrue(self.locust.sub_locust_task_executed) def test_sub_taskset_arguments(self): class MySubTaskSet(TaskSet): min_wait=1 max_wait=1 @task() def a_task(self): self.locust.sub_taskset_args = self.args self.locust.sub_taskset_kwargs = self.kwargs self.interrupt() class MyTaskSet(TaskSet): sub_locust_args = None sub_locust_kwargs = None tasks = [MySubTaskSet] self.locust.sub_taskset_args = None self.locust.sub_taskset_kwargs = None loc = MyTaskSet(self.locust) loc.schedule_task(MySubTaskSet, args=[1,2,3], kwargs={"hello":"world"}) self.assertRaises(RescheduleTaskImmediately, lambda: loc.execute_next_task()) self.assertEqual((1,2,3), self.locust.sub_taskset_args) self.assertEqual({"hello":"world"}, self.locust.sub_taskset_kwargs) def test_interrupt_taskset_in_main_taskset(self): class MyTaskSet(TaskSet): @task def interrupted_task(self): raise InterruptTaskSet(reschedule=False) class MyLocust(Locust): host = "http://127.0.0.1" task_set = MyTaskSet class MyTaskSet2(TaskSet): @task def interrupted_task(self): self.interrupt() class MyLocust2(Locust): host = "http://127.0.0.1" task_set = MyTaskSet2 l = MyLocust() l2 = MyLocust2() self.assertRaises(LocustError, lambda: l.run()) self.assertRaises(LocustError, lambda: l2.run()) try: l.run() except LocustError as e: self.assertTrue("MyLocust" in 
e.args[0], "MyLocust should have been referred to in the exception message") self.assertTrue("MyTaskSet" in e.args[0], "MyTaskSet should have been referred to in the exception message") except: raise try: l2.run() except LocustError as e: self.assertTrue("MyLocust2" in e.args[0], "MyLocust2 should have been referred to in the exception message") self.assertTrue("MyTaskSet2" in e.args[0], "MyTaskSet2 should have been referred to in the exception message") except: raise def test_on_start_interrupt(self): class SubTaskSet(TaskSet): def on_start(self): if self.kwargs["reschedule"]: self.interrupt(reschedule=True) else: self.interrupt(reschedule=False) class MyLocust(Locust): host = "" task_set = SubTaskSet l = MyLocust() task_set = SubTaskSet(l) self.assertRaises(RescheduleTaskImmediately, lambda: task_set.run(reschedule=True)) self.assertRaises(RescheduleTask, lambda: task_set.run(reschedule=False)) def test_parent_attribute(self): from locust.exception import StopLocust parents = {} class SubTaskSet(TaskSet): def on_start(self): parents["sub"] = self.parent @task class SubSubTaskSet(TaskSet): def on_start(self): parents["subsub"] = self.parent @task def stop(self): raise StopLocust() class RootTaskSet(TaskSet): tasks = [SubTaskSet] class MyLocust(Locust): host = "" task_set = RootTaskSet l = MyLocust() l.run() self.assertTrue(isinstance(parents["sub"], RootTaskSet)) self.assertTrue(isinstance(parents["subsub"], SubTaskSet)) class TestWebLocustClass(WebserverTestCase): def test_get_request(self): self.response = "" def t1(l): self.response = l.client.get("/ultra_fast") class MyLocust(HttpLocust): tasks = [t1] host = "http://127.0.0.1:%i" % self.port my_locust = MyLocust() t1(my_locust) self.assertEqual(self.response.content, "This is an ultra fast response") def test_client_request_headers(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port locust = MyLocust() self.assertEqual("hello", locust.client.get("/request_header_test", 
headers={"X-Header-Test":"hello"}).content) def test_client_get(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port locust = MyLocust() self.assertEqual("GET", locust.client.get("/request_method").content) def test_client_get_absolute_url(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port locust = MyLocust() self.assertEqual("GET", locust.client.get("http://127.0.0.1:%i/request_method" % self.port).content) def test_client_post(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port locust = MyLocust() self.assertEqual("POST", locust.client.post("/request_method", {"arg":"hello world"}).content) self.assertEqual("hello world", locust.client.post("/post", {"arg":"hello world"}).content) def test_client_put(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port locust = MyLocust() self.assertEqual("PUT", locust.client.put("/request_method", {"arg":"hello world"}).content) self.assertEqual("hello world", locust.client.put("/put", {"arg":"hello world"}).content) def test_client_delete(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port locust = MyLocust() self.assertEqual("DELETE", locust.client.delete("/request_method").content) self.assertEqual(200, locust.client.delete("/request_method").status_code) def test_client_head(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port locust = MyLocust() self.assertEqual(200, locust.client.head("/request_method").status_code) def test_client_basic_auth(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port class MyAuthorizedLocust(HttpLocust): host = "http://locust:menace@127.0.0.1:%i" % self.port class MyUnauthorizedLocust(HttpLocust): host = "http://locust:wrong@127.0.0.1:%i" % self.port locust = MyLocust() unauthorized = MyUnauthorizedLocust() authorized = MyAuthorizedLocust() self.assertEqual("Authorized", authorized.client.get("/basic_auth").content) 
self.assertFalse(locust.client.get("/basic_auth")) self.assertFalse(unauthorized.client.get("/basic_auth")) def test_log_request_name_argument(self): from locust.stats import RequestStats, global_stats self.response = "" class MyLocust(HttpLocust): tasks = [] host = "http://127.0.0.1:%i" % self.port @task() def t1(l): self.response = l.client.get("/ultra_fast", name="new name!") my_locust = MyLocust() my_locust.t1() self.assertEqual(1, global_stats.get("new name!", "GET").num_requests) self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests) def test_locust_client_error(self): class MyTaskSet(TaskSet): @task def t1(self): self.client.get("/") self.interrupt() class MyLocust(Locust): host = "http://127.0.0.1:%i" % self.port task_set = MyTaskSet my_locust = MyLocust() self.assertRaises(LocustError, lambda: my_locust.client.get("/")) my_taskset = MyTaskSet(my_locust) self.assertRaises(LocustError, lambda: my_taskset.client.get("/")) def test_redirect_url_original_path_as_name(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port l = MyLocust() l.client.get("/redirect") from locust.stats import global_stats self.assertEqual(1, len(global_stats.entries)) self.assertEqual(1, global_stats.get("/redirect", "GET").num_requests) self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests) class TestCatchResponse(WebserverTestCase): def setUp(self): super(TestCatchResponse, self).setUp() class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port self.locust = MyLocust() self.num_failures = 0 self.num_success = 0 def on_failure(request_type, name, response_time, exception): self.num_failures += 1 self.last_failure_exception = exception def on_success(**kwargs): self.num_success += 1 events.request_failure += on_failure events.request_success += on_success def test_catch_response(self): self.assertEqual(500, self.locust.client.get("/fail").status_code) self.assertEqual(1, self.num_failures) self.assertEqual(0, 
self.num_success) with self.locust.client.get("/ultra_fast", catch_response=True) as response: pass self.assertEqual(1, self.num_failures) self.assertEqual(1, self.num_success) with self.locust.client.get("/ultra_fast", catch_response=True) as response: raise ResponseError("Not working") self.assertEqual(2, self.num_failures) self.assertEqual(1, self.num_success) def test_catch_response_http_fail(self): with self.locust.client.get("/fail", catch_response=True) as response: pass self.assertEqual(1, self.num_failures) self.assertEqual(0, self.num_success) def test_catch_response_http_manual_fail(self): with self.locust.client.get("/ultra_fast", catch_response=True) as response: response.failure("Haha!") self.assertEqual(1, self.num_failures) self.assertEqual(0, self.num_success) self.assertTrue( isinstance(self.last_failure_exception, CatchResponseError), "Failure event handler should have been passed a CatchResponseError instance" ) def test_catch_response_http_manual_success(self): with self.locust.client.get("/fail", catch_response=True) as response: response.success() self.assertEqual(0, self.num_failures) self.assertEqual(1, self.num_success) def test_catch_response_allow_404(self): with self.locust.client.get("/does/not/exist", catch_response=True) as response: self.assertEqual(404, response.status_code) if response.status_code == 404: response.success() self.assertEqual(0, self.num_failures) self.assertEqual(1, self.num_success) def test_interrupt_taskset_with_catch_response(self): class MyTaskSet(TaskSet): @task def interrupted_task(self): with self.client.get("/ultra_fast", catch_response=True) as r: raise InterruptTaskSet() class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port task_set = MyTaskSet l = MyLocust() ts = MyTaskSet(l) self.assertRaises(InterruptTaskSet, lambda: ts.interrupted_task()) self.assertEqual(0, self.num_failures) self.assertEqual(0, self.num_success) def test_catch_response_connection_error_success(self): class 
MyLocust(HttpLocust): host = "http://127.0.0.1:1" l = MyLocust() with l.client.get("/", catch_response=True) as r: self.assertEqual(r.status_code, 0) self.assertEqual(None, r.content) r.success() self.assertEqual(1, self.num_success) self.assertEqual(0, self.num_failures) def test_catch_response_connection_error_fail(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:1" l = MyLocust() with l.client.get("/", catch_response=True) as r: self.assertEqual(r.status_code, 0) self.assertEqual(None, r.content) r.success() self.assertEqual(1, self.num_success) self.assertEqual(0, self.num_failures)
hylje/Lyra
refs/heads/master
testproject/settings.py
1
# Django settings for testproject project.
# Development-only settings: SQLite database next to the project,
# DEBUG enabled, no media/static collection configured.

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

from os import path
# Absolute path to the directory containing this settings file; used to
# anchor the SQLite database and template directory below.
PROJECT_ROOT = path.dirname(path.abspath(__file__))

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': PROJECT_ROOT + '/development.db', # Or path to database file if using sqlite3.
        'USER': '',                      # Not used with sqlite3.
        'PASSWORD': '',                  # Not used with sqlite3.
        'HOST': '',                      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                      # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): a committed SECRET_KEY is acceptable only for a throwaway
# test project; never reuse it in production.
SECRET_KEY = 'f%u=h3b0q^=&hc#0@s76l$em)qh#&d1s1kz2i2!um^$j7*j%4('

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'testproject.urls'

TEMPLATE_DIRS = (
    PROJECT_ROOT + "/templates",
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'lyra',  # the app under test in this project
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
marscher/PyEMMA
refs/heads/devel
pyemma/__init__.py
1
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
=======================================
PyEMMA - Emma's Markov Model Algorithms
=======================================
"""

# set version from versioneer.
from ._version import get_versions
__version__ = get_versions()['version']
version = __version__
del get_versions

# `config` must be importable before the sub-packages, which read it.
from .util import config

from . import coordinates
from . import msm
from . import util
from . import plots
from . import thermo


def load(filename, model_name='default'):
    """ Restores a previously saved model or estimator from disk.

    Parameters
    ----------
    filename : str
        path to filename, where the model has been stored.
    model_name: str, default='default'
        if multiple models are contained in the file, these can be accessed by
        their name. Use :func:`pyemma.list_models` to get a representation of
        all stored models.

    Returns
    -------
    obj : Model or Estimator
        the instance containing the same parameters as the saved model/estimator.
    """
    # Imported lazily to keep `import pyemma` cheap.
    from ._base.serialization.serialization import SerializableMixIn
    return SerializableMixIn.load(file_name=filename, model_name=model_name)


def list_models(filename):
    """ Lists all models in given filename.

    Parameters
    ----------
    filename: str
        path to filename, where the model has been stored.

    Returns
    -------
    obj: dict
        A mapping by model name to a descriptive dict, e.g.
        {model_name: {'repr': 'string representation', 'created': 'human readable date', ...}}
    """
    from ._base.serialization.h5file import H5File
    with H5File(filename) as f:
        return f.models_descriptive


def _version_check(current, testing=False):
    """ checks latest version online from http://emma-project.org.

    Returns an (unstarted) thread-like object whose ``start()`` performs the
    check in the background and emits a UserWarning when a newer release
    exists. Can be disabled by setting config.check_version = False.

    >>> from mock import patch
    >>> import warnings, pyemma
    >>> with warnings.catch_warnings(record=True) as cw, patch('pyemma.version', '0.1'):
    ...     warnings.simplefilter('always', UserWarning)
    ...     v = pyemma.version
    ...     t = pyemma._version_check(v, testing=True)
    ...     t.start()
    ...     t.join()
    ...     assert cw, "no warning captured"
    ...     assert "latest release" in str(cw[0].message), "wrong msg"
    """
    if not config.check_version:
        # No-op stand-in so the caller can unconditionally call .start().
        class _dummy:
            def start(self):
                pass
        return _dummy()
    import json
    import platform
    import os
    from distutils.version import LooseVersion as parse
    from contextlib import closing
    import threading
    import uuid

    import sys
    # Under test runners / CI, send no identifying User-Agent headers.
    if 'pytest' in sys.modules or os.getenv('CI', False):
        testing = True

    def _impl():
        import warnings
        from six.moves.urllib.request import urlopen, Request
        import six
        try:
            # uuid.getnode() is a stable per-machine id used only to
            # de-duplicate update-check statistics.
            r = Request('http://emma-project.org/versions.json',
                        headers={'User-Agent': 'PyEMMA-{emma_version}-Py-{python_version}-{platform}-{addr}'
                        .format(emma_version=current, python_version=platform.python_version(),
                                platform=platform.platform(terse=True), addr=uuid.getnode())} if not testing else {})
            with closing(urlopen(r, timeout=30)) as response:
                args = {'encoding': 'ascii'} if six.PY3 else {}
                payload = str(response.read(), **args)  # py3: encoding ascii
            versions = json.loads(payload)
            latest_json = tuple(filter(lambda x: x['latest'], versions))[0]['version']
            latest = parse(latest_json)
            if parse(current) < latest:
                warnings.warn("You are not using the latest release of PyEMMA."
                              " Latest is {latest}, you have {current}."
                              .format(latest=latest, current=current), category=UserWarning)
            if sys.version_info[0] < 3:
                warnings.warn("Python 2.7 usage is deprecated. "
                              "Future versions of PyEMMA will not support it. "
                              "Please upgrade your Python installation.", category=UserWarning)
        except Exception:
            # Best effort: a failed version check must never break imports.
            import logging
            logging.getLogger('pyemma').debug("error during version check", exc_info=True)
    return threading.Thread(target=_impl)


# start check in background
_version_check(version).start()
webmasterraj/FogOrNot
refs/heads/master
flask/lib/python2.7/site-packages/numpy/core/machar.py
141
""" Machine arithmetics - determine the parameters of the floating-point arithmetic system Author: Pearu Peterson, September 2003 """ from __future__ import division, absolute_import, print_function __all__ = ['MachAr'] from numpy.core.fromnumeric import any from numpy.core.numeric import errstate # Need to speed this up...especially for longfloat class MachAr(object): """ Diagnosing machine parameters. Attributes ---------- ibeta : int Radix in which numbers are represented. it : int Number of base-`ibeta` digits in the floating point mantissa M. machep : int Exponent of the smallest (most negative) power of `ibeta` that, added to 1.0, gives something different from 1.0 eps : float Floating-point number ``beta**machep`` (floating point precision) negep : int Exponent of the smallest power of `ibeta` that, substracted from 1.0, gives something different from 1.0. epsneg : float Floating-point number ``beta**negep``. iexp : int Number of bits in the exponent (including its sign and bias). minexp : int Smallest (most negative) power of `ibeta` consistent with there being no leading zeros in the mantissa. xmin : float Floating point number ``beta**minexp`` (the smallest [in magnitude] usable floating value). maxexp : int Smallest (positive) power of `ibeta` that causes overflow. xmax : float ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] usable floating value). irnd : int In ``range(6)``, information on what kind of rounding is done in addition, and on how underflow is handled. ngrd : int Number of 'guard digits' used when truncating the product of two mantissas to fit the representation. epsilon : float Same as `eps`. tiny : float Same as `xmin`. huge : float Same as `xmax`. precision : float ``- int(-log10(eps))`` resolution : float ``- 10**(-precision)`` Parameters ---------- float_conv : function, optional Function that converts an integer or integer array to a float or float array. Default is `float`. 
int_conv : function, optional Function that converts a float or float array to an integer or integer array. Default is `int`. float_to_float : function, optional Function that converts a float array to float. Default is `float`. Note that this does not seem to do anything useful in the current implementation. float_to_str : function, optional Function that converts a single float to a string. Default is ``lambda v:'%24.16e' %v``. title : str, optional Title that is printed in the string representation of `MachAr`. See Also -------- finfo : Machine limits for floating point types. iinfo : Machine limits for integer types. References ---------- .. [1] Press, Teukolsky, Vetterling and Flannery, "Numerical Recipes in C++," 2nd ed, Cambridge University Press, 2002, p. 31. """ def __init__(self, float_conv=float,int_conv=int, float_to_float=float, float_to_str=lambda v:'%24.16e' % v, title='Python floating point number'): """ float_conv - convert integer to float (array) int_conv - convert float (array) to integer float_to_float - convert float array to float float_to_str - convert array float to str title - description of used floating point numbers """ # We ignore all errors here because we are purposely triggering # underflow to detect the properties of the runninng arch. with errstate(under='ignore'): self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): max_iterN = 10000 msg = "Did not converge after %d tries with %s" one = float_conv(1) two = one + one zero = one - one # Do we really need to do this? Aren't they 2 and 2.0? 
# Determine ibeta and beta a = one for _ in range(max_iterN): a = a + a temp = a + one temp1 = temp - a if any(temp1 - one != zero): break else: raise RuntimeError(msg % (_, one.dtype)) b = one for _ in range(max_iterN): b = b + b temp = a + b itemp = int_conv(temp-a) if any(itemp != 0): break else: raise RuntimeError(msg % (_, one.dtype)) ibeta = itemp beta = float_conv(ibeta) # Determine it and irnd it = -1 b = one for _ in range(max_iterN): it = it + 1 b = b * beta temp = b + one temp1 = temp - b if any(temp1 - one != zero): break else: raise RuntimeError(msg % (_, one.dtype)) betah = beta / two a = one for _ in range(max_iterN): a = a + a temp = a + one temp1 = temp - a if any(temp1 - one != zero): break else: raise RuntimeError(msg % (_, one.dtype)) temp = a + betah irnd = 0 if any(temp-a != zero): irnd = 1 tempa = a + beta temp = tempa + betah if irnd == 0 and any(temp-tempa != zero): irnd = 2 # Determine negep and epsneg negep = it + 3 betain = one / beta a = one for i in range(negep): a = a * betain b = a for _ in range(max_iterN): temp = one - a if any(temp-one != zero): break a = a * beta negep = negep - 1 # Prevent infinite loop on PPC with gcc 4.0: if negep < 0: raise RuntimeError("could not determine machine tolerance " "for 'negep', locals() -> %s" % (locals())) else: raise RuntimeError(msg % (_, one.dtype)) negep = -negep epsneg = a # Determine machep and eps machep = - it - 3 a = b for _ in range(max_iterN): temp = one + a if any(temp-one != zero): break a = a * beta machep = machep + 1 else: raise RuntimeError(msg % (_, one.dtype)) eps = a # Determine ngrd ngrd = 0 temp = one + eps if irnd == 0 and any(temp*one - one != zero): ngrd = 1 # Determine iexp i = 0 k = 1 z = betain t = one + eps nxres = 0 for _ in range(max_iterN): y = z z = y*y a = z*one # Check here for underflow temp = z*t if any(a+a == zero) or any(abs(z) >= y): break temp1 = temp * betain if any(temp1*beta == z): break i = i + 1 k = k + k else: raise RuntimeError(msg % (_, 
one.dtype)) if ibeta != 10: iexp = i + 1 mx = k + k else: iexp = 2 iz = ibeta while k >= iz: iz = iz * ibeta iexp = iexp + 1 mx = iz + iz - 1 # Determine minexp and xmin for _ in range(max_iterN): xmin = y y = y * betain a = y * one temp = y * t if any((a + a) != zero) and any(abs(y) < xmin): k = k + 1 temp1 = temp * betain if any(temp1*beta == y) and any(temp != y): nxres = 3 xmin = y break else: break else: raise RuntimeError(msg % (_, one.dtype)) minexp = -k # Determine maxexp, xmax if mx <= k + k - 3 and ibeta != 10: mx = mx + mx iexp = iexp + 1 maxexp = mx + minexp irnd = irnd + nxres if irnd >= 2: maxexp = maxexp - 2 i = maxexp + minexp if ibeta == 2 and not i: maxexp = maxexp - 1 if i > 20: maxexp = maxexp - 1 if any(a != y): maxexp = maxexp - 2 xmax = one - epsneg if any(xmax*one != xmax): xmax = one - beta*epsneg xmax = xmax / (xmin*beta*beta*beta) i = maxexp + minexp + 3 for j in range(i): if ibeta == 2: xmax = xmax + xmax else: xmax = xmax * beta self.ibeta = ibeta self.it = it self.negep = negep self.epsneg = float_to_float(epsneg) self._str_epsneg = float_to_str(epsneg) self.machep = machep self.eps = float_to_float(eps) self._str_eps = float_to_str(eps) self.ngrd = ngrd self.iexp = iexp self.minexp = minexp self.xmin = float_to_float(xmin) self._str_xmin = float_to_str(xmin) self.maxexp = maxexp self.xmax = float_to_float(xmax) self._str_xmax = float_to_str(xmax) self.irnd = irnd self.title = title # Commonly used parameters self.epsilon = self.eps self.tiny = self.xmin self.huge = self.xmax import math self.precision = int(-math.log10(float_to_float(self.eps))) ten = two + two + two + two + two resolution = ten ** (-self.precision) self.resolution = float_to_float(resolution) self._str_resolution = float_to_str(resolution) def __str__(self): fmt = ( 'Machine parameters for %(title)s\n' '---------------------------------------------------------------------\n' 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' 'machep=%(machep)s 
eps=%(_str_eps)s (beta**machep == epsilon)\n' 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' '---------------------------------------------------------------------\n' ) return fmt % self.__dict__ if __name__ == '__main__': print(MachAr())
adrienbrault/home-assistant
refs/heads/dev
homeassistant/components/ring/config_flow.py
3
"""Config flow for Ring integration.""" import logging from oauthlib.oauth2 import AccessDeniedError, MissingTokenError from ring_doorbell import Auth import voluptuous as vol from homeassistant import config_entries, const, core, exceptions from . import DOMAIN _LOGGER = logging.getLogger(__name__) async def validate_input(hass: core.HomeAssistant, data): """Validate the user input allows us to connect.""" auth = Auth(f"HomeAssistant/{const.__version__}") try: token = await hass.async_add_executor_job( auth.fetch_token, data["username"], data["password"], data.get("2fa"), ) except MissingTokenError as err: raise Require2FA from err except AccessDeniedError as err: raise InvalidAuth from err return token class RingConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Ring.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL user_pass = None async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input is not None: try: token = await validate_input(self.hass, user_input) await self.async_set_unique_id(user_input["username"]) return self.async_create_entry( title=user_input["username"], data={"username": user_input["username"], "token": token}, ) except Require2FA: self.user_pass = user_input return await self.async_step_2fa() except InvalidAuth: errors["base"] = "invalid_auth" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" return self.async_show_form( step_id="user", data_schema=vol.Schema({"username": str, "password": str}), errors=errors, ) async def async_step_2fa(self, user_input=None): """Handle 2fa step.""" if user_input: return await self.async_step_user({**self.user_pass, **user_input}) return self.async_show_form( step_id="2fa", data_schema=vol.Schema({"2fa": str}), ) class Require2FA(exceptions.HomeAssistantError): """Error to indicate we require 2FA.""" class 
InvalidAuth(exceptions.HomeAssistantError): """Error to indicate there is invalid auth."""
kernel-sanders/arsenic-mobile
refs/heads/master
Dependencies/Twisted-13.0.0/twisted/internet/test/test_baseprocess.py
32
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.internet._baseprocess} which implements process-related functionality that is useful in all platforms supporting L{IReactorProcess}. """ __metaclass__ = type from twisted.python.deprecate import getWarningMethod, setWarningMethod from twisted.trial.unittest import TestCase from twisted.internet._baseprocess import BaseProcess class BaseProcessTests(TestCase): """ Tests for L{BaseProcess}, a parent class for other classes which represent processes which implements functionality common to many different process implementations. """ def test_callProcessExited(self): """ L{BaseProcess._callProcessExited} calls the C{processExited} method of its C{proto} attribute and passes it a L{Failure} wrapping the given exception. """ class FakeProto: reason = None def processExited(self, reason): self.reason = reason reason = RuntimeError("fake reason") process = BaseProcess(FakeProto()) process._callProcessExited(reason) process.proto.reason.trap(RuntimeError) self.assertIdentical(reason, process.proto.reason.value) def test_callProcessExitedMissing(self): """ L{BaseProcess._callProcessExited} emits a L{DeprecationWarning} if the object referred to by its C{proto} attribute has no C{processExited} method. """ class FakeProto: pass reason = object() process = BaseProcess(FakeProto()) self.addCleanup(setWarningMethod, getWarningMethod()) warnings = [] def collect(message, category, stacklevel): warnings.append((message, category, stacklevel)) setWarningMethod(collect) process._callProcessExited(reason) [(message, category, stacklevel)] = warnings self.assertEqual( message, "Since Twisted 8.2, IProcessProtocol.processExited is required. " "%s.%s must implement it." % ( FakeProto.__module__, FakeProto.__name__)) self.assertIdentical(category, DeprecationWarning) # The stacklevel doesn't really make sense for this kind of # deprecation. 
Requiring it to be 0 will at least avoid pointing to # any part of Twisted or a random part of the application's code, which # I think would be more misleading than having it point inside the # warning system itself. -exarkun self.assertEqual(stacklevel, 0)
jacobrivers123/kernel-nk1-negalite-lt02ltespr
refs/heads/master
tools/perf/scripts/python/failed-syscalls-by-pid.py
11180
# failed system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide failed system call totals, broken down by pid. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return if ret < 0: try: syscalls[common_comm][common_pid][id][ret] += 1 except TypeError: syscalls[common_comm][common_pid][id][ret] = 1 def print_error_totals(): if for_comm is not None: print "\nsyscall errors for %s:\n\n" % (for_comm), else: print "\nsyscall errors:\n\n", print "%-30s %10s\n" % ("comm [pid]", "count"), print "%-30s %10s\n" % ("------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): print " err = %-20s %10d\n" % (strerror(ret), val),
harwee/electrum-xvg-tor
refs/heads/master
lib/plugins.py
2
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2015 Thomas Voegtlin # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import traceback import sys import os import imp import pkgutil from util import * from i18n import _ from util import print_error, profiler plugins = {} descriptions = [] loader = None def is_available(name, w): for d in descriptions: if d.get('name') == name: break else: return False deps = d.get('requires', []) for dep, s in deps: try: __import__(dep) except ImportError: return False wallet_types = d.get('requires_wallet_type') if wallet_types: if w.wallet_type not in wallet_types: return False return True @profiler def init_plugins(config, is_local, gui_name): global plugins, descriptions, loader if is_local: fp, pathname, description = imp.find_module('plugins') electrum_plugins = imp.load_module('electrum_xvg_plugins', fp, pathname, description) loader = lambda name: imp.load_source('electrum_xvg_plugins.' + name, os.path.join(pathname, name + '.py')) else: electrum_plugins = __import__('electrum_xvg_plugins') loader = lambda name: __import__('electrum_xvg_plugins.' 
+ name, fromlist=['electrum_xvg_plugins']) def constructor(name, storage): if plugins.get(name) is None: try: print_error(_("Loading plugin by constructor:"), name) p = loader(name) plugins[name] = p.Plugin(config, name) except: print_msg(_("Error: cannot initialize plugin"), name) return return plugins[name].constructor(storage) def register_wallet_type(name, x, constructor): import wallet x += (lambda storage: constructor(name, storage),) wallet.wallet_types.append(x) descriptions = electrum_plugins.descriptions for item in descriptions: name = item['name'] if gui_name not in item.get('available_for', []): continue x = item.get('registers_wallet_type') if x: register_wallet_type(name, x, constructor) if not config.get('use_' + name): continue try: p = loader(name) plugins[name] = p.Plugin(config, name) except Exception: print_msg(_("Error: cannot initialize plugin"), name) traceback.print_exc(file=sys.stdout) hook_names = set() hooks = {} def hook(func): hook_names.add(func.func_name) return func def run_hook(name, *args): return _run_hook(name, False, *args) def always_hook(name, *args): return _run_hook(name, True, *args) def _run_hook(name, always, *args): results = [] f_list = hooks.get(name, []) for p, f in f_list: if name == 'load_wallet': p.wallet = args[0] if name == 'init_qt': gui = args[0] p.window = gui.main_window if always or p.is_enabled(): try: r = f(*args) except Exception: print_error("Plugin error") traceback.print_exc(file=sys.stdout) r = False if r: results.append(r) if name == 'close_wallet': p.wallet = None if results: assert len(results) == 1, results return results[0] class BasePlugin: def __init__(self, config, name): self.name = name self.config = config self.wallet = None # add self to hooks for k in dir(self): if k in hook_names: l = hooks.get(k, []) l.append((self, getattr(self, k))) hooks[k] = l def close(self): # remove self from hooks for k in dir(self): if k in hook_names: l = hooks.get(k, []) l.remove((self, getattr(self, k))) 
hooks[k] = l def print_error(self, *msg): print_error("[%s]"%self.name, *msg) def requires_settings(self): return False def enable(self): self.set_enabled(True) return True def disable(self): self.set_enabled(False) return True def init_qt(self, gui): pass @hook def load_wallet(self, wallet, window): pass @hook def close_wallet(self): pass #def init(self): pass def is_enabled(self): return self.is_available() and self.config.get('use_'+self.name) is True def is_available(self): return True def set_enabled(self, enabled): self.config.set_key('use_'+self.name, enabled, True) def settings_dialog(self): pass
joshimio/blog
refs/heads/master
node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/default.py
364
# -*- coding: utf-8 -*- """ pygments.styles.default ~~~~~~~~~~~~~~~~~~~~~~~ The default highlighting style. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace class DefaultStyle(Style): """ The default style (inspired by Emacs 22). """ background_color = "#f8f8f8" default_style = "" styles = { Whitespace: "#bbbbbb", Comment: "italic #408080", Comment.Preproc: "noitalic #BC7A00", #Keyword: "bold #AA22FF", Keyword: "bold #008000", Keyword.Pseudo: "nobold", Keyword.Type: "nobold #B00040", Operator: "#666666", Operator.Word: "bold #AA22FF", Name.Builtin: "#008000", Name.Function: "#0000FF", Name.Class: "bold #0000FF", Name.Namespace: "bold #0000FF", Name.Exception: "bold #D2413A", Name.Variable: "#19177C", Name.Constant: "#880000", Name.Label: "#A0A000", Name.Entity: "bold #999999", Name.Attribute: "#7D9029", Name.Tag: "bold #008000", Name.Decorator: "#AA22FF", String: "#BA2121", String.Doc: "italic", String.Interpol: "bold #BB6688", String.Escape: "bold #BB6622", String.Regex: "#BB6688", #String.Symbol: "#B8860B", String.Symbol: "#19177C", String.Other: "#008000", Number: "#666666", Generic.Heading: "bold #000080", Generic.Subheading: "bold #800080", Generic.Deleted: "#A00000", Generic.Inserted: "#00A000", Generic.Error: "#FF0000", Generic.Emph: "italic", Generic.Strong: "bold", Generic.Prompt: "bold #000080", Generic.Output: "#888", Generic.Traceback: "#04D", Error: "border:#FF0000" }
Dev-Cloud-Platform/Dev-Cloud
refs/heads/master
dev_cloud/cc1/src/wi/urls/user/farm.py
1
# -*- coding: utf-8 -*- # @COPYRIGHT_begin # # Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @COPYRIGHT_end """@package src.wi.urls.user.farm @author Piotr Wójcik @date 14.11.2011 """ from django.conf.urls import patterns, url, include from django.utils.translation import ugettext_lazy as _ from wi.forms.farm import CreateFarmForm1, CreateFarmForm2, CreateFarmForm3, CreateFarmForm4 from wi.forms.vm import EditVMForm from wi.utils.decorators import user_permission from wi.utils.views import direct_to_template, simple_generic_id, form_generic_id from wi.views.user.farm import CreateFarmWizard farm_patterns = patterns('wi.views.user.farm', url(r'^$', user_permission(direct_to_template), {'template_name': 'farms/base.html'}, name='far_farms'), url(r'^potato/$', user_permission(direct_to_template), {'template_name': 'farms/potato.html'}, name='far_potato'), url(r'^create_farm/$', CreateFarmWizard.as_view( [CreateFarmForm1, CreateFarmForm2, CreateFarmForm3, CreateFarmForm4]), name='far_create_farm'), url(r'^show_farm/$', user_permission(direct_to_template), {'template_name': 'farms/show_farm.html'}, name='far_show_farm'), url(r'^ajax/get_table/$', 'far_ajax_get_table', name='far_ajax_get_table'), url(r'^ajax/destroy_farm/(?P<id1>\d+)/$', user_permission(simple_generic_id), {'template_name': 'generic/simple.html', 'success_msg': ( lambda desc: _('You have successfully destroyed farm <b>%(desc)s</b>.') % {'desc': 
desc}), 'ask_msg': ( lambda desc: _('Do you really want to destroy farm <b>%(desc)s</b>?') % {'desc': desc}), 'request_url': 'user/farm/destroy/', 'id_key': 'farm_id', }, name='far_ajax_destroy_farm'), url(r'^ajax/save_and_shutdown_farm/(?P<id1>\d+)/$', user_permission(form_generic_id), {'template_name': 'generic/form.html', 'success_msg': (lambda desc, data: _('Farm head will be saved.') % {'desc': desc}), 'ask_msg': ( lambda desc: _('The farm will be closed. Enter a name to save head of this farm.') % { 'desc': desc}), 'confirmation': _('Save and shutdown'), 'request_url_post': 'user/farm/save_and_shutdown/', 'request_url_get': 'user/farm/get_by_id/', 'form_class': EditVMForm, 'id_key': 'farm_id', }, name='far_ajax_save_and_shutdown'), ) urlpatterns = patterns('', url(r'^farm/', include(farm_patterns)), )
pombredanne/pyjs
refs/heads/master
examples/uitest/libuitest/DOMTest.py
6
from pyjamas.ui.RootPanel import RootPanel from pyjamas import DOM from UnitTest import UnitTest1 from __pyjamas__ import doc #TODO: see issue 768 class DOMTest(UnitTest1): def testDivHTML(self): e = DOM.getElementById('tests') div = DOM.createElement('div') DOM.appendChild(e, div) DOM.setInnerHTML(div, 'hello world\n') self.write_test_output('addDiv') DOM.removeChild(e, div) self.write_test_output('removeDiv') def testDivText(self): e = DOM.getElementById('tests') div = DOM.createElement('div') DOM.appendChild(e, div) div2 = DOM.createElement('div') DOM.appendChild(div, div2) DOM.setInnerText(div2, 'hello world\n') self.write_test_output('addDiv') DOM.removeChild(e, div) self.write_test_output('removeDiv')
teleyinex/watchdog
refs/heads/master
src/watchdog/__init__.py
35
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com> # Copyright 2012 Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
batrick/cctools
refs/heads/master
makeflow/test/linker/001/a.py
26
import sys from b import *
samarthmed/datajam
refs/heads/master
app/__init__.py
1
from flask import Flask from flask.ext.cors import CORS app = Flask("data-jam" , template_folder= "./app/templates", static_folder='./app/static') CORS(app, resources = {r"/listing/*" : {"origins" : "*"}}) # Entry point for all apps .... from dashboard import controllers
ZSeaPeng/python
refs/heads/master
kamen/0002/gene_200_keys.py
76
import uuid,pickle class gene_keys(): def __init__(self,num): self.num = num; self.list = []; def gene_list(self): for i in range(self.num): key=uuid.uuid1(); self.list.append(key) def return_list(self): return self.list; g=gene_keys(200); g.gene_list(); keys_list=g.return_list(); #print keys_list with open('keys.txt','wb') as f: for key in keys_list: f.write(str(key).replace('-','')+'\n')
sliz1/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/py/py/_code/_assertionold.py
218
import py import sys, inspect from compiler import parse, ast, pycodegen from py._code.assertion import BuiltinAssertionError, _format_explanation passthroughex = py.builtin._sysex class Failure: def __init__(self, node): self.exc, self.value, self.tb = sys.exc_info() self.node = node class View(object): """View base class. If C is a subclass of View, then C(x) creates a proxy object around the object x. The actual class of the proxy is not C in general, but a *subclass* of C determined by the rules below. To avoid confusion we call view class the class of the proxy (a subclass of C, so of View) and object class the class of x. Attributes and methods not found in the proxy are automatically read on x. Other operations like setting attributes are performed on the proxy, as determined by its view class. The object x is available from the proxy as its __obj__ attribute. The view class selection is determined by the __view__ tuples and the optional __viewkey__ method. By default, the selected view class is the most specific subclass of C whose __view__ mentions the class of x. If no such subclass is found, the search proceeds with the parent object classes. For example, C(True) will first look for a subclass of C with __view__ = (..., bool, ...) and only if it doesn't find any look for one with __view__ = (..., int, ...), and then ..., object,... If everything fails the class C itself is considered to be the default. Alternatively, the view class selection can be driven by another aspect of the object x, instead of the class of x, by overriding __viewkey__. See last example at the end of this module. 
""" _viewcache = {} __view__ = () def __new__(rootclass, obj, *args, **kwds): self = object.__new__(rootclass) self.__obj__ = obj self.__rootclass__ = rootclass key = self.__viewkey__() try: self.__class__ = self._viewcache[key] except KeyError: self.__class__ = self._selectsubclass(key) return self def __getattr__(self, attr): # attributes not found in the normal hierarchy rooted on View # are looked up in the object's real class return getattr(self.__obj__, attr) def __viewkey__(self): return self.__obj__.__class__ def __matchkey__(self, key, subclasses): if inspect.isclass(key): keys = inspect.getmro(key) else: keys = [key] for key in keys: result = [C for C in subclasses if key in C.__view__] if result: return result return [] def _selectsubclass(self, key): subclasses = list(enumsubclasses(self.__rootclass__)) for C in subclasses: if not isinstance(C.__view__, tuple): C.__view__ = (C.__view__,) choices = self.__matchkey__(key, subclasses) if not choices: return self.__rootclass__ elif len(choices) == 1: return choices[0] else: # combine the multiple choices return type('?', tuple(choices), {}) def __repr__(self): return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) def enumsubclasses(cls): for subcls in cls.__subclasses__(): for subsubclass in enumsubclasses(subcls): yield subsubclass yield cls class Interpretable(View): """A parse tree node with a few extra methods.""" explanation = None def is_builtin(self, frame): return False def eval(self, frame): # fall-back for unknown expression nodes try: expr = ast.Expression(self.__obj__) expr.filename = '<eval>' self.__obj__.filename = '<eval>' co = pycodegen.ExpressionCodeGenerator(expr).getCode() result = frame.eval(co) except passthroughex: raise except: raise Failure(self) self.result = result self.explanation = self.explanation or frame.repr(self.result) def run(self, frame): # fall-back for unknown statement nodes try: expr = ast.Module(None, ast.Stmt([self.__obj__])) expr.filename = '<run>' co = 
pycodegen.ModuleCodeGenerator(expr).getCode() frame.exec_(co) except passthroughex: raise except: raise Failure(self) def nice_explanation(self): return _format_explanation(self.explanation) class Name(Interpretable): __view__ = ast.Name def is_local(self, frame): source = '%r in locals() is not globals()' % self.name try: return frame.is_true(frame.eval(source)) except passthroughex: raise except: return False def is_global(self, frame): source = '%r in globals()' % self.name try: return frame.is_true(frame.eval(source)) except passthroughex: raise except: return False def is_builtin(self, frame): source = '%r not in locals() and %r not in globals()' % ( self.name, self.name) try: return frame.is_true(frame.eval(source)) except passthroughex: raise except: return False def eval(self, frame): super(Name, self).eval(frame) if not self.is_local(frame): self.explanation = self.name class Compare(Interpretable): __view__ = ast.Compare def eval(self, frame): expr = Interpretable(self.expr) expr.eval(frame) for operation, expr2 in self.ops: if hasattr(self, 'result'): # shortcutting in chained expressions if not frame.is_true(self.result): break expr2 = Interpretable(expr2) expr2.eval(frame) self.explanation = "%s %s %s" % ( expr.explanation, operation, expr2.explanation) source = "__exprinfo_left %s __exprinfo_right" % operation try: self.result = frame.eval(source, __exprinfo_left=expr.result, __exprinfo_right=expr2.result) except passthroughex: raise except: raise Failure(self) expr = expr2 class And(Interpretable): __view__ = ast.And def eval(self, frame): explanations = [] for expr in self.nodes: expr = Interpretable(expr) expr.eval(frame) explanations.append(expr.explanation) self.result = expr.result if not frame.is_true(expr.result): break self.explanation = '(' + ' and '.join(explanations) + ')' class Or(Interpretable): __view__ = ast.Or def eval(self, frame): explanations = [] for expr in self.nodes: expr = Interpretable(expr) expr.eval(frame) 
explanations.append(expr.explanation) self.result = expr.result if frame.is_true(expr.result): break self.explanation = '(' + ' or '.join(explanations) + ')' # == Unary operations == keepalive = [] for astclass, astpattern in { ast.Not : 'not __exprinfo_expr', ast.Invert : '(~__exprinfo_expr)', }.items(): class UnaryArith(Interpretable): __view__ = astclass def eval(self, frame, astpattern=astpattern): expr = Interpretable(self.expr) expr.eval(frame) self.explanation = astpattern.replace('__exprinfo_expr', expr.explanation) try: self.result = frame.eval(astpattern, __exprinfo_expr=expr.result) except passthroughex: raise except: raise Failure(self) keepalive.append(UnaryArith) # == Binary operations == for astclass, astpattern in { ast.Add : '(__exprinfo_left + __exprinfo_right)', ast.Sub : '(__exprinfo_left - __exprinfo_right)', ast.Mul : '(__exprinfo_left * __exprinfo_right)', ast.Div : '(__exprinfo_left / __exprinfo_right)', ast.Mod : '(__exprinfo_left % __exprinfo_right)', ast.Power : '(__exprinfo_left ** __exprinfo_right)', }.items(): class BinaryArith(Interpretable): __view__ = astclass def eval(self, frame, astpattern=astpattern): left = Interpretable(self.left) left.eval(frame) right = Interpretable(self.right) right.eval(frame) self.explanation = (astpattern .replace('__exprinfo_left', left .explanation) .replace('__exprinfo_right', right.explanation)) try: self.result = frame.eval(astpattern, __exprinfo_left=left.result, __exprinfo_right=right.result) except passthroughex: raise except: raise Failure(self) keepalive.append(BinaryArith) class CallFunc(Interpretable): __view__ = ast.CallFunc def is_bool(self, frame): source = 'isinstance(__exprinfo_value, bool)' try: return frame.is_true(frame.eval(source, __exprinfo_value=self.result)) except passthroughex: raise except: return False def eval(self, frame): node = Interpretable(self.node) node.eval(frame) explanations = [] vars = {'__exprinfo_fn': node.result} source = '__exprinfo_fn(' for a in self.args: 
if isinstance(a, ast.Keyword): keyword = a.name a = a.expr else: keyword = None a = Interpretable(a) a.eval(frame) argname = '__exprinfo_%d' % len(vars) vars[argname] = a.result if keyword is None: source += argname + ',' explanations.append(a.explanation) else: source += '%s=%s,' % (keyword, argname) explanations.append('%s=%s' % (keyword, a.explanation)) if self.star_args: star_args = Interpretable(self.star_args) star_args.eval(frame) argname = '__exprinfo_star' vars[argname] = star_args.result source += '*' + argname + ',' explanations.append('*' + star_args.explanation) if self.dstar_args: dstar_args = Interpretable(self.dstar_args) dstar_args.eval(frame) argname = '__exprinfo_kwds' vars[argname] = dstar_args.result source += '**' + argname + ',' explanations.append('**' + dstar_args.explanation) self.explanation = "%s(%s)" % ( node.explanation, ', '.join(explanations)) if source.endswith(','): source = source[:-1] source += ')' try: self.result = frame.eval(source, **vars) except passthroughex: raise except: raise Failure(self) if not node.is_builtin(frame) or not self.is_bool(frame): r = frame.repr(self.result) self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) class Getattr(Interpretable): __view__ = ast.Getattr def eval(self, frame): expr = Interpretable(self.expr) expr.eval(frame) source = '__exprinfo_expr.%s' % self.attrname try: self.result = frame.eval(source, __exprinfo_expr=expr.result) except passthroughex: raise except: raise Failure(self) self.explanation = '%s.%s' % (expr.explanation, self.attrname) # if the attribute comes from the instance, its value is interesting source = ('hasattr(__exprinfo_expr, "__dict__") and ' '%r in __exprinfo_expr.__dict__' % self.attrname) try: from_instance = frame.is_true( frame.eval(source, __exprinfo_expr=expr.result)) except passthroughex: raise except: from_instance = True if from_instance: r = frame.repr(self.result) self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) # == 
Re-interpretation of full statements == class Assert(Interpretable): __view__ = ast.Assert def run(self, frame): test = Interpretable(self.test) test.eval(frame) # simplify 'assert False where False = ...' if (test.explanation.startswith('False\n{False = ') and test.explanation.endswith('\n}')): test.explanation = test.explanation[15:-2] # print the result as 'assert <explanation>' self.result = test.result self.explanation = 'assert ' + test.explanation if not frame.is_true(test.result): try: raise BuiltinAssertionError except passthroughex: raise except: raise Failure(self) class Assign(Interpretable): __view__ = ast.Assign def run(self, frame): expr = Interpretable(self.expr) expr.eval(frame) self.result = expr.result self.explanation = '... = ' + expr.explanation # fall-back-run the rest of the assignment ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) mod = ast.Module(None, ast.Stmt([ass])) mod.filename = '<run>' co = pycodegen.ModuleCodeGenerator(mod).getCode() try: frame.exec_(co, __exprinfo_expr=expr.result) except passthroughex: raise except: raise Failure(self) class Discard(Interpretable): __view__ = ast.Discard def run(self, frame): expr = Interpretable(self.expr) expr.eval(frame) self.result = expr.result self.explanation = expr.explanation class Stmt(Interpretable): __view__ = ast.Stmt def run(self, frame): for stmt in self.nodes: stmt = Interpretable(stmt) stmt.run(frame) def report_failure(e): explanation = e.node.nice_explanation() if explanation: explanation = ", in: " + explanation else: explanation = "" sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) def check(s, frame=None): if frame is None: frame = sys._getframe(1) frame = py.code.Frame(frame) expr = parse(s, 'eval') assert isinstance(expr, ast.Expression) node = Interpretable(expr.node) try: node.eval(frame) except passthroughex: raise except Failure: e = sys.exc_info()[1] report_failure(e) else: if not frame.is_true(node.result): sys.stderr.write("assertion 
failed: %s\n" % node.nice_explanation()) ########################################################### # API / Entry points # ######################################################### def interpret(source, frame, should_fail=False): module = Interpretable(parse(source, 'exec').node) #print "got module", module if isinstance(frame, py.std.types.FrameType): frame = py.code.Frame(frame) try: module.run(frame) except Failure: e = sys.exc_info()[1] return getfailure(e) except passthroughex: raise except: import traceback traceback.print_exc() if should_fail: return ("(assertion failed, but when it was re-run for " "printing intermediate values, it did not fail. Suggestions: " "compute assert expression before the assert or use --nomagic)") else: return None def getmsg(excinfo): if isinstance(excinfo, tuple): excinfo = py.code.ExceptionInfo(excinfo) #frame, line = gettbline(tb) #frame = py.code.Frame(frame) #return interpret(line, frame) tb = excinfo.traceback[-1] source = str(tb.statement).strip() x = interpret(source, tb.frame, should_fail=True) if not isinstance(x, str): raise TypeError("interpret returned non-string %r" % (x,)) return x def getfailure(e): explanation = e.node.nice_explanation() if str(e.value): lines = explanation.split('\n') lines[0] += " << %s" % (e.value,) explanation = '\n'.join(lines) text = "%s: %s" % (e.exc.__name__, explanation) if text.startswith('AssertionError: assert '): text = text[16:] return text def run(s, frame=None): if frame is None: frame = sys._getframe(1) frame = py.code.Frame(frame) module = Interpretable(parse(s, 'exec').node) try: module.run(frame) except Failure: e = sys.exc_info()[1] report_failure(e) if __name__ == '__main__': # example: def f(): return 5 def g(): return 3 def h(x): return 'never' check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") check("f() == g()") i = 4 check("i == f()") check("len(f()) == 0") check("isinstance(2+3+4, float)") run("x = i") check("x == 5") run("assert not f(), 
'oops'") run("a, b, c = 1, 2") run("a, b, c = f()") check("max([f(),g()]) == 4") check("'hello'[g()] == 'h'") run("'guk%d' % h(f())")
Ircam-Web/mezzanine-organization
refs/heads/master
organization/network/migrations/0093_auto_20170301_1544.py
1
# -*- coding: utf-8 -*- # Generated by Django 1.9.11 on 2017-03-01 14:44 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('organization-network', '0092_auto_20170228_1317'), ] operations = [ migrations.AlterModelOptions( name='personactivitytimesheet', options={'ordering': ['-year', 'month', 'project'], 'verbose_name': 'activity timesheet', 'verbose_name_plural': 'activity timesheets'}, ), ]
kshitizanand/authomatic
refs/heads/master
examples/pyramid/functional_test/__init__.py
14
__author__ = 'phudec'
Venturi/cms
refs/heads/master
env/lib/python2.7/site-packages/django/db/migrations/writer.py
16
from __future__ import unicode_literals import collections import datetime import decimal import math import os import re import sys import types from importlib import import_module from django.apps import apps from django.db import migrations, models from django.db.migrations.loader import MigrationLoader from django.db.migrations.operations.base import Operation from django.utils import datetime_safe, six from django.utils._os import upath from django.utils.encoding import force_text from django.utils.functional import Promise from django.utils.inspect import get_func_args from django.utils.timezone import utc from django.utils.version import get_docs_version COMPILED_REGEX_TYPE = type(re.compile('')) class SettingsReference(str): """ Special subclass of string which actually references a current settings value. It's treated as the value in memory, but serializes out to a settings.NAME attribute reference. """ def __new__(self, value, setting_name): return str.__new__(self, value) def __init__(self, value, setting_name): self.setting_name = setting_name class OperationWriter(object): def __init__(self, operation, indentation=2): self.operation = operation self.buff = [] self.indentation = indentation def serialize(self): def _write(_arg_name, _arg_value): if (_arg_name in self.operation.serialization_expand_args and isinstance(_arg_value, (list, tuple, dict))): if isinstance(_arg_value, dict): self.feed('%s={' % _arg_name) self.indent() for key, value in _arg_value.items(): key_string, key_imports = MigrationWriter.serialize(key) arg_string, arg_imports = MigrationWriter.serialize(value) args = arg_string.splitlines() if len(args) > 1: self.feed('%s: %s' % (key_string, args[0])) for arg in args[1:-1]: self.feed(arg) self.feed('%s,' % args[-1]) else: self.feed('%s: %s,' % (key_string, arg_string)) imports.update(key_imports) imports.update(arg_imports) self.unindent() self.feed('},') else: self.feed('%s=[' % _arg_name) self.indent() for item in _arg_value: 
arg_string, arg_imports = MigrationWriter.serialize(item) args = arg_string.splitlines() if len(args) > 1: for arg in args[:-1]: self.feed(arg) self.feed('%s,' % args[-1]) else: self.feed('%s,' % arg_string) imports.update(arg_imports) self.unindent() self.feed('],') else: arg_string, arg_imports = MigrationWriter.serialize(_arg_value) args = arg_string.splitlines() if len(args) > 1: self.feed('%s=%s' % (_arg_name, args[0])) for arg in args[1:-1]: self.feed(arg) self.feed('%s,' % args[-1]) else: self.feed('%s=%s,' % (_arg_name, arg_string)) imports.update(arg_imports) imports = set() name, args, kwargs = self.operation.deconstruct() operation_args = get_func_args(self.operation.__init__) # See if this operation is in django.db.migrations. If it is, # We can just use the fact we already have that imported, # otherwise, we need to add an import for the operation class. if getattr(migrations, name, None) == self.operation.__class__: self.feed('migrations.%s(' % name) else: imports.add('import %s' % (self.operation.__class__.__module__)) self.feed('%s.%s(' % (self.operation.__class__.__module__, name)) self.indent() for i, arg in enumerate(args): arg_value = arg arg_name = operation_args[i] _write(arg_name, arg_value) i = len(args) # Only iterate over remaining arguments for arg_name in operation_args[i:]: if arg_name in kwargs: # Don't sort to maintain signature order arg_value = kwargs[arg_name] _write(arg_name, arg_value) self.unindent() self.feed('),') return self.render(), imports def indent(self): self.indentation += 1 def unindent(self): self.indentation -= 1 def feed(self, line): self.buff.append(' ' * (self.indentation * 4) + line) def render(self): return '\n'.join(self.buff) class MigrationWriter(object): """ Takes a Migration instance and is able to produce the contents of the migration file from it. """ def __init__(self, migration): self.migration = migration self.needs_manual_porting = False def as_string(self): """ Returns a string of the file contents. 
""" items = { "replaces_str": "", } imports = set() # Deconstruct operations operations = [] for operation in self.migration.operations: operation_string, operation_imports = OperationWriter(operation).serialize() imports.update(operation_imports) operations.append(operation_string) items["operations"] = "\n".join(operations) + "\n" if operations else "" # Format dependencies and write out swappable dependencies right dependencies = [] for dependency in self.migration.dependencies: if dependency[0] == "__setting__": dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1]) imports.add("from django.conf import settings") else: # No need to output bytestrings for dependencies dependency = tuple(force_text(s) for s in dependency) dependencies.append(" %s," % self.serialize(dependency)[0]) items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else "" # Format imports nicely, swapping imports of functions from migration files # for comments migration_imports = set() for line in list(imports): if re.match("^import (.*)\.\d+[^\s]*$", line): migration_imports.add(line.split("import")[1].strip()) imports.remove(line) self.needs_manual_porting = True imports.discard("from django.db import models") items["imports"] = "\n".join(imports) + "\n" if imports else "" if migration_imports: items["imports"] += ( "\n\n# Functions from the following migrations need manual " "copying.\n# Move them and any dependencies into this file, " "then update the\n# RunPython operations to refer to the local " "versions:\n# %s" ) % "\n# ".join(migration_imports) # If there's a replaces, make a string for it if self.migration.replaces: items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0] return (MIGRATION_TEMPLATE % items).encode("utf8") @staticmethod def serialize_datetime(value): """ Returns a serialized version of a datetime object that is valid, executable python code. 
It converts timezone-aware values to utc with an 'executable' utc representation of tzinfo. """ if value.tzinfo is not None and value.tzinfo != utc: value = value.astimezone(utc) value_repr = repr(value).replace("<UTC>", "utc") if isinstance(value, datetime_safe.datetime): value_repr = "datetime.%s" % value_repr return value_repr @property def filename(self): return "%s.py" % self.migration.name @property def path(self): migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label) # See if we can import the migrations module directly try: migrations_module = import_module(migrations_package_name) # Python 3 fails when the migrations directory does not have a # __init__.py file if not hasattr(migrations_module, '__file__'): raise ImportError basedir = os.path.dirname(upath(migrations_module.__file__)) except ImportError: app_config = apps.get_app_config(self.migration.app_label) migrations_package_basename = migrations_package_name.split(".")[-1] # Alright, see if it's a direct submodule of the app if '%s.%s' % (app_config.name, migrations_package_basename) == migrations_package_name: basedir = os.path.join(app_config.path, migrations_package_basename) else: # In case of using MIGRATION_MODULES setting and the custom # package doesn't exist, create one. 
package_dirs = migrations_package_name.split(".") create_path = os.path.join(upath(sys.path[0]), *package_dirs) if not os.path.isdir(create_path): os.makedirs(create_path) for i in range(1, len(package_dirs) + 1): init_dir = os.path.join(upath(sys.path[0]), *package_dirs[:i]) init_path = os.path.join(init_dir, "__init__.py") if not os.path.isfile(init_path): open(init_path, "w").close() return os.path.join(create_path, self.filename) return os.path.join(basedir, self.filename) @classmethod def serialize_deconstructed(cls, path, args, kwargs): name, imports = cls._serialize_path(path) strings = [] for arg in args: arg_string, arg_imports = cls.serialize(arg) strings.append(arg_string) imports.update(arg_imports) for kw, arg in kwargs.items(): arg_string, arg_imports = cls.serialize(arg) imports.update(arg_imports) strings.append("%s=%s" % (kw, arg_string)) return "%s(%s)" % (name, ", ".join(strings)), imports @classmethod def _serialize_path(cls, path): module, name = path.rsplit(".", 1) if module == "django.db.models": imports = {"from django.db import models"} name = "models.%s" % name else: imports = {"import %s" % module} name = path return name, imports @classmethod def serialize(cls, value): """ Serializes the value to a string that's parsable by Python, along with any needed imports to make that string work. More advanced than repr() as it can encode things like datetime.datetime.now. """ # FIXME: Ideally Promise would be reconstructible, but for now we # use force_text on them and defer to the normal string serialization # process. 
if isinstance(value, Promise): value = force_text(value) # Sequences if isinstance(value, (list, set, tuple)): imports = set() strings = [] for item in value: item_string, item_imports = cls.serialize(item) imports.update(item_imports) strings.append(item_string) if isinstance(value, set): # Don't use the literal "{%s}" as it doesn't support empty set format = "set([%s])" elif isinstance(value, tuple): # When len(value)==0, the empty tuple should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. format = "(%s)" if len(value) != 1 else "(%s,)" else: format = "[%s]" return format % (", ".join(strings)), imports # Dictionaries elif isinstance(value, dict): imports = set() strings = [] for k, v in value.items(): k_string, k_imports = cls.serialize(k) v_string, v_imports = cls.serialize(v) imports.update(k_imports) imports.update(v_imports) strings.append((k_string, v_string)) return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports # Datetimes elif isinstance(value, datetime.datetime): value_repr = cls.serialize_datetime(value) imports = ["import datetime"] if value.tzinfo is not None: imports.append("from django.utils.timezone import utc") return value_repr, set(imports) # Dates elif isinstance(value, datetime.date): value_repr = repr(value) if isinstance(value, datetime_safe.date): value_repr = "datetime.%s" % value_repr return value_repr, {"import datetime"} # Times elif isinstance(value, datetime.time): value_repr = repr(value) if isinstance(value, datetime_safe.time): value_repr = "datetime.%s" % value_repr return value_repr, {"import datetime"} # Timedeltas elif isinstance(value, datetime.timedelta): return repr(value), {"import datetime"} # Settings references elif isinstance(value, SettingsReference): return "settings.%s" % value.setting_name, {"from django.conf import settings"} # Simple types elif isinstance(value, float): if math.isnan(value) or math.isinf(value): return 'float("{}")'.format(value), set() return 
repr(value), set() elif isinstance(value, six.integer_types + (bool, type(None))): return repr(value), set() elif isinstance(value, six.binary_type): value_repr = repr(value) if six.PY2: # Prepend the `b` prefix since we're importing unicode_literals value_repr = 'b' + value_repr return value_repr, set() elif isinstance(value, six.text_type): value_repr = repr(value) if six.PY2: # Strip the `u` prefix since we're importing unicode_literals value_repr = value_repr[1:] return value_repr, set() # Decimal elif isinstance(value, decimal.Decimal): return repr(value), {"from decimal import Decimal"} # Django fields elif isinstance(value, models.Field): attr_name, path, args, kwargs = value.deconstruct() return cls.serialize_deconstructed(path, args, kwargs) # Classes elif isinstance(value, type): special_cases = [ (models.Model, "models.Model", []), ] for case, string, imports in special_cases: if case is value: return string, set(imports) if hasattr(value, "__module__"): module = value.__module__ if module == six.moves.builtins.__name__: return value.__name__, set() else: return "%s.%s" % (module, value.__name__), {"import %s" % module} elif isinstance(value, models.manager.BaseManager): as_manager, manager_path, qs_path, args, kwargs = value.deconstruct() if as_manager: name, imports = cls._serialize_path(qs_path) return "%s.as_manager()" % name, imports else: return cls.serialize_deconstructed(manager_path, args, kwargs) elif isinstance(value, Operation): string, imports = OperationWriter(value, indentation=0).serialize() # Nested operation, trailing comma is handled in upper OperationWriter._write() return string.rstrip(','), imports # Anything that knows how to deconstruct itself. elif hasattr(value, 'deconstruct'): return cls.serialize_deconstructed(*value.deconstruct()) # Functions elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)): # @classmethod? 
if getattr(value, "__self__", None) and isinstance(value.__self__, type): klass = value.__self__ module = klass.__module__ return "%s.%s.%s" % (module, klass.__name__, value.__name__), {"import %s" % module} # Further error checking if value.__name__ == '<lambda>': raise ValueError("Cannot serialize function: lambda") if value.__module__ is None: raise ValueError("Cannot serialize function %r: No module" % value) # Python 3 is a lot easier, and only uses this branch if it's not local. if getattr(value, "__qualname__", None) and getattr(value, "__module__", None): if "<" not in value.__qualname__: # Qualname can include <locals> return "%s.%s" % (value.__module__, value.__qualname__), {"import %s" % value.__module__} # Python 2/fallback version module_name = value.__module__ # Make sure it's actually there and not an unbound method module = import_module(module_name) if not hasattr(module, value.__name__): raise ValueError( "Could not find function %s in %s.\n" "Please note that due to Python 2 limitations, you cannot " "serialize unbound method functions (e.g. a method " "declared and used in the same class body). Please move " "the function into the main module body to use migrations.\n" "For more information, see " "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values" % (value.__name__, module_name, get_docs_version())) return "%s.%s" % (module_name, value.__name__), {"import %s" % module_name} # Other iterables elif isinstance(value, collections.Iterable): imports = set() strings = [] for item in value: item_string, item_imports = cls.serialize(item) imports.update(item_imports) strings.append(item_string) # When len(strings)==0, the empty iterable should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. 
format = "(%s)" if len(strings) != 1 else "(%s,)" return format % (", ".join(strings)), imports # Compiled regex elif isinstance(value, COMPILED_REGEX_TYPE): imports = {"import re"} regex_pattern, pattern_imports = cls.serialize(value.pattern) regex_flags, flag_imports = cls.serialize(value.flags) imports.update(pattern_imports) imports.update(flag_imports) args = [regex_pattern] if value.flags: args.append(regex_flags) return "re.compile(%s)" % ', '.join(args), imports # Uh oh. else: raise ValueError( "Cannot serialize: %r\nThere are some values Django cannot serialize into " "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/" "topics/migrations/#migration-serializing" % (value, get_docs_version()) ) MIGRATION_TEMPLATE = """\ # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models %(imports)s class Migration(migrations.Migration): %(replaces_str)s dependencies = [ %(dependencies)s\ ] operations = [ %(operations)s\ ] """
SpiriLiao/linux
refs/heads/master
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
12527
# Util.py - Python extension for perf script, miscellaneous utility code # # Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com> # # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. import errno, os FUTEX_WAIT = 0 FUTEX_WAKE = 1 FUTEX_PRIVATE_FLAG = 128 FUTEX_CLOCK_REALTIME = 256 FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) NSECS_PER_SEC = 1000000000 def avg(total, n): return total / n def nsecs(secs, nsecs): return secs * NSECS_PER_SEC + nsecs def nsecs_secs(nsecs): return nsecs / NSECS_PER_SEC def nsecs_nsecs(nsecs): return nsecs % NSECS_PER_SEC def nsecs_str(nsecs): str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)), return str def add_stats(dict, key, value): if not dict.has_key(key): dict[key] = (value, value, value, 1) else: min, max, avg, count = dict[key] if value < min: min = value if value > max: max = value avg = (avg + value) / 2 dict[key] = (min, max, avg, count + 1) def clear_term(): print("\x1b[H\x1b[2J") audit_package_warned = False try: import audit machine_to_id = { 'x86_64': audit.MACH_86_64, 'alpha' : audit.MACH_ALPHA, 'ia64' : audit.MACH_IA64, 'ppc' : audit.MACH_PPC, 'ppc64' : audit.MACH_PPC64, 's390' : audit.MACH_S390, 's390x' : audit.MACH_S390X, 'i386' : audit.MACH_X86, 'i586' : audit.MACH_X86, 'i686' : audit.MACH_X86, } try: machine_to_id['armeb'] = audit.MACH_ARMEB except: pass machine_id = machine_to_id[os.uname()[4]] except: if not audit_package_warned: audit_package_warned = True print "Install the audit-libs-python package to get syscall names" def syscall_name(id): try: return audit.audit_syscall_to_name(id, machine_id) except: return str(id) def strerror(nr): try: return errno.errorcode[abs(nr)] except: return "Unknown %d errno" % nr
Gimpneek/exclusive-raid-gym-tracker
refs/heads/master
app/tests/views/test_remove_tracked_gym.py
1
""" Test the /gym-management/remove/ view """ from django.urls import reverse_lazy from app.models.profile import Profile from app.tests.views.gym_item_common import GymViewCommonCase class TestRemoveTrackedGym(GymViewCommonCase): """ Remove Tracked Gym view tests """ def test_redirects_logged_out_user(self): """ Test that a logged out user is redirected to the Homepage """ resp = self.client.get( reverse_lazy( 'remove_tracked_gym', kwargs={ 'gym_id': self.gym_item.gym.id } ) ) self.assertTrue(str(reverse_lazy('login')) in resp.url) def test_removes_tracked_gym(self): """ Test that if the supplied gym is being tracked that it removes that gym from the list of tracked gyms """ profile = Profile.objects.get(user=self.user) self.client.login(username='test', password='password') resp = self.client.get( reverse_lazy( 'remove_tracked_gym', kwargs={ 'gym_id': self.gym_item.gym.id } ) ) self.assertTrue(str(reverse_lazy('gym_management')) in resp.url) self.assertTrue(self.gym_item.gym not in profile.tracked_gyms.all()) self.assertEqual(profile.tracked_gyms.count(), 0) def test_removing_untracked_gym(self): """ Test that when if there's no tracked gyms that it raises an exception """ profile = Profile.objects.get(user=self.user) profile.tracked_gyms.remove(self.gym_item.gym) self.client.login(username='test', password='password') resp = self.client.get( reverse_lazy( 'remove_tracked_gym', kwargs={ 'gym_id': self.gym_item.gym.id } ) ) self.assertEqual(resp.status_code, 400)
kidmaple/CoolWall
refs/heads/nios2
user/python/Lib/distutils/unixccompiler.py
4
"""distutils.unixccompiler Contains the UnixCCompiler class, a subclass of CCompiler that handles the "typical" Unix-style command-line C compiler: * macros defined with -Dname[=value] * macros undefined with -Uname * include search directories specified with -Idir * libraries specified with -lllib * library search directories specified with -Ldir * compile handled by 'cc' (or similar) executable with -c option: compiles .c to .o * link static library handled by 'ar' command (possibly with 'ranlib') * link shared library handled by 'cc -shared' """ # created 1999/07/05, Greg Ward __revision__ = "$Id: unixccompiler.py,v 1.32 2000/09/27 02:08:14 gward Exp $" import string, re, os from types import * from copy import copy from distutils.dep_util import newer from distutils.ccompiler import \ CCompiler, gen_preprocess_options, gen_lib_options from distutils.errors import \ DistutilsExecError, CompileError, LibError, LinkError # XXX Things not currently handled: # * optimization/debug/warning flags; we just use whatever's in Python's # Makefile and live with it. Is this adequate? If not, we might # have to have a bunch of subclasses GNUCCompiler, SGICCompiler, # SunCCompiler, and I suspect down that road lies madness. # * even if we don't know a warning flag from an optimization flag, # we need some way for outsiders to feed preprocessor/compiler/linker # flags in to us -- eg. a sysadmin might want to mandate certain flags # via a site config file, or a user might want to set something for # compiling this module distribution only via the setup.py command # line, whatever. As long as these options come from something on the # current system, they can be as system-dependent as they like, and we # should just happily stuff them into the preprocessor/compiler/linker # options and carry on. class UnixCCompiler (CCompiler): compiler_type = 'unix' # These are used by CCompiler in two places: the constructor sets # instance attributes 'preprocessor', 'compiler', etc. 
from them, and # 'set_executable()' allows any of these to be set. The defaults here # are pretty generic; they will probably have to be set by an outsider # (eg. using information discovered by the sysconfig about building # Python extensions). executables = {'preprocessor' : None, 'compiler' : ["cc"], 'compiler_so' : ["cc"], 'linker_so' : ["cc", "-shared"], 'linker_exe' : ["cc"], 'archiver' : ["ar", "-cr"], 'ranlib' : None, } # Needed for the filename generation methods provided by the base # class, CCompiler. NB. whoever instantiates/uses a particular # UnixCCompiler instance should set 'shared_lib_ext' -- we set a # reasonable common default here, but it's not necessarily used on all # Unices! src_extensions = [".c",".C",".cc",".cxx",".cpp"] obj_extension = ".o" static_lib_extension = ".a" shared_lib_extension = ".so" static_lib_format = shared_lib_format = "lib%s%s" def __init__ (self, verbose=0, dry_run=0, force=0): CCompiler.__init__ (self, verbose, dry_run, force) def preprocess (self, source, output_file=None, macros=None, include_dirs=None, extra_preargs=None, extra_postargs=None): (_, macros, include_dirs) = \ self._fix_compile_args(None, macros, include_dirs) pp_opts = gen_preprocess_options(macros, include_dirs) pp_args = self.preprocessor + pp_opts if output_file: pp_args.extend(['-o', output_file]) if extra_preargs: pp_args[:0] = extra_preargs if extra_postargs: extra_postargs.extend(extra_postargs) # We need to preprocess: either we're being forced to, or the # source file is newer than the target (or the target doesn't # exist). 
if self.force or (output_file and newer(source, output_file)): if output_file: self.mkpath(os.path.dirname(output_file)) try: self.spawn(pp_args) except DistutilsExecError, msg: raise CompileError, msg def compile (self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None): (output_dir, macros, include_dirs) = \ self._fix_compile_args(output_dir, macros, include_dirs) (objects, skip_sources) = self._prep_compile(sources, output_dir) # Figure out the options for the compiler command line. pp_opts = gen_preprocess_options(macros, include_dirs) cc_args = pp_opts + ['-c'] if debug: cc_args[:0] = ['-g'] if extra_preargs: cc_args[:0] = extra_preargs if extra_postargs is None: extra_postargs = [] # Compile all source files that weren't eliminated by # '_prep_compile()'. for i in range(len(sources)): src = sources[i] ; obj = objects[i] if skip_sources[src]: self.announce("skipping %s (%s up-to-date)" % (src, obj)) else: self.mkpath(os.path.dirname(obj)) try: self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs) except DistutilsExecError, msg: raise CompileError, msg # Return *all* object filenames, not just the ones we just built. return objects # compile () def create_static_lib (self, objects, output_libname, output_dir=None, debug=0): (objects, output_dir) = self._fix_object_args(objects, output_dir) output_filename = \ self.library_filename(output_libname, output_dir=output_dir) if self._need_link(objects, output_filename): self.mkpath(os.path.dirname(output_filename)) self.spawn(self.archiver + [output_filename] + objects + self.objects) # Not many Unices required ranlib anymore -- SunOS 4.x is, I # think the only major Unix that does. Maybe we need some # platform intelligence here to skip ranlib if it's not # needed -- or maybe Python's configure script took care of # it for us, hence the check for leading colon. 
if self.ranlib: try: self.spawn(self.ranlib + [output_filename]) except DistutilsExecError, msg: raise LibError, msg else: self.announce("skipping %s (up-to-date)" % output_filename) # create_static_lib () def link (self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None): (objects, output_dir) = self._fix_object_args(objects, output_dir) (libraries, library_dirs, runtime_library_dirs) = \ self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) if type(output_dir) not in (StringType, NoneType): raise TypeError, "'output_dir' must be a string or None" if output_dir is not None: output_filename = os.path.join(output_dir, output_filename) if self._need_link(objects, output_filename): ld_args = (objects + self.objects + lib_opts + ['-o', output_filename]) if debug: ld_args[:0] = ['-g'] if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) self.mkpath(os.path.dirname(output_filename)) try: if target_desc == CCompiler.EXECUTABLE: self.spawn(self.linker_exe + ld_args) else: self.spawn(self.linker_so + ld_args) except DistutilsExecError, msg: raise LinkError, msg else: self.announce("skipping %s (up-to-date)" % output_filename) # link () # -- Miscellaneous methods ----------------------------------------- # These are all used by the 'gen_lib_options() function, in # ccompiler.py. 
def library_dir_option (self, dir): return "-L" + dir def runtime_library_dir_option (self, dir): return "-R" + dir def library_option (self, lib): return "-l" + lib def find_library_file (self, dirs, lib, debug=0): for dir in dirs: shared = os.path.join( dir, self.library_filename(lib, lib_type='shared')) static = os.path.join( dir, self.library_filename(lib, lib_type='static')) # We're second-guessing the linker here, with not much hard # data to go on: GCC seems to prefer the shared library, so I'm # assuming that *all* Unix C compilers do. And of course I'm # ignoring even GCC's "-static" option. So sue me. if os.path.exists(shared): return shared elif os.path.exists(static): return static else: # Oops, didn't find it in *any* of 'dirs' return None # find_library_file () # class UnixCCompiler
romain-dartigues/ansible
refs/heads/devel
lib/ansible/modules/network/f5/bigip_cli_script.py
8
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_cli_script short_description: Manage CLI scripts on a BIG-IP description: - Manages CLI scripts on a BIG-IP. CLI scripts, otherwise known as tmshell scripts or TMSH scripts allow you to create custom scripts that can run to manage objects within a BIG-IP. version_added: 2.7 options: name: description: - Specifies the name of the script. required: True content: description: - The content of the script. - This parameter is typically used in conjunction with Ansible's C(file), or template lookup plugins. If this sounds foreign to you, see the examples in this documentation. description: description: - Description of the cli script. partition: description: - Device partition to manage resources on. default: Common state: description: - When C(present), ensures that the script exists. - When C(absent), ensures the script is removed. default: present choices: - present - absent extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Create a cli script from an existing file bigip_cli_script: name: foo content: "{{ lookup('file', '/absolute/path/to/cli/script.tcl') }}" provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost - name: Create a cli script from a jinja template representing a cli script bigip_cli_script: name: foo content: "{{ lookup('template', '/absolute/path/to/cli/script.tcl') }}" provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost ''' RETURN = r''' param1: description: The new param1 value of the resource. 
returned: changed type: bool sample: true param2: description: The new param2 value of the resource. returned: changed type: string sample: Foo is bar ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import exit_json from library.module_utils.network.f5.common import fail_json from library.module_utils.network.f5.common import transform_name except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import exit_json from ansible.module_utils.network.f5.common import fail_json from ansible.module_utils.network.f5.common import transform_name class Parameters(AnsibleF5Parameters): api_map = { 'apiAnonymous': 'content', 'scriptChecksum': 'checksum', } api_attributes = [ 'apiAnonymous', 'description', ] returnables = [ 'description', 'content', ] updatables = [ 'description', 'content', ] class ApiParameters(Parameters): @property def ignore_verification(self): return "true" @property def content(self): return self._values['content'].strip() class ModuleParameters(Parameters): @property def ignore_verification(self): return "true" @property def content(self): if self._values['content'] is None: return None return self._values['content'].strip() @property def description(self): if 
self._values['description'] is None: return None elif self._values['description'] in ['none', '']: return '' return self._values['description'] class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): pass class ReportableChanges(Changes): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def content(self): if self.want.content is None: return None if self.have.content is None: return self.want.content if self.want.content != self.have.content: return self.want.content @property def description(self): if self.want.description is None: return None if self.have.description is None and self.want.description == '': return None if self.want.description != self.have.description: return self.want.description class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): 
changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError: return False if resp.status == 404 or 'code' in response and response['code'] == 404: return False return True def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True # Update any missing params # # The cli/script API is kinda weird in that it wont let us individually # PATCH the description. We appear to need to include the content otherwise # we get errors about us trying to replace procs that are needed by other # scripts, ie, the script we're trying to update. 
params = self.changes.api_params() if 'description' in params and 'content' not in params: self.changes.update({'content': self.have.content}) if 'content' in params and 'description' not in params: self.changes.update({'description': self.have.description}) self.update_on_device() return True def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") return True def create(self): self._set_changed_options() if self.module.check_mode: return True self.create_on_device() return True def create_on_device(self): params = self.changes.api_params() params['name'] = self.want.name params['partition'] = self.want.partition uri = "https://{0}:{1}/mgmt/tm/cli/script/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def update_on_device(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.delete(uri) if resp.status == 200: 
return True def read_current_from_device(self): # lgtm [py/similar-function] uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), description=dict(), content=dict(), state=dict( default='present', choices=['present', 'absent'] ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, ) client = F5RestClient(**module.params) try: mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) exit_json(module, results, client) except F5ModuleError as ex: cleanup_tokens(client) fail_json(module, ex, client) if __name__ == '__main__': main()
eckucukoglu/arm-linux-gnueabihf
refs/heads/master
arm-linux-gnueabihf/libc/usr/lib/python2.7/test/test_getopt.py
134
# test_getopt.py
# David Goodger <dgoodger@bigfoot.com> 2000-08-19

from test.test_support import verbose, run_doctest, run_unittest, EnvironmentVarGuard
import unittest

import getopt

sentinel = object()


class GetoptTests(unittest.TestCase):
    """Unit tests for the getopt module (short/long option parsing)."""

    def setUp(self):
        # POSIXLY_CORRECT changes gnu_getopt behavior; start without it.
        self.env = EnvironmentVarGuard()
        if "POSIXLY_CORRECT" in self.env:
            del self.env["POSIXLY_CORRECT"]

    def tearDown(self):
        self.env.__exit__()
        del self.env

    def assertError(self, *args, **kwargs):
        self.assertRaises(getopt.GetoptError, *args, **kwargs)

    def test_short_has_arg(self):
        self.assertTrue(getopt.short_has_arg('a', 'a:'))
        self.assertFalse(getopt.short_has_arg('a', 'a'))
        self.assertError(getopt.short_has_arg, 'a', 'b')

    def test_long_has_args(self):
        has_arg, option = getopt.long_has_args('abc', ['abc='])
        self.assertTrue(has_arg)
        self.assertEqual(option, 'abc')

        has_arg, option = getopt.long_has_args('abc', ['abc'])
        self.assertFalse(has_arg)
        self.assertEqual(option, 'abc')

        # Unambiguous prefix matching is allowed.
        has_arg, option = getopt.long_has_args('abc', ['abcd'])
        self.assertFalse(has_arg)
        self.assertEqual(option, 'abcd')

        self.assertError(getopt.long_has_args, 'abc', ['def'])
        self.assertError(getopt.long_has_args, 'abc', [])
        # Ambiguous prefix is an error.
        self.assertError(getopt.long_has_args, 'abc', ['abcd', 'abcde'])

    def test_do_shorts(self):
        opts, args = getopt.do_shorts([], 'a', 'a', [])
        self.assertEqual(opts, [('-a', '')])
        self.assertEqual(args, [])

        opts, args = getopt.do_shorts([], 'a1', 'a:', [])
        self.assertEqual(opts, [('-a', '1')])
        self.assertEqual(args, [])

        #opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
        #self.assertEqual(opts, [('-a', '1')])
        #self.assertEqual(args, [])

        opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
        self.assertEqual(opts, [('-a', '1')])
        self.assertEqual(args, [])

        opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
        self.assertEqual(opts, [('-a', '1')])
        self.assertEqual(args, ['2'])

        self.assertError(getopt.do_shorts, [], 'a1', 'a', [])
        self.assertError(getopt.do_shorts, [], 'a', 'a:', [])

    def test_do_longs(self):
        opts, args = getopt.do_longs([], 'abc', ['abc'], [])
        self.assertEqual(opts, [('--abc', '')])
        self.assertEqual(args, [])

        opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
        self.assertEqual(opts, [('--abc', '1')])
        self.assertEqual(args, [])

        opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
        self.assertEqual(opts, [('--abcd', '1')])
        self.assertEqual(args, [])

        opts, args = getopt.do_longs([], 'abc', ['ab', 'abc', 'abcd'], [])
        self.assertEqual(opts, [('--abc', '')])
        self.assertEqual(args, [])

        # Much like the preceding, except with a non-alpha character ("-") in
        # option name that precedes "="; failed in
        # http://python.org/sf/126863
        opts, args = getopt.do_longs([], 'foo=42', ['foo-bar', 'foo=', ], [])
        self.assertEqual(opts, [('--foo', '42')])
        self.assertEqual(args, [])

        self.assertError(getopt.do_longs, [], 'abc=1', ['abc'], [])
        self.assertError(getopt.do_longs, [], 'abc', ['abc='], [])

    def test_getopt(self):
        # note: the empty string between '-a' and '--beta' is significant:
        # it simulates an empty string option argument ('-a ""') on the
        # command line.
        cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a',
                   '', '--beta', 'arg1', 'arg2']

        opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
        self.assertEqual(opts, [('-a', '1'), ('-b', ''),
                                ('--alpha', '2'), ('--beta', ''),
                                ('-a', '3'), ('-a', ''), ('--beta', '')])
        # Note ambiguity of ('-b', '') and ('-a', '') above. This must be
        # accounted for in the code that calls getopt().
        self.assertEqual(args, ['arg1', 'arg2'])

        self.assertError(getopt.getopt, cmdline, 'a:b', ['alpha', 'beta'])

    def test_gnu_getopt(self):
        # Test handling of GNU style scanning mode.
        cmdline = ['-a', 'arg1', '-b', '1', '--alpha', '--beta=2']

        # GNU style
        opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
        self.assertEqual(args, ['arg1'])
        self.assertEqual(opts, [('-a', ''), ('-b', '1'),
                                ('--alpha', ''), ('--beta', '2')])

        # recognize "-" as an argument
        opts, args = getopt.gnu_getopt(['-a', '-', '-b', '-'], 'ab:', [])
        self.assertEqual(args, ['-'])
        self.assertEqual(opts, [('-a', ''), ('-b', '-')])

        # Posix style via +
        opts, args = getopt.gnu_getopt(cmdline, '+ab:', ['alpha', 'beta='])
        self.assertEqual(opts, [('-a', '')])
        self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])

        # Posix style via POSIXLY_CORRECT
        self.env["POSIXLY_CORRECT"] = "1"
        opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
        self.assertEqual(opts, [('-a', '')])
        self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])

    def test_libref_examples(self):
        s = """
        Examples from the Library Reference:  Doc/lib/libgetopt.tex

        An example using only Unix style options:


        >>> import getopt
        >>> args = '-a -b -cfoo -d bar a1 a2'.split()
        >>> args
        ['-a', '-b', '-cfoo', '-d', 'bar', 'a1', 'a2']
        >>> optlist, args = getopt.getopt(args, 'abc:d:')
        >>> optlist
        [('-a', ''), ('-b', ''), ('-c', 'foo'), ('-d', 'bar')]
        >>> args
        ['a1', 'a2']

        Using long option names is equally easy:


        >>> s = '--condition=foo --testing --output-file abc.def -x a1 a2'
        >>> args = s.split()
        >>> args
        ['--condition=foo', '--testing', '--output-file', 'abc.def', '-x', 'a1', 'a2']
        >>> optlist, args = getopt.getopt(args, 'x', [
        ...     'condition=', 'output-file=', 'testing'])
        >>> optlist
        [('--condition', 'foo'), ('--testing', ''), ('--output-file', 'abc.def'), ('-x', '')]
        >>> args
        ['a1', 'a2']
        """

        import types
        m = types.ModuleType("libreftest", s)
        run_doctest(m, verbose)

    def test_issue4629(self):
        longopts, shortopts = getopt.getopt(['--help='], '', ['help='])
        self.assertEqual(longopts, [('--help', '')])
        longopts, shortopts = getopt.getopt(['--help=x'], '', ['help='])
        self.assertEqual(longopts, [('--help', 'x')])
        self.assertRaises(getopt.GetoptError, getopt.getopt, ['--help='], '',
                          ['help'])


def test_main():
    run_unittest(GetoptTests)


if __name__ == "__main__":
    test_main()
mathspace/python-social-auth
refs/heads/master
social/backends/foursquare.py
83
""" Foursquare OAuth2 backend, docs at: http://psa.matiasaguirre.net/docs/backends/foursquare.html """ from social.backends.oauth import BaseOAuth2 class FoursquareOAuth2(BaseOAuth2): name = 'foursquare' AUTHORIZATION_URL = 'https://foursquare.com/oauth2/authenticate' ACCESS_TOKEN_URL = 'https://foursquare.com/oauth2/access_token' ACCESS_TOKEN_METHOD = 'POST' API_VERSION = '20140128' def get_user_id(self, details, response): return response['response']['user']['id'] def get_user_details(self, response): """Return user details from Foursquare account""" info = response['response']['user'] email = info['contact']['email'] fullname, first_name, last_name = self.get_user_names( first_name=info.get('firstName', ''), last_name=info.get('lastName', '') ) return {'username': first_name + ' ' + last_name, 'fullname': fullname, 'first_name': first_name, 'last_name': last_name, 'email': email} def user_data(self, access_token, *args, **kwargs): """Loads user data from service""" return self.get_json('https://api.foursquare.com/v2/users/self', params={'oauth_token': access_token, 'v': self.API_VERSION})
scotthartbti/android_external_chromium_org
refs/heads/kk44
chrome/common/extensions/docs/server2/url_constants.py
24
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# URL endpoints used by the extensions docs server.

# GitHub API root and the chrome-app-samples repository views.
GITHUB_REPOS = 'https://api.github.com/repos'
GITHUB_BASE = 'https://github.com/GoogleChrome/chrome-app-samples/tree/master'
RAW_GITHUB_BASE = ('https://github.com/GoogleChrome/chrome-app-samples/raw/'
                   'master')

# Omahaproxy endpoints for Chrome version/channel information.
OMAHA_PROXY_URL = 'http://omahaproxy.appspot.com/json'
OMAHA_DEV_HISTORY = ('http://omahaproxy.appspot.com/history?channel=dev'
                     '&os=win&json=1')

# Chromium source browsing endpoints.
SVN_URL = 'http://src.chromium.org/chrome'
VIEWVC_URL = 'http://src.chromium.org/viewvc/chrome'
EXTENSIONS_SAMPLES = ('http://src.chromium.org/viewvc/chrome/trunk/src/chrome/'
                      'common/extensions/docs/examples')

# Code review server.
CODEREVIEW_SERVER = 'https://codereview.chromium.org'
ressu/SickGear
refs/heads/master
lib/dateutil/zoneinfo/__init__.py
83
# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2005  Gustavo Niemeyer <gustavo@niemeyer.net>

This module offers extensions to the standard Python
datetime module.
"""
import logging
import os
from subprocess import call
from tarfile import TarFile

from dateutil.tz import tzfile

__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"

__all__ = ["setcachesize", "gettz", "rebuild"]

# LRU-style cache of (zone name, tzinfo) pairs, most recent first.
CACHE = []
CACHESIZE = 10


class tzfile(tzfile):
    # Make instances picklable by re-resolving through gettz().
    def __reduce__(self):
        return (gettz, (self._filename,))


def getzoneinfofile():
    """Return the path of the bundled zoneinfo tarball, or None."""
    filenames = sorted(os.listdir(os.path.join(os.path.dirname(__file__))))
    filenames.reverse()
    for entry in filenames:
        if entry.startswith("zoneinfo") and ".tar." in entry:
            return os.path.join(os.path.dirname(__file__), entry)
    return None

ZONEINFOFILE = getzoneinfofile()

del getzoneinfofile


def setcachesize(size):
    """Set the maximum number of cached timezone objects."""
    global CACHESIZE, CACHE
    CACHESIZE = size
    del CACHE[size:]


def gettz(name):
    """Return a tzinfo for *name* from the bundled tarball, or None."""
    tzinfo = None
    if ZONEINFOFILE:
        for cachedname, tzinfo in CACHE:
            if cachedname == name:
                break
        else:
            tf = TarFile.open(ZONEINFOFILE)
            try:
                zonefile = tf.extractfile(name)
            except KeyError:
                tzinfo = None
            else:
                tzinfo = tzfile(zonefile)
            tf.close()
            CACHE.insert(0, (name, tzinfo))
            del CACHE[CACHESIZE:]
    return tzinfo


def rebuild(filename, tag=None, format="gz"):
    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*

    filename is the timezone tarball from ftp.iana.org/tz.

    """
    import tempfile, shutil
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    # BUGFIX: previously a missing tag produced the literal filename
    # "zoneinfoNone.tar.gz" ("%s" % None); normalize to empty string so the
    # default target is "zoneinfo.tar.gz".
    if tag:
        tag = "-" + tag
    else:
        tag = ""
    targetname = "zoneinfo%s.tar.%s" % (tag, format)
    try:
        tf = TarFile.open(filename)
        # The "backwards" zone file contains links to other files, so must be
        # processed as last
        for name in sorted(tf.getnames(),
                           key=lambda k: k != "backward" and k or "z"):
            if not (name.endswith(".sh") or
                    name.endswith(".tab") or
                    name == "leapseconds"):
                tf.extract(name, tmpdir)
                filepath = os.path.join(tmpdir, name)
                try:
                    # zic will return errors for nontz files in the package
                    # such as the Makefile or README, so check_call cannot
                    # be used (or at least extra checks would be needed)
                    call(["zic", "-d", zonedir, filepath])
                except OSError as e:
                    if e.errno == 2:
                        logging.error(
                            "Could not find zic. Perhaps you need to install "
                            "libc-bin or some other package that provides it, "
                            "or it's not in your PATH?")
                    raise
        tf.close()
        target = os.path.join(moduledir, targetname)
        # Drop any previously built tarballs before writing the new one.
        for entry in os.listdir(moduledir):
            if entry.startswith("zoneinfo") and ".tar." in entry:
                os.unlink(os.path.join(moduledir, entry))
        tf = TarFile.open(target, "w:%s" % format)
        for entry in os.listdir(zonedir):
            entrypath = os.path.join(zonedir, entry)
            tf.add(entrypath, entry)
        tf.close()
    finally:
        shutil.rmtree(tmpdir)
JosmanPS/scikit-learn
refs/heads/master
sklearn/feature_selection/rfe.py
137
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Vincent Michel <vincent.michel@inria.fr>
#          Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause

"""Recursive feature elimination for feature ranking"""

import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin


class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
    """Feature ranking with recursive feature elimination.

    Repeatedly fits ``estimator`` and prunes the features with the
    smallest absolute weights (``coef_`` or ``feature_importances_``)
    until only ``n_features_to_select`` remain.

    Read more in the :ref:`User Guide <rfe>`.

    Parameters
    ----------
    estimator : object
        A supervised learning estimator exposing ``coef_`` or
        ``feature_importances_`` after ``fit``.

    n_features_to_select : int or None (default=None)
        Number of features to keep; ``None`` keeps half of them.

    step : int or float, optional (default=1)
        If >= 1, the (integer) number of features removed per iteration;
        if in (0.0, 1.0), the fraction (rounded down) removed per iteration.

    estimator_params : dict
        Parameters for the external estimator.  Deprecated since 0.16,
        removed in 0.18; configure the estimator directly instead.

    verbose : int, default=0
        Controls verbosity of output.

    Attributes
    ----------
    n_features_ : int
        Number of selected features.

    support_ : array of shape [n_features]
        Boolean mask of selected features.

    ranking_ : array of shape [n_features]
        Feature ranking; selected features have rank 1.

    estimator_ : object
        The external estimator fit on the reduced dataset.

    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, n_features_to_select=None, step=1,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.n_features_to_select = n_features_to_select
        self.step = step
        self.estimator_params = estimator_params
        self.verbose = verbose

    @property
    def _estimator_type(self):
        return self.estimator._estimator_type

    def fit(self, X, y):
        """Fit the RFE model and then the underlying estimator on the
        selected features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples]
            The target values.
        """
        return self._fit(X, y)

    def _fit(self, X, y, step_score=None):
        # step_score(estimator, features) is only supplied by RFECV; when
        # given, a score is recorded at every elimination step.
        X, y = check_X_y(X, y, "csc")

        n_features = X.shape[1]
        if self.n_features_to_select is None:
            n_features_to_select = n_features / 2
        else:
            n_features_to_select = self.n_features_to_select

        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")

        if self.estimator_params is not None:
            warnings.warn("The parameter 'estimator_params' is deprecated as "
                          "of version 0.16 and will be removed in 0.18. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)

        support_ = np.ones(n_features, dtype=np.bool)
        ranking_ = np.ones(n_features, dtype=np.int)

        if step_score:
            self.scores_ = []

        # Eliminate features until the target count is reached.
        while np.sum(support_) > n_features_to_select:
            # Indices of the features still in play.
            features = np.arange(n_features)[support_]

            # Fit a fresh clone on the surviving features.
            estimator = clone(self.estimator)
            if self.estimator_params:
                estimator.set_params(**self.estimator_params)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_))
            estimator.fit(X[:, features], y)

            # Extract importance weights.
            if hasattr(estimator, 'coef_'):
                coefs = estimator.coef_
            elif hasattr(estimator, 'feature_importances_'):
                coefs = estimator.feature_importances_
            else:
                raise RuntimeError('The classifier does not expose '
                                   '"coef_" or "feature_importances_" '
                                   'attributes')

            # Rank features by squared weight (summed over classes if 2D).
            if coefs.ndim > 1:
                ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
            else:
                ranks = np.argsort(safe_sqr(coefs))

            # for sparse case ranks is matrix
            ranks = np.ravel(ranks)

            # Never drop below the requested number of features.
            threshold = min(step, np.sum(support_) - n_features_to_select)

            # Compute step score on the previous selection iteration
            # because 'estimator' must use features
            # that have not been eliminated yet
            if step_score:
                self.scores_.append(step_score(estimator, features))
            support_[features[ranks][:threshold]] = False
            ranking_[np.logical_not(support_)] += 1

        # Final refit on the selected subset.
        features = np.arange(n_features)[support_]
        self.estimator_ = clone(self.estimator)
        if self.estimator_params:
            self.estimator_.set_params(**self.estimator_params)
        self.estimator_.fit(X[:, features], y)

        # Compute step score when only n_features_to_select features left
        if step_score:
            self.scores_.append(step_score(self.estimator_, features))
        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_
        return self

    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Reduce X to the selected features and predict with the
        underlying estimator.
        """
        return self.estimator_.predict(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def score(self, X, y):
        """Reduce X to the selected features and return the score of the
        underlying estimator.
        """
        return self.estimator_.score(self.transform(X), y)

    def _get_support_mask(self):
        return self.support_

    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        return self.estimator_.decision_function(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        return self.estimator_.predict_proba(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        return self.estimator_.predict_log_proba(self.transform(X))


class RFECV(RFE, MetaEstimatorMixin):
    """Feature ranking with recursive feature elimination and
    cross-validated selection of the best number of features.

    Read more in the :ref:`User Guide <rfe>`.

    Parameters
    ----------
    estimator : object
        A supervised learning estimator exposing ``coef_`` or
        ``feature_importances_`` after ``fit``.

    step : int or float, optional (default=1)
        If >= 1, the (integer) number of features removed per iteration;
        if in (0.0, 1.0), the fraction (rounded down) removed per iteration.

    cv : int or cross-validation generator, optional (default=None)
        If int, the number of folds; ``None`` means 3-fold CV.  Specific
        cross-validation objects can also be passed, see
        `sklearn.cross_validation module` for details.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or a scorer callable
        with signature ``scorer(estimator, X, y)``.

    estimator_params : dict
        Parameters for the external estimator.  Deprecated since 0.16,
        removed in 0.18; configure the estimator directly instead.

    verbose : int, default=0
        Controls verbosity of output.

    Attributes
    ----------
    n_features_ : int
        Number of selected features (chosen by cross-validation).

    support_ : array of shape [n_features]
        Boolean mask of selected features.

    ranking_ : array of shape [n_features]
        Feature ranking; selected features have rank 1.

    grid_scores_ : array of shape [n_subsets_of_features]
        ``grid_scores_[i]`` is the CV score of the i-th feature subset.

    estimator_ : object
        The external estimator fit on the reduced dataset.

    Notes
    -----
    The size of ``grid_scores_`` is equal to
    ceil((n_features - 1) / step) + 1, where step is the number of features
    removed at each iteration.

    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """
    def __init__(self, estimator, step=1, cv=None, scoring=None,
                 estimator_params=None, verbose=0):
        self.estimator = estimator
        self.step = step
        self.cv = cv
        self.scoring = scoring
        self.estimator_params = estimator_params
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the RFE model and automatically tune the number of
        selected features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the total number of features.

        y : array-like, shape = [n_samples]
            Target values (integers for classification, real numbers for
            regression).
        """
        X, y = check_X_y(X, y, "csr")
        if self.estimator_params is not None:
            warnings.warn("The parameter 'estimator_params' is deprecated as "
                          "of version 0.16 and will be removed in 0.18. "
                          "The parameter is no longer necessary because the "
                          "value is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)

        cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
        scorer = check_scoring(self.estimator, scoring=self.scoring)
        n_features = X.shape[1]
        n_features_to_select = 1

        # Accumulated per-step scores, summed over the CV folds.
        scores = []

        for n, (train, test) in enumerate(cv):
            X_train, y_train = _safe_split(self.estimator, X, y, train)
            X_test, y_test = _safe_split(self.estimator, X, y, test, train)

            rfe = RFE(estimator=self.estimator,
                      n_features_to_select=n_features_to_select,
                      step=self.step, estimator_params=self.estimator_params,
                      verbose=self.verbose - 1)

            rfe._fit(X_train, y_train, lambda estimator, features:
                     _score(estimator, X_test[:, features], y_test, scorer))
            scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
        scores = np.sum(np.concatenate(scores, 0), 0)

        # The index in 'scores' when 'n_features' features are selected
        n_feature_index = np.ceil((n_features - n_features_to_select) /
                                  float(self.step))
        n_features_to_select = max(n_features_to_select,
                                   n_features -
                                   ((n_feature_index -
                                     np.argmax(scores)) *
                                    self.step))

        # Re-execute an elimination with best_k over the whole set
        rfe = RFE(estimator=self.estimator,
                  n_features_to_select=n_features_to_select,
                  step=self.step, estimator_params=self.estimator_params)

        rfe.fit(X, y)

        # Set final attributes
        self.support_ = rfe.support_
        self.n_features_ = rfe.n_features_
        self.ranking_ = rfe.ranking_
        self.estimator_ = clone(self.estimator)
        if self.estimator_params:
            self.estimator_.set_params(**self.estimator_params)
        self.estimator_.fit(self.transform(X), y)

        # Fixing a normalization error, n is equal to len(cv) - 1
        # here, the scores are normalized by len(cv)
        self.grid_scores_ = scores / len(cv)
        return self
paulmueller/ODTbrain
refs/heads/master
odtbrain/_prepare_sino.py
2
"""Sinogram preparation""" import numpy as np from scipy.stats import mode from skimage.restoration import unwrap_phase def align_unwrapped(sino): """Align an unwrapped phase array to zero-phase All operations are performed in-place. """ samples = [] if len(sino.shape) == 2: # 2D # take 1D samples at beginning and end of array samples.append(sino[:, 0]) samples.append(sino[:, 1]) samples.append(sino[:, 2]) samples.append(sino[:, -1]) samples.append(sino[:, -2]) elif len(sino.shape) == 3: # 3D # take 1D samples at beginning and end of array samples.append(sino[:, 0, 0]) samples.append(sino[:, 0, -1]) samples.append(sino[:, -1, 0]) samples.append(sino[:, -1, -1]) samples.append(sino[:, 0, 1]) # find discontinuities in the samples steps = np.zeros((len(samples), samples[0].shape[0])) for i in range(len(samples)): t = np.unwrap(samples[i]) steps[i] = samples[i] - t # if the majority believes so, add a step of PI remove = mode(steps, axis=0)[0][0] # obtain divmod min twopi = 2*np.pi minimum = divmod_neg(np.min(sino), twopi)[0] remove += minimum*twopi for i in range(len(sino)): sino[i] -= remove[i] def divmod_neg(a, b): """Return divmod with closest result to zero""" q, r = divmod(a, b) # make sure r is close to zero sr = np.sign(r) if np.abs(r) > b/2: q += sr r -= b * sr return q, r def sinogram_as_radon(uSin, align=True): r"""Compute the phase from a complex wave field sinogram This step is essential when using the ray approximation before computation of the refractive index with the inverse Radon transform. Parameters ---------- uSin: 2d or 3d complex ndarray The background-corrected sinogram of the complex scattered wave :math:`u(\mathbf{r})/u_0(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. align: bool Tries to correct for a phase offset in the phase sinogram. Returns ------- phase: 2d or 3d real ndarray The unwrapped phase array corresponding to `uSin`. 
See Also -------- skimage.restoration.unwrap_phase: phase unwrapping radontea.backproject_3d: e.g. reconstruction via backprojection """ ndims = len(uSin.shape) if ndims == 2: # unwrapping is very important phiR = np.unwrap(np.angle(uSin), axis=-1) else: # Unwrap gets the dimension of the problem from the input # data. Since we have a sinogram, we need to pass it the # slices one by one. phiR = np.angle(uSin) for ii in range(len(phiR)): phiR[ii] = unwrap_phase(phiR[ii], seed=47) if align: align_unwrapped(phiR) return phiR def sinogram_as_rytov(uSin, u0=1, align=True): r"""Convert the complex wave field sinogram to the Rytov phase This method applies the Rytov approximation to the recorded complex wave sinogram. To achieve this, the following filter is applied: .. math:: u_\mathrm{B}(\mathbf{r}) = u_\mathrm{0}(\mathbf{r}) \ln\!\left( \frac{u_\mathrm{R}(\mathbf{r})}{u_\mathrm{0}(\mathbf{r})} +1 \right) This filter step effectively replaces the Born approximation :math:`u_\mathrm{B}(\mathbf{r})` with the Rytov approximation :math:`u_\mathrm{R}(\mathbf{r})`, assuming that the scattered field is equal to :math:`u(\mathbf{r})\approx u_\mathrm{R}(\mathbf{r})+ u_\mathrm{0}(\mathbf{r})`. Parameters ---------- uSin: 2d or 3d complex ndarray The sinogram of the complex wave :math:`u_\mathrm{R}(\mathbf{r}) + u_\mathrm{0}(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. u0: ndarray of dimension as `uSin` or less, or int. The incident plane wave :math:`u_\mathrm{0}(\mathbf{r})` at the detector. If `u0` is "1", it is assumed that the data is already background-corrected ( `uSin` :math:`= \frac{u_\mathrm{R}(\mathbf{r})}{ u_\mathrm{0}(\mathbf{r})} + 1` ). Note that if the reconstruction distance :math:`l_\mathrm{D}` of the original experiment is non-zero and `u0` is set to 1, then the reconstruction will be wrong; the field is not focused to the center of the reconstruction volume. align: bool Tries to correct for a phase offset in the phase sinogram. 
Returns ------- uB: 2d or 3d real ndarray The Rytov-filtered complex sinogram :math:`u_\mathrm{B}(\mathbf{r})`. See Also -------- skimage.restoration.unwrap_phase: phase unwrapping """ ndims = len(uSin.shape) # imaginary part of the complex Rytov phase phiR = np.angle(uSin / u0) # real part of the complex Rytov phase lna = np.log(np.absolute(uSin / u0)) if ndims == 2: # unwrapping is very important phiR[:] = np.unwrap(phiR, axis=-1) else: # Unwrap gets the dimension of the problem from the input # data. Since we have a sinogram, we need to pass it the # slices one by one. for ii in range(len(phiR)): phiR[ii] = unwrap_phase(phiR[ii], seed=47) if align: align_unwrapped(phiR) # rytovSin = u0*(np.log(a/a0) + 1j*phiR) # u0 is one - we already did background correction # complex rytov phase: rytovSin = 1j * phiR + lna return u0 * rytovSin
zaina/nova
refs/heads/master
nova/hacking/checks.py
8
# Copyright (c) 2012, Cloudscaling # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import re import pep8 """ Guidelines for writing new hacking checks - Use only for Nova specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range N3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the N3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to nova/tests/unit/test_hacking.py """ UNDERSCORE_IMPORT_FILES = [] session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]") cfg_re = re.compile(r".*\scfg\.") vi_header_re = re.compile(r"^#\s+vim?:.+") virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/") virt_import_re = re.compile( r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)") virt_config_re = re.compile( r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)") asse_trueinst_re = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " "(\w|\.|\'|\"|\[|\])+\)\)") asse_equal_type_re = re.compile( r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " "(\w|\.|\'|\"|\[|\])+\)") asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\(" r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)") asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\(" r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)") 
asse_equal_end_with_none_re = re.compile( r"assertEqual\(.*?,\s+None\)$") asse_equal_start_with_none_re = re.compile( r"assertEqual\(None,") # NOTE(snikitin): Next two regexes weren't united to one for more readability. # asse_true_false_with_in_or_not_in regex checks # assertTrue/False(A in B) cases where B argument has no spaces # asse_true_false_with_in_or_not_in_spaces regex checks cases # where B argument has spaces and starts/ends with [, ', ". # For example: [1, 2, 3], "some string", 'another string'. # We have to separate these regexes to escape a false positives # results. B argument should have spaces only if it starts # with [, ", '. Otherwise checking of string # "assertFalse(A in B and C in D)" will be false positives. # In this case B argument is "B and C in D". asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\(" r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)") asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)" r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+" r"[\[|'|\"](, .*)?\)") asse_raises_regexp = re.compile(r"assertRaisesRegexp\(") conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w") log_translation = re.compile( r"(.)*LOG\.(audit|error|critical)\(\s*('|\")") log_translation_info = re.compile( r"(.)*LOG\.(info)\(\s*(_\(|'|\")") log_translation_exception = re.compile( r"(.)*LOG\.(exception)\(\s*(_\(|'|\")") log_translation_LW = re.compile( r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")") translated_log = re.compile( r"(.)*LOG\.(audit|error|info|critical|exception)" "\(\s*_\(\s*('|\")") mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") string_translation = re.compile(r"[^_]*_\(\s*('|\")") underscore_import_check = re.compile(r"(.)*import _(.)*") import_translation_for_log_or_exception = re.compile( r"(.)*(from\snova.i18n\simport)\s_") # We need this for cases where they have created their own _ function. 
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") api_version_re = re.compile(r"@.*api_version") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") decorator_re = re.compile(r"@.*") http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(") class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ def __init__(self, tree, filename): """This object is created automatically by pep8. :param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" message = message or self.CHECK_DESC error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) def _check_call_names(self, call_node, names): if isinstance(call_node, ast.Call): if isinstance(call_node.func, ast.Name): if call_node.func.id in names: return True return False def import_no_db_in_virt(logical_line, filename): """Check for db calls from nova/virt As of grizzly-2 all the database calls have been removed from nova/virt, and we want to keep it that way. 
N307 """ if "nova/virt" in filename and not filename.endswith("fake.py"): if logical_line.startswith("from nova import db"): yield (0, "N307: nova.db import not allowed in nova/virt/*") def no_db_session_in_public_api(logical_line, filename): if "db/api.py" in filename: if session_check.match(logical_line): yield (0, "N309: public db api methods may not accept session") def use_timeutils_utcnow(logical_line, filename): # tools are OK to use the standard datetime module if "/tools/" in filename: return msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()" datetime_funcs = ['now', 'utcnow'] for f in datetime_funcs: pos = logical_line.find('datetime.%s' % f) if pos != -1: yield (pos, msg % f) def _get_virt_name(regex, data): m = regex.match(data) if m is None: return None driver = m.group(1) # Ignore things we mis-detect as virt drivers in the regex if driver in ["test_virt_drivers", "driver", "firewall", "disk", "api", "imagecache", "cpu", "hardware", "image"]: return None return driver def import_no_virt_driver_import_deps(physical_line, filename): """Check virt drivers' modules aren't imported by other drivers Modules under each virt driver's directory are considered private to that virt driver. Other drivers in Nova must not access those drivers. Any code that is to be shared should be refactored into a common module N311 """ thisdriver = _get_virt_name(virt_file_re, filename) thatdriver = _get_virt_name(virt_import_re, physical_line) if (thatdriver is not None and thisdriver is not None and thisdriver != thatdriver): return (0, "N311: importing code from other virt drivers forbidden") def import_no_virt_driver_config_deps(physical_line, filename): """Check virt drivers' config vars aren't used by other drivers Modules under each virt driver's directory are considered private to that virt driver. Other drivers in Nova must not use their config vars. 
Any config vars that are to be shared should be moved into a common module N312 """ thisdriver = _get_virt_name(virt_file_re, filename) thatdriver = _get_virt_name(virt_config_re, physical_line) if (thatdriver is not None and thisdriver is not None and thisdriver != thatdriver): return (0, "N312: using config vars from other virt drivers forbidden") def capital_cfg_help(logical_line, tokens): msg = "N313: capitalize help string" if cfg_re.match(logical_line): for t in range(len(tokens)): if tokens[t][1] == "help": txt = tokens[t + 2][1] if len(txt) > 1 and txt[1].islower(): yield(0, msg) def no_vi_headers(physical_line, line_number, lines): """Check for vi editor configuration in source files. By default vi modelines can only appear in the first or last 5 lines of a source file. N314 """ # NOTE(gilliard): line_number is 1-indexed if line_number <= 5 or line_number > len(lines) - 5: if vi_header_re.match(physical_line): return 0, "N314: Don't put vi configuration in source files" def assert_true_instance(logical_line): """Check for assertTrue(isinstance(a, b)) sentences N316 """ if asse_trueinst_re.match(logical_line): yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed") def assert_equal_type(logical_line): """Check for assertEqual(type(A), B) sentences N317 """ if asse_equal_type_re.match(logical_line): yield (0, "N317: assertEqual(type(A), B) sentences not allowed") def assert_equal_none(logical_line): """Check for assertEqual(A, None) or assertEqual(None, A) sentences N318 """ res = (asse_equal_start_with_none_re.search(logical_line) or asse_equal_end_with_none_re.search(logical_line)) if res: yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) " "sentences not allowed") def no_translate_debug_logs(logical_line, filename): """Check for 'LOG.debug(_(' As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. 
* Use filename so we can start enforcing this in specific folders instead of needing to do so all at once. N319 """ if logical_line.startswith("LOG.debug(_("): yield(0, "N319 Don't translate debug level logs") def no_import_translation_in_tests(logical_line, filename): """Check for 'from nova.i18n import _' N337 """ if 'nova/tests/' in filename: res = import_translation_for_log_or_exception.match(logical_line) if res: yield(0, "N337 Don't import translation in tests") def no_setting_conf_directly_in_tests(logical_line, filename): """Check for setting CONF.* attributes directly in tests The value can leak out of tests affecting how subsequent tests run. Using self.flags(option=value) is the preferred method to temporarily set config options in tests. N320 """ if 'nova/tests/' in filename: res = conf_attribute_set_re.match(logical_line) if res: yield (0, "N320: Setting CONF.* attributes directly in tests is " "forbidden. Use self.flags(option=value) instead") def validate_log_translations(logical_line, physical_line, filename): # Translations are not required in the test directory # and the Xen utilities if ("nova/tests" in filename or "plugins/xenserver/xenapi/etc/xapi.d" in filename): return if pep8.noqa(physical_line): return msg = "N328: LOG.info messages require translations `_LI()`!" if log_translation_info.match(logical_line): yield (0, msg) msg = "N329: LOG.exception messages require translations `_LE()`!" if log_translation_exception.match(logical_line): yield (0, msg) msg = "N330: LOG.warning, LOG.warn messages require translations `_LW()`!" if log_translation_LW.match(logical_line): yield (0, msg) msg = "N321: Log messages require translations!" if log_translation.match(logical_line): yield (0, msg) def no_mutable_default_args(logical_line): msg = "N322: Method's default argument shouldn't be mutable!" 
if mutable_default_args.match(logical_line): yield (0, msg) def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function We need to ensure that any files that are using the _() function to translate logs are explicitly importing the _ function. We can't trust unit test to catch whether the import has been added so we need to check for it here. """ # Build a list of the files that have _ imported. No further # checking needed once it is found. if filename in UNDERSCORE_IMPORT_FILES: pass elif (underscore_import_check.match(logical_line) or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) elif (translated_log.match(logical_line) or string_translation.match(logical_line)): yield(0, "N323: Found use of _() without explicit import of _ !") def use_jsonutils(logical_line, filename): # the code below that path is not meant to be executed from neutron # tree where jsonutils module is present, so don't enforce its usage # for this subdirectory if "plugins/xenserver" in filename: return # tools are OK to use the standard json module if "/tools/" in filename: return msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s" if "json." in logical_line: json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] for f in json_funcs: pos = logical_line.find('json.%s' % f) if pos != -1: yield (pos, msg % {'fun': f[:-1]}) def check_api_version_decorator(logical_line, previous_logical, blank_before, filename): msg = ("N332: the api_version decorator must be the first decorator" " on a method.") if blank_before == 0 and re.match(api_version_re, logical_line) \ and re.match(decorator_re, previous_logical): yield(0, msg) class CheckForStrUnicodeExc(BaseASTChecker): """Checks for the use of str() or unicode() on an exception. This currently only handles the case where str() or unicode() is used in the scope of an exception handler. 
If the exception is passed into a function, returned from an assertRaises, or used on an exception created in the same scope, this does not catch it. """ CHECK_DESC = ('N325 str() and unicode() cannot be used on an ' 'exception. Remove or use six.text_type()') def __init__(self, tree, filename): super(CheckForStrUnicodeExc, self).__init__(tree, filename) self.name = [] self.already_checked = [] def visit_TryExcept(self, node): for handler in node.handlers: if handler.name: self.name.append(handler.name.id) super(CheckForStrUnicodeExc, self).generic_visit(node) self.name = self.name[:-1] else: super(CheckForStrUnicodeExc, self).generic_visit(node) def visit_Call(self, node): if self._check_call_names(node, ['str', 'unicode']): if node not in self.already_checked: self.already_checked.append(node) if isinstance(node.args[0], ast.Name): if node.args[0].id in self.name: self.add_error(node.args[0]) super(CheckForStrUnicodeExc, self).generic_visit(node) class CheckForTransAdd(BaseASTChecker): """Checks for the use of concatenation on a translated string. Translations should not be concatenated with other strings, but should instead include the string being added to the translated string to give the translators the most information. """ CHECK_DESC = ('N326 Translated messages cannot be concatenated. ' 'String should be included in translated message.') TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC'] def visit_BinOp(self, node): if isinstance(node.op, ast.Add): if self._check_call_names(node.left, self.TRANS_FUNC): self.add_error(node.left) elif self._check_call_names(node.right, self.TRANS_FUNC): self.add_error(node.right) super(CheckForTransAdd, self).generic_visit(node) def assert_true_or_false_with_in(logical_line): """Check for assertTrue/False(A in B), assertTrue/False(A not in B), assertTrue/False(A in B, message) or assertTrue/False(A not in B, message) sentences. 
N334 """ res = (asse_true_false_with_in_or_not_in.search(logical_line) or asse_true_false_with_in_or_not_in_spaces.search(logical_line)) if res: yield (0, "N334: Use assertIn/NotIn(A, B) rather than " "assertTrue/False(A in/not in B) when checking collection " "contents.") def assert_raises_regexp(logical_line): """Check for usage of deprecated assertRaisesRegexp N335 """ res = asse_raises_regexp.search(logical_line) if res: yield (0, "N335: assertRaisesRegex must be used instead " "of assertRaisesRegexp") def dict_constructor_with_list_copy(logical_line): msg = ("N336: Must use a dict comprehension instead of a dict constructor" " with a sequence of key-value pairs." ) if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) def assert_equal_in(logical_line): """Check for assertEqual(A in B, True), assertEqual(True, A in B), assertEqual(A in B, False) or assertEqual(False, A in B) sentences N338 """ res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or asse_equal_in_end_with_true_or_false_re.search(logical_line)) if res: yield (0, "N338: Use assertIn/NotIn(A, B) rather than " "assertEqual(A in B, True/False) when checking collection " "contents.") def check_http_not_implemented(logical_line, physical_line, filename): msg = ("N339: HTTPNotImplemented response must be implemented with" " common raise_feature_not_supported().") if pep8.noqa(physical_line): return if "nova/api/openstack/compute/plugins/v3" not in filename: return if re.match(http_not_implemented_re, logical_line): yield(0, msg) def factory(register): register(import_no_db_in_virt) register(no_db_session_in_public_api) register(use_timeutils_utcnow) register(import_no_virt_driver_import_deps) register(import_no_virt_driver_config_deps) register(capital_cfg_help) register(no_vi_headers) register(no_import_translation_in_tests) register(assert_true_instance) register(assert_equal_type) register(assert_equal_none) register(assert_raises_regexp) 
register(no_translate_debug_logs) register(no_setting_conf_directly_in_tests) register(validate_log_translations) register(no_mutable_default_args) register(check_explicit_underscore_import) register(use_jsonutils) register(check_api_version_decorator) register(CheckForStrUnicodeExc) register(CheckForTransAdd) register(assert_true_or_false_with_in) register(dict_constructor_with_list_copy) register(assert_equal_in) register(check_http_not_implemented)
zack3241/incubator-airflow
refs/heads/master
airflow/operators/python_operator.py
7
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from builtins import str import dill import inspect import os import pickle import subprocess import sys import types from airflow.exceptions import AirflowException from airflow.models import BaseOperator, SkipMixin from airflow.utils.decorators import apply_defaults from airflow.utils.file import TemporaryDirectory from textwrap import dedent class PythonOperator(BaseOperator): """ Executes a Python callable :param python_callable: A reference to an object that is callable :type python_callable: python callable :param op_kwargs: a dictionary of keyword arguments that will get unpacked in your function :type op_kwargs: dict :param op_args: a list of positional arguments that will get unpacked when calling your callable :type op_args: list :param provide_context: if set to true, Airflow will pass a set of keyword arguments that can be used in your function. This set of kwargs correspond exactly to what you can use in your jinja templates. For this to work, you need to define `**kwargs` in your function header. 
:type provide_context: bool :param templates_dict: a dictionary where the values are templates that will get templated by the Airflow engine sometime between ``__init__`` and ``execute`` takes place and are made available in your callable's context after the template has been applied :type templates_dict: dict of str :param templates_exts: a list of file extensions to resolve while processing templated fields, for examples ``['.sql', '.hql']`` :type templates_exts: list(str) """ template_fields = ('templates_dict',) template_ext = tuple() ui_color = '#ffefeb' @apply_defaults def __init__( self, python_callable, op_args=None, op_kwargs=None, provide_context=False, templates_dict=None, templates_exts=None, *args, **kwargs): super(PythonOperator, self).__init__(*args, **kwargs) if not callable(python_callable): raise AirflowException('`python_callable` param must be callable') self.python_callable = python_callable self.op_args = op_args or [] self.op_kwargs = op_kwargs or {} self.provide_context = provide_context self.templates_dict = templates_dict if templates_exts: self.template_ext = templates_exts def execute(self, context): if self.provide_context: context.update(self.op_kwargs) context['templates_dict'] = self.templates_dict self.op_kwargs = context return_value = self.execute_callable() self.log.info("Done. Returned value was: %s", return_value) return return_value def execute_callable(self): return self.python_callable(*self.op_args, **self.op_kwargs) class BranchPythonOperator(PythonOperator, SkipMixin): """ Allows a workflow to "branch" or follow a single path following the execution of this task. It derives the PythonOperator and expects a Python function that returns the task_id to follow. The task_id returned should point to a task directly downstream from {self}. All other "branches" or directly downstream tasks are marked with a state of ``skipped`` so that these paths can't move forward. 
The ``skipped`` states are propageted downstream to allow for the DAG state to fill up and the DAG run's state to be inferred. Note that using tasks with ``depends_on_past=True`` downstream from ``BranchPythonOperator`` is logically unsound as ``skipped`` status will invariably lead to block tasks that depend on their past successes. ``skipped`` states propagates where all directly upstream tasks are ``skipped``. """ def execute(self, context): branch = super(BranchPythonOperator, self).execute(context) self.log.info("Following branch %s", branch) self.log.info("Marking other directly downstream tasks as skipped") downstream_tasks = context['task'].downstream_list self.log.debug("Downstream task_ids %s", downstream_tasks) skip_tasks = [t for t in downstream_tasks if t.task_id != branch] if downstream_tasks: self.skip(context['dag_run'], context['ti'].execution_date, skip_tasks) self.log.info("Done.") class ShortCircuitOperator(PythonOperator, SkipMixin): """ Allows a workflow to continue only if a condition is met. Otherwise, the workflow "short-circuits" and downstream tasks are skipped. The ShortCircuitOperator is derived from the PythonOperator. It evaluates a condition and short-circuits the workflow if the condition is False. Any downstream tasks are marked with a state of "skipped". If the condition is True, downstream tasks proceed as normal. The condition is determined by the result of `python_callable`. 
""" def execute(self, context): condition = super(ShortCircuitOperator, self).execute(context) self.log.info("Condition result is %s", condition) if condition: self.log.info('Proceeding with downstream tasks...') return self.log.info('Skipping downstream tasks...') downstream_tasks = context['task'].get_flat_relatives(upstream=False) self.log.debug("Downstream task_ids %s", downstream_tasks) if downstream_tasks: self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks) self.log.info("Done.") class PythonVirtualenvOperator(PythonOperator): """ Allows one to run a function in a virtualenv that is created and destroyed automatically (with certain caveats). The function must be defined using def, and not be part of a class. All imports must happen inside the function and no variables outside of the scope may be referenced. A global scope variable named virtualenv_string_args will be available (populated by string_args). In addition, one can pass stuff through op_args and op_kwargs, and one can use a return value. Note that if your virtualenv runs in a different Python major version than Airflow, you cannot use return values, op_args, or op_kwargs. You can use string_args though. :param python_callable: A python function with no references to outside variables, defined with def, which will be run in a virtualenv :type python_callable: function :param requirements: A list of requirements as specified in a pip install command :type requirements: list(str) :param python_version: The Python version to run the virtualenv with. Note that both 2 and 2.7 are acceptable forms. :type python_version: str :param use_dill: Whether to use dill to serialize the args and result (pickle is default). This allow more complex types but requires you to include dill in your requirements. :type use_dill: bool :param system_site_packages: Whether to include system_site_packages in your virtualenv. See virtualenv documentation for more information. 
:type system_site_packages: bool :param op_args: A list of positional arguments to pass to python_callable. :type op_kwargs: list :param op_kwargs: A dict of keyword arguments to pass to python_callable. :type op_kwargs: dict :param string_args: Strings that are present in the global var virtualenv_string_args, available to python_callable at runtime as a list(str). Note that args are split by newline. :type string_args: list(str) :param templates_dict: a dictionary where the values are templates that will get templated by the Airflow engine sometime between ``__init__`` and ``execute`` takes place and are made available in your callable's context after the template has been applied :type templates_dict: dict of str :param templates_exts: a list of file extensions to resolve while processing templated fields, for examples ``['.sql', '.hql']`` :type templates_exts: list(str) """ def __init__(self, python_callable, requirements=None, python_version=None, use_dill=False, system_site_packages=True, op_args=None, op_kwargs=None, string_args=None, templates_dict=None, templates_exts=None, *args, **kwargs): super(PythonVirtualenvOperator, self).__init__( python_callable=python_callable, op_args=op_args, op_kwargs=op_kwargs, templates_dict=templates_dict, templates_exts=templates_exts, provide_context=False, *args, **kwargs) self.requirements = requirements or [] self.string_args = string_args or [] self.python_version = python_version self.use_dill = use_dill self.system_site_packages = system_site_packages # check that dill is present if needed dill_in_requirements = map(lambda x: x.lower().startswith('dill'), self.requirements) if (not system_site_packages) and use_dill and not any(dill_in_requirements): raise AirflowException('If using dill, dill must be in the environment ' + 'either via system_site_packages or requirements') # check that a function is passed, and that it is not a lambda if (not isinstance(self.python_callable, types.FunctionType) or 
self.python_callable.__name__ == (lambda x: 0).__name__): raise AirflowException('{} only supports functions for python_callable arg', self.__class__.__name__) # check that args are passed iff python major version matches if (python_version is not None and str(python_version)[0] != str(sys.version_info[0]) and self._pass_op_args()): raise AirflowException("Passing op_args or op_kwargs is not supported across " "different Python major versions " "for PythonVirtualenvOperator. Please use string_args.") def execute_callable(self): with TemporaryDirectory(prefix='venv') as tmp_dir: if self.templates_dict: self.op_kwargs['templates_dict'] = self.templates_dict # generate filenames input_filename = os.path.join(tmp_dir, 'script.in') output_filename = os.path.join(tmp_dir, 'script.out') string_args_filename = os.path.join(tmp_dir, 'string_args.txt') script_filename = os.path.join(tmp_dir, 'script.py') # set up virtualenv self._execute_in_subprocess(self._generate_virtualenv_cmd(tmp_dir)) cmd = self._generate_pip_install_cmd(tmp_dir) if cmd: self._execute_in_subprocess(cmd) self._write_args(input_filename) self._write_script(script_filename) self._write_string_args(string_args_filename) # execute command in virtualenv self._execute_in_subprocess( self._generate_python_cmd(tmp_dir, script_filename, input_filename, output_filename, string_args_filename)) return self._read_result(output_filename) def _pass_op_args(self): # we should only pass op_args if any are given to us return len(self.op_args) + len(self.op_kwargs) > 0 def _execute_in_subprocess(self, cmd): try: self.log.info("Executing cmd\n{}".format(cmd)) output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, close_fds=True) if output: self.log.info("Got output\n{}".format(output)) except subprocess.CalledProcessError as e: self.log.info("Got error output\n{}".format(e.output)) raise def _write_string_args(self, filename): # writes string_args to a file, which are read line by line with open(filename, 'w') as 
f: f.write('\n'.join(map(str, self.string_args))) def _write_args(self, input_filename): # serialize args to file if self._pass_op_args(): with open(input_filename, 'wb') as f: arg_dict = ({'args': self.op_args, 'kwargs': self.op_kwargs}) if self.use_dill: dill.dump(arg_dict, f) else: pickle.dump(arg_dict, f) def _read_result(self, output_filename): if os.stat(output_filename).st_size == 0: return None with open(output_filename, 'rb') as f: try: if self.use_dill: return dill.load(f) else: return pickle.load(f) except ValueError: self.log.error("Error deserializing result. Note that result deserialization " "is not supported across major Python versions.") raise def _write_script(self, script_filename): with open(script_filename, 'w') as f: python_code = self._generate_python_code() self.log.debug('Writing code to file\n{}'.format(python_code)) f.write(python_code) def _generate_virtualenv_cmd(self, tmp_dir): cmd = ['virtualenv', tmp_dir] if self.system_site_packages: cmd.append('--system-site-packages') if self.python_version is not None: cmd.append('--python=python{}'.format(self.python_version)) return cmd def _generate_pip_install_cmd(self, tmp_dir): if len(self.requirements) == 0: return [] else: # direct path alleviates need to activate cmd = ['{}/bin/pip'.format(tmp_dir), 'install'] return cmd + self.requirements def _generate_python_cmd(self, tmp_dir, script_filename, input_filename, output_filename, string_args_filename): # direct path alleviates need to activate return ['{}/bin/python'.format(tmp_dir), script_filename, input_filename, output_filename, string_args_filename] def _generate_python_code(self): if self.use_dill: pickling_library = 'dill' else: pickling_library = 'pickle' fn = self.python_callable # dont try to read pickle if we didnt pass anything if self._pass_op_args(): load_args_line = 'with open(sys.argv[1], "rb") as f: arg_dict = {}.load(f)'.format(pickling_library) else: load_args_line = 'arg_dict = {"args": [], "kwargs": {}}' # no indents 
in original code so we can accept any type of indents in the original function # we deserialize args, call function, serialize result if necessary return dedent("""\ import {pickling_library} import sys {load_args_code} args = arg_dict["args"] kwargs = arg_dict["kwargs"] with open(sys.argv[3], 'r') as f: virtualenv_string_args = list(map(lambda x: x.strip(), list(f))) {python_callable_lines} res = {python_callable_name}(*args, **kwargs) with open(sys.argv[2], 'wb') as f: res is not None and {pickling_library}.dump(res, f) """).format( load_args_code=load_args_line, python_callable_lines=dedent(inspect.getsource(fn)), python_callable_name=fn.__name__, pickling_library=pickling_library) self.log.info("Done.")
Yubico/yubikey-piv-manager-dpkg
refs/heads/master
pivman/yubicommon/ctypes/libloader.py
3
# ---------------------------------------------------------------------------- # Copyright (c) 2008 David James # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# ----------------------------------------------------------------------------
# Cross-platform ctypes shared-library loader (ctypesgen/pyglet lineage --
# see the license header above and the "Ctypesgen maintainers" note below).
# Exposes: load_library(name), add_library_search_dirs(dirs).

from __future__ import absolute_import
import os.path
import re
import sys
import glob
import platform
import ctypes
import ctypes.util


def _environ_path(name):
    # Split a PATH-like environment variable into a list of directories.
    # NOTE(review): the separator is a hard-coded ':' rather than
    # os.pathsep, so this is POSIX-oriented; confirm intent on Windows.
    if name in os.environ:
        return os.environ[name].split(":")
    else:
        return []


class LibraryLoader(object):
    # Base loader: generates candidate paths and dlopens the first that
    # exists.  Platform subclasses override getplatformpaths()/load().

    def __init__(self):
        # Extra user-supplied search directories (set via
        # add_library_search_dirs at module level).
        self.other_dirs = []

    def load_library(self, libname, version=None):
        """Given the name of a library, load it."""
        paths = self.getpaths(libname)
        for path in paths:
            if os.path.exists(path):
                return self.load(path)

        raise ImportError("%s not found." % libname)

    def load(self, path):
        """Given a path to a library, load it."""
        try:
            # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead
            # of the default RTLD_LOCAL. Without this, you end up with
            # libraries not being loadable, resulting in "Symbol not found"
            # errors
            if sys.platform == 'darwin':
                return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)
            else:
                return ctypes.cdll.LoadLibrary(path)
        except OSError as e:
            # Normalize dlopen failures to ImportError for callers.
            raise ImportError(e)

    def getpaths(self, libname):
        """Return a list of paths where the library might be found."""
        if os.path.isabs(libname):
            # Absolute path: trust the caller, yield it as-is.
            yield libname
        else:
            # FIXME / TODO return '.' and os.path.dirname(__file__)
            for path in self.getplatformpaths(libname):
                yield path

            # Fall back to the stdlib's platform-specific search.
            path = ctypes.util.find_library(libname)
            if path:
                yield path

    def getplatformpaths(self, libname):
        # Overridden by platform subclasses; base class contributes nothing.
        return []


# Darwin (Mac OS X)
class DarwinLibraryLoader(LibraryLoader):
    # Candidate filename patterns tried for a bare library name, in order.
    name_formats = ["lib%s.dylib", "lib%s.so", "lib%s.bundle", "%s.dylib",
                    "%s.so", "%s.bundle", "%s"]

    def getplatformpaths(self, libname):
        # NOTE(review): os.path.pathsep is ':' -- this test fires only when
        # libname contains a colon.  It looks like os.path.sep ('/') was
        # intended (i.e. "libname is already a path"); verify upstream.
        if os.path.pathsep in libname:
            names = [libname]
        else:
            names = [format % libname for format in self.name_formats]

        for dir in self.getdirs(libname):
            for name in names:
                yield os.path.join(dir, name)

    def getdirs(self, libname):
        '''Implements the dylib search as specified in Apple documentation:

        http://developer.apple.com/documentation/DeveloperTools/Conceptual/
            DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html

        Before commencing the standard search, the method first checks
        the bundle's ``Frameworks`` directory if the application is running
        within a bundle (OS X .app).
        '''

        dyld_fallback_library_path = _environ_path(
            "DYLD_FALLBACK_LIBRARY_PATH")
        if not dyld_fallback_library_path:
            # Default fallback per dyld(1) when the env var is unset.
            dyld_fallback_library_path = [os.path.expanduser('~/lib'),
                                          '/usr/local/lib', '/usr/lib']

        dirs = []

        # A name containing '/' is dyld's "path" case: only
        # DYLD_LIBRARY_PATH applies; a leaf name also searches
        # LD_LIBRARY_PATH first.
        if '/' in libname:
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
        else:
            dirs.extend(_environ_path("LD_LIBRARY_PATH"))
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))

        dirs.extend(self.other_dirs)
        dirs.append(".")
        dirs.append(os.path.dirname(__file__))

        # py2app bundles expose the bundle's Frameworks dir via RESOURCEPATH.
        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
            dirs.append(os.path.join(
                os.environ['RESOURCEPATH'],
                '..',
                'Frameworks'))
        # PyInstaller one-file bundles unpack next to sys._MEIPASS.
        if hasattr(sys, 'frozen'):
            dirs.append(sys._MEIPASS)

        dirs.extend(dyld_fallback_library_path)

        return dirs


# Posix
class PosixLibraryLoader(LibraryLoader):
    # Lazily-built mapping of library name / filename -> full path,
    # shared across instances (class attribute).
    _ld_so_cache = None

    def load_library(self, libname, version=None):
        # Prefer the stdlib resolver (which consults ldconfig); fall back
        # to the slower directory scan in the base implementation.
        try:
            return self.load(ctypes.util.find_library(libname))
        except ImportError:
            return super(PosixLibraryLoader, self).load_library(
                libname, version)

    def _create_ld_so_cache(self):
        # Recreate search path followed by ld.so.  This is going to be
        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
        # not be up-to-date).  Used only as fallback for distros without
        # /sbin/ldconfig.
        #
        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.

        directories = []
        for name in ("LD_LIBRARY_PATH",
                     "SHLIB_PATH",  # HPUX
                     "LIBPATH",  # OS/2, AIX
                     "LIBRARY_PATH",  # BE/OS
                     ):
            if name in os.environ:
                directories.extend(os.environ[name].split(os.pathsep))
        directories.extend(self.other_dirs)
        directories.append(".")
        directories.append(os.path.dirname(__file__))

        try:
            # Each non-empty line of ld.so.conf is treated as a directory.
            directories.extend([dir.strip()
                                for dir in open('/etc/ld.so.conf')])
        except IOError:
            pass

        unix_lib_dirs_list = ['/lib', '/usr/lib', '/lib64', '/usr/lib64']
        if sys.platform.startswith('linux'):
            # Try and support multiarch work in Ubuntu
            # https://wiki.ubuntu.com/MultiarchSpec
            bitage = platform.architecture()[0]
            if bitage.startswith('32'):
                # Assume Intel/AMD x86 compat
                unix_lib_dirs_list += ['/lib/i386-linux-gnu',
                                       '/usr/lib/i386-linux-gnu']
            elif bitage.startswith('64'):
                # Assume Intel/AMD x86 compat
                unix_lib_dirs_list += ['/lib/x86_64-linux-gnu',
                                       '/usr/lib/x86_64-linux-gnu']
            else:
                # guess...
                unix_lib_dirs_list += glob.glob('/lib/*linux-gnu')
        directories.extend(unix_lib_dirs_list)

        cache = {}
        lib_re = re.compile(r'lib(.*)\.s[ol]')
        # NOTE(review): ext_re is compiled but never used below.
        ext_re = re.compile(r'\.s[ol]$')
        for dir in directories:
            try:
                for path in glob.glob("%s/*.s[ol]*" % dir):
                    file = os.path.basename(path)

                    # Index by filename
                    if file not in cache:
                        cache[file] = path

                    # Index by library name
                    match = lib_re.match(file)
                    if match:
                        library = match.group(1)
                        if library not in cache:
                            cache[library] = path
            except OSError:
                pass

        self._ld_so_cache = cache

    def getplatformpaths(self, libname):
        if self._ld_so_cache is None:
            self._create_ld_so_cache()

        result = self._ld_so_cache.get(libname)
        if result:
            yield result

        # NOTE(review): find_library returns a soname like 'libfoo.so.1';
        # prefixing '/lib' here assumes that location -- confirm intent.
        path = ctypes.util.find_library(libname)
        if path:
            yield os.path.join("/lib", path)


# Windows
class _WindowsLibrary(object):
    # Wraps one DLL loaded under both calling conventions; attribute
    # lookup tries cdecl first, then stdcall.

    def __init__(self, path):
        self.cdll = ctypes.cdll.LoadLibrary(path)
        self.windll = ctypes.windll.LoadLibrary(path)

    def __getattr__(self, name):
        try:
            return getattr(self.cdll, name)
        except AttributeError:
            try:
                return getattr(self.windll, name)
            except AttributeError:
                raise


class WindowsLibraryLoader(LibraryLoader):
    # Candidate DLL name patterns for a bare library name.
    name_formats = ["%s.dll", "lib%s.dll", "%slib.dll"]

    def load_library(self, libname, version=None):
        try:
            result = LibraryLoader.load_library(self, libname, version)
        except ImportError:
            # Path search failed: fall back to letting ctypes resolve the
            # name through the Windows DLL search order.
            result = None
            if os.path.sep not in libname:
                formats = self.name_formats[:]
                if version:
                    # e.g. libfoo-1.2.dll for version='1.2'.
                    formats.append("lib%%s-%s.dll" % version)
                for name in formats:
                    try:
                        result = getattr(ctypes.cdll, name % libname)
                        if result:
                            break
                    except WindowsError:
                        result = None
            if result is None:
                try:
                    result = getattr(ctypes.cdll, libname)
                except WindowsError:
                    result = None
            if result is None:
                raise ImportError("%s not found." % libname)
        return result

    def load(self, path):
        return _WindowsLibrary(path)

    def getplatformpaths(self, libname):
        if os.path.sep not in libname:
            for name in self.name_formats:
                # Current working directory first, then the system search.
                dll_in_current_dir = os.path.abspath(name % libname)
                if os.path.exists(dll_in_current_dir):
                    yield dll_in_current_dir
                path = ctypes.util.find_library(name % libname)
                if path:
                    yield path


# Platform switching

# If your value of sys.platform does not appear in this dict, please contact
# the Ctypesgen maintainers.

loaderclass = {
    "darwin": DarwinLibraryLoader,
    "cygwin": WindowsLibraryLoader,
    "win32": WindowsLibraryLoader
}

# Any other platform (linux*, *bsd, ...) falls back to the POSIX loader.
loader = loaderclass.get(sys.platform, PosixLibraryLoader)()


def add_library_search_dirs(other_dirs):
    # Install extra search directories on the module-level loader instance.
    loader.other_dirs = other_dirs

# Public module-level entry point bound to the selected loader.
load_library = loader.load_library

# Keep the module namespace minimal; the mapping is no longer needed.
del loaderclass
mlorbetske/PTVS
refs/heads/master
Python/Tests/TestData/Xaml/EmptyXName.py
18
import wpf from System.Windows import Application, Window class MyWindow(Window): def __init__(self): wpf.LoadComponent(self, 'EmptyXName.xaml') if __name__ == '__main__': Application().Run(MyWindow())
jazkarta/edx-platform
refs/heads/master
lms/djangoapps/course_structure_api/v0/serializers.py
39
""" Django REST Framework Serializers """ from django.core.urlresolvers import reverse from rest_framework import serializers from courseware.courses import course_image_url class CourseSerializer(serializers.Serializer): """ Serializer for Courses """ id = serializers.CharField() # pylint: disable=invalid-name name = serializers.CharField(source='display_name') category = serializers.CharField() org = serializers.SerializerMethodField('get_org') run = serializers.SerializerMethodField('get_run') course = serializers.SerializerMethodField('get_course') uri = serializers.SerializerMethodField('get_uri') image_url = serializers.SerializerMethodField('get_image_url') start = serializers.DateTimeField() end = serializers.DateTimeField() def get_org(self, course): """ Gets the course org """ return course.id.org def get_run(self, course): """ Gets the course run """ return course.id.run def get_course(self, course): """ Gets the course """ return course.id.course def get_uri(self, course): """ Builds course detail uri """ request = self.context['request'] return request.build_absolute_uri(reverse('course_structure_api:v0:detail', kwargs={'course_id': course.id})) def get_image_url(self, course): """ Get the course image URL """ return course_image_url(course)
wxgeo/geophar
refs/heads/master
wxgeometrie/sympy/core/tests/test_args.py
1
"""Test whether all elements of cls.args are instances of Basic. """ # NOTE: keep tests sorted by (module, class name) key. If a class can't # be instantiated, add it here anyway with @SKIP("abstract class) (see # e.g. Function). import os import re import warnings import io from sympy import (Basic, S, symbols, sqrt, sin, oo, Interval, exp, Lambda, pi, Eq, log) from sympy.core.compatibility import range from sympy.utilities.pytest import XFAIL, SKIP from sympy.utilities.exceptions import SymPyDeprecationWarning x, y, z = symbols('x,y,z') def test_all_classes_are_tested(): this = os.path.split(__file__)[0] path = os.path.join(this, os.pardir, os.pardir) sympy_path = os.path.abspath(path) prefix = os.path.split(sympy_path)[0] + os.sep re_cls = re.compile(r"^class ([A-Za-z][A-Za-z0-9_]*)\s*\(", re.MULTILINE) modules = {} for root, dirs, files in os.walk(sympy_path): module = root.replace(prefix, "").replace(os.sep, ".") for file in files: if file.startswith(("_", "test_", "bench_")): continue if not file.endswith(".py"): continue with io.open(os.path.join(root, file), "r", encoding='utf-8') as f: text = f.read() submodule = module + '.' + file[:-3] names = re_cls.findall(text) if not names: continue try: mod = __import__(submodule, fromlist=names) except ImportError: continue def is_Basic(name): cls = getattr(mod, name) if hasattr(cls, '_sympy_deprecated_func'): cls = cls._sympy_deprecated_func return issubclass(cls, Basic) names = list(filter(is_Basic, names)) if names: modules[submodule] = names ns = globals() failed = [] for module, names in modules.items(): mod = module.replace('.', '__') for name in names: test = 'test_' + mod + '__' + name if test not in ns: failed.append(module + '.' + name) # reset all SymPyDeprecationWarning into errors warnings.simplefilter("error", category=SymPyDeprecationWarning) assert not failed, "Missing classes: %s. Please add tests for these to sympy/core/tests/test_args.py." 
% ", ".join(failed) def _test_args(obj): return all(isinstance(arg, Basic) for arg in obj.args) def test_sympy__assumptions__assume__AppliedPredicate(): from sympy.assumptions.assume import AppliedPredicate, Predicate from sympy import Q assert _test_args(AppliedPredicate(Predicate("test"), 2)) assert _test_args(Q.is_true(True)) def test_sympy__assumptions__assume__Predicate(): from sympy.assumptions.assume import Predicate assert _test_args(Predicate("test")) def test_sympy__assumptions__sathandlers__UnevaluatedOnFree(): from sympy.assumptions.sathandlers import UnevaluatedOnFree from sympy import Q assert _test_args(UnevaluatedOnFree(Q.positive)) assert _test_args(UnevaluatedOnFree(Q.positive(x))) assert _test_args(UnevaluatedOnFree(Q.positive(x*y))) def test_sympy__assumptions__sathandlers__AllArgs(): from sympy.assumptions.sathandlers import AllArgs from sympy import Q assert _test_args(AllArgs(Q.positive)) assert _test_args(AllArgs(Q.positive(x))) assert _test_args(AllArgs(Q.positive(x*y))) def test_sympy__assumptions__sathandlers__AnyArgs(): from sympy.assumptions.sathandlers import AnyArgs from sympy import Q assert _test_args(AnyArgs(Q.positive)) assert _test_args(AnyArgs(Q.positive(x))) assert _test_args(AnyArgs(Q.positive(x*y))) def test_sympy__assumptions__sathandlers__ExactlyOneArg(): from sympy.assumptions.sathandlers import ExactlyOneArg from sympy import Q assert _test_args(ExactlyOneArg(Q.positive)) assert _test_args(ExactlyOneArg(Q.positive(x))) assert _test_args(ExactlyOneArg(Q.positive(x*y))) def test_sympy__assumptions__sathandlers__CheckOldAssump(): from sympy.assumptions.sathandlers import CheckOldAssump from sympy import Q assert _test_args(CheckOldAssump(Q.positive)) assert _test_args(CheckOldAssump(Q.positive(x))) assert _test_args(CheckOldAssump(Q.positive(x*y))) def test_sympy__assumptions__sathandlers__CheckIsPrime(): from sympy.assumptions.sathandlers import CheckIsPrime from sympy import Q # Input must be a number assert 
_test_args(CheckIsPrime(Q.positive)) assert _test_args(CheckIsPrime(Q.positive(5))) @SKIP("abstract Class") def test_sympy__codegen__ast__AugmentedAssignment(): from sympy.codegen.ast import AugmentedAssignment assert _test_args(AugmentedAssignment(x, 1)) def test_sympy__codegen__ast__AddAugmentedAssignment(): from sympy.codegen.ast import AddAugmentedAssignment assert _test_args(AddAugmentedAssignment(x, 1)) def test_sympy__codegen__ast__SubAugmentedAssignment(): from sympy.codegen.ast import SubAugmentedAssignment assert _test_args(SubAugmentedAssignment(x, 1)) def test_sympy__codegen__ast__MulAugmentedAssignment(): from sympy.codegen.ast import MulAugmentedAssignment assert _test_args(MulAugmentedAssignment(x, 1)) def test_sympy__codegen__ast__DivAugmentedAssignment(): from sympy.codegen.ast import DivAugmentedAssignment assert _test_args(DivAugmentedAssignment(x, 1)) def test_sympy__codegen__ast__ModAugmentedAssignment(): from sympy.codegen.ast import ModAugmentedAssignment assert _test_args(ModAugmentedAssignment(x, 1)) def test_sympy__codegen__ast__CodeBlock(): from sympy.codegen.ast import CodeBlock, Assignment assert _test_args(CodeBlock(Assignment(x, 1), Assignment(y, 2))) def test_sympy__codegen__ast__For(): from sympy.codegen.ast import For, CodeBlock, AddAugmentedAssignment from sympy import Range assert _test_args(For(x, Range(10), CodeBlock(AddAugmentedAssignment(y, 1)))) def test_sympy__codegen__ast__Token(): from sympy.codegen.ast import Token assert _test_args(Token()) def test_sympy__codegen__ast__ContinueToken(): from sympy.codegen.ast import ContinueToken assert _test_args(ContinueToken()) def test_sympy__codegen__ast__BreakToken(): from sympy.codegen.ast import BreakToken assert _test_args(BreakToken()) def test_sympy__codegen__ast__NoneToken(): from sympy.codegen.ast import NoneToken assert _test_args(NoneToken()) def test_sympy__codegen__ast__String(): from sympy.codegen.ast import String assert _test_args(String('foobar')) def 
test_sympy__codegen__ast__QuotedString(): from sympy.codegen.ast import QuotedString assert _test_args(QuotedString('foobar')) def test_sympy__codegen__ast__Comment(): from sympy.codegen.ast import Comment assert _test_args(Comment('this is a comment')) def test_sympy__codegen__ast__Node(): from sympy.codegen.ast import Node assert _test_args(Node()) assert _test_args(Node(attrs={1, 2, 3})) def test_sympy__codegen__ast__Type(): from sympy.codegen.ast import Type assert _test_args(Type('float128')) def test_sympy__codegen__ast__IntBaseType(): from sympy.codegen.ast import IntBaseType assert _test_args(IntBaseType('bigint')) def test_sympy__codegen__ast___SizedIntType(): from sympy.codegen.ast import _SizedIntType assert _test_args(_SizedIntType('int128', 128)) def test_sympy__codegen__ast__SignedIntType(): from sympy.codegen.ast import SignedIntType assert _test_args(SignedIntType('int128_with_sign', 128)) def test_sympy__codegen__ast__UnsignedIntType(): from sympy.codegen.ast import UnsignedIntType assert _test_args(UnsignedIntType('unt128', 128)) def test_sympy__codegen__ast__FloatBaseType(): from sympy.codegen.ast import FloatBaseType assert _test_args(FloatBaseType('positive_real')) def test_sympy__codegen__ast__FloatType(): from sympy.codegen.ast import FloatType assert _test_args(FloatType('float242', 242, nmant=142, nexp=99)) def test_sympy__codegen__ast__ComplexBaseType(): from sympy.codegen.ast import ComplexBaseType assert _test_args(ComplexBaseType('positive_cmplx')) def test_sympy__codegen__ast__ComplexType(): from sympy.codegen.ast import ComplexType assert _test_args(ComplexType('complex42', 42, nmant=15, nexp=5)) def test_sympy__codegen__ast__Attribute(): from sympy.codegen.ast import Attribute assert _test_args(Attribute('noexcept')) def test_sympy__codegen__ast__Variable(): from sympy.codegen.ast import Variable, Type, value_const assert _test_args(Variable(x)) assert _test_args(Variable(y, Type('float32'), {value_const})) assert 
_test_args(Variable(z, type=Type('float64'))) def test_sympy__codegen__ast__Pointer(): from sympy.codegen.ast import Pointer, Type, pointer_const assert _test_args(Pointer(x)) assert _test_args(Pointer(y, type=Type('float32'))) assert _test_args(Pointer(z, Type('float64'), {pointer_const})) def test_sympy__codegen__ast__Declaration(): from sympy.codegen.ast import Declaration, Variable, Type vx = Variable(x, type=Type('float')) assert _test_args(Declaration(vx)) def test_sympy__codegen__ast__While(): from sympy.codegen.ast import While, AddAugmentedAssignment assert _test_args(While(abs(x) < 1, [AddAugmentedAssignment(x, -1)])) def test_sympy__codegen__ast__Scope(): from sympy.codegen.ast import Scope, AddAugmentedAssignment assert _test_args(Scope([AddAugmentedAssignment(x, -1)])) def test_sympy__codegen__ast__Stream(): from sympy.codegen.ast import Stream assert _test_args(Stream('stdin')) def test_sympy__codegen__ast__Print(): from sympy.codegen.ast import Print assert _test_args(Print([x, y])) assert _test_args(Print([x, y], "%d %d")) def test_sympy__codegen__ast__FunctionPrototype(): from sympy.codegen.ast import FunctionPrototype, real, Declaration, Variable inp_x = Declaration(Variable(x, type=real)) assert _test_args(FunctionPrototype(real, 'pwer', [inp_x])) def test_sympy__codegen__ast__FunctionDefinition(): from sympy.codegen.ast import FunctionDefinition, real, Declaration, Variable, Assignment inp_x = Declaration(Variable(x, type=real)) assert _test_args(FunctionDefinition(real, 'pwer', [inp_x], [Assignment(x, x**2)])) def test_sympy__codegen__ast__Return(): from sympy.codegen.ast import Return assert _test_args(Return(x)) def test_sympy__codegen__ast__FunctionCall(): from sympy.codegen.ast import FunctionCall assert _test_args(FunctionCall('pwer', [x])) def test_sympy__codegen__ast__Element(): from sympy.codegen.ast import Element assert _test_args(Element('x', range(3))) def test_sympy__codegen__cnodes__CommaOperator(): from sympy.codegen.cnodes 
import CommaOperator assert _test_args(CommaOperator(1, 2)) def test_sympy__codegen__cnodes__goto(): from sympy.codegen.cnodes import goto assert _test_args(goto('early_exit')) def test_sympy__codegen__cnodes__Label(): from sympy.codegen.cnodes import Label assert _test_args(Label('early_exit')) def test_sympy__codegen__cnodes__PreDecrement(): from sympy.codegen.cnodes import PreDecrement assert _test_args(PreDecrement(x)) def test_sympy__codegen__cnodes__PostDecrement(): from sympy.codegen.cnodes import PostDecrement assert _test_args(PostDecrement(x)) def test_sympy__codegen__cnodes__PreIncrement(): from sympy.codegen.cnodes import PreIncrement assert _test_args(PreIncrement(x)) def test_sympy__codegen__cnodes__PostIncrement(): from sympy.codegen.cnodes import PostIncrement assert _test_args(PostIncrement(x)) def test_sympy__codegen__cnodes__struct(): from sympy.codegen.ast import real, Variable from sympy.codegen.cnodes import struct assert _test_args(struct(declarations=[ Variable(x, type=real), Variable(y, type=real) ])) def test_sympy__codegen__cnodes__union(): from sympy.codegen.ast import float32, int32, Variable from sympy.codegen.cnodes import union assert _test_args(union(declarations=[ Variable(x, type=float32), Variable(y, type=int32) ])) def test_sympy__codegen__cxxnodes__using(): from sympy.codegen.cxxnodes import using assert _test_args(using('std::vector')) assert _test_args(using('std::vector', 'vec')) def test_sympy__codegen__fnodes__Program(): from sympy.codegen.fnodes import Program assert _test_args(Program('foobar', [])) def test_sympy__codegen__fnodes__Module(): from sympy.codegen.fnodes import Module assert _test_args(Module('foobar', [], [])) def test_sympy__codegen__fnodes__Subroutine(): from sympy.codegen.fnodes import Subroutine x = symbols('x', real=True) assert _test_args(Subroutine('foo', [x], [])) def test_sympy__codegen__fnodes__GoTo(): from sympy.codegen.fnodes import GoTo assert _test_args(GoTo([10])) assert _test_args(GoTo([10, 
20], x > 1)) def test_sympy__codegen__fnodes__FortranReturn(): from sympy.codegen.fnodes import FortranReturn assert _test_args(FortranReturn(10)) def test_sympy__codegen__fnodes__Extent(): from sympy.codegen.fnodes import Extent assert _test_args(Extent()) assert _test_args(Extent(None)) assert _test_args(Extent(':')) assert _test_args(Extent(-3, 4)) assert _test_args(Extent(x, y)) def test_sympy__codegen__fnodes__use_rename(): from sympy.codegen.fnodes import use_rename assert _test_args(use_rename('loc', 'glob')) def test_sympy__codegen__fnodes__use(): from sympy.codegen.fnodes import use assert _test_args(use('modfoo', only='bar')) def test_sympy__codegen__fnodes__SubroutineCall(): from sympy.codegen.fnodes import SubroutineCall assert _test_args(SubroutineCall('foo', ['bar', 'baz'])) def test_sympy__codegen__fnodes__Do(): from sympy.codegen.fnodes import Do assert _test_args(Do([], 'i', 1, 42)) def test_sympy__codegen__fnodes__ImpliedDoLoop(): from sympy.codegen.fnodes import ImpliedDoLoop assert _test_args(ImpliedDoLoop('i', 'i', 1, 42)) def test_sympy__codegen__fnodes__ArrayConstructor(): from sympy.codegen.fnodes import ArrayConstructor assert _test_args(ArrayConstructor([1, 2, 3])) from sympy.codegen.fnodes import ImpliedDoLoop idl = ImpliedDoLoop('i', 'i', 1, 42) assert _test_args(ArrayConstructor([1, idl, 3])) def test_sympy__codegen__fnodes__sum_(): from sympy.codegen.fnodes import sum_ assert _test_args(sum_('arr')) def test_sympy__codegen__fnodes__product_(): from sympy.codegen.fnodes import product_ assert _test_args(product_('arr')) @XFAIL def test_sympy__combinatorics__graycode__GrayCode(): from sympy.combinatorics.graycode import GrayCode # an integer is given and returned from GrayCode as the arg assert _test_args(GrayCode(3, start='100')) assert _test_args(GrayCode(3, rank=1)) def test_sympy__combinatorics__subsets__Subset(): from sympy.combinatorics.subsets import Subset assert _test_args(Subset([0, 1], [0, 1, 2, 3])) assert 
_test_args(Subset(['c', 'd'], ['a', 'b', 'c', 'd'])) @XFAIL def test_sympy__combinatorics__permutations__Permutation(): from sympy.combinatorics.permutations import Permutation assert _test_args(Permutation([0, 1, 2, 3])) def test_sympy__combinatorics__perm_groups__PermutationGroup(): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.perm_groups import PermutationGroup assert _test_args(PermutationGroup([Permutation([0, 1])])) def test_sympy__combinatorics__polyhedron__Polyhedron(): from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.polyhedron import Polyhedron from sympy.abc import w, x, y, z pgroup = [Permutation([[0, 1, 2], [3]]), Permutation([[0, 1, 3], [2]]), Permutation([[0, 2, 3], [1]]), Permutation([[1, 2, 3], [0]]), Permutation([[0, 1], [2, 3]]), Permutation([[0, 2], [1, 3]]), Permutation([[0, 3], [1, 2]]), Permutation([[0, 1, 2, 3]])] corners = [w, x, y, z] faces = [(w, x, y), (w, y, z), (w, z, x), (x, y, z)] assert _test_args(Polyhedron(corners, faces, pgroup)) @XFAIL def test_sympy__combinatorics__prufer__Prufer(): from sympy.combinatorics.prufer import Prufer assert _test_args(Prufer([[0, 1], [0, 2], [0, 3]], 4)) def test_sympy__combinatorics__partitions__Partition(): from sympy.combinatorics.partitions import Partition assert _test_args(Partition([1])) @XFAIL def test_sympy__combinatorics__partitions__IntegerPartition(): from sympy.combinatorics.partitions import IntegerPartition assert _test_args(IntegerPartition([1])) def test_sympy__concrete__products__Product(): from sympy.concrete.products import Product assert _test_args(Product(x, (x, 0, 10))) assert _test_args(Product(x, (x, 0, y), (y, 0, 10))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_limits__ExprWithLimits(): from sympy.concrete.expr_with_limits import ExprWithLimits assert _test_args(ExprWithLimits(x, (x, 0, 10))) assert _test_args(ExprWithLimits(x*y, (x, 0, 10.),(y,1.,3))) @SKIP("abstract Class") def 
test_sympy__concrete__expr_with_limits__AddWithLimits(): from sympy.concrete.expr_with_limits import AddWithLimits assert _test_args(AddWithLimits(x, (x, 0, 10))) assert _test_args(AddWithLimits(x*y, (x, 0, 10),(y,1,3))) @SKIP("abstract Class") def test_sympy__concrete__expr_with_intlimits__ExprWithIntLimits(): from sympy.concrete.expr_with_intlimits import ExprWithIntLimits assert _test_args(ExprWithIntLimits(x, (x, 0, 10))) assert _test_args(ExprWithIntLimits(x*y, (x, 0, 10),(y,1,3))) def test_sympy__concrete__summations__Sum(): from sympy.concrete.summations import Sum assert _test_args(Sum(x, (x, 0, 10))) assert _test_args(Sum(x, (x, 0, y), (y, 0, 10))) def test_sympy__core__add__Add(): from sympy.core.add import Add assert _test_args(Add(x, y, z, 2)) def test_sympy__core__basic__Atom(): from sympy.core.basic import Atom assert _test_args(Atom()) def test_sympy__core__basic__Basic(): from sympy.core.basic import Basic assert _test_args(Basic()) def test_sympy__core__containers__Dict(): from sympy.core.containers import Dict assert _test_args(Dict({x: y, y: z})) def test_sympy__core__containers__Tuple(): from sympy.core.containers import Tuple assert _test_args(Tuple(x, y, z, 2)) def test_sympy__core__expr__AtomicExpr(): from sympy.core.expr import AtomicExpr assert _test_args(AtomicExpr()) def test_sympy__core__expr__Expr(): from sympy.core.expr import Expr assert _test_args(Expr()) def test_sympy__core__expr__UnevaluatedExpr(): from sympy.core.expr import UnevaluatedExpr from sympy.abc import x assert _test_args(UnevaluatedExpr(x)) def test_sympy__core__function__Application(): from sympy.core.function import Application assert _test_args(Application(1, 2, 3)) def test_sympy__core__function__AppliedUndef(): from sympy.core.function import AppliedUndef assert _test_args(AppliedUndef(1, 2, 3)) def test_sympy__core__function__Derivative(): from sympy.core.function import Derivative assert _test_args(Derivative(2, x, y, 3)) @SKIP("abstract class") def 
test_sympy__core__function__Function(): pass def test_sympy__core__function__Lambda(): assert _test_args(Lambda((x, y), x + y + z)) def test_sympy__core__function__Subs(): from sympy.core.function import Subs assert _test_args(Subs(x + y, x, 2)) def test_sympy__core__function__WildFunction(): from sympy.core.function import WildFunction assert _test_args(WildFunction('f')) def test_sympy__core__mod__Mod(): from sympy.core.mod import Mod assert _test_args(Mod(x, 2)) def test_sympy__core__mul__Mul(): from sympy.core.mul import Mul assert _test_args(Mul(2, x, y, z)) def test_sympy__core__numbers__Catalan(): from sympy.core.numbers import Catalan assert _test_args(Catalan()) def test_sympy__core__numbers__ComplexInfinity(): from sympy.core.numbers import ComplexInfinity assert _test_args(ComplexInfinity()) def test_sympy__core__numbers__EulerGamma(): from sympy.core.numbers import EulerGamma assert _test_args(EulerGamma()) def test_sympy__core__numbers__Exp1(): from sympy.core.numbers import Exp1 assert _test_args(Exp1()) def test_sympy__core__numbers__Float(): from sympy.core.numbers import Float assert _test_args(Float(1.23)) def test_sympy__core__numbers__GoldenRatio(): from sympy.core.numbers import GoldenRatio assert _test_args(GoldenRatio()) def test_sympy__core__numbers__Half(): from sympy.core.numbers import Half assert _test_args(Half()) def test_sympy__core__numbers__ImaginaryUnit(): from sympy.core.numbers import ImaginaryUnit assert _test_args(ImaginaryUnit()) def test_sympy__core__numbers__Infinity(): from sympy.core.numbers import Infinity assert _test_args(Infinity()) def test_sympy__core__numbers__Integer(): from sympy.core.numbers import Integer assert _test_args(Integer(7)) @SKIP("abstract class") def test_sympy__core__numbers__IntegerConstant(): pass def test_sympy__core__numbers__NaN(): from sympy.core.numbers import NaN assert _test_args(NaN()) def test_sympy__core__numbers__NegativeInfinity(): from sympy.core.numbers import NegativeInfinity assert 
_test_args(NegativeInfinity()) def test_sympy__core__numbers__NegativeOne(): from sympy.core.numbers import NegativeOne assert _test_args(NegativeOne()) def test_sympy__core__numbers__Number(): from sympy.core.numbers import Number assert _test_args(Number(1, 7)) def test_sympy__core__numbers__NumberSymbol(): from sympy.core.numbers import NumberSymbol assert _test_args(NumberSymbol()) def test_sympy__core__numbers__One(): from sympy.core.numbers import One assert _test_args(One()) def test_sympy__core__numbers__Pi(): from sympy.core.numbers import Pi assert _test_args(Pi()) def test_sympy__core__numbers__Rational(): from sympy.core.numbers import Rational assert _test_args(Rational(1, 7)) @SKIP("abstract class") def test_sympy__core__numbers__RationalConstant(): pass def test_sympy__core__numbers__Zero(): from sympy.core.numbers import Zero assert _test_args(Zero()) @SKIP("abstract class") def test_sympy__core__operations__AssocOp(): pass @SKIP("abstract class") def test_sympy__core__operations__LatticeOp(): pass def test_sympy__core__power__Pow(): from sympy.core.power import Pow assert _test_args(Pow(x, 2)) def test_sympy__algebras__quaternion__Quaternion(): from sympy.algebras.quaternion import Quaternion assert _test_args(Quaternion(x, 1, 2, 3)) def test_sympy__core__relational__Equality(): from sympy.core.relational import Equality assert _test_args(Equality(x, 2)) def test_sympy__core__relational__GreaterThan(): from sympy.core.relational import GreaterThan assert _test_args(GreaterThan(x, 2)) def test_sympy__core__relational__LessThan(): from sympy.core.relational import LessThan assert _test_args(LessThan(x, 2)) @SKIP("abstract class") def test_sympy__core__relational__Relational(): pass def test_sympy__core__relational__StrictGreaterThan(): from sympy.core.relational import StrictGreaterThan assert _test_args(StrictGreaterThan(x, 2)) def test_sympy__core__relational__StrictLessThan(): from sympy.core.relational import StrictLessThan assert 
_test_args(StrictLessThan(x, 2)) def test_sympy__core__relational__Unequality(): from sympy.core.relational import Unequality assert _test_args(Unequality(x, 2)) def test_sympy__sandbox__indexed_integrals__IndexedIntegral(): from sympy.tensor import IndexedBase, Idx from sympy.sandbox.indexed_integrals import IndexedIntegral A = IndexedBase('A') i, j = symbols('i j', integer=True) a1, a2 = symbols('a1:3', cls=Idx) assert _test_args(IndexedIntegral(A[a1], A[a2])) assert _test_args(IndexedIntegral(A[i], A[j])) def test_sympy__calculus__util__AccumulationBounds(): from sympy.calculus.util import AccumulationBounds assert _test_args(AccumulationBounds(0, 1)) def test_sympy__sets__ordinals__OmegaPower(): from sympy.sets.ordinals import OmegaPower assert _test_args(OmegaPower(1, 1)) def test_sympy__sets__ordinals__Ordinal(): from sympy.sets.ordinals import Ordinal, OmegaPower assert _test_args(Ordinal(OmegaPower(2, 1))) def test_sympy__sets__ordinals__OrdinalOmega(): from sympy.sets.ordinals import OrdinalOmega assert _test_args(OrdinalOmega()) def test_sympy__sets__ordinals__OrdinalZero(): from sympy.sets.ordinals import OrdinalZero assert _test_args(OrdinalZero()) def test_sympy__sets__sets__EmptySet(): from sympy.sets.sets import EmptySet assert _test_args(EmptySet()) def test_sympy__sets__sets__UniversalSet(): from sympy.sets.sets import UniversalSet assert _test_args(UniversalSet()) def test_sympy__sets__sets__FiniteSet(): from sympy.sets.sets import FiniteSet assert _test_args(FiniteSet(x, y, z)) def test_sympy__sets__sets__Interval(): from sympy.sets.sets import Interval assert _test_args(Interval(0, 1)) def test_sympy__sets__sets__ProductSet(): from sympy.sets.sets import ProductSet, Interval assert _test_args(ProductSet(Interval(0, 1), Interval(0, 1))) @SKIP("does it make sense to test this?") def test_sympy__sets__sets__Set(): from sympy.sets.sets import Set assert _test_args(Set()) def test_sympy__sets__sets__Intersection(): from sympy.sets.sets import 
Intersection, Interval assert _test_args(Intersection(Interval(0, 3), Interval(2, 4), evaluate=False)) def test_sympy__sets__sets__Union(): from sympy.sets.sets import Union, Interval assert _test_args(Union(Interval(0, 1), Interval(2, 3))) def test_sympy__sets__sets__Complement(): from sympy.sets.sets import Complement assert _test_args(Complement(Interval(0, 2), Interval(0, 1))) def test_sympy__sets__sets__SymmetricDifference(): from sympy.sets.sets import FiniteSet, SymmetricDifference assert _test_args(SymmetricDifference(FiniteSet(1, 2, 3), \ FiniteSet(2, 3, 4))) def test_sympy__core__trace__Tr(): from sympy.core.trace import Tr a, b = symbols('a b') assert _test_args(Tr(a + b)) def test_sympy__sets__setexpr__SetExpr(): from sympy.sets.setexpr import SetExpr assert _test_args(SetExpr(Interval(0, 1))) def test_sympy__sets__fancysets__Naturals(): from sympy.sets.fancysets import Naturals assert _test_args(Naturals()) def test_sympy__sets__fancysets__Naturals0(): from sympy.sets.fancysets import Naturals0 assert _test_args(Naturals0()) def test_sympy__sets__fancysets__Integers(): from sympy.sets.fancysets import Integers assert _test_args(Integers()) def test_sympy__sets__fancysets__Reals(): from sympy.sets.fancysets import Reals assert _test_args(Reals()) def test_sympy__sets__fancysets__Complexes(): from sympy.sets.fancysets import Complexes assert _test_args(Complexes()) def test_sympy__sets__fancysets__ComplexRegion(): from sympy.sets.fancysets import ComplexRegion from sympy import S from sympy.sets import Interval a = Interval(0, 1) b = Interval(2, 3) theta = Interval(0, 2*S.Pi) assert _test_args(ComplexRegion(a*b)) assert _test_args(ComplexRegion(a*theta, polar=True)) def test_sympy__sets__fancysets__ImageSet(): from sympy.sets.fancysets import ImageSet from sympy import S, Symbol x = Symbol('x') assert _test_args(ImageSet(Lambda(x, x**2), S.Naturals)) def test_sympy__sets__fancysets__Range(): from sympy.sets.fancysets import Range assert 
_test_args(Range(1, 5, 1)) def test_sympy__sets__conditionset__ConditionSet(): from sympy.sets.conditionset import ConditionSet from sympy import S, Symbol x = Symbol('x') assert _test_args(ConditionSet(x, Eq(x**2, 1), S.Reals)) def test_sympy__sets__contains__Contains(): from sympy.sets.fancysets import Range from sympy.sets.contains import Contains assert _test_args(Contains(x, Range(0, 10, 2))) # STATS from sympy.stats.crv_types import NormalDistribution nd = NormalDistribution(0, 1) from sympy.stats.frv_types import DieDistribution die = DieDistribution(6) def test_sympy__stats__crv__ContinuousDomain(): from sympy.stats.crv import ContinuousDomain assert _test_args(ContinuousDomain({x}, Interval(-oo, oo))) def test_sympy__stats__crv__SingleContinuousDomain(): from sympy.stats.crv import SingleContinuousDomain assert _test_args(SingleContinuousDomain(x, Interval(-oo, oo))) def test_sympy__stats__crv__ProductContinuousDomain(): from sympy.stats.crv import SingleContinuousDomain, ProductContinuousDomain D = SingleContinuousDomain(x, Interval(-oo, oo)) E = SingleContinuousDomain(y, Interval(0, oo)) assert _test_args(ProductContinuousDomain(D, E)) def test_sympy__stats__crv__ConditionalContinuousDomain(): from sympy.stats.crv import (SingleContinuousDomain, ConditionalContinuousDomain) D = SingleContinuousDomain(x, Interval(-oo, oo)) assert _test_args(ConditionalContinuousDomain(D, x > 0)) def test_sympy__stats__crv__ContinuousPSpace(): from sympy.stats.crv import ContinuousPSpace, SingleContinuousDomain D = SingleContinuousDomain(x, Interval(-oo, oo)) assert _test_args(ContinuousPSpace(D, nd)) def test_sympy__stats__crv__SingleContinuousPSpace(): from sympy.stats.crv import SingleContinuousPSpace assert _test_args(SingleContinuousPSpace(x, nd)) @SKIP("abstract class") def test_sympy__stats__crv__SingleContinuousDistribution(): pass def test_sympy__stats__drv__SingleDiscreteDomain(): from sympy.stats.drv import SingleDiscreteDomain assert 
_test_args(SingleDiscreteDomain(x, S.Naturals)) def test_sympy__stats__drv__ProductDiscreteDomain(): from sympy.stats.drv import SingleDiscreteDomain, ProductDiscreteDomain X = SingleDiscreteDomain(x, S.Naturals) Y = SingleDiscreteDomain(y, S.Integers) assert _test_args(ProductDiscreteDomain(X, Y)) def test_sympy__stats__drv__SingleDiscretePSpace(): from sympy.stats.drv import SingleDiscretePSpace from sympy.stats.drv_types import PoissonDistribution assert _test_args(SingleDiscretePSpace(x, PoissonDistribution(1))) def test_sympy__stats__drv__DiscretePSpace(): from sympy.stats.drv import DiscretePSpace, SingleDiscreteDomain density = Lambda(x, 2**(-x)) domain = SingleDiscreteDomain(x, S.Naturals) assert _test_args(DiscretePSpace(domain, density)) def test_sympy__stats__drv__ConditionalDiscreteDomain(): from sympy.stats.drv import ConditionalDiscreteDomain, SingleDiscreteDomain X = SingleDiscreteDomain(x, S.Naturals0) assert _test_args(ConditionalDiscreteDomain(X, x > 2)) @SKIP("abstract class") def test_sympy__stats__drv__SingleDiscreteDistribution(): pass @SKIP("abstract class") def test_sympy__stats__drv__DiscreteDistribution(): pass @SKIP("abstract class") def test_sympy__stats__drv__DiscreteDomain(): pass def test_sympy__stats__rv__RandomDomain(): from sympy.stats.rv import RandomDomain from sympy.sets.sets import FiniteSet assert _test_args(RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3))) def test_sympy__stats__rv__SingleDomain(): from sympy.stats.rv import SingleDomain from sympy.sets.sets import FiniteSet assert _test_args(SingleDomain(x, FiniteSet(1, 2, 3))) def test_sympy__stats__rv__ConditionalDomain(): from sympy.stats.rv import ConditionalDomain, RandomDomain from sympy.sets.sets import FiniteSet D = RandomDomain(FiniteSet(x), FiniteSet(1, 2)) assert _test_args(ConditionalDomain(D, x > 1)) def test_sympy__stats__rv__PSpace(): from sympy.stats.rv import PSpace, RandomDomain from sympy import FiniteSet D = RandomDomain(FiniteSet(x), FiniteSet(1, 2, 3, 4, 
5, 6)) assert _test_args(PSpace(D, die)) @SKIP("abstract Class") def test_sympy__stats__rv__SinglePSpace(): pass def test_sympy__stats__rv__RandomSymbol(): from sympy.stats.rv import RandomSymbol from sympy.stats.crv import SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) assert _test_args(RandomSymbol(x, A)) def test_sympy__stats__rv__ProductPSpace(): from sympy.stats.rv import ProductPSpace from sympy.stats.crv import SingleContinuousPSpace A = SingleContinuousPSpace(x, nd) B = SingleContinuousPSpace(y, nd) assert _test_args(ProductPSpace(A, B)) def test_sympy__stats__rv__ProductDomain(): from sympy.stats.rv import ProductDomain, SingleDomain D = SingleDomain(x, Interval(-oo, oo)) E = SingleDomain(y, Interval(0, oo)) assert _test_args(ProductDomain(D, E)) def test_sympy__stats__symbolic_probability__Probability(): from sympy.stats.symbolic_probability import Probability from sympy.stats import Normal X = Normal('X', 0, 1) assert _test_args(Probability(X > 0)) def test_sympy__stats__symbolic_probability__Expectation(): from sympy.stats.symbolic_probability import Expectation from sympy.stats import Normal X = Normal('X', 0, 1) assert _test_args(Expectation(X > 0)) def test_sympy__stats__symbolic_probability__Covariance(): from sympy.stats.symbolic_probability import Covariance from sympy.stats import Normal X = Normal('X', 0, 1) Y = Normal('Y', 0, 3) assert _test_args(Covariance(X, Y)) def test_sympy__stats__symbolic_probability__Variance(): from sympy.stats.symbolic_probability import Variance from sympy.stats import Normal X = Normal('X', 0, 1) assert _test_args(Variance(X)) def test_sympy__stats__frv_types__DiscreteUniformDistribution(): from sympy.stats.frv_types import DiscreteUniformDistribution from sympy.core.containers import Tuple assert _test_args(DiscreteUniformDistribution(Tuple(*list(range(6))))) def test_sympy__stats__frv_types__DieDistribution(): assert _test_args(die) def test_sympy__stats__frv_types__BernoulliDistribution(): from 
sympy.stats.frv_types import BernoulliDistribution assert _test_args(BernoulliDistribution(S.Half, 0, 1)) def test_sympy__stats__frv_types__BinomialDistribution(): from sympy.stats.frv_types import BinomialDistribution assert _test_args(BinomialDistribution(5, S.Half, 1, 0)) def test_sympy__stats__frv_types__HypergeometricDistribution(): from sympy.stats.frv_types import HypergeometricDistribution assert _test_args(HypergeometricDistribution(10, 5, 3)) def test_sympy__stats__frv_types__RademacherDistribution(): from sympy.stats.frv_types import RademacherDistribution assert _test_args(RademacherDistribution()) def test_sympy__stats__frv__FiniteDomain(): from sympy.stats.frv import FiniteDomain assert _test_args(FiniteDomain({(x, 1), (x, 2)})) # x can be 1 or 2 def test_sympy__stats__frv__SingleFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain assert _test_args(SingleFiniteDomain(x, {1, 2})) # x can be 1 or 2 def test_sympy__stats__frv__ProductFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain, ProductFiniteDomain xd = SingleFiniteDomain(x, {1, 2}) yd = SingleFiniteDomain(y, {1, 2}) assert _test_args(ProductFiniteDomain(xd, yd)) def test_sympy__stats__frv__ConditionalFiniteDomain(): from sympy.stats.frv import SingleFiniteDomain, ConditionalFiniteDomain xd = SingleFiniteDomain(x, {1, 2}) assert _test_args(ConditionalFiniteDomain(xd, x > 1)) def test_sympy__stats__frv__FinitePSpace(): from sympy.stats.frv import FinitePSpace, SingleFiniteDomain xd = SingleFiniteDomain(x, {1, 2, 3, 4, 5, 6}) p = 1.0/6 xd = SingleFiniteDomain(x, {1, 2}) assert _test_args(FinitePSpace(xd, {(x, 1): S.Half, (x, 2): S.Half})) def test_sympy__stats__frv__SingleFinitePSpace(): from sympy.stats.frv import SingleFinitePSpace from sympy import Symbol assert _test_args(SingleFinitePSpace(Symbol('x'), die)) def test_sympy__stats__frv__ProductFinitePSpace(): from sympy.stats.frv import SingleFinitePSpace, ProductFinitePSpace from sympy import Symbol xp = 
SingleFinitePSpace(Symbol('x'), die) yp = SingleFinitePSpace(Symbol('y'), die) assert _test_args(ProductFinitePSpace(xp, yp)) @SKIP("abstract class") def test_sympy__stats__frv__SingleFiniteDistribution(): pass @SKIP("abstract class") def test_sympy__stats__crv__ContinuousDistribution(): pass def test_sympy__stats__frv_types__FiniteDistributionHandmade(): from sympy.stats.frv_types import FiniteDistributionHandmade assert _test_args(FiniteDistributionHandmade({1: 1})) def test_sympy__stats__crv__ContinuousDistributionHandmade(): from sympy.stats.crv import ContinuousDistributionHandmade from sympy import Symbol, Interval assert _test_args(ContinuousDistributionHandmade(Symbol('x'), Interval(0, 2))) def test_sympy__stats__drv__DiscreteDistributionHandmade(): from sympy.stats.drv import DiscreteDistributionHandmade assert _test_args(DiscreteDistributionHandmade(x, S.Naturals)) def test_sympy__stats__rv__Density(): from sympy.stats.rv import Density from sympy.stats.crv_types import Normal assert _test_args(Density(Normal('x', 0, 1))) def test_sympy__stats__crv_types__ArcsinDistribution(): from sympy.stats.crv_types import ArcsinDistribution assert _test_args(ArcsinDistribution(0, 1)) def test_sympy__stats__crv_types__BeniniDistribution(): from sympy.stats.crv_types import BeniniDistribution assert _test_args(BeniniDistribution(1, 1, 1)) def test_sympy__stats__crv_types__BetaDistribution(): from sympy.stats.crv_types import BetaDistribution assert _test_args(BetaDistribution(1, 1)) def test_sympy__stats__crv_types__BetaPrimeDistribution(): from sympy.stats.crv_types import BetaPrimeDistribution assert _test_args(BetaPrimeDistribution(1, 1)) def test_sympy__stats__crv_types__CauchyDistribution(): from sympy.stats.crv_types import CauchyDistribution assert _test_args(CauchyDistribution(0, 1)) def test_sympy__stats__crv_types__ChiDistribution(): from sympy.stats.crv_types import ChiDistribution assert _test_args(ChiDistribution(1)) def 
test_sympy__stats__crv_types__ChiNoncentralDistribution(): from sympy.stats.crv_types import ChiNoncentralDistribution assert _test_args(ChiNoncentralDistribution(1,1)) def test_sympy__stats__crv_types__ChiSquaredDistribution(): from sympy.stats.crv_types import ChiSquaredDistribution assert _test_args(ChiSquaredDistribution(1)) def test_sympy__stats__crv_types__DagumDistribution(): from sympy.stats.crv_types import DagumDistribution assert _test_args(DagumDistribution(1, 1, 1)) def test_sympy__stats__crv_types__ExponentialDistribution(): from sympy.stats.crv_types import ExponentialDistribution assert _test_args(ExponentialDistribution(1)) def test_sympy__stats__crv_types__FDistributionDistribution(): from sympy.stats.crv_types import FDistributionDistribution assert _test_args(FDistributionDistribution(1, 1)) def test_sympy__stats__crv_types__FisherZDistribution(): from sympy.stats.crv_types import FisherZDistribution assert _test_args(FisherZDistribution(1, 1)) def test_sympy__stats__crv_types__FrechetDistribution(): from sympy.stats.crv_types import FrechetDistribution assert _test_args(FrechetDistribution(1, 1, 1)) def test_sympy__stats__crv_types__GammaInverseDistribution(): from sympy.stats.crv_types import GammaInverseDistribution assert _test_args(GammaInverseDistribution(1, 1)) def test_sympy__stats__crv_types__GammaDistribution(): from sympy.stats.crv_types import GammaDistribution assert _test_args(GammaDistribution(1, 1)) def test_sympy__stats__crv_types__GumbelDistribution(): from sympy.stats.crv_types import GumbelDistribution assert _test_args(GumbelDistribution(1, 1)) def test_sympy__stats__crv_types__GompertzDistribution(): from sympy.stats.crv_types import GompertzDistribution assert _test_args(GompertzDistribution(1, 1)) def test_sympy__stats__crv_types__KumaraswamyDistribution(): from sympy.stats.crv_types import KumaraswamyDistribution assert _test_args(KumaraswamyDistribution(1, 1)) def test_sympy__stats__crv_types__LaplaceDistribution(): 
from sympy.stats.crv_types import LaplaceDistribution assert _test_args(LaplaceDistribution(0, 1)) def test_sympy__stats__crv_types__LogisticDistribution(): from sympy.stats.crv_types import LogisticDistribution assert _test_args(LogisticDistribution(0, 1)) def test_sympy__stats__crv_types__LogNormalDistribution(): from sympy.stats.crv_types import LogNormalDistribution assert _test_args(LogNormalDistribution(0, 1)) def test_sympy__stats__crv_types__MaxwellDistribution(): from sympy.stats.crv_types import MaxwellDistribution assert _test_args(MaxwellDistribution(1)) def test_sympy__stats__crv_types__NakagamiDistribution(): from sympy.stats.crv_types import NakagamiDistribution assert _test_args(NakagamiDistribution(1, 1)) def test_sympy__stats__crv_types__NormalDistribution(): from sympy.stats.crv_types import NormalDistribution assert _test_args(NormalDistribution(0, 1)) def test_sympy__stats__crv_types__ParetoDistribution(): from sympy.stats.crv_types import ParetoDistribution assert _test_args(ParetoDistribution(1, 1)) def test_sympy__stats__crv_types__QuadraticUDistribution(): from sympy.stats.crv_types import QuadraticUDistribution assert _test_args(QuadraticUDistribution(1, 2)) def test_sympy__stats__crv_types__RaisedCosineDistribution(): from sympy.stats.crv_types import RaisedCosineDistribution assert _test_args(RaisedCosineDistribution(1, 1)) def test_sympy__stats__crv_types__RayleighDistribution(): from sympy.stats.crv_types import RayleighDistribution assert _test_args(RayleighDistribution(1)) def test_sympy__stats__crv_types__ShiftedGompertzDistribution(): from sympy.stats.crv_types import ShiftedGompertzDistribution assert _test_args(ShiftedGompertzDistribution(1, 1)) def test_sympy__stats__crv_types__StudentTDistribution(): from sympy.stats.crv_types import StudentTDistribution assert _test_args(StudentTDistribution(1)) def test_sympy__stats__crv_types__TrapezoidalDistribution(): from sympy.stats.crv_types import TrapezoidalDistribution assert 
_test_args(TrapezoidalDistribution(1, 2, 3, 4)) def test_sympy__stats__crv_types__TriangularDistribution(): from sympy.stats.crv_types import TriangularDistribution assert _test_args(TriangularDistribution(-1, 0, 1)) def test_sympy__stats__crv_types__UniformDistribution(): from sympy.stats.crv_types import UniformDistribution assert _test_args(UniformDistribution(0, 1)) def test_sympy__stats__crv_types__UniformSumDistribution(): from sympy.stats.crv_types import UniformSumDistribution assert _test_args(UniformSumDistribution(1)) def test_sympy__stats__crv_types__VonMisesDistribution(): from sympy.stats.crv_types import VonMisesDistribution assert _test_args(VonMisesDistribution(1, 1)) def test_sympy__stats__crv_types__WeibullDistribution(): from sympy.stats.crv_types import WeibullDistribution assert _test_args(WeibullDistribution(1, 1)) def test_sympy__stats__crv_types__WignerSemicircleDistribution(): from sympy.stats.crv_types import WignerSemicircleDistribution assert _test_args(WignerSemicircleDistribution(1)) def test_sympy__stats__drv_types__PoissonDistribution(): from sympy.stats.drv_types import PoissonDistribution assert _test_args(PoissonDistribution(1)) def test_sympy__stats__drv_types__GeometricDistribution(): from sympy.stats.drv_types import GeometricDistribution assert _test_args(GeometricDistribution(.5)) def test_sympy__core__symbol__Dummy(): from sympy.core.symbol import Dummy assert _test_args(Dummy('t')) def test_sympy__core__symbol__Symbol(): from sympy.core.symbol import Symbol assert _test_args(Symbol('t')) def test_sympy__core__symbol__Wild(): from sympy.core.symbol import Wild assert _test_args(Wild('x', exclude=[x])) @SKIP("abstract class") def test_sympy__functions__combinatorial__factorials__CombinatorialFunction(): pass def test_sympy__functions__combinatorial__factorials__FallingFactorial(): from sympy.functions.combinatorial.factorials import FallingFactorial assert _test_args(FallingFactorial(2, x)) def 
test_sympy__functions__combinatorial__factorials__MultiFactorial(): from sympy.functions.combinatorial.factorials import MultiFactorial assert _test_args(MultiFactorial(x)) def test_sympy__functions__combinatorial__factorials__RisingFactorial(): from sympy.functions.combinatorial.factorials import RisingFactorial assert _test_args(RisingFactorial(2, x)) def test_sympy__functions__combinatorial__factorials__binomial(): from sympy.functions.combinatorial.factorials import binomial assert _test_args(binomial(2, x)) def test_sympy__functions__combinatorial__factorials__subfactorial(): from sympy.functions.combinatorial.factorials import subfactorial assert _test_args(subfactorial(1)) def test_sympy__functions__combinatorial__factorials__factorial(): from sympy.functions.combinatorial.factorials import factorial assert _test_args(factorial(x)) def test_sympy__functions__combinatorial__factorials__factorial2(): from sympy.functions.combinatorial.factorials import factorial2 assert _test_args(factorial2(x)) def test_sympy__functions__combinatorial__numbers__bell(): from sympy.functions.combinatorial.numbers import bell assert _test_args(bell(x, y)) def test_sympy__functions__combinatorial__numbers__bernoulli(): from sympy.functions.combinatorial.numbers import bernoulli assert _test_args(bernoulli(x)) def test_sympy__functions__combinatorial__numbers__catalan(): from sympy.functions.combinatorial.numbers import catalan assert _test_args(catalan(x)) def test_sympy__functions__combinatorial__numbers__genocchi(): from sympy.functions.combinatorial.numbers import genocchi assert _test_args(genocchi(x)) def test_sympy__functions__combinatorial__numbers__euler(): from sympy.functions.combinatorial.numbers import euler assert _test_args(euler(x)) def test_sympy__functions__combinatorial__numbers__fibonacci(): from sympy.functions.combinatorial.numbers import fibonacci assert _test_args(fibonacci(x)) def test_sympy__functions__combinatorial__numbers__harmonic(): from 
sympy.functions.combinatorial.numbers import harmonic assert _test_args(harmonic(x, 2)) def test_sympy__functions__combinatorial__numbers__lucas(): from sympy.functions.combinatorial.numbers import lucas assert _test_args(lucas(x)) def test_sympy__functions__combinatorial__numbers__partition(): from sympy.core.symbol import Symbol from sympy.functions.combinatorial.numbers import partition assert _test_args(partition(Symbol('a', integer=True))) def test_sympy__functions__elementary__complexes__Abs(): from sympy.functions.elementary.complexes import Abs assert _test_args(Abs(x)) def test_sympy__functions__elementary__complexes__adjoint(): from sympy.functions.elementary.complexes import adjoint assert _test_args(adjoint(x)) def test_sympy__functions__elementary__complexes__arg(): from sympy.functions.elementary.complexes import arg assert _test_args(arg(x)) def test_sympy__functions__elementary__complexes__conjugate(): from sympy.functions.elementary.complexes import conjugate assert _test_args(conjugate(x)) def test_sympy__functions__elementary__complexes__im(): from sympy.functions.elementary.complexes import im assert _test_args(im(x)) def test_sympy__functions__elementary__complexes__re(): from sympy.functions.elementary.complexes import re assert _test_args(re(x)) def test_sympy__functions__elementary__complexes__sign(): from sympy.functions.elementary.complexes import sign assert _test_args(sign(x)) def test_sympy__functions__elementary__complexes__polar_lift(): from sympy.functions.elementary.complexes import polar_lift assert _test_args(polar_lift(x)) def test_sympy__functions__elementary__complexes__periodic_argument(): from sympy.functions.elementary.complexes import periodic_argument assert _test_args(periodic_argument(x, y)) def test_sympy__functions__elementary__complexes__principal_branch(): from sympy.functions.elementary.complexes import principal_branch assert _test_args(principal_branch(x, y)) def 
test_sympy__functions__elementary__complexes__transpose(): from sympy.functions.elementary.complexes import transpose assert _test_args(transpose(x)) def test_sympy__functions__elementary__exponential__LambertW(): from sympy.functions.elementary.exponential import LambertW assert _test_args(LambertW(2)) @SKIP("abstract class") def test_sympy__functions__elementary__exponential__ExpBase(): pass def test_sympy__functions__elementary__exponential__exp(): from sympy.functions.elementary.exponential import exp assert _test_args(exp(2)) def test_sympy__functions__elementary__exponential__exp_polar(): from sympy.functions.elementary.exponential import exp_polar assert _test_args(exp_polar(2)) def test_sympy__functions__elementary__exponential__log(): from sympy.functions.elementary.exponential import log assert _test_args(log(2)) @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__HyperbolicFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__ReciprocalHyperbolicFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__hyperbolic__InverseHyperbolicFunction(): pass def test_sympy__functions__elementary__hyperbolic__acosh(): from sympy.functions.elementary.hyperbolic import acosh assert _test_args(acosh(2)) def test_sympy__functions__elementary__hyperbolic__acoth(): from sympy.functions.elementary.hyperbolic import acoth assert _test_args(acoth(2)) def test_sympy__functions__elementary__hyperbolic__asinh(): from sympy.functions.elementary.hyperbolic import asinh assert _test_args(asinh(2)) def test_sympy__functions__elementary__hyperbolic__atanh(): from sympy.functions.elementary.hyperbolic import atanh assert _test_args(atanh(2)) def test_sympy__functions__elementary__hyperbolic__asech(): from sympy.functions.elementary.hyperbolic import asech assert _test_args(asech(2)) def test_sympy__functions__elementary__hyperbolic__acsch(): from sympy.functions.elementary.hyperbolic import acsch 
assert _test_args(acsch(2)) def test_sympy__functions__elementary__hyperbolic__cosh(): from sympy.functions.elementary.hyperbolic import cosh assert _test_args(cosh(2)) def test_sympy__functions__elementary__hyperbolic__coth(): from sympy.functions.elementary.hyperbolic import coth assert _test_args(coth(2)) def test_sympy__functions__elementary__hyperbolic__csch(): from sympy.functions.elementary.hyperbolic import csch assert _test_args(csch(2)) def test_sympy__functions__elementary__hyperbolic__sech(): from sympy.functions.elementary.hyperbolic import sech assert _test_args(sech(2)) def test_sympy__functions__elementary__hyperbolic__sinh(): from sympy.functions.elementary.hyperbolic import sinh assert _test_args(sinh(2)) def test_sympy__functions__elementary__hyperbolic__tanh(): from sympy.functions.elementary.hyperbolic import tanh assert _test_args(tanh(2)) @SKIP("does this work at all?") def test_sympy__functions__elementary__integers__RoundFunction(): from sympy.functions.elementary.integers import RoundFunction assert _test_args(RoundFunction()) def test_sympy__functions__elementary__integers__ceiling(): from sympy.functions.elementary.integers import ceiling assert _test_args(ceiling(x)) def test_sympy__functions__elementary__integers__floor(): from sympy.functions.elementary.integers import floor assert _test_args(floor(x)) def test_sympy__functions__elementary__integers__frac(): from sympy.functions.elementary.integers import frac assert _test_args(frac(x)) def test_sympy__functions__elementary__miscellaneous__IdentityFunction(): from sympy.functions.elementary.miscellaneous import IdentityFunction assert _test_args(IdentityFunction()) def test_sympy__functions__elementary__miscellaneous__Max(): from sympy.functions.elementary.miscellaneous import Max assert _test_args(Max(x, 2)) def test_sympy__functions__elementary__miscellaneous__Min(): from sympy.functions.elementary.miscellaneous import Min assert _test_args(Min(x, 2)) @SKIP("abstract class") def 
test_sympy__functions__elementary__miscellaneous__MinMaxBase(): pass def test_sympy__functions__elementary__piecewise__ExprCondPair(): from sympy.functions.elementary.piecewise import ExprCondPair assert _test_args(ExprCondPair(1, True)) def test_sympy__functions__elementary__piecewise__Piecewise(): from sympy.functions.elementary.piecewise import Piecewise assert _test_args(Piecewise((1, x >= 0), (0, True))) @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__TrigonometricFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__ReciprocalTrigonometricFunction(): pass @SKIP("abstract class") def test_sympy__functions__elementary__trigonometric__InverseTrigonometricFunction(): pass def test_sympy__functions__elementary__trigonometric__acos(): from sympy.functions.elementary.trigonometric import acos assert _test_args(acos(2)) def test_sympy__functions__elementary__trigonometric__acot(): from sympy.functions.elementary.trigonometric import acot assert _test_args(acot(2)) def test_sympy__functions__elementary__trigonometric__asin(): from sympy.functions.elementary.trigonometric import asin assert _test_args(asin(2)) def test_sympy__functions__elementary__trigonometric__asec(): from sympy.functions.elementary.trigonometric import asec assert _test_args(asec(2)) def test_sympy__functions__elementary__trigonometric__acsc(): from sympy.functions.elementary.trigonometric import acsc assert _test_args(acsc(2)) def test_sympy__functions__elementary__trigonometric__atan(): from sympy.functions.elementary.trigonometric import atan assert _test_args(atan(2)) def test_sympy__functions__elementary__trigonometric__atan2(): from sympy.functions.elementary.trigonometric import atan2 assert _test_args(atan2(2, 3)) def test_sympy__functions__elementary__trigonometric__cos(): from sympy.functions.elementary.trigonometric import cos assert _test_args(cos(2)) def test_sympy__functions__elementary__trigonometric__csc(): from 
sympy.functions.elementary.trigonometric import csc assert _test_args(csc(2)) def test_sympy__functions__elementary__trigonometric__cot(): from sympy.functions.elementary.trigonometric import cot assert _test_args(cot(2)) def test_sympy__functions__elementary__trigonometric__sin(): assert _test_args(sin(2)) def test_sympy__functions__elementary__trigonometric__sinc(): from sympy.functions.elementary.trigonometric import sinc assert _test_args(sinc(2)) def test_sympy__functions__elementary__trigonometric__sec(): from sympy.functions.elementary.trigonometric import sec assert _test_args(sec(2)) def test_sympy__functions__elementary__trigonometric__tan(): from sympy.functions.elementary.trigonometric import tan assert _test_args(tan(2)) @SKIP("abstract class") def test_sympy__functions__special__bessel__BesselBase(): pass @SKIP("abstract class") def test_sympy__functions__special__bessel__SphericalBesselBase(): pass @SKIP("abstract class") def test_sympy__functions__special__bessel__SphericalHankelBase(): pass def test_sympy__functions__special__bessel__besseli(): from sympy.functions.special.bessel import besseli assert _test_args(besseli(x, 1)) def test_sympy__functions__special__bessel__besselj(): from sympy.functions.special.bessel import besselj assert _test_args(besselj(x, 1)) def test_sympy__functions__special__bessel__besselk(): from sympy.functions.special.bessel import besselk assert _test_args(besselk(x, 1)) def test_sympy__functions__special__bessel__bessely(): from sympy.functions.special.bessel import bessely assert _test_args(bessely(x, 1)) def test_sympy__functions__special__bessel__hankel1(): from sympy.functions.special.bessel import hankel1 assert _test_args(hankel1(x, 1)) def test_sympy__functions__special__bessel__hankel2(): from sympy.functions.special.bessel import hankel2 assert _test_args(hankel2(x, 1)) def test_sympy__functions__special__bessel__jn(): from sympy.functions.special.bessel import jn assert _test_args(jn(0, x)) def 
test_sympy__functions__special__bessel__yn(): from sympy.functions.special.bessel import yn assert _test_args(yn(0, x)) def test_sympy__functions__special__bessel__hn1(): from sympy.functions.special.bessel import hn1 assert _test_args(hn1(0, x)) def test_sympy__functions__special__bessel__hn2(): from sympy.functions.special.bessel import hn2 assert _test_args(hn2(0, x)) def test_sympy__functions__special__bessel__AiryBase(): pass def test_sympy__functions__special__bessel__airyai(): from sympy.functions.special.bessel import airyai assert _test_args(airyai(2)) def test_sympy__functions__special__bessel__airybi(): from sympy.functions.special.bessel import airybi assert _test_args(airybi(2)) def test_sympy__functions__special__bessel__airyaiprime(): from sympy.functions.special.bessel import airyaiprime assert _test_args(airyaiprime(2)) def test_sympy__functions__special__bessel__airybiprime(): from sympy.functions.special.bessel import airybiprime assert _test_args(airybiprime(2)) def test_sympy__functions__special__elliptic_integrals__elliptic_k(): from sympy.functions.special.elliptic_integrals import elliptic_k as K assert _test_args(K(x)) def test_sympy__functions__special__elliptic_integrals__elliptic_f(): from sympy.functions.special.elliptic_integrals import elliptic_f as F assert _test_args(F(x, y)) def test_sympy__functions__special__elliptic_integrals__elliptic_e(): from sympy.functions.special.elliptic_integrals import elliptic_e as E assert _test_args(E(x)) assert _test_args(E(x, y)) def test_sympy__functions__special__elliptic_integrals__elliptic_pi(): from sympy.functions.special.elliptic_integrals import elliptic_pi as P assert _test_args(P(x, y)) assert _test_args(P(x, y, z)) def test_sympy__functions__special__delta_functions__DiracDelta(): from sympy.functions.special.delta_functions import DiracDelta assert _test_args(DiracDelta(x, 1)) def test_sympy__functions__special__singularity_functions__SingularityFunction(): from 
sympy.functions.special.singularity_functions import SingularityFunction assert _test_args(SingularityFunction(x, y, z)) def test_sympy__functions__special__delta_functions__Heaviside(): from sympy.functions.special.delta_functions import Heaviside assert _test_args(Heaviside(x)) def test_sympy__functions__special__error_functions__erf(): from sympy.functions.special.error_functions import erf assert _test_args(erf(2)) def test_sympy__functions__special__error_functions__erfc(): from sympy.functions.special.error_functions import erfc assert _test_args(erfc(2)) def test_sympy__functions__special__error_functions__erfi(): from sympy.functions.special.error_functions import erfi assert _test_args(erfi(2)) def test_sympy__functions__special__error_functions__erf2(): from sympy.functions.special.error_functions import erf2 assert _test_args(erf2(2, 3)) def test_sympy__functions__special__error_functions__erfinv(): from sympy.functions.special.error_functions import erfinv assert _test_args(erfinv(2)) def test_sympy__functions__special__error_functions__erfcinv(): from sympy.functions.special.error_functions import erfcinv assert _test_args(erfcinv(2)) def test_sympy__functions__special__error_functions__erf2inv(): from sympy.functions.special.error_functions import erf2inv assert _test_args(erf2inv(2, 3)) @SKIP("abstract class") def test_sympy__functions__special__error_functions__FresnelIntegral(): pass def test_sympy__functions__special__error_functions__fresnels(): from sympy.functions.special.error_functions import fresnels assert _test_args(fresnels(2)) def test_sympy__functions__special__error_functions__fresnelc(): from sympy.functions.special.error_functions import fresnelc assert _test_args(fresnelc(2)) def test_sympy__functions__special__error_functions__erfs(): from sympy.functions.special.error_functions import _erfs assert _test_args(_erfs(2)) def test_sympy__functions__special__error_functions__Ei(): from sympy.functions.special.error_functions import Ei 
assert _test_args(Ei(2)) def test_sympy__functions__special__error_functions__li(): from sympy.functions.special.error_functions import li assert _test_args(li(2)) def test_sympy__functions__special__error_functions__Li(): from sympy.functions.special.error_functions import Li assert _test_args(Li(2)) @SKIP("abstract class") def test_sympy__functions__special__error_functions__TrigonometricIntegral(): pass def test_sympy__functions__special__error_functions__Si(): from sympy.functions.special.error_functions import Si assert _test_args(Si(2)) def test_sympy__functions__special__error_functions__Ci(): from sympy.functions.special.error_functions import Ci assert _test_args(Ci(2)) def test_sympy__functions__special__error_functions__Shi(): from sympy.functions.special.error_functions import Shi assert _test_args(Shi(2)) def test_sympy__functions__special__error_functions__Chi(): from sympy.functions.special.error_functions import Chi assert _test_args(Chi(2)) def test_sympy__functions__special__error_functions__expint(): from sympy.functions.special.error_functions import expint assert _test_args(expint(y, x)) def test_sympy__functions__special__gamma_functions__gamma(): from sympy.functions.special.gamma_functions import gamma assert _test_args(gamma(x)) def test_sympy__functions__special__gamma_functions__loggamma(): from sympy.functions.special.gamma_functions import loggamma assert _test_args(loggamma(2)) def test_sympy__functions__special__gamma_functions__lowergamma(): from sympy.functions.special.gamma_functions import lowergamma assert _test_args(lowergamma(x, 2)) def test_sympy__functions__special__gamma_functions__polygamma(): from sympy.functions.special.gamma_functions import polygamma assert _test_args(polygamma(x, 2)) def test_sympy__functions__special__gamma_functions__uppergamma(): from sympy.functions.special.gamma_functions import uppergamma assert _test_args(uppergamma(x, 2)) def test_sympy__functions__special__beta_functions__beta(): from 
sympy.functions.special.beta_functions import beta assert _test_args(beta(x, x)) def test_sympy__functions__special__mathieu_functions__MathieuBase(): pass def test_sympy__functions__special__mathieu_functions__mathieus(): from sympy.functions.special.mathieu_functions import mathieus assert _test_args(mathieus(1, 1, 1)) def test_sympy__functions__special__mathieu_functions__mathieuc(): from sympy.functions.special.mathieu_functions import mathieuc assert _test_args(mathieuc(1, 1, 1)) def test_sympy__functions__special__mathieu_functions__mathieusprime(): from sympy.functions.special.mathieu_functions import mathieusprime assert _test_args(mathieusprime(1, 1, 1)) def test_sympy__functions__special__mathieu_functions__mathieucprime(): from sympy.functions.special.mathieu_functions import mathieucprime assert _test_args(mathieucprime(1, 1, 1)) @SKIP("abstract class") def test_sympy__functions__special__hyper__TupleParametersBase(): pass @SKIP("abstract class") def test_sympy__functions__special__hyper__TupleArg(): pass def test_sympy__functions__special__hyper__hyper(): from sympy.functions.special.hyper import hyper assert _test_args(hyper([1, 2, 3], [4, 5], x)) def test_sympy__functions__special__hyper__meijerg(): from sympy.functions.special.hyper import meijerg assert _test_args(meijerg([1, 2, 3], [4, 5], [6], [], x)) @SKIP("abstract class") def test_sympy__functions__special__hyper__HyperRep(): pass def test_sympy__functions__special__hyper__HyperRep_power1(): from sympy.functions.special.hyper import HyperRep_power1 assert _test_args(HyperRep_power1(x, y)) def test_sympy__functions__special__hyper__HyperRep_power2(): from sympy.functions.special.hyper import HyperRep_power2 assert _test_args(HyperRep_power2(x, y)) def test_sympy__functions__special__hyper__HyperRep_log1(): from sympy.functions.special.hyper import HyperRep_log1 assert _test_args(HyperRep_log1(x)) def test_sympy__functions__special__hyper__HyperRep_atanh(): from sympy.functions.special.hyper 
import HyperRep_atanh assert _test_args(HyperRep_atanh(x)) def test_sympy__functions__special__hyper__HyperRep_asin1(): from sympy.functions.special.hyper import HyperRep_asin1 assert _test_args(HyperRep_asin1(x)) def test_sympy__functions__special__hyper__HyperRep_asin2(): from sympy.functions.special.hyper import HyperRep_asin2 assert _test_args(HyperRep_asin2(x)) def test_sympy__functions__special__hyper__HyperRep_sqrts1(): from sympy.functions.special.hyper import HyperRep_sqrts1 assert _test_args(HyperRep_sqrts1(x, y)) def test_sympy__functions__special__hyper__HyperRep_sqrts2(): from sympy.functions.special.hyper import HyperRep_sqrts2 assert _test_args(HyperRep_sqrts2(x, y)) def test_sympy__functions__special__hyper__HyperRep_log2(): from sympy.functions.special.hyper import HyperRep_log2 assert _test_args(HyperRep_log2(x)) def test_sympy__functions__special__hyper__HyperRep_cosasin(): from sympy.functions.special.hyper import HyperRep_cosasin assert _test_args(HyperRep_cosasin(x, y)) def test_sympy__functions__special__hyper__HyperRep_sinasin(): from sympy.functions.special.hyper import HyperRep_sinasin assert _test_args(HyperRep_sinasin(x, y)) @SKIP("abstract class") def test_sympy__functions__special__polynomials__OrthogonalPolynomial(): pass def test_sympy__functions__special__polynomials__jacobi(): from sympy.functions.special.polynomials import jacobi assert _test_args(jacobi(x, 2, 2, 2)) def test_sympy__functions__special__polynomials__gegenbauer(): from sympy.functions.special.polynomials import gegenbauer assert _test_args(gegenbauer(x, 2, 2)) def test_sympy__functions__special__polynomials__chebyshevt(): from sympy.functions.special.polynomials import chebyshevt assert _test_args(chebyshevt(x, 2)) def test_sympy__functions__special__polynomials__chebyshevt_root(): from sympy.functions.special.polynomials import chebyshevt_root assert _test_args(chebyshevt_root(3, 2)) def test_sympy__functions__special__polynomials__chebyshevu(): from 
sympy.functions.special.polynomials import chebyshevu assert _test_args(chebyshevu(x, 2)) def test_sympy__functions__special__polynomials__chebyshevu_root(): from sympy.functions.special.polynomials import chebyshevu_root assert _test_args(chebyshevu_root(3, 2)) def test_sympy__functions__special__polynomials__hermite(): from sympy.functions.special.polynomials import hermite assert _test_args(hermite(x, 2)) def test_sympy__functions__special__polynomials__legendre(): from sympy.functions.special.polynomials import legendre assert _test_args(legendre(x, 2)) def test_sympy__functions__special__polynomials__assoc_legendre(): from sympy.functions.special.polynomials import assoc_legendre assert _test_args(assoc_legendre(x, 0, y)) def test_sympy__functions__special__polynomials__laguerre(): from sympy.functions.special.polynomials import laguerre assert _test_args(laguerre(x, 2)) def test_sympy__functions__special__polynomials__assoc_laguerre(): from sympy.functions.special.polynomials import assoc_laguerre assert _test_args(assoc_laguerre(x, 0, y)) def test_sympy__functions__special__spherical_harmonics__Ynm(): from sympy.functions.special.spherical_harmonics import Ynm assert _test_args(Ynm(1, 1, x, y)) def test_sympy__functions__special__spherical_harmonics__Znm(): from sympy.functions.special.spherical_harmonics import Znm assert _test_args(Znm(1, 1, x, y)) def test_sympy__functions__special__tensor_functions__LeviCivita(): from sympy.functions.special.tensor_functions import LeviCivita assert _test_args(LeviCivita(x, y, 2)) def test_sympy__functions__special__tensor_functions__KroneckerDelta(): from sympy.functions.special.tensor_functions import KroneckerDelta assert _test_args(KroneckerDelta(x, y)) def test_sympy__functions__special__zeta_functions__dirichlet_eta(): from sympy.functions.special.zeta_functions import dirichlet_eta assert _test_args(dirichlet_eta(x)) def test_sympy__functions__special__zeta_functions__zeta(): from 
sympy.functions.special.zeta_functions import zeta assert _test_args(zeta(101))


# NOTE(review): _test_args is defined earlier in this file; these tests only
# exercise construction with representative arguments — confirm its exact
# contract (presumably the Basic.args / func(*args) rebuild invariant) there.
def test_sympy__functions__special__zeta_functions__lerchphi():
    from sympy.functions.special.zeta_functions import lerchphi
    assert _test_args(lerchphi(x, y, z))


def test_sympy__functions__special__zeta_functions__polylog():
    from sympy.functions.special.zeta_functions import polylog
    assert _test_args(polylog(x, y))


def test_sympy__functions__special__zeta_functions__stieltjes():
    from sympy.functions.special.zeta_functions import stieltjes
    assert _test_args(stieltjes(x, y))


def test_sympy__integrals__integrals__Integral():
    from sympy.integrals.integrals import Integral
    assert _test_args(Integral(2, (x, 0, 1)))


def test_sympy__integrals__risch__NonElementaryIntegral():
    from sympy.integrals.risch import NonElementaryIntegral
    assert _test_args(NonElementaryIntegral(exp(-x**2), x))


# Abstract transform base classes cannot be instantiated, hence the skips.
@SKIP("abstract class")
def test_sympy__integrals__transforms__IntegralTransform():
    pass


def test_sympy__integrals__transforms__MellinTransform():
    from sympy.integrals.transforms import MellinTransform
    assert _test_args(MellinTransform(2, x, y))


def test_sympy__integrals__transforms__InverseMellinTransform():
    from sympy.integrals.transforms import InverseMellinTransform
    # Extra args 0, 1 presumably give the fundamental strip — confirm in
    # sympy.integrals.transforms.
    assert _test_args(InverseMellinTransform(2, x, y, 0, 1))


def test_sympy__integrals__transforms__LaplaceTransform():
    from sympy.integrals.transforms import LaplaceTransform
    assert _test_args(LaplaceTransform(2, x, y))


def test_sympy__integrals__transforms__InverseLaplaceTransform():
    from sympy.integrals.transforms import InverseLaplaceTransform
    assert _test_args(InverseLaplaceTransform(2, x, y, 0))


@SKIP("abstract class")
def test_sympy__integrals__transforms__FourierTypeTransform():
    pass


def test_sympy__integrals__transforms__InverseFourierTransform():
    from sympy.integrals.transforms import InverseFourierTransform
    assert _test_args(InverseFourierTransform(2, x, y))


def test_sympy__integrals__transforms__FourierTransform(): from
sympy.integrals.transforms import FourierTransform assert _test_args(FourierTransform(2, x, y)) @SKIP("abstract class") def test_sympy__integrals__transforms__SineCosineTypeTransform(): pass def test_sympy__integrals__transforms__InverseSineTransform(): from sympy.integrals.transforms import InverseSineTransform assert _test_args(InverseSineTransform(2, x, y)) def test_sympy__integrals__transforms__SineTransform(): from sympy.integrals.transforms import SineTransform assert _test_args(SineTransform(2, x, y)) def test_sympy__integrals__transforms__InverseCosineTransform(): from sympy.integrals.transforms import InverseCosineTransform assert _test_args(InverseCosineTransform(2, x, y)) def test_sympy__integrals__transforms__CosineTransform(): from sympy.integrals.transforms import CosineTransform assert _test_args(CosineTransform(2, x, y)) @SKIP("abstract class") def test_sympy__integrals__transforms__HankelTypeTransform(): pass def test_sympy__integrals__transforms__InverseHankelTransform(): from sympy.integrals.transforms import InverseHankelTransform assert _test_args(InverseHankelTransform(2, x, y, 0)) def test_sympy__integrals__transforms__HankelTransform(): from sympy.integrals.transforms import HankelTransform assert _test_args(HankelTransform(2, x, y, 0)) @XFAIL def test_sympy__liealgebras__cartan_type__CartanType_generator(): from sympy.liealgebras.cartan_type import CartanType_generator assert _test_args(CartanType_generator("A2")) @XFAIL def test_sympy__liealgebras__cartan_type__Standard_Cartan(): from sympy.liealgebras.cartan_type import Standard_Cartan assert _test_args(Standard_Cartan("A", 2)) @XFAIL def test_sympy__liealgebras__weyl_group__WeylGroup(): from sympy.liealgebras.weyl_group import WeylGroup assert _test_args(WeylGroup("B4")) @XFAIL def test_sympy__liealgebras__root_system__RootSystem(): from sympy.liealgebras.root_system import RootSystem assert _test_args(RootSystem("A2")) @XFAIL def test_sympy__liealgebras__type_a__TypeA(): from 
sympy.liealgebras.type_a import TypeA assert _test_args(TypeA(2)) @XFAIL def test_sympy__liealgebras__type_b__TypeB(): from sympy.liealgebras.type_b import TypeB assert _test_args(TypeB(4)) @XFAIL def test_sympy__liealgebras__type_c__TypeC(): from sympy.liealgebras.type_c import TypeC assert _test_args(TypeC(4)) @XFAIL def test_sympy__liealgebras__type_d__TypeD(): from sympy.liealgebras.type_d import TypeD assert _test_args(TypeD(4)) @XFAIL def test_sympy__liealgebras__type_e__TypeE(): from sympy.liealgebras.type_e import TypeE assert _test_args(TypeE(6)) @XFAIL def test_sympy__liealgebras__type_f__TypeF(): from sympy.liealgebras.type_f import TypeF assert _test_args(TypeF(4)) @XFAIL def test_sympy__liealgebras__type_g__TypeG(): from sympy.liealgebras.type_g import TypeG assert _test_args(TypeG(2)) def test_sympy__logic__boolalg__And(): from sympy.logic.boolalg import And assert _test_args(And(x, y, 1)) @SKIP("abstract class") def test_sympy__logic__boolalg__Boolean(): pass def test_sympy__logic__boolalg__BooleanFunction(): from sympy.logic.boolalg import BooleanFunction assert _test_args(BooleanFunction(1, 2, 3)) @SKIP("abstract class") def test_sympy__logic__boolalg__BooleanAtom(): pass def test_sympy__logic__boolalg__BooleanTrue(): from sympy.logic.boolalg import true assert _test_args(true) def test_sympy__logic__boolalg__BooleanFalse(): from sympy.logic.boolalg import false assert _test_args(false) def test_sympy__logic__boolalg__Equivalent(): from sympy.logic.boolalg import Equivalent assert _test_args(Equivalent(x, 2)) def test_sympy__logic__boolalg__ITE(): from sympy.logic.boolalg import ITE assert _test_args(ITE(x, y, 1)) def test_sympy__logic__boolalg__Implies(): from sympy.logic.boolalg import Implies assert _test_args(Implies(x, y)) def test_sympy__logic__boolalg__Nand(): from sympy.logic.boolalg import Nand assert _test_args(Nand(x, y, 1)) def test_sympy__logic__boolalg__Nor(): from sympy.logic.boolalg import Nor assert _test_args(Nor(x, y)) def 
test_sympy__logic__boolalg__Not(): from sympy.logic.boolalg import Not assert _test_args(Not(x))


def test_sympy__logic__boolalg__Or():
    from sympy.logic.boolalg import Or
    assert _test_args(Or(x, y))


def test_sympy__logic__boolalg__Xor():
    from sympy.logic.boolalg import Xor
    assert _test_args(Xor(x, y, 2))


def test_sympy__logic__boolalg__Xnor():
    from sympy.logic.boolalg import Xnor
    assert _test_args(Xnor(x, y, 2))


def test_sympy__matrices__matrices__DeferredVector():
    from sympy.matrices.matrices import DeferredVector
    assert _test_args(DeferredVector("X"))


@SKIP("abstract class")
def test_sympy__matrices__expressions__matexpr__MatrixBase():
    pass


def test_sympy__matrices__immutable__ImmutableDenseMatrix():
    from sympy.matrices.immutable import ImmutableDenseMatrix
    # Several construction forms: nested list, (rows, cols, flat list),
    # and (rows, cols, callable).
    m = ImmutableDenseMatrix([[1, 2], [3, 4]])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableDenseMatrix(1, 1, [1])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableDenseMatrix(2, 2, lambda i, j: 1)
    assert m[0, 0] is S.One
    # The callable's i, j must be sympified for the entry to stay exact.
    m = ImmutableDenseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j))
    assert m[1, 1] is S.One  # true div. will give 1.0 if i,j not sympified
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))


def test_sympy__matrices__immutable__ImmutableSparseMatrix():
    from sympy.matrices.immutable import ImmutableSparseMatrix
    # Same construction forms as the dense test, plus a dict-of-entries form.
    m = ImmutableSparseMatrix([[1, 2], [3, 4]])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(1, 1, {(0, 0): 1})
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(1, 1, [1])
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))
    m = ImmutableSparseMatrix(2, 2, lambda i, j: 1)
    assert m[0, 0] is S.One
    m = ImmutableSparseMatrix(2, 2, lambda i, j: 1/(1 + i) + 1/(1 + j))
    assert m[1, 1] is S.One  # true div. will give 1.0 if i,j not sympified
    assert _test_args(m)
    assert _test_args(Basic(*list(m)))


def test_sympy__matrices__expressions__slice__MatrixSlice():
    from sympy.matrices.expressions.slice import MatrixSlice
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', 4, 4)
    assert _test_args(MatrixSlice(X, (0, 2), (0, 2)))


def test_sympy__matrices__expressions__blockmatrix__BlockDiagMatrix():
    from sympy.matrices.expressions.blockmatrix import BlockDiagMatrix
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, x)
    Y = MatrixSymbol('Y', y, y)
    assert _test_args(BlockDiagMatrix(X, Y))


def test_sympy__matrices__expressions__blockmatrix__BlockMatrix():
    from sympy.matrices.expressions.blockmatrix import BlockMatrix
    from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix
    X = MatrixSymbol('X', x, x)
    Y = MatrixSymbol('Y', y, y)
    Z = MatrixSymbol('Z', x, y)
    # NOTE: O here shadows nothing relevant; it is the y-by-x zero block.
    O = ZeroMatrix(y, x)
    assert _test_args(BlockMatrix([[X, Z], [O, Y]]))


def test_sympy__matrices__expressions__inverse__Inverse():
    from sympy.matrices.expressions.inverse import Inverse
    from sympy.matrices.expressions import MatrixSymbol
    assert _test_args(Inverse(MatrixSymbol('A', 3, 3)))


def test_sympy__matrices__expressions__matadd__MatAdd():
    from sympy.matrices.expressions.matadd import MatAdd
    from sympy.matrices.expressions import MatrixSymbol
    X = MatrixSymbol('X', x, y)
    Y = MatrixSymbol('Y', x, y)
    assert _test_args(MatAdd(X, Y))


def test_sympy__matrices__expressions__matexpr__Identity():
    from sympy.matrices.expressions.matexpr import Identity
    assert _test_args(Identity(3))


@SKIP("abstract class")
def test_sympy__matrices__expressions__matexpr__MatrixExpr():
    pass


def test_sympy__matrices__expressions__matexpr__MatrixElement():
    from sympy.matrices.expressions.matexpr import MatrixSymbol, MatrixElement
    from sympy import S
    assert _test_args(MatrixElement(MatrixSymbol('A', 3, 5), S(2), S(3)))


@XFAIL def test_sympy__matrices__expressions__matexpr__MatrixSymbol(): from
sympy.matrices.expressions.matexpr import MatrixSymbol assert _test_args(MatrixSymbol('A', 3, 5)) def test_sympy__matrices__expressions__matexpr__ZeroMatrix(): from sympy.matrices.expressions.matexpr import ZeroMatrix assert _test_args(ZeroMatrix(3, 5)) def test_sympy__matrices__expressions__matmul__MatMul(): from sympy.matrices.expressions.matmul import MatMul from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', y, x) assert _test_args(MatMul(X, Y)) def test_sympy__matrices__expressions__dotproduct__DotProduct(): from sympy.matrices.expressions.dotproduct import DotProduct from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, 1) Y = MatrixSymbol('Y', x, 1) assert _test_args(DotProduct(X, Y)) def test_sympy__matrices__expressions__diagonal__DiagonalMatrix(): from sympy.matrices.expressions.diagonal import DiagonalMatrix from sympy.matrices.expressions import MatrixSymbol x = MatrixSymbol('x', 10, 1) assert _test_args(DiagonalMatrix(x)) def test_sympy__matrices__expressions__diagonal__DiagonalOf(): from sympy.matrices.expressions.diagonal import DiagonalOf from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('x', 10, 10) assert _test_args(DiagonalOf(X)) def test_sympy__matrices__expressions__hadamard__HadamardProduct(): from sympy.matrices.expressions.hadamard import HadamardProduct from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', x, y) assert _test_args(HadamardProduct(X, Y)) def test_sympy__matrices__expressions__kronecker__KroneckerProduct(): from sympy.matrices.expressions.kronecker import KroneckerProduct from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', x, y) Y = MatrixSymbol('Y', x, y) assert _test_args(KroneckerProduct(X, Y)) def test_sympy__matrices__expressions__matpow__MatPow(): from sympy.matrices.expressions.matpow import MatPow from sympy.matrices.expressions import MatrixSymbol X = 
MatrixSymbol('X', x, x) assert _test_args(MatPow(X, 2)) def test_sympy__matrices__expressions__transpose__Transpose(): from sympy.matrices.expressions.transpose import Transpose from sympy.matrices.expressions import MatrixSymbol assert _test_args(Transpose(MatrixSymbol('A', 3, 5))) def test_sympy__matrices__expressions__adjoint__Adjoint(): from sympy.matrices.expressions.adjoint import Adjoint from sympy.matrices.expressions import MatrixSymbol assert _test_args(Adjoint(MatrixSymbol('A', 3, 5))) def test_sympy__matrices__expressions__trace__Trace(): from sympy.matrices.expressions.trace import Trace from sympy.matrices.expressions import MatrixSymbol assert _test_args(Trace(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__determinant__Determinant(): from sympy.matrices.expressions.determinant import Determinant from sympy.matrices.expressions import MatrixSymbol assert _test_args(Determinant(MatrixSymbol('A', 3, 3))) def test_sympy__matrices__expressions__funcmatrix__FunctionMatrix(): from sympy.matrices.expressions.funcmatrix import FunctionMatrix from sympy import symbols i, j = symbols('i,j') assert _test_args(FunctionMatrix(3, 3, Lambda((i, j), i - j) )) def test_sympy__matrices__expressions__fourier__DFT(): from sympy.matrices.expressions.fourier import DFT from sympy import S assert _test_args(DFT(S(2))) def test_sympy__matrices__expressions__fourier__IDFT(): from sympy.matrices.expressions.fourier import IDFT from sympy import S assert _test_args(IDFT(S(2))) from sympy.matrices.expressions import MatrixSymbol X = MatrixSymbol('X', 10, 10) def test_sympy__matrices__expressions__factorizations__LofLU(): from sympy.matrices.expressions.factorizations import LofLU assert _test_args(LofLU(X)) def test_sympy__matrices__expressions__factorizations__UofLU(): from sympy.matrices.expressions.factorizations import UofLU assert _test_args(UofLU(X)) def test_sympy__matrices__expressions__factorizations__QofQR(): from 
sympy.matrices.expressions.factorizations import QofQR assert _test_args(QofQR(X)) def test_sympy__matrices__expressions__factorizations__RofQR(): from sympy.matrices.expressions.factorizations import RofQR assert _test_args(RofQR(X)) def test_sympy__matrices__expressions__factorizations__LofCholesky(): from sympy.matrices.expressions.factorizations import LofCholesky assert _test_args(LofCholesky(X)) def test_sympy__matrices__expressions__factorizations__UofCholesky(): from sympy.matrices.expressions.factorizations import UofCholesky assert _test_args(UofCholesky(X)) def test_sympy__matrices__expressions__factorizations__EigenVectors(): from sympy.matrices.expressions.factorizations import EigenVectors assert _test_args(EigenVectors(X)) def test_sympy__matrices__expressions__factorizations__EigenValues(): from sympy.matrices.expressions.factorizations import EigenValues assert _test_args(EigenValues(X)) def test_sympy__matrices__expressions__factorizations__UofSVD(): from sympy.matrices.expressions.factorizations import UofSVD assert _test_args(UofSVD(X)) def test_sympy__matrices__expressions__factorizations__VofSVD(): from sympy.matrices.expressions.factorizations import VofSVD assert _test_args(VofSVD(X)) def test_sympy__matrices__expressions__factorizations__SofSVD(): from sympy.matrices.expressions.factorizations import SofSVD assert _test_args(SofSVD(X)) @SKIP("abstract class") def test_sympy__matrices__expressions__factorizations__Factorization(): pass def test_sympy__physics__vector__frame__CoordinateSym(): from sympy.physics.vector import CoordinateSym from sympy.physics.vector import ReferenceFrame assert _test_args(CoordinateSym('R_x', ReferenceFrame('R'), 0)) def test_sympy__physics__paulialgebra__Pauli(): from sympy.physics.paulialgebra import Pauli assert _test_args(Pauli(1)) def test_sympy__physics__quantum__anticommutator__AntiCommutator(): from sympy.physics.quantum.anticommutator import AntiCommutator assert _test_args(AntiCommutator(x, y)) def 
test_sympy__physics__quantum__cartesian__PositionBra3D(): from sympy.physics.quantum.cartesian import PositionBra3D assert _test_args(PositionBra3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PositionKet3D(): from sympy.physics.quantum.cartesian import PositionKet3D assert _test_args(PositionKet3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PositionState3D(): from sympy.physics.quantum.cartesian import PositionState3D assert _test_args(PositionState3D(x, y, z)) def test_sympy__physics__quantum__cartesian__PxBra(): from sympy.physics.quantum.cartesian import PxBra assert _test_args(PxBra(x, y, z)) def test_sympy__physics__quantum__cartesian__PxKet(): from sympy.physics.quantum.cartesian import PxKet assert _test_args(PxKet(x, y, z)) def test_sympy__physics__quantum__cartesian__PxOp(): from sympy.physics.quantum.cartesian import PxOp assert _test_args(PxOp(x, y, z)) def test_sympy__physics__quantum__cartesian__XBra(): from sympy.physics.quantum.cartesian import XBra assert _test_args(XBra(x)) def test_sympy__physics__quantum__cartesian__XKet(): from sympy.physics.quantum.cartesian import XKet assert _test_args(XKet(x)) def test_sympy__physics__quantum__cartesian__XOp(): from sympy.physics.quantum.cartesian import XOp assert _test_args(XOp(x)) def test_sympy__physics__quantum__cartesian__YOp(): from sympy.physics.quantum.cartesian import YOp assert _test_args(YOp(x)) def test_sympy__physics__quantum__cartesian__ZOp(): from sympy.physics.quantum.cartesian import ZOp assert _test_args(ZOp(x)) def test_sympy__physics__quantum__cg__CG(): from sympy.physics.quantum.cg import CG from sympy import S assert _test_args(CG(S(3)/2, S(3)/2, S(1)/2, -S(1)/2, 1, 1)) def test_sympy__physics__quantum__cg__Wigner3j(): from sympy.physics.quantum.cg import Wigner3j assert _test_args(Wigner3j(6, 0, 4, 0, 2, 0)) def test_sympy__physics__quantum__cg__Wigner6j(): from sympy.physics.quantum.cg import Wigner6j assert _test_args(Wigner6j(1, 2, 3, 2, 1, 2)) def 
test_sympy__physics__quantum__cg__Wigner9j(): from sympy.physics.quantum.cg import Wigner9j assert _test_args(Wigner9j(2, 1, 1, S(3)/2, S(1)/2, 1, S(1)/2, S(1)/2, 0)) def test_sympy__physics__quantum__circuitplot__Mz(): from sympy.physics.quantum.circuitplot import Mz assert _test_args(Mz(0)) def test_sympy__physics__quantum__circuitplot__Mx(): from sympy.physics.quantum.circuitplot import Mx assert _test_args(Mx(0)) def test_sympy__physics__quantum__commutator__Commutator(): from sympy.physics.quantum.commutator import Commutator A, B = symbols('A,B', commutative=False) assert _test_args(Commutator(A, B)) def test_sympy__physics__quantum__constants__HBar(): from sympy.physics.quantum.constants import HBar assert _test_args(HBar()) def test_sympy__physics__quantum__dagger__Dagger(): from sympy.physics.quantum.dagger import Dagger from sympy.physics.quantum.state import Ket assert _test_args(Dagger(Dagger(Ket('psi')))) def test_sympy__physics__quantum__gate__CGate(): from sympy.physics.quantum.gate import CGate, Gate assert _test_args(CGate((0, 1), Gate(2))) def test_sympy__physics__quantum__gate__CGateS(): from sympy.physics.quantum.gate import CGateS, Gate assert _test_args(CGateS((0, 1), Gate(2))) def test_sympy__physics__quantum__gate__CNotGate(): from sympy.physics.quantum.gate import CNotGate assert _test_args(CNotGate(0, 1)) def test_sympy__physics__quantum__gate__Gate(): from sympy.physics.quantum.gate import Gate assert _test_args(Gate(0)) def test_sympy__physics__quantum__gate__HadamardGate(): from sympy.physics.quantum.gate import HadamardGate assert _test_args(HadamardGate(0)) def test_sympy__physics__quantum__gate__IdentityGate(): from sympy.physics.quantum.gate import IdentityGate assert _test_args(IdentityGate(0)) def test_sympy__physics__quantum__gate__OneQubitGate(): from sympy.physics.quantum.gate import OneQubitGate assert _test_args(OneQubitGate(0)) def test_sympy__physics__quantum__gate__PhaseGate(): from sympy.physics.quantum.gate import 
PhaseGate assert _test_args(PhaseGate(0)) def test_sympy__physics__quantum__gate__SwapGate(): from sympy.physics.quantum.gate import SwapGate assert _test_args(SwapGate(0, 1)) def test_sympy__physics__quantum__gate__TGate(): from sympy.physics.quantum.gate import TGate assert _test_args(TGate(0)) def test_sympy__physics__quantum__gate__TwoQubitGate(): from sympy.physics.quantum.gate import TwoQubitGate assert _test_args(TwoQubitGate(0)) def test_sympy__physics__quantum__gate__UGate(): from sympy.physics.quantum.gate import UGate from sympy.matrices.immutable import ImmutableDenseMatrix from sympy import Integer, Tuple assert _test_args( UGate(Tuple(Integer(1)), ImmutableDenseMatrix([[1, 0], [0, 2]]))) def test_sympy__physics__quantum__gate__XGate(): from sympy.physics.quantum.gate import XGate assert _test_args(XGate(0)) def test_sympy__physics__quantum__gate__YGate(): from sympy.physics.quantum.gate import YGate assert _test_args(YGate(0)) def test_sympy__physics__quantum__gate__ZGate(): from sympy.physics.quantum.gate import ZGate assert _test_args(ZGate(0)) @SKIP("TODO: sympy.physics") def test_sympy__physics__quantum__grover__OracleGate(): from sympy.physics.quantum.grover import OracleGate assert _test_args(OracleGate()) def test_sympy__physics__quantum__grover__WGate(): from sympy.physics.quantum.grover import WGate assert _test_args(WGate(1)) def test_sympy__physics__quantum__hilbert__ComplexSpace(): from sympy.physics.quantum.hilbert import ComplexSpace assert _test_args(ComplexSpace(x)) def test_sympy__physics__quantum__hilbert__DirectSumHilbertSpace(): from sympy.physics.quantum.hilbert import DirectSumHilbertSpace, ComplexSpace, FockSpace c = ComplexSpace(2) f = FockSpace() assert _test_args(DirectSumHilbertSpace(c, f)) def test_sympy__physics__quantum__hilbert__FockSpace(): from sympy.physics.quantum.hilbert import FockSpace assert _test_args(FockSpace()) def test_sympy__physics__quantum__hilbert__HilbertSpace(): from sympy.physics.quantum.hilbert 
import HilbertSpace assert _test_args(HilbertSpace()) def test_sympy__physics__quantum__hilbert__L2(): from sympy.physics.quantum.hilbert import L2 from sympy import oo, Interval assert _test_args(L2(Interval(0, oo))) def test_sympy__physics__quantum__hilbert__TensorPowerHilbertSpace(): from sympy.physics.quantum.hilbert import TensorPowerHilbertSpace, FockSpace f = FockSpace() assert _test_args(TensorPowerHilbertSpace(f, 2)) def test_sympy__physics__quantum__hilbert__TensorProductHilbertSpace(): from sympy.physics.quantum.hilbert import TensorProductHilbertSpace, FockSpace, ComplexSpace c = ComplexSpace(2) f = FockSpace() assert _test_args(TensorProductHilbertSpace(f, c)) def test_sympy__physics__quantum__innerproduct__InnerProduct(): from sympy.physics.quantum import Bra, Ket, InnerProduct b = Bra('b') k = Ket('k') assert _test_args(InnerProduct(b, k)) def test_sympy__physics__quantum__operator__DifferentialOperator(): from sympy.physics.quantum.operator import DifferentialOperator from sympy import Derivative, Function f = Function('f') assert _test_args(DifferentialOperator(1/x*Derivative(f(x), x), f(x))) def test_sympy__physics__quantum__operator__HermitianOperator(): from sympy.physics.quantum.operator import HermitianOperator assert _test_args(HermitianOperator('H')) def test_sympy__physics__quantum__operator__IdentityOperator(): from sympy.physics.quantum.operator import IdentityOperator assert _test_args(IdentityOperator(5)) def test_sympy__physics__quantum__operator__Operator(): from sympy.physics.quantum.operator import Operator assert _test_args(Operator('A')) def test_sympy__physics__quantum__operator__OuterProduct(): from sympy.physics.quantum.operator import OuterProduct from sympy.physics.quantum import Ket, Bra b = Bra('b') k = Ket('k') assert _test_args(OuterProduct(k, b)) def test_sympy__physics__quantum__operator__UnitaryOperator(): from sympy.physics.quantum.operator import UnitaryOperator assert _test_args(UnitaryOperator('U')) def 
test_sympy__physics__quantum__piab__PIABBra(): from sympy.physics.quantum.piab import PIABBra assert _test_args(PIABBra('B')) def test_sympy__physics__quantum__boson__BosonOp(): from sympy.physics.quantum.boson import BosonOp assert _test_args(BosonOp('a')) assert _test_args(BosonOp('a', False)) def test_sympy__physics__quantum__boson__BosonFockKet(): from sympy.physics.quantum.boson import BosonFockKet assert _test_args(BosonFockKet(1)) def test_sympy__physics__quantum__boson__BosonFockBra(): from sympy.physics.quantum.boson import BosonFockBra assert _test_args(BosonFockBra(1)) def test_sympy__physics__quantum__boson__BosonCoherentKet(): from sympy.physics.quantum.boson import BosonCoherentKet assert _test_args(BosonCoherentKet(1)) def test_sympy__physics__quantum__boson__BosonCoherentBra(): from sympy.physics.quantum.boson import BosonCoherentBra assert _test_args(BosonCoherentBra(1)) def test_sympy__physics__quantum__fermion__FermionOp(): from sympy.physics.quantum.fermion import FermionOp assert _test_args(FermionOp('c')) assert _test_args(FermionOp('c', False)) def test_sympy__physics__quantum__fermion__FermionFockKet(): from sympy.physics.quantum.fermion import FermionFockKet assert _test_args(FermionFockKet(1)) def test_sympy__physics__quantum__fermion__FermionFockBra(): from sympy.physics.quantum.fermion import FermionFockBra assert _test_args(FermionFockBra(1)) def test_sympy__physics__quantum__pauli__SigmaOpBase(): from sympy.physics.quantum.pauli import SigmaOpBase assert _test_args(SigmaOpBase()) def test_sympy__physics__quantum__pauli__SigmaX(): from sympy.physics.quantum.pauli import SigmaX assert _test_args(SigmaX()) def test_sympy__physics__quantum__pauli__SigmaY(): from sympy.physics.quantum.pauli import SigmaY assert _test_args(SigmaY()) def test_sympy__physics__quantum__pauli__SigmaZ(): from sympy.physics.quantum.pauli import SigmaZ assert _test_args(SigmaZ()) def test_sympy__physics__quantum__pauli__SigmaMinus(): from 
sympy.physics.quantum.pauli import SigmaMinus assert _test_args(SigmaMinus()) def test_sympy__physics__quantum__pauli__SigmaPlus(): from sympy.physics.quantum.pauli import SigmaPlus assert _test_args(SigmaPlus()) def test_sympy__physics__quantum__pauli__SigmaZKet(): from sympy.physics.quantum.pauli import SigmaZKet assert _test_args(SigmaZKet(0)) def test_sympy__physics__quantum__pauli__SigmaZBra(): from sympy.physics.quantum.pauli import SigmaZBra assert _test_args(SigmaZBra(0)) def test_sympy__physics__quantum__piab__PIABHamiltonian(): from sympy.physics.quantum.piab import PIABHamiltonian assert _test_args(PIABHamiltonian('P')) def test_sympy__physics__quantum__piab__PIABKet(): from sympy.physics.quantum.piab import PIABKet assert _test_args(PIABKet('K')) def test_sympy__physics__quantum__qexpr__QExpr(): from sympy.physics.quantum.qexpr import QExpr assert _test_args(QExpr(0)) def test_sympy__physics__quantum__qft__Fourier(): from sympy.physics.quantum.qft import Fourier assert _test_args(Fourier(0, 1)) def test_sympy__physics__quantum__qft__IQFT(): from sympy.physics.quantum.qft import IQFT assert _test_args(IQFT(0, 1)) def test_sympy__physics__quantum__qft__QFT(): from sympy.physics.quantum.qft import QFT assert _test_args(QFT(0, 1)) def test_sympy__physics__quantum__qft__RkGate(): from sympy.physics.quantum.qft import RkGate assert _test_args(RkGate(0, 1)) def test_sympy__physics__quantum__qubit__IntQubit(): from sympy.physics.quantum.qubit import IntQubit assert _test_args(IntQubit(0)) def test_sympy__physics__quantum__qubit__IntQubitBra(): from sympy.physics.quantum.qubit import IntQubitBra assert _test_args(IntQubitBra(0)) def test_sympy__physics__quantum__qubit__IntQubitState(): from sympy.physics.quantum.qubit import IntQubitState, QubitState assert _test_args(IntQubitState(QubitState(0, 1))) def test_sympy__physics__quantum__qubit__Qubit(): from sympy.physics.quantum.qubit import Qubit assert _test_args(Qubit(0, 0, 0)) def 
test_sympy__physics__quantum__qubit__QubitBra(): from sympy.physics.quantum.qubit import QubitBra assert _test_args(QubitBra('1', 0)) def test_sympy__physics__quantum__qubit__QubitState(): from sympy.physics.quantum.qubit import QubitState assert _test_args(QubitState(0, 1)) def test_sympy__physics__quantum__density__Density(): from sympy.physics.quantum.density import Density from sympy.physics.quantum.state import Ket assert _test_args(Density([Ket(0), 0.5], [Ket(1), 0.5])) @SKIP("TODO: sympy.physics.quantum.shor: Cmod Not Implemented") def test_sympy__physics__quantum__shor__CMod(): from sympy.physics.quantum.shor import CMod assert _test_args(CMod()) def test_sympy__physics__quantum__spin__CoupledSpinState(): from sympy.physics.quantum.spin import CoupledSpinState assert _test_args(CoupledSpinState(1, 0, (1, 1))) assert _test_args(CoupledSpinState(1, 0, (1, S(1)/2, S(1)/2))) assert _test_args(CoupledSpinState( 1, 0, (1, S(1)/2, S(1)/2), ((2, 3, S(1)/2), (1, 2, 1)) )) j, m, j1, j2, j3, j12, x = symbols('j m j1:4 j12 x') assert CoupledSpinState( j, m, (j1, j2, j3)).subs(j2, x) == CoupledSpinState(j, m, (j1, x, j3)) assert CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, j12), (1, 2, j)) ).subs(j12, x) == \ CoupledSpinState(j, m, (j1, j2, j3), ((1, 3, x), (1, 2, j)) ) def test_sympy__physics__quantum__spin__J2Op(): from sympy.physics.quantum.spin import J2Op assert _test_args(J2Op('J')) def test_sympy__physics__quantum__spin__JminusOp(): from sympy.physics.quantum.spin import JminusOp assert _test_args(JminusOp('J')) def test_sympy__physics__quantum__spin__JplusOp(): from sympy.physics.quantum.spin import JplusOp assert _test_args(JplusOp('J')) def test_sympy__physics__quantum__spin__JxBra(): from sympy.physics.quantum.spin import JxBra assert _test_args(JxBra(1, 0)) def test_sympy__physics__quantum__spin__JxBraCoupled(): from sympy.physics.quantum.spin import JxBraCoupled assert _test_args(JxBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JxKet(): 
from sympy.physics.quantum.spin import JxKet assert _test_args(JxKet(1, 0)) def test_sympy__physics__quantum__spin__JxKetCoupled(): from sympy.physics.quantum.spin import JxKetCoupled assert _test_args(JxKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JxOp(): from sympy.physics.quantum.spin import JxOp assert _test_args(JxOp('J')) def test_sympy__physics__quantum__spin__JyBra(): from sympy.physics.quantum.spin import JyBra assert _test_args(JyBra(1, 0)) def test_sympy__physics__quantum__spin__JyBraCoupled(): from sympy.physics.quantum.spin import JyBraCoupled assert _test_args(JyBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JyKet(): from sympy.physics.quantum.spin import JyKet assert _test_args(JyKet(1, 0)) def test_sympy__physics__quantum__spin__JyKetCoupled(): from sympy.physics.quantum.spin import JyKetCoupled assert _test_args(JyKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JyOp(): from sympy.physics.quantum.spin import JyOp assert _test_args(JyOp('J')) def test_sympy__physics__quantum__spin__JzBra(): from sympy.physics.quantum.spin import JzBra assert _test_args(JzBra(1, 0)) def test_sympy__physics__quantum__spin__JzBraCoupled(): from sympy.physics.quantum.spin import JzBraCoupled assert _test_args(JzBraCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JzKet(): from sympy.physics.quantum.spin import JzKet assert _test_args(JzKet(1, 0)) def test_sympy__physics__quantum__spin__JzKetCoupled(): from sympy.physics.quantum.spin import JzKetCoupled assert _test_args(JzKetCoupled(1, 0, (1, 1))) def test_sympy__physics__quantum__spin__JzOp(): from sympy.physics.quantum.spin import JzOp assert _test_args(JzOp('J')) def test_sympy__physics__quantum__spin__Rotation(): from sympy.physics.quantum.spin import Rotation assert _test_args(Rotation(pi, 0, pi/2)) def test_sympy__physics__quantum__spin__SpinState(): from sympy.physics.quantum.spin import SpinState assert _test_args(SpinState(1, 0)) def 
test_sympy__physics__quantum__spin__WignerD(): from sympy.physics.quantum.spin import WignerD assert _test_args(WignerD(0, 1, 2, 3, 4, 5)) def test_sympy__physics__quantum__state__Bra(): from sympy.physics.quantum.state import Bra assert _test_args(Bra(0)) def test_sympy__physics__quantum__state__BraBase(): from sympy.physics.quantum.state import BraBase assert _test_args(BraBase(0)) def test_sympy__physics__quantum__state__Ket(): from sympy.physics.quantum.state import Ket assert _test_args(Ket(0)) def test_sympy__physics__quantum__state__KetBase(): from sympy.physics.quantum.state import KetBase assert _test_args(KetBase(0)) def test_sympy__physics__quantum__state__State(): from sympy.physics.quantum.state import State assert _test_args(State(0)) def test_sympy__physics__quantum__state__StateBase(): from sympy.physics.quantum.state import StateBase assert _test_args(StateBase(0)) def test_sympy__physics__quantum__state__TimeDepBra(): from sympy.physics.quantum.state import TimeDepBra assert _test_args(TimeDepBra('psi', 't')) def test_sympy__physics__quantum__state__TimeDepKet(): from sympy.physics.quantum.state import TimeDepKet assert _test_args(TimeDepKet('psi', 't')) def test_sympy__physics__quantum__state__TimeDepState(): from sympy.physics.quantum.state import TimeDepState assert _test_args(TimeDepState('psi', 't')) def test_sympy__physics__quantum__state__Wavefunction(): from sympy.physics.quantum.state import Wavefunction from sympy.functions import sin from sympy import Piecewise n = 1 L = 1 g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True)) assert _test_args(Wavefunction(g, x)) def test_sympy__physics__quantum__tensorproduct__TensorProduct(): from sympy.physics.quantum.tensorproduct import TensorProduct assert _test_args(TensorProduct(x, y)) def test_sympy__physics__quantum__identitysearch__GateIdentity(): from sympy.physics.quantum.gate import X from sympy.physics.quantum.identitysearch import GateIdentity assert 
_test_args(GateIdentity(X(0), X(0))) def test_sympy__physics__quantum__sho1d__SHOOp(): from sympy.physics.quantum.sho1d import SHOOp assert _test_args(SHOOp('a')) def test_sympy__physics__quantum__sho1d__RaisingOp(): from sympy.physics.quantum.sho1d import RaisingOp assert _test_args(RaisingOp('a')) def test_sympy__physics__quantum__sho1d__LoweringOp(): from sympy.physics.quantum.sho1d import LoweringOp assert _test_args(LoweringOp('a')) def test_sympy__physics__quantum__sho1d__NumberOp(): from sympy.physics.quantum.sho1d import NumberOp assert _test_args(NumberOp('N')) def test_sympy__physics__quantum__sho1d__Hamiltonian(): from sympy.physics.quantum.sho1d import Hamiltonian assert _test_args(Hamiltonian('H')) def test_sympy__physics__quantum__sho1d__SHOState(): from sympy.physics.quantum.sho1d import SHOState assert _test_args(SHOState(0)) def test_sympy__physics__quantum__sho1d__SHOKet(): from sympy.physics.quantum.sho1d import SHOKet assert _test_args(SHOKet(0)) def test_sympy__physics__quantum__sho1d__SHOBra(): from sympy.physics.quantum.sho1d import SHOBra assert _test_args(SHOBra(0)) def test_sympy__physics__secondquant__AnnihilateBoson(): from sympy.physics.secondquant import AnnihilateBoson assert _test_args(AnnihilateBoson(0)) def test_sympy__physics__secondquant__AnnihilateFermion(): from sympy.physics.secondquant import AnnihilateFermion assert _test_args(AnnihilateFermion(0)) @SKIP("abstract class") def test_sympy__physics__secondquant__Annihilator(): pass def test_sympy__physics__secondquant__AntiSymmetricTensor(): from sympy.physics.secondquant import AntiSymmetricTensor i, j = symbols('i j', below_fermi=True) a, b = symbols('a b', above_fermi=True) assert _test_args(AntiSymmetricTensor('v', (a, i), (b, j))) def test_sympy__physics__secondquant__BosonState(): from sympy.physics.secondquant import BosonState assert _test_args(BosonState((0, 1))) @SKIP("abstract class") def test_sympy__physics__secondquant__BosonicOperator(): pass def 
test_sympy__physics__secondquant__Commutator(): from sympy.physics.secondquant import Commutator assert _test_args(Commutator(x, y)) def test_sympy__physics__secondquant__CreateBoson(): from sympy.physics.secondquant import CreateBoson assert _test_args(CreateBoson(0)) def test_sympy__physics__secondquant__CreateFermion(): from sympy.physics.secondquant import CreateFermion assert _test_args(CreateFermion(0)) @SKIP("abstract class") def test_sympy__physics__secondquant__Creator(): pass def test_sympy__physics__secondquant__Dagger(): from sympy.physics.secondquant import Dagger from sympy import I assert _test_args(Dagger(2*I)) def test_sympy__physics__secondquant__FermionState(): from sympy.physics.secondquant import FermionState assert _test_args(FermionState((0, 1))) def test_sympy__physics__secondquant__FermionicOperator(): from sympy.physics.secondquant import FermionicOperator assert _test_args(FermionicOperator(0)) def test_sympy__physics__secondquant__FockState(): from sympy.physics.secondquant import FockState assert _test_args(FockState((0, 1))) def test_sympy__physics__secondquant__FockStateBosonBra(): from sympy.physics.secondquant import FockStateBosonBra assert _test_args(FockStateBosonBra((0, 1))) def test_sympy__physics__secondquant__FockStateBosonKet(): from sympy.physics.secondquant import FockStateBosonKet assert _test_args(FockStateBosonKet((0, 1))) def test_sympy__physics__secondquant__FockStateBra(): from sympy.physics.secondquant import FockStateBra assert _test_args(FockStateBra((0, 1))) def test_sympy__physics__secondquant__FockStateFermionBra(): from sympy.physics.secondquant import FockStateFermionBra assert _test_args(FockStateFermionBra((0, 1))) def test_sympy__physics__secondquant__FockStateFermionKet(): from sympy.physics.secondquant import FockStateFermionKet assert _test_args(FockStateFermionKet((0, 1))) def test_sympy__physics__secondquant__FockStateKet(): from sympy.physics.secondquant import FockStateKet assert 
_test_args(FockStateKet((0, 1))) def test_sympy__physics__secondquant__InnerProduct(): from sympy.physics.secondquant import InnerProduct from sympy.physics.secondquant import FockStateKet, FockStateBra assert _test_args(InnerProduct(FockStateBra((0, 1)), FockStateKet((0, 1)))) def test_sympy__physics__secondquant__NO(): from sympy.physics.secondquant import NO, F, Fd assert _test_args(NO(Fd(x)*F(y))) def test_sympy__physics__secondquant__PermutationOperator(): from sympy.physics.secondquant import PermutationOperator assert _test_args(PermutationOperator(0, 1)) def test_sympy__physics__secondquant__SqOperator(): from sympy.physics.secondquant import SqOperator assert _test_args(SqOperator(0)) def test_sympy__physics__secondquant__TensorSymbol(): from sympy.physics.secondquant import TensorSymbol assert _test_args(TensorSymbol(x)) def test_sympy__physics__units__dimensions__Dimension(): from sympy.physics.units.dimensions import Dimension assert _test_args(Dimension("length", "L")) def test_sympy__physics__units__dimensions__DimensionSystem(): from sympy.physics.units.dimensions import DimensionSystem from sympy.physics.units.dimensions import length, time, velocity assert _test_args(DimensionSystem((length, time), (velocity,))) def test_sympy__physics__units__quantities__Quantity(): from sympy.physics.units.quantities import Quantity from sympy.physics.units import length assert _test_args(Quantity("dam")) def test_sympy__physics__units__prefixes__Prefix(): from sympy.physics.units.prefixes import Prefix assert _test_args(Prefix('kilo', 'k', 3)) def test_sympy__core__numbers__AlgebraicNumber(): from sympy.core.numbers import AlgebraicNumber assert _test_args(AlgebraicNumber(sqrt(2), [1, 2, 3])) def test_sympy__polys__polytools__GroebnerBasis(): from sympy.polys.polytools import GroebnerBasis assert _test_args(GroebnerBasis([x, y, z], x, y, z)) def test_sympy__polys__polytools__Poly(): from sympy.polys.polytools import Poly assert _test_args(Poly(2, x, y)) def 
test_sympy__polys__polytools__PurePoly(): from sympy.polys.polytools import PurePoly assert _test_args(PurePoly(2, x, y)) @SKIP('abstract class') def test_sympy__polys__rootoftools__RootOf(): pass def test_sympy__polys__rootoftools__ComplexRootOf(): from sympy.polys.rootoftools import ComplexRootOf assert _test_args(ComplexRootOf(x**3 + x + 1, 0)) def test_sympy__polys__rootoftools__RootSum(): from sympy.polys.rootoftools import RootSum assert _test_args(RootSum(x**3 + x + 1, sin)) def test_sympy__series__limits__Limit(): from sympy.series.limits import Limit assert _test_args(Limit(x, x, 0, dir='-')) def test_sympy__series__order__Order(): from sympy.series.order import Order assert _test_args(Order(1, x, y)) @SKIP('Abstract Class') def test_sympy__series__sequences__SeqBase(): pass def test_sympy__series__sequences__EmptySequence(): from sympy.series.sequences import EmptySequence assert _test_args(EmptySequence()) @SKIP('Abstract Class') def test_sympy__series__sequences__SeqExpr(): pass def test_sympy__series__sequences__SeqPer(): from sympy.series.sequences import SeqPer assert _test_args(SeqPer((1, 2, 3), (0, 10))) def test_sympy__series__sequences__SeqFormula(): from sympy.series.sequences import SeqFormula assert _test_args(SeqFormula(x**2, (0, 10))) def test_sympy__series__sequences__SeqExprOp(): from sympy.series.sequences import SeqExprOp, sequence s1 = sequence((1, 2, 3)) s2 = sequence(x**2) assert _test_args(SeqExprOp(s1, s2)) def test_sympy__series__sequences__SeqAdd(): from sympy.series.sequences import SeqAdd, sequence s1 = sequence((1, 2, 3)) s2 = sequence(x**2) assert _test_args(SeqAdd(s1, s2)) def test_sympy__series__sequences__SeqMul(): from sympy.series.sequences import SeqMul, sequence s1 = sequence((1, 2, 3)) s2 = sequence(x**2) assert _test_args(SeqMul(s1, s2)) @SKIP('Abstract Class') def test_sympy__series__series_class__SeriesBase(): pass def test_sympy__series__fourier__FourierSeries(): from sympy.series.fourier import fourier_series 
assert _test_args(fourier_series(x, (x, -pi, pi))) def test_sympy__series__formal__FormalPowerSeries(): from sympy.series.formal import fps assert _test_args(fps(log(1 + x), x)) def test_sympy__simplify__hyperexpand__Hyper_Function(): from sympy.simplify.hyperexpand import Hyper_Function assert _test_args(Hyper_Function([2], [1])) def test_sympy__simplify__hyperexpand__G_Function(): from sympy.simplify.hyperexpand import G_Function assert _test_args(G_Function([2], [1], [], [])) @SKIP("abstract class") def test_sympy__tensor__array__ndim_array__ImmutableNDimArray(): pass def test_sympy__tensor__array__dense_ndim_array__ImmutableDenseNDimArray(): from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray densarr = ImmutableDenseNDimArray(range(10, 34), (2, 3, 4)) assert _test_args(densarr) def test_sympy__tensor__array__sparse_ndim_array__ImmutableSparseNDimArray(): from sympy.tensor.array.sparse_ndim_array import ImmutableSparseNDimArray sparr = ImmutableSparseNDimArray(range(10, 34), (2, 3, 4)) assert _test_args(sparr) def test_sympy__tensor__functions__TensorProduct(): from sympy.tensor.functions import TensorProduct tp = TensorProduct(3, 4, evaluate=False) assert _test_args(tp) def test_sympy__tensor__indexed__Idx(): from sympy.tensor.indexed import Idx assert _test_args(Idx('test')) assert _test_args(Idx(1, (0, 10))) def test_sympy__tensor__indexed__Indexed(): from sympy.tensor.indexed import Indexed, Idx assert _test_args(Indexed('A', Idx('i'), Idx('j'))) def test_sympy__tensor__indexed__IndexedBase(): from sympy.tensor.indexed import IndexedBase assert _test_args(IndexedBase('A', shape=(x, y))) assert _test_args(IndexedBase('A', 1)) assert _test_args(IndexedBase('A')[0, 1]) def test_sympy__tensor__tensor__TensorIndexType(): from sympy.tensor.tensor import TensorIndexType assert _test_args(TensorIndexType('Lorentz', metric=False)) def test_sympy__tensor__tensor__TensorSymmetry(): from sympy.tensor.tensor import TensorSymmetry, 
get_symmetric_group_sgs assert _test_args(TensorSymmetry(get_symmetric_group_sgs(2))) def test_sympy__tensor__tensor__TensorType(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorType Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') sym = TensorSymmetry(get_symmetric_group_sgs(1)) assert _test_args(TensorType([Lorentz], sym)) def test_sympy__tensor__tensor__TensorHead(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, TensorHead Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) assert _test_args(TensorHead('p', S1, 0)) def test_sympy__tensor__tensor__TensorIndex(): from sympy.tensor.tensor import TensorIndexType, TensorIndex Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') assert _test_args(TensorIndex('i', Lorentz)) @SKIP("abstract class") def test_sympy__tensor__tensor__TensExpr(): pass def test_sympy__tensor__tensor__TensAdd(): from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensAdd Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p, q = S1('p,q') t1 = p(a) t2 = q(a) assert _test_args(TensAdd(t1, t2)) def test_sympy__tensor__tensor__Tensor(): from sympy.core import S from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, TensMul, TIDS Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p = S1('p') assert _test_args(p(a)) def test_sympy__tensor__tensor__TensMul(): from sympy.core import S from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, TensorType, get_symmetric_group_sgs, tensor_indices, 
TensMul, TIDS Lorentz = TensorIndexType('Lorentz', dummy_fmt='L') a, b = tensor_indices('a,b', Lorentz) sym = TensorSymmetry(get_symmetric_group_sgs(1)) S1 = TensorType([Lorentz], sym) p = S1('p') q = S1('q') assert _test_args(3*p(a)*q(b)) def test_as_coeff_add(): assert (7, (3*x, 4*x**2)) == (7 + 3*x + 4*x**2).as_coeff_add() def test_sympy__geometry__curve__Curve(): from sympy.geometry.curve import Curve assert _test_args(Curve((x, 1), (x, 0, 1))) def test_sympy__geometry__point__Point(): from sympy.geometry.point import Point assert _test_args(Point(0, 1)) def test_sympy__geometry__point__Point2D(): from sympy.geometry.point import Point2D assert _test_args(Point2D(0, 1)) def test_sympy__geometry__point__Point3D(): from sympy.geometry.point import Point3D assert _test_args(Point3D(0, 1, 2)) def test_sympy__geometry__ellipse__Ellipse(): from sympy.geometry.ellipse import Ellipse assert _test_args(Ellipse((0, 1), 2, 3)) def test_sympy__geometry__ellipse__Circle(): from sympy.geometry.ellipse import Circle assert _test_args(Circle((0, 1), 2)) def test_sympy__geometry__parabola__Parabola(): from sympy.geometry.parabola import Parabola from sympy.geometry.line import Line assert _test_args(Parabola((0, 0), Line((2, 3), (4, 3)))) @SKIP("abstract class") def test_sympy__geometry__line__LinearEntity(): pass def test_sympy__geometry__line__Line(): from sympy.geometry.line import Line assert _test_args(Line((0, 1), (2, 3))) def test_sympy__geometry__line__Ray(): from sympy.geometry.line import Ray assert _test_args(Ray((0, 1), (2, 3))) def test_sympy__geometry__line__Segment(): from sympy.geometry.line import Segment assert _test_args(Segment((0, 1), (2, 3))) @SKIP("abstract class") def test_sympy__geometry__line__LinearEntity2D(): pass def test_sympy__geometry__line__Line2D(): from sympy.geometry.line import Line2D assert _test_args(Line2D((0, 1), (2, 3))) def test_sympy__geometry__line__Ray2D(): from sympy.geometry.line import Ray2D assert _test_args(Ray2D((0, 1), (2, 
3))) def test_sympy__geometry__line__Segment2D(): from sympy.geometry.line import Segment2D assert _test_args(Segment2D((0, 1), (2, 3))) @SKIP("abstract class") def test_sympy__geometry__line__LinearEntity3D(): pass def test_sympy__geometry__line__Line3D(): from sympy.geometry.line import Line3D assert _test_args(Line3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__line__Segment3D(): from sympy.geometry.line import Segment3D assert _test_args(Segment3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__line__Ray3D(): from sympy.geometry.line import Ray3D assert _test_args(Ray3D((0, 1, 1), (2, 3, 4))) def test_sympy__geometry__plane__Plane(): from sympy.geometry.plane import Plane assert _test_args(Plane((1, 1, 1), (-3, 4, -2), (1, 2, 3))) def test_sympy__geometry__polygon__Polygon(): from sympy.geometry.polygon import Polygon assert _test_args(Polygon((0, 1), (2, 3), (4, 5), (6, 7))) def test_sympy__geometry__polygon__RegularPolygon(): from sympy.geometry.polygon import RegularPolygon assert _test_args(RegularPolygon((0, 1), 2, 3, 4)) def test_sympy__geometry__polygon__Triangle(): from sympy.geometry.polygon import Triangle assert _test_args(Triangle((0, 1), (2, 3), (4, 5))) def test_sympy__geometry__entity__GeometryEntity(): from sympy.geometry.entity import GeometryEntity from sympy.geometry.point import Point assert _test_args(GeometryEntity(Point(1, 0), 1, [1, 2])) @SKIP("abstract class") def test_sympy__geometry__entity__GeometrySet(): pass def test_sympy__diffgeom__diffgeom__Manifold(): from sympy.diffgeom import Manifold assert _test_args(Manifold('name', 3)) def test_sympy__diffgeom__diffgeom__Patch(): from sympy.diffgeom import Manifold, Patch assert _test_args(Patch('name', Manifold('name', 3))) def test_sympy__diffgeom__diffgeom__CoordSystem(): from sympy.diffgeom import Manifold, Patch, CoordSystem assert _test_args(CoordSystem('name', Patch('name', Manifold('name', 3)))) @XFAIL def test_sympy__diffgeom__diffgeom__Point(): from sympy.diffgeom import 
Manifold, Patch, CoordSystem, Point assert _test_args(Point( CoordSystem('name', Patch('name', Manifold('name', 3))), [x, y])) def test_sympy__diffgeom__diffgeom__BaseScalarField(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseScalarField(cs, 0)) def test_sympy__diffgeom__diffgeom__BaseVectorField(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseVectorField(cs, 0)) def test_sympy__diffgeom__diffgeom__Differential(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(Differential(BaseScalarField(cs, 0))) def test_sympy__diffgeom__diffgeom__Commutator(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, Commutator cs = CoordSystem('name', Patch('name', Manifold('name', 3))) cs1 = CoordSystem('name1', Patch('name', Manifold('name', 3))) v = BaseVectorField(cs, 0) v1 = BaseVectorField(cs1, 0) assert _test_args(Commutator(v, v1)) def test_sympy__diffgeom__diffgeom__TensorProduct(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, TensorProduct cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) assert _test_args(TensorProduct(d, d)) def test_sympy__diffgeom__diffgeom__WedgeProduct(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, WedgeProduct cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) d1 = Differential(BaseScalarField(cs, 1)) assert _test_args(WedgeProduct(d, d1)) def test_sympy__diffgeom__diffgeom__LieDerivative(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential, BaseVectorField, 
LieDerivative cs = CoordSystem('name', Patch('name', Manifold('name', 3))) d = Differential(BaseScalarField(cs, 0)) v = BaseVectorField(cs, 0) assert _test_args(LieDerivative(v, d)) @XFAIL def test_sympy__diffgeom__diffgeom__BaseCovarDerivativeOp(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseCovarDerivativeOp cs = CoordSystem('name', Patch('name', Manifold('name', 3))) assert _test_args(BaseCovarDerivativeOp(cs, 0, [[[0, ]*3, ]*3, ]*3)) def test_sympy__diffgeom__diffgeom__CovarDerivativeOp(): from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseVectorField, CovarDerivativeOp cs = CoordSystem('name', Patch('name', Manifold('name', 3))) v = BaseVectorField(cs, 0) _test_args(CovarDerivativeOp(v, [[[0, ]*3, ]*3, ]*3)) def test_sympy__categories__baseclasses__Class(): from sympy.categories.baseclasses import Class assert _test_args(Class()) def test_sympy__categories__baseclasses__Object(): from sympy.categories import Object assert _test_args(Object("A")) @XFAIL def test_sympy__categories__baseclasses__Morphism(): from sympy.categories import Object, Morphism assert _test_args(Morphism(Object("A"), Object("B"))) def test_sympy__categories__baseclasses__IdentityMorphism(): from sympy.categories import Object, IdentityMorphism assert _test_args(IdentityMorphism(Object("A"))) def test_sympy__categories__baseclasses__NamedMorphism(): from sympy.categories import Object, NamedMorphism assert _test_args(NamedMorphism(Object("A"), Object("B"), "f")) def test_sympy__categories__baseclasses__CompositeMorphism(): from sympy.categories import Object, NamedMorphism, CompositeMorphism A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") g = NamedMorphism(B, C, "g") assert _test_args(CompositeMorphism(f, g)) def test_sympy__categories__baseclasses__Diagram(): from sympy.categories import Object, NamedMorphism, Diagram A = Object("A") B = Object("B") C = Object("C") f = NamedMorphism(A, B, "f") d = Diagram([f]) assert _test_args(d) 
# NOTE(review): every test below builds a valid instance of one SymPy class and
# feeds it to the module-level helper ``_test_args`` (defined earlier in this
# file, outside this view) -- presumably it validates the instance's ``.args``
# invariant; confirm against the helper's definition.  ``symbols``, ``x``,
# ``y``, ``z`` and ``Eq`` come from this test module's top-level imports.

def test_sympy__categories__baseclasses__Category():
    from sympy.categories import Object, NamedMorphism, Diagram, Category
    A = Object("A")
    B = Object("B")
    C = Object("C")
    f = NamedMorphism(A, B, "f")
    g = NamedMorphism(B, C, "g")
    d1 = Diagram([f, g])
    d2 = Diagram([f])
    # A category carrying two commutative diagrams.
    K = Category("K", commutative_diagrams=[d1, d2])
    assert _test_args(K)


# --- sympy.ntheory.factor_: arithmetic functions applied to symbolic ints ---

def test_sympy__ntheory__factor___totient():
    from sympy.ntheory.factor_ import totient
    k = symbols('k', integer=True)
    t = totient(k)
    assert _test_args(t)


def test_sympy__ntheory__factor___reduced_totient():
    from sympy.ntheory.factor_ import reduced_totient
    k = symbols('k', integer=True)
    t = reduced_totient(k)
    assert _test_args(t)


def test_sympy__ntheory__factor___divisor_sigma():
    from sympy.ntheory.factor_ import divisor_sigma
    k = symbols('k', integer=True)
    n = symbols('n', integer=True)
    t = divisor_sigma(n, k)
    assert _test_args(t)


def test_sympy__ntheory__factor___udivisor_sigma():
    from sympy.ntheory.factor_ import udivisor_sigma
    k = symbols('k', integer=True)
    n = symbols('n', integer=True)
    t = udivisor_sigma(n, k)
    assert _test_args(t)


def test_sympy__ntheory__factor___primenu():
    from sympy.ntheory.factor_ import primenu
    n = symbols('n', integer=True)
    t = primenu(n)
    assert _test_args(t)


def test_sympy__ntheory__factor___primeomega():
    from sympy.ntheory.factor_ import primeomega
    n = symbols('n', integer=True)
    t = primeomega(n)
    assert _test_args(t)


def test_sympy__ntheory__residue_ntheory__mobius():
    from sympy.ntheory import mobius
    assert _test_args(mobius(2))


# --- sympy.physics.optics ---

def test_sympy__physics__optics__waves__TWave():
    from sympy.physics.optics import TWave
    A, f, phi = symbols('A, f, phi')
    assert _test_args(TWave(A, f, phi))


def test_sympy__physics__optics__gaussopt__BeamParameter():
    from sympy.physics.optics import BeamParameter
    assert _test_args(BeamParameter(530e-9, 1, w=1e-3))


def test_sympy__physics__optics__medium__Medium():
    from sympy.physics.optics import Medium
    assert _test_args(Medium('m'))


# --- sympy.codegen: AST nodes and C math helper functions ---

def test_sympy__codegen__ast__Assignment():
    from sympy.codegen.ast import Assignment
    assert _test_args(Assignment(x, y))


def test_sympy__codegen__cfunctions__expm1():
    from sympy.codegen.cfunctions import expm1
    assert _test_args(expm1(x))


def test_sympy__codegen__cfunctions__log1p():
    from sympy.codegen.cfunctions import log1p
    assert _test_args(log1p(x))


def test_sympy__codegen__cfunctions__exp2():
    from sympy.codegen.cfunctions import exp2
    assert _test_args(exp2(x))


def test_sympy__codegen__cfunctions__log2():
    from sympy.codegen.cfunctions import log2
    assert _test_args(log2(x))


def test_sympy__codegen__cfunctions__fma():
    from sympy.codegen.cfunctions import fma
    assert _test_args(fma(x, y, z))


def test_sympy__codegen__cfunctions__log10():
    from sympy.codegen.cfunctions import log10
    assert _test_args(log10(x))


def test_sympy__codegen__cfunctions__Sqrt():
    from sympy.codegen.cfunctions import Sqrt
    assert _test_args(Sqrt(x))


def test_sympy__codegen__cfunctions__Cbrt():
    from sympy.codegen.cfunctions import Cbrt
    assert _test_args(Cbrt(x))


def test_sympy__codegen__cfunctions__hypot():
    from sympy.codegen.cfunctions import hypot
    assert _test_args(hypot(x, y))


# --- sympy.codegen.fnodes: Fortran-specific nodes ---

def test_sympy__codegen__fnodes__FFunction():
    from sympy.codegen.fnodes import FFunction
    assert _test_args(FFunction('f'))


def test_sympy__codegen__fnodes__F95Function():
    from sympy.codegen.fnodes import F95Function
    assert _test_args(F95Function('f'))


def test_sympy__codegen__fnodes__isign():
    from sympy.codegen.fnodes import isign
    assert _test_args(isign(1, x))


def test_sympy__codegen__fnodes__dsign():
    from sympy.codegen.fnodes import dsign
    assert _test_args(dsign(1, x))


def test_sympy__codegen__fnodes__cmplx():
    from sympy.codegen.fnodes import cmplx
    assert _test_args(cmplx(x, y))


def test_sympy__codegen__fnodes__kind():
    from sympy.codegen.fnodes import kind
    assert _test_args(kind(x))


def test_sympy__codegen__fnodes__merge():
    from sympy.codegen.fnodes import merge
    assert _test_args(merge(1, 2, Eq(x, 0)))


def test_sympy__codegen__fnodes___literal():
    from sympy.codegen.fnodes import _literal
    assert _test_args(_literal(1))


def test_sympy__codegen__fnodes__literal_sp():
    from sympy.codegen.fnodes import literal_sp
    assert _test_args(literal_sp(1))


def test_sympy__codegen__fnodes__literal_dp():
    from sympy.codegen.fnodes import literal_dp
    assert _test_args(literal_dp(1))


# --- sympy.vector ---

def test_sympy__vector__coordsysrect__CoordSys3D():
    from sympy.vector.coordsysrect import CoordSys3D
    assert _test_args(CoordSys3D('C'))


def test_sympy__vector__point__Point():
    from sympy.vector.point import Point
    assert _test_args(Point('P'))


def test_sympy__vector__basisdependent__BasisDependent():
    from sympy.vector.basisdependent import BasisDependent
    # These classes have been created to maintain an OOP hierarchy
    # for Vectors and Dyadics. Are NOT meant to be initialized.


def test_sympy__vector__basisdependent__BasisDependentMul():
    from sympy.vector.basisdependent import BasisDependentMul
    # These classes have been created to maintain an OOP hierarchy
    # for Vectors and Dyadics. Are NOT meant to be initialized.


def test_sympy__vector__basisdependent__BasisDependentAdd():
    from sympy.vector.basisdependent import BasisDependentAdd
    # These classes have been created to maintain an OOP hierarchy
    # for Vectors and Dyadics. Are NOT meant to be initialized.


def test_sympy__vector__basisdependent__BasisDependentZero():
    from sympy.vector.basisdependent import BasisDependentZero
    # These classes have been created to maintain an OOP hierarchy
    # for Vectors and Dyadics. Are NOT meant to be initialized.


def test_sympy__vector__vector__BaseVector():
    from sympy.vector.vector import BaseVector
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(BaseVector(0, C, ' ', ' '))


def test_sympy__vector__vector__VectorAdd():
    from sympy.vector.vector import VectorAdd, VectorMul
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    from sympy.abc import a, b, c, x, y, z
    v1 = a*C.i + b*C.j + c*C.k
    v2 = x*C.i + y*C.j + z*C.k
    assert _test_args(VectorAdd(v1, v2))
    assert _test_args(VectorMul(x, v1))


def test_sympy__vector__vector__VectorMul():
    from sympy.vector.vector import VectorMul
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    from sympy.abc import a
    assert _test_args(VectorMul(a, C.i))


def test_sympy__vector__vector__VectorZero():
    from sympy.vector.vector import VectorZero
    assert _test_args(VectorZero())


def test_sympy__vector__vector__Vector():
    from sympy.vector.vector import Vector
    # Vector is never to be initialized using args
    pass


def test_sympy__vector__vector__Cross():
    from sympy.vector.vector import Cross
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    _test_args(Cross(C.i, C.j))


def test_sympy__vector__vector__Dot():
    from sympy.vector.vector import Dot
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    _test_args(Dot(C.i, C.j))


def test_sympy__vector__dyadic__Dyadic():
    from sympy.vector.dyadic import Dyadic
    # Dyadic is never to be initialized using args
    pass


def test_sympy__vector__dyadic__BaseDyadic():
    from sympy.vector.dyadic import BaseDyadic
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(BaseDyadic(C.i, C.j))


def test_sympy__vector__dyadic__DyadicMul():
    from sympy.vector.dyadic import BaseDyadic, DyadicMul
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(DyadicMul(3, BaseDyadic(C.i, C.j)))


def test_sympy__vector__dyadic__DyadicAdd():
    from sympy.vector.dyadic import BaseDyadic, DyadicAdd
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(2 * DyadicAdd(BaseDyadic(C.i, C.i),
                                    BaseDyadic(C.i, C.j)))


def test_sympy__vector__dyadic__DyadicZero():
    from sympy.vector.dyadic import DyadicZero
    assert _test_args(DyadicZero())


def test_sympy__vector__deloperator__Del():
    from sympy.vector.deloperator import Del
    assert _test_args(Del())


def test_sympy__vector__operators__Curl():
    from sympy.vector.operators import Curl
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(Curl(C.i))


def test_sympy__vector__operators__Divergence():
    from sympy.vector.operators import Divergence
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(Divergence(C.i))


def test_sympy__vector__operators__Gradient():
    from sympy.vector.operators import Gradient
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(Gradient(C.x))


def test_sympy__vector__orienters__Orienter():
    from sympy.vector.orienters import Orienter
    # Not to be initialized


def test_sympy__vector__orienters__ThreeAngleOrienter():
    from sympy.vector.orienters import ThreeAngleOrienter
    # Not to be initialized


def test_sympy__vector__orienters__AxisOrienter():
    from sympy.vector.orienters import AxisOrienter
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(AxisOrienter(x, C.i))


def test_sympy__vector__orienters__BodyOrienter():
    from sympy.vector.orienters import BodyOrienter
    assert _test_args(BodyOrienter(x, y, z, '123'))


def test_sympy__vector__orienters__SpaceOrienter():
    from sympy.vector.orienters import SpaceOrienter
    assert _test_args(SpaceOrienter(x, y, z, '123'))


def test_sympy__vector__orienters__QuaternionOrienter():
    from sympy.vector.orienters import QuaternionOrienter
    a, b, c, d = symbols('a b c d')
    assert _test_args(QuaternionOrienter(a, b, c, d))


def test_sympy__vector__scalar__BaseScalar():
    from sympy.vector.scalar import BaseScalar
    from sympy.vector.coordsysrect import CoordSys3D
    C = CoordSys3D('C')
    assert _test_args(BaseScalar(0, C, ' ', ' '))


def test_sympy__physics__wigner__Wigner3j():
    from sympy.physics.wigner import Wigner3j
    assert _test_args(Wigner3j(0, 0, 0, 0, 0, 0))


def test_sympy__integrals__rubi__symbol__matchpyWC():
    from sympy.integrals.rubi.symbol import matchpyWC
    assert _test_args(matchpyWC(1, True, 'a'))
sclabs/sccms-nonrel
refs/heads/master
django/core/files/uploadhandler.py
136
""" Base file upload handler classes, and the built-in concrete subclasses """ try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.files.uploadedfile import TemporaryUploadedFile, InMemoryUploadedFile from django.utils import importlib __all__ = ['UploadFileException','StopUpload', 'SkipFile', 'FileUploadHandler', 'TemporaryFileUploadHandler', 'MemoryFileUploadHandler', 'load_handler', 'StopFutureHandlers'] class UploadFileException(Exception): """ Any error having to do with uploading files. """ pass class StopUpload(UploadFileException): """ This exception is raised when an upload must abort. """ def __init__(self, connection_reset=False): """ If ``connection_reset`` is ``True``, Django knows will halt the upload without consuming the rest of the upload. This will cause the browser to show a "connection reset" error. """ self.connection_reset = connection_reset def __unicode__(self): if self.connection_reset: return u'StopUpload: Halt current upload.' else: return u'StopUpload: Consume request data, then halt.' class SkipFile(UploadFileException): """ This exception is raised by an upload handler that wants to skip a given file. """ pass class StopFutureHandlers(UploadFileException): """ Upload handers that have handled a file and do not want future handlers to run should raise this exception instead of returning None. """ pass class FileUploadHandler(object): """ Base class for streaming upload handlers. """ chunk_size = 64 * 2 ** 10 #: The default chunk size is 64 KB. def __init__(self, request=None): self.file_name = None self.content_type = None self.content_length = None self.charset = None self.request = request def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): """ Handle the raw input from the client. Parameters: :input_data: An object that supports reading via .read(). 
:META: ``request.META``. :content_length: The (integer) value of the Content-Length header from the client. :boundary: The boundary from the Content-Type header. Be sure to prepend two '--'. """ pass def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None): """ Signal that a new file has been started. Warning: As with any data from the client, you should not trust content_length (and sometimes won't even get it). """ self.field_name = field_name self.file_name = file_name self.content_type = content_type self.content_length = content_length self.charset = charset if content_type_extra is None: content_type_extra = {} self.content_type_extra = content_type_extra def receive_data_chunk(self, raw_data, start): """ Receive data from the streamed upload parser. ``start`` is the position in the file of the chunk. """ raise NotImplementedError() def file_complete(self, file_size): """ Signal that a file has completed. File size corresponds to the actual size accumulated by all the chunks. Subclasses should return a valid ``UploadedFile`` object. """ raise NotImplementedError() def upload_complete(self): """ Signal that the upload is complete. Subclasses should perform cleanup that is necessary for this handler. """ pass class TemporaryFileUploadHandler(FileUploadHandler): """ Upload handler that streams data into a temporary file. """ def __init__(self, *args, **kwargs): super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs) def new_file(self, file_name, *args, **kwargs): """ Create the file object to append to as data is coming in. 
""" super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs) self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset) def receive_data_chunk(self, raw_data, start): self.file.write(raw_data) def file_complete(self, file_size): self.file.seek(0) self.file.size = file_size return self.file class MemoryFileUploadHandler(FileUploadHandler): """ File upload handler to stream uploads into memory (used for small files). """ def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None): """ Use the content_length to signal whether or not this handler should be in use. """ # Check the content-length header to see if we should # If the post is too large, we cannot use the Memory handler. if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE: self.activated = False else: self.activated = True def new_file(self, *args, **kwargs): super(MemoryFileUploadHandler, self).new_file(*args, **kwargs) if self.activated: self.file = StringIO() raise StopFutureHandlers() def receive_data_chunk(self, raw_data, start): """ Add the data to the StringIO file. """ if self.activated: self.file.write(raw_data) else: return raw_data def file_complete(self, file_size): """ Return a file object if we're activated. """ if not self.activated: return self.file.seek(0) return InMemoryUploadedFile( file = self.file, field_name = self.field_name, name = self.file_name, content_type = self.content_type, size = file_size, charset = self.charset ) def load_handler(path, *args, **kwargs): """ Given a path to a handler, return an instance of that handler. 
E.g.:: >>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request) <TemporaryFileUploadHandler object at 0x...> """ i = path.rfind('.') module, attr = path[:i], path[i+1:] try: mod = importlib.import_module(module) except ImportError, e: raise ImproperlyConfigured('Error importing upload handler module %s: "%s"' % (module, e)) except ValueError, e: raise ImproperlyConfigured('Error importing upload handler module. Is FILE_UPLOAD_HANDLERS a correctly defined list or tuple?') try: cls = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured('Module "%s" does not define a "%s" upload handler backend' % (module, attr)) return cls(*args, **kwargs)
Timonzimm/CS-401
refs/heads/master
project/api/api.py
1
import sqlite3

from flask import Flask, g, jsonify
from flask_cors import CORS

app = Flask(__name__)
CORS(app)

DATABASE = 'data/database.sqlite'


def get_db():
    """Return the request-scoped SQLite connection, opening it on first use."""
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    return db


@app.teardown_appcontext
def close_connection(exception):
    """Close the request-scoped connection when the app context tears down."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()


def _query(sql, params=()):
    """Execute a parameterized query and return all rows.

    User-supplied values (URL path arguments) MUST be passed through
    ``params`` (SQLite ``?`` placeholders), never interpolated into ``sql``:
    the previous ``str.format``-based queries were open to SQL injection.
    """
    cur = get_db().execute(sql, params)
    rows = cur.fetchall()
    cur.close()
    return rows


def _indicator(country, indicator_code):
    """Return (Year, Value) rows of one development indicator for a country."""
    return _query(
        'SELECT Year, Value FROM Indicators'
        ' WHERE CountryCode=? AND IndicatorCode=?',
        (country, indicator_code))


def _attack_counts_by(column, country):
    """Count a country's attacks grouped by ``column``, most frequent first.

    ``column`` is only ever one of our own literals below (never user
    input), so interpolating it into the SQL text is safe; the country code
    is still bound as a parameter.
    """
    return _query(
        'SELECT {0}, num_attacks FROM'
        ' (SELECT {0}, COUNT({0}) num_attacks FROM Attacks'
        ' WHERE iso_code=? GROUP BY {0})'
        ' ORDER BY num_attacks DESC'.format(column),
        (country,))


@app.route('/countries')
def all_countries():
    """Returns the short name and ISO code of every country."""
    return jsonify(_query('SELECT ShortName, CountryCode FROM Country'))


@app.route('/country/<string:country>')
def informations_by_country(country):
    """Returns name, region and income group of the given country."""
    rows = _query(
        'SELECT ShortName, Region, IncomeGroup FROM Country WHERE CountryCode=?',
        (country,))
    # As in the original implementation, an unknown code raises IndexError
    # (and therefore an HTTP 500).
    return jsonify(rows[0])


@app.route('/coords/<string:country>')
def attack_coordinates_by_country(country):
    """Returns the coordinates of all attacks of the given country."""
    return jsonify(_query(
        'SELECT longitude, latitude FROM Attacks WHERE iso_code=?',
        (country,)))


@app.route('/coords/year/<int:year>')
def attack_coordinates_by_year(year):
    """Returns the coordinates of all attacks of the given year."""
    return jsonify(_query(
        'SELECT longitude, latitude FROM Attacks WHERE iyear=?',
        (year,)))


@app.route('/attacks/countries')
def all_attacks():
    """Returns the total number of attacks for each country."""
    return jsonify(_query(
        'SELECT iso_code, COUNT(*) as num_attacks FROM Attacks GROUP BY iso_code'))


@app.route('/attacks/types/<string:country>')
def attack_types_by_country(country):
    """Returns the types list with the corresponding number of attacks
    in descending order of the given country."""
    return jsonify(_attack_counts_by('attacktype1_txt', country))


@app.route('/attacks/targets/<string:country>')
def attack_targets_by_country(country):
    """Returns the targets list with the corresponding number of attacks
    in descending order of the given country."""
    return jsonify(_attack_counts_by('targtype1_txt', country))


@app.route('/attacks/perpetrators/<string:country>')
def attack_perpetrators_by_country(country):
    """Returns the perpetrators list with the number of attacks
    corresponding to their attacks in descending order of the given country."""
    return jsonify(_attack_counts_by('gname', country))


@app.route('/attacks/num_victims/<string:country>')
def num_victims_per_year_by_country(country):
    """Returns the number of victims per year of the given country."""
    return jsonify(_query(
        'SELECT iyear, SUM(nkill) FROM Attacks WHERE iso_code=? GROUP BY iyear',
        (country,)))


@app.route('/attacks/num_attacks/<string:country>')
def num_attacks_per_year_by_country(country):
    """Returns the number of attacks per year of the given country."""
    return jsonify(_query(
        'SELECT iyear, COUNT(*) FROM Attacks WHERE iso_code=? GROUP BY iyear',
        (country,)))


@app.route('/score/<string:country>')
def score_per_year_by_country(country):
    """Returns the Global Terrorism Index (GTI) per year of the given country.

    GTI weights: 1 per attack, 3 per death, 0.5 per injury, 2 per incident
    with recorded property damage (any of the four propextent categories).
    """
    return jsonify(_query(
        '''SELECT iyear, (
               1*COUNT(*) +
               3*SUM(nkill) +
               0.5*SUM(nwound) +
               2*SUM(case propextent when 1.0 then 1 else 0 end) +
               2*SUM(case propextent when 2.0 then 1 else 0 end) +
               2*SUM(case propextent when 3.0 then 1 else 0 end) +
               2*SUM(case propextent when 4.0 then 1 else 0 end))
           FROM Attacks WHERE iso_code=? GROUP BY iyear''',
        (country,)))


# Economic development indicators

@app.route('/development/economy/electric_consumption/<string:country>')
def electric_consumption_per_year_by_country(country):
    """Returns the electric consumption (kWh per capita) per year of the given country."""
    return jsonify(_indicator(country, 'EG.USE.ELEC.KH.PC'))


@app.route('/development/economy/co2_emissions/<string:country>')
def co2_emissions_per_year_by_country(country):
    """Returns the CO2 emissions (in kt) per year of the given country."""
    return jsonify(_indicator(country, 'EN.ATM.CO2E.KT'))


@app.route('/development/economy/total_reserves/<string:country>')
def total_reserves_per_year_by_country(country):
    """Returns the total reserves (minus gold) in US$ per year of the given country."""
    return jsonify(_indicator(country, 'FI.RES.XGLD.CD'))


@app.route('/development/economy/arm_imports/<string:country>')
def arm_imports_per_year_by_country(country):
    """Returns the arm imports (SIPRI) per year of the given country."""
    return jsonify(_indicator(country, 'MS.MIL.MPRT.KD'))


@app.route('/development/economy/arm_exports/<string:country>')
def arm_exports_per_year_by_country(country):
    """Returns the arm exports (SIPRI) per year of the given country."""
    return jsonify(_indicator(country, 'MS.MIL.XPRT.KD'))


@app.route('/development/economy/gs_imports/<string:country>')
def gs_imports_per_year_by_country(country):
    """Returns the good and service imports (annual % growth) per year of the given country."""
    return jsonify(_indicator(country, 'NE.IMP.GNFS.KD.ZG'))


@app.route('/development/economy/gs_exports/<string:country>')
def gs_exports_per_year_by_country(country):
    """Returns the good and service exports (annual % growth) per year of the given country."""
    return jsonify(_indicator(country, 'NE.EXP.GNFS.KD.ZG'))


@app.route('/development/economy/gdp/<string:country>')
def gdp_per_year_by_country(country):
    """Returns the GDP (annual % growth) per year of the given country."""
    return jsonify(_indicator(country, 'NY.GDP.MKTP.KD.ZG'))


@app.route('/development/economy/gni/<string:country>')
def gni_per_year_by_country(country):
    """Returns the GNI (annual % growth) per year of the given country."""
    return jsonify(_indicator(country, 'NY.GNP.MKTP.KD.ZG'))


@app.route('/development/economy/tourism/<string:country>')
def tourism_per_year_by_country(country):
    """Returns the number of arrivals (tourism) per year of the given country."""
    return jsonify(_indicator(country, 'ST.INT.ARVL'))


@app.route('/development/economy/foreign_inv/<string:country>')
def foreign_inv_per_year_by_country(country):
    """Returns the foreign direct investment (net inflows % of GDP) per year of the given country."""
    return jsonify(_indicator(country, 'BX.KLT.DINV.WD.GD.ZS'))


# Social health development indicators

@app.route('/development/social_health/mortality_rate_under_5/<string:country>')
def mortality_rate_under_5_per_year_by_country(country):
    """Returns the mortality rate under 5 (per 1,000 people) per year of the given country."""
    return jsonify(_indicator(country, 'SH.DYN.MORT'))


@app.route('/development/social_health/hospital_beds/<string:country>')
def hospital_beds_per_year_by_country(country):
    """Returns the number of hospital beds (per 1,000 people) per year of the given country."""
    return jsonify(_indicator(country, 'SH.MED.BEDS.ZS'))


@app.route('/development/social_health/birth_rate/<string:country>')
def birth_rate_per_year_by_country(country):
    """Returns the birth rate (per 1,000 people) per year of the given country."""
    return jsonify(_indicator(country, 'SP.DYN.CBRT.IN'))


@app.route('/development/social_health/death_rate/<string:country>')
def death_rate_per_year_by_country(country):
    """Returns the death rate (per 1,000 people) per year of the given country."""
    return jsonify(_indicator(country, 'SP.DYN.CDRT.IN'))


@app.route('/development/social_health/population_dens/<string:country>')
def population_dens_per_year_by_country(country):
    """Returns the population density (people per square km of land area) per year of the given country."""
    return jsonify(_indicator(country, 'EN.POP.DNST'))


@app.route('/development/social_health/armed_forces/<string:country>')
def armed_forces_per_year_by_country(country):
    """Returns the armed forces personnel (total) per year of the given country."""
    return jsonify(_indicator(country, 'MS.MIL.TOTL.P1'))


# Population development indicators

@app.route('/development/social_health/population_0_14/<string:country>')
def population_0_14_per_year_by_country(country):
    """Returns the population aged between 0-14 (in %) per year of the given country."""
    return jsonify(_indicator(country, 'SP.POP.0014.TO.ZS'))


@app.route('/development/social_health/population_15_64/<string:country>')
def population_15_64_per_year_by_country(country):
    """Returns the population aged between 15-64 (in %) per year of the given country."""
    return jsonify(_indicator(country, 'SP.POP.1564.TO.ZS'))


@app.route('/development/social_health/population_65_up/<string:country>')
def population_65_up_per_year_by_country(country):
    """Returns the population aged between 65 and above (in %) per year of the given country."""
    return jsonify(_indicator(country, 'SP.POP.65UP.TO.ZS'))


@app.route('/development/social_health/population_growth/<string:country>')
def population_growth_per_year_by_country(country):
    """Returns the population annual % growth per year of the given country."""
    return jsonify(_indicator(country, 'SP.POP.GROW'))


# Wealth development indicators

@app.route('/development/wealth/renewable_energy_cons/<string:country>')
def renewable_energy_cons_per_year_by_country(country):
    """Returns the renewable energy consumption (% of total final energy consumption) per year of the given country."""
    return jsonify(_indicator(country, 'EG.FEC.RNEW.ZS'))


@app.route('/development/wealth/air_transport/<string:country>')
def air_transport_per_year_by_country(country):
    """Returns the number of passenger carried per year of the given country."""
    return jsonify(_indicator(country, 'IS.AIR.PSGR'))


@app.route('/development/wealth/internet_users/<string:country>')
def internet_users_per_year_by_country(country):
    """Returns the internet users (per 1,000 people) per year of the given country."""
    return jsonify(_indicator(country, 'IT.NET.USER.P2'))


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True, threaded=True)
CyanogenMod/android_kernel_samsung_galaxytab-cdma
refs/heads/cm-10.2
scripts/rt-tester/rt-tester.py
1094
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# Drives the kernel's rttest sysfs interface: reads a test script (one
# "cmd:opcode:threadid:data" record per line), writes commands to
# /sys/devices/system/rttest/rttestN/command and polls .../status until
# the expected state is reached.  Python 2 script.
import os
import sys
import getopt
import shutil
import string

# Globals (mutated by the command-line parsing below)
quiet = 0       # -q: suppress progress output
test = 0        # -t: syntax-check only, print target sysfs paths
comments = 0    # -c: echo script comments after the first command

sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes (script mnemonic -> numeric opcode written to sysfs)
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "lockbkl"       : "9",
    "unlockbkl"     : "10",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

# Test opcodes: mnemonic -> [status field letter, relation, fixed value
# or None (value then comes from the script's data column)]
test_opcodes = {
    "prioeq"      : ["P" , "eq" , None],
    "priolt"      : ["P" , "lt" , None],
    "priogt"      : ["P" , "gt" , None],
    "nprioeq"     : ["N" , "eq" , None],
    "npriolt"     : ["N" , "lt" , None],
    "npriogt"     : ["N" , "gt" , None],
    "unlocked"    : ["M" , "eq" , 0],
    "trylock"     : ["M" , "eq" , 1],
    "blocked"     : ["M" , "eq" , 2],
    "blockedwake" : ["M" , "eq" , 3],
    "locked"      : ["M" , "eq" , 4],
    "opcodeeq"    : ["O" , "eq" , None],
    "opcodelt"    : ["O" , "lt" , None],
    "opcodegt"    : ["O" , "gt" , None],
    "eventeq"     : ["E" , "eq" , None],
    "eventlt"     : ["E" , "lt" , None],
    "eventgt"     : ["E" , "gt" , None],
    }

# Print usage information
def usage():
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return

# Print progress when not in quiet mode
# (note: the parameter shadows the builtin "str")
def progress(str):
    if not quiet:
        print str

# Analyse a status value: does field value "val" satisfy the relation in
# "top" (an entry of test_opcodes) against "arg" from the script?
def analyse(val, top, arg):

    intval = int(val)

    if top[0] == "M":
        # Mutex state: pick the decimal digit selected by arg.
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: compare against a command opcode (by name or number).
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0

# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns
while 1:

    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break

    line = line.strip()
    parts = line.split(":")

    if not parts or len(parts) < 1:
        continue

    if len(parts[0]) == 0:
        continue

    if parts[0].startswith("#"):
        # Script comment: echoed only once -c has been "armed" below.
        if comments > 1:
            progress(line)
        continue

    if comments == 1:
        comments = 2

    progress(line)

    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()

    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]

            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue

            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()

                # Status line is comma-separated "X: value" fields.
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Seperate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                # "t" tests once; "w" keeps polling until satisfied.
                if query or cmd == "t":
                    break

                progress(" " + status)

            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)

        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()

    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
IntelPython/BlackScholes_bench
refs/heads/master
bs_erf_numba_jit_par.py
1
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT

import base_bs_erf
import numba as nb
from math import log, sqrt, exp, erf


@nb.njit(error_model='numpy', fastmath=False, parallel=True)
def black_scholes(nopt, price, strike, t, rate, vol, call, put):
    """Fill |call| and |put| with Black-Scholes European option prices.

    One option per prange iteration so numba can spread the loop across
    threads; results are written in place into the output arrays.
    """
    neg_rate = -rate
    two_sigma_sq = vol * vol * 2

    for i in nb.prange(nopt):
        spot = price[i]
        strike_i = strike[i]
        tte = t[i]

        log_moneyness = log(spot / strike_i)
        rate_term = tte * neg_rate

        total_var = tte * two_sigma_sq
        quarter_var = 0.25 * total_var
        inv_stddev = 1. / sqrt(total_var)

        d1_arg = (log_moneyness - rate_term + quarter_var) * inv_stddev
        d2_arg = (log_moneyness - rate_term - quarter_var) * inv_stddev

        nd1 = 0.5 + 0.5 * erf(d1_arg)
        nd2 = 0.5 + 0.5 * erf(d2_arg)

        discounted_strike = exp(rate_term) * strike_i

        call_price = spot * nd1 - discounted_strike * nd2
        call[i] = call_price
        # Put follows from put-call parity: put = call - spot + K*exp(-rT).
        put[i] = call_price - spot + discounted_strike


if __name__ == '__main__':
    base_bs_erf.run("Numba@jit-loop-par", black_scholes, nparr=True, pass_args=True)
TeamExodus/external_chromium_org
refs/heads/EXODUS-5.1
mojo/nacl/generator/generate_nacl_bindings.py
36
#!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# pylint: disable=W0104,W0106,F0401,R0201

import optparse
import os.path
import sys

import interface


def _ScriptDir():
  """Absolute path of the directory containing this script."""
  return os.path.dirname(os.path.abspath(__file__))


def _GetDirAbove(dirname):
  """Returns the directory "above" this file containing |dirname| (which must
  also be "above" this file)."""
  candidate = _ScriptDir()
  while True:
    candidate, leaf = os.path.split(candidate)
    assert leaf
    if leaf == dirname:
      return candidate


def _AddThirdPartyImportPath():
  """Make the checkout's third_party directory importable."""
  sys.path.append(os.path.join(_GetDirAbove('mojo'), 'third_party'))


_AddThirdPartyImportPath()

import jinja2

loader = jinja2.FileSystemLoader(_ScriptDir())
jinja_env = jinja2.Environment(loader=loader, keep_trailing_newline=True)


class CodeWriter(object):
  """Accumulates lines of code with varying levels of indentation."""

  def __init__(self):
    self._emitted = []
    self._prefix = ''
    self._prefix_stack = []

  def __lshift__(self, line):
    # Apply the current margin, then strip trailing whitespace so that
    # indented blank lines stay truly blank.
    self._emitted.append((self._prefix + line).rstrip())

  def PushMargin(self):
    self._prefix_stack.append(self._prefix)
    self._prefix += '  '

  def PopMargin(self):
    self._prefix = self._prefix_stack.pop()

  def GetValue(self):
    # Normalize to exactly one trailing newline.
    joined = '\n'.join(self._emitted)
    return joined.rstrip() + '\n'

  def Indent(self):
    return Indent(self)


class Indent(object):
  """Context handler that automatically indents and dedents a CodeWriter."""

  def __init__(self, writer):
    self._target = writer

  def __enter__(self):
    self._target.PushMargin()

  def __exit__(self, type_, value, traceback):
    self._target.PopMargin()


def TemplateFile(name):
  """Path of a template file living next to this script."""
  return os.path.join(os.path.dirname(__file__), name)


# Wraps comma separated lists as needed.
def Wrap(pre, items, post):
  """Renders pre + comma-joined items + post on a single line when it fits
  in 80 columns, otherwise one item per indented continuation line."""
  one_line = pre + ', '.join(items) + post
  if len(one_line) <= 80:
    return [one_line]
  wrapped = [pre]
  last = len(items) - 1
  for index, item in enumerate(items):
    suffix = post if index == last else ','
    wrapped.append('    ' + item + suffix)
  return wrapped


def GeneratorWarning():
  """Header comment marking emitted files as machine generated."""
  return ('// WARNING this file was generated by %s\n// Do not edit by hand.' %
          os.path.basename(__file__))


# Untrusted library implementing the public Mojo API.
def GenerateLibMojo(functions, out):
  """Writes the untrusted libmojo.cc stubs for |functions| to |out|."""
  template = jinja_env.get_template('libmojo.cc.tmpl')
  writer = CodeWriter()

  for fn in functions:
    for line in Wrap('%s %s(' % (fn.return_type, fn.name), fn.ParamList(),
                     ') {'):
      writer << line

    # 2 extra parameters: message ID and return value.
    param_count = len(fn.params) + 2

    with writer.Indent():
      writer << 'uint32_t params[%d];' % param_count
      return_type = fn.result_param.base_type
      if return_type == 'MojoResult':
        default = 'MOJO_RESULT_INVALID_ARGUMENT'
      elif return_type == 'MojoTimeTicks':
        default = '0'
      else:
        raise Exception('Unhandled return type: ' + return_type)
      writer << '%s %s = %s;' % (return_type, fn.result_param.name, default)

      # Message ID goes in slot 0.
      writer << 'params[0] = %d;' % fn.uid

      # Pointers to each parameter; by-value params pass a pointer to a copy.
      cast_fmt = 'params[%d] = reinterpret_cast<uint32_t>(%s);'
      for param in fn.params:
        expr = param.name
        if param.IsPassedByValue():
          expr = '&' + expr
        writer << cast_fmt % (param.uid + 1, expr)

      # The return value pointer occupies the final slot.
      writer << cast_fmt % (param_count - 1, '&' + fn.result_param.name)

      writer << 'DoMojoCall(params, sizeof(params));'
      writer << 'return %s;' % fn.result_param.name
    writer << '}'
    writer << ''

  text = template.render(
      generator_warning=GeneratorWarning(),
      body=writer.GetValue())
  out.write(text)


# Parameters passed into trusted code are handled differently depending on
# details of the parameter.  ParamImpl instances encapsulate these differences
# and are used to generate the code that transfers parameters across the
# untrusted/trusted boundary.
class ParamImpl(object):
  """Strategy for marshaling one parameter across the trust boundary."""

  def __init__(self, param):
    self.param = param

  def DeclareVars(self, code):
    """Declare whatever variables are needed to handle this parameter."""
    raise NotImplementedError()

  def ConvertParam(self):
    """Expression converting the untrusted representation into a trusted
    scalar value or a trusted pointer into the untrusted address space."""
    raise NotImplementedError()

  def CallParam(self):
    """Expression passed for this parameter when invoking the trusted
    Mojo API function."""
    raise NotImplementedError()

  def CopyOut(self, code):
    """Transfer data back into untrusted memory after the trusted call.

    Overridden for Out and InOut parameters; a no-op for inputs.
    """
    pass

  def IsArray(self):
    # Array conversion is deferred until the scalar holding the array's
    # size has itself been converted.
    return False


class ScalarInputImpl(ParamImpl):
  """Scalar passed by value into trusted code."""

  def DeclareVars(self, code):
    code << '%s %s_value;' % (self.param.base_type, self.param.name)

  def ConvertParam(self):
    param = self.param
    return ('ConvertScalarInput(nap, params[%d], &%s_value)' %
            (param.uid + 1, param.name))

  def CallParam(self):
    return '%s_value' % self.param.name


class ScalarOutputImpl(ParamImpl):
  """Scalar result written back out to untrusted memory."""

  def DeclareVars(self, code):
    code << '%s volatile* %s_ptr;' % (self.param.base_type, self.param.name)
    code << '%s %s_value;' % (self.param.base_type, self.param.name)

  def ConvertParam(self):
    param = self.param
    return ('ConvertScalarOutput(nap, params[%d], &%s_ptr)' %
            (param.uid + 1, param.name))

  def CallParam(self):
    return '&%s_value' % self.param.name

  def CopyOut(self, code):
    name = self.param.name
    code << '*%s_ptr = %s_value;' % (name, name)


class ScalarInOutImpl(ParamImpl):
  """Scalar read from and written back to untrusted memory."""

  def DeclareVars(self, code):
    code << '%s volatile* %s_ptr;' % (self.param.base_type, self.param.name)
    code << '%s %s_value;' % (self.param.base_type, self.param.name)

  def ConvertParam(self):
    param = self.param
    return ('ConvertScalarInOut(nap, params[%d], %s, &%s_value, &%s_ptr)' %
            (param.uid + 1, CBool(param.is_optional), param.name, param.name))

  def CallParam(self):
    name = self.param.name
    value_expr = '&%s_value' % name
    if not self.param.is_optional:
      return value_expr
    # Optional in-out params forward NULL when the caller passed NULL.
    return '%s_ptr ? %s : NULL' % (name, value_expr)

  def CopyOut(self, code):
    name = self.param.name
    assign = '*%s_ptr = %s_value;' % (name, name)
    if self.param.is_optional:
      # Only write back when the untrusted caller supplied a pointer.
      code << 'if (%s_ptr != NULL) {' % (name)
      with code.Indent():
        code << assign
      code << '}'
    else:
      code << assign


class ArrayImpl(ParamImpl):
  """Pointer-plus-size parameter validated against its size scalar."""

  def DeclareVars(self, code):
    code << '%s %s;' % (self.param.param_type, self.param.name)

  def ConvertParam(self):
    param = self.param
    # void arrays are sized in bytes; typed arrays in elements.
    if param.base_type == 'void':
      element_size = '1'
    else:
      element_size = 'sizeof(*%s)' % param.name
    return ('ConvertArray(nap, params[%d], %s, %s, %s, &%s)' %
            (param.uid + 1, param.size + '_value', element_size,
             CBool(param.is_optional), param.name))

  def CallParam(self):
    return self.param.name

  def IsArray(self):
    return True


class StructInputImpl(ParamImpl):
  """Struct passed by pointer into trusted code."""

  def DeclareVars(self, code):
    code << '%s %s;' % (self.param.param_type, self.param.name)

  def ConvertParam(self):
    param = self.param
    return ('ConvertStruct(nap, params[%d], %s, &%s)' %
            (param.uid + 1, CBool(param.is_optional), param.name))

  def CallParam(self):
    return self.param.name


def ImplForParam(p):
  """Selects the marshaling strategy for parameter |p|."""
  if p.IsScalar():
    if not p.is_output:
      return ScalarInputImpl(p)
    if p.is_input:
      return ScalarInOutImpl(p)
    return ScalarOutputImpl(p)
  if p.is_array:
    return ArrayImpl(p)
  if p.is_struct:
    return StructInputImpl(p)
  assert False, p


def CBool(value):
  """Renders a Python truth value as a C++ bool literal."""
  if value:
    return 'true'
  return 'false'


# A trusted wrapper that validates the arguments passed from untrusted code
# before passing them to the underlying public Mojo API.
def GenerateMojoSyscall(functions, out):
  """Writes the trusted mojo_syscall.cc dispatcher for |functions| to |out|.

  Each function becomes one switch case that checks the untrusted parameter
  count, converts/validates every parameter, invokes the trusted Mojo API,
  and copies results back out to untrusted memory.
  """
  template = jinja_env.get_template('mojo_syscall.cc.tmpl')

  code = CodeWriter()
  code.PushMargin()

  for f in functions:
    # One marshaling impl per parameter, plus one for the return value.
    impls = [ImplForParam(p) for p in f.params]
    impls.append(ImplForParam(f.result_param))

    code << 'case %d:' % f.uid

    code.PushMargin()

    code << '{'

    with code.Indent():
      # Message ID and return-value pointer account for the 2 extras.
      num_params = len(f.params) + 2
      code << 'if (num_params != %d) {' % num_params
      with code.Indent():
        code << 'return -1;'
      code << '}'

      # Declare temporaries.
      for impl in impls:
        impl.DeclareVars(code)

      def ConvertParam(code, impl):
        # Emit a conversion check that aborts the syscall on failure.
        code << 'if (!%s) {' % impl.ConvertParam()
        with code.Indent():
          code << 'return -1;'
        code << '}'

      code << '{'
      with code.Indent():
        code << 'ScopedCopyLock copy_lock(nap);'
        # Convert and validate pointers in two passes.
        # Arrays cannot be validated until the size parameter has been
        # converted.
        for impl in impls:
          if not impl.IsArray():
            ConvertParam(code, impl)
        for impl in impls:
          if impl.IsArray():
            ConvertParam(code, impl)
      code << '}'
      code << ''

      # Call; the final impl is the return value, not an argument.
      getParams = [impl.CallParam() for impl in impls[:-1]]
      code << 'result_value = %s(%s);' % (f.name, ', '.join(getParams))
      code << ''

      # Write outputs back to untrusted memory under the copy lock.
      code << '{'
      with code.Indent():
        code << 'ScopedCopyLock copy_lock(nap);'
        for impl in impls:
          impl.CopyOut(code)
      code << '}'
      code << ''

      code << 'return 0;'

    code << '}'

    code.PopMargin()

  body = code.GetValue()

  text = template.render(
      generator_warning=GeneratorWarning(),
      body=body)
  out.write(text)


def OutFile(dir_path, name):
  """Opens |name| for writing inside |dir_path|, creating the directory if
  needed.  The caller owns (and must close) the returned file object."""
  if not os.path.exists(dir_path):
    os.makedirs(dir_path)
  return open(os.path.join(dir_path, name), 'w')


def main(args):
  """Parses the command line and generates both output files."""
  usage = 'usage: %prog [options]'
  parser = optparse.OptionParser(usage=usage)
  parser.add_option(
      '-d',
      dest='out_dir',
      metavar='DIR',
      help='output generated code into directory DIR')
  options, args = parser.parse_args(args=args)
  if not options.out_dir:
    parser.error('-d is required')
  if args:
    parser.error('unexpected positional arguments: %s' % ' '.join(args))

  mojo = interface.MakeInterface()

  # Fix: the output files were previously never closed, relying on
  # refcount-based finalization to flush them.  Close deterministically so
  # the generated files are complete even on interpreters without prompt
  # finalization.
  out = OutFile(options.out_dir, 'libmojo.cc')
  try:
    GenerateLibMojo(mojo.functions, out)
  finally:
    out.close()

  out = OutFile(options.out_dir, 'mojo_syscall.cc')
  try:
    GenerateMojoSyscall(mojo.functions, out)
  finally:
    out.close()


if __name__ == '__main__':
  main(sys.argv[1:])