| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
endlessm/chromium-browser | tools/swarming_client/third_party/pyasn1/pyasn1/compat/dateandtime.py | Python | bsd-3-clause | 482 | 0 |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import time
from datetime import datetime
from sys import version_info
__all__ = ['strptime']
if version_info[:2] <= (2, 4):
def strptime(text, dateFormat):
return datetime(*(time.strptime(text, dateFormat)[0:6]))
else:
def strptime(text, dateFormat):
return datetime.strptime(text, dateFormat)
symbolicdata/code | src/sdeval/classes/MachineSettingsFromXMLBuilder_test.py | Python | gpl-3.0 | 3,258 | 0.008287 |
import unittest
import MachineSettingsFromXMLBuilder as MSFXMLB
import MachineSettings as MS
class TestMachineSettingsFromXMLBuilder(unittest.TestCase):
"""
Tests for the class MachineSettingsFromXMLBuilder
.. moduleauthor:: Albert Heinle <albert.heinle@uwaterloo.ca>
"""
def setUp(self):
"""
Our setup is a typical one where we have three computer algebra systems
in the input dictionary and "time -p" as our time command
"""
casDict = {"Singular":"Singular", "Magma":"magma", "Maple":"maple"}
timeCommand = "time -p"
self.msTest = MS.MachineSettings(casDict,timeCommand)
self.builder = MSFXMLB.MachineSettingsFromXMLBuilder()
def test_CorrectInput(self):
"""
This will simply include the raw xml file of msTest (see setUp)
We will parse it and then compare the different entries in the original
msTest and the parsed one.
"""
actualOutPut = self.builder.build(self.msTest.toXML().toxml())
self.assertEqual(self.msTest.getCASDict(),actualOutPut.getCASDict(),
"The computer algebra dictionary got broken after parsing")
self.assertEqual(self.msTest.getTimeCommand(),actualOutPut.getTimeCommand(),
"We obtain a different time command after parsing")
self.assertEqual(str(self.msTest),str(actualOutPut),
"The string representation of the original machine settings instance\
and the parsed representative differs.")
def test_InvalidXML(self):
"""
We will include some tests here that will cause the parsing errors to raise.
1) invalid XML syntax
- Empty string
- "<xml></xml>"
- "!@#$%^&*()_+"
- "123467"
2) Invalid input for Machine Settings XML
"""
#1)
testPassed = 1
try:
temp = self.builder.build("")
testPassed = 0
except:
pass
if testPassed == 0:
self.fail("Could build Machine Settings from the
|
empty string.")
try:
temp = self.builder.build("<xml></xml>")
testPassed = 0
except:
pass
if (testPassed == 0):
self.fail("Could build Machine Settings from empty xml.")
try:
temp = self.builder.build("!@#$%^&*()_+")
testPassed = 0
except:
pass
if (testPassed == 0):
self.fail("Could build Machine Settings from completely invalid string.")
try:
temp = self.builder.build("123467")
testPassed = 0
except:
pass
if testPassed == 0:
self.fail("Could build Machine Settings from \"123467\".")
#2)
try:
temp = self.builder.build('<?xml version="1.0" ?><machinesettings><othervars>\
<timecommand>time -p</timecommand></othervars><casdictionary></casdictionary></machinesettings>')
testPassed = 0
except:
pass
if (testPassed == 0):
self.fail("Could build Machine Settings from string without computer algebra systems.")
if __name__=="__main__":
unittest.main()
zjj/trac_hack | trac/versioncontrol/admin.py | Python | bsd-3-clause | 16,185 | 0.001792 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import os.path
import sys
from genshi.builder import tag
from trac.admin import IAdminCommandProvider, IAdminPanelProvider
from trac.config import ListOption
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.util import as_bool, is_path_below
from trac.util.compat import any
from trac.util.text import breakable_path, normalize_whitespace, print_table, \
printout
from trac.util.translation import _, ngettext, tag_
from trac.versioncontrol import DbRepositoryProvider, RepositoryManager, \
is_default
from trac.web.chrome import Chrome, add_notice, add_warning
class VersionControlAdmin(Component):
"""trac-admin command provider for version control administration."""
implements(IAdminCommandProvider, IPermissionRequestor)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('changeset added', '<repos> <rev> [rev] [...]',
"""Notify trac about changesets added to a repository
This command should be called from a post-commit hook. It will
trigger a cache update and notify components about the addition.
""",
self._complete_repos, self._do_changeset_added)
yield ('changeset modified', '<repos> <rev> [rev] [...]',
"""Notify trac about changesets modified in a repository
This command should be called from a post-revprop hook after
revision properties like the commit message, author or date
have been changed. It will trigger a cache update for the given
revisions and notify components about the change.
""",
self._complete_repos, self._do_changeset_modified)
yield ('repository list', '',
'List source repositories',
None, self._do_list)
yield ('repository resync', '<repos> [rev]',
"""Re-synchronize trac with repositories
When [rev] is specified, only that revision is synchronized.
Otherwise, the complete revision history is synchronized. Note
that this operation can take a long time to complete.
If synchronization gets interrupted, it can be resumed later
using the `sync` command.
To synchronize all repositories, specify "*" as the repository.
""",
self._complete_repos, self._do_resync)
yield ('repository sync', '<repos> [rev]',
"""Resume synchronization of repositories
Similar to `resync`, but doesn't clear the already synchronized
changesets. Useful for resuming an interrupted `resync`.
To synchronize all repositories, specify "*" as the repository.
""",
self._complete_repos, self._do_sync)
def get_reponames(self):
rm = RepositoryManager(self.env)
return [reponame or '(default)' for reponame
in rm.get_all_repositories()]
def _complete_repos(self, args):
if len(args) == 1:
return self.get_reponames()
def _do_changeset_added(self, reponame, *revs):
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
rm.notify('changeset_added', reponame, revs)
def _do_changeset_modified(self, reponame, *revs):
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
rm.notify('changeset_modified', reponame, revs)
def _do_list(self):
rm = RepositoryManager(self.env)
values = []
for (reponame, info) in sorted(rm.get_all_repositories().iteritems()):
alias = ''
if 'alias' in info:
alias = info['alias'] or '(default)'
values.append((reponame or '(default)', info.get('type', ''),
alias, info.get('dir', '')))
print_table(values, [_('Name'), _('Type'), _('Alias'), _('Directory')])
def _sync(self, reponame, rev, clean):
rm = RepositoryManager(self.env)
if reponame == '*':
if rev is not None:
raise TracError(_('Cannot synchronize a single revision '
'on multiple repositories'))
repositories = rm.get_real_repositories()
else:
if is_default(reponame):
reponame = ''
repos = rm.get_repository(reponame)
if repos is None:
raise TracError(_("Repository '%(repo)s' not found",
repo=reponame or '(default)'))
if rev is not None:
repos.sync_changeset(rev)
printout(_('%(rev)s resynced on %(reponame)s.', rev=rev,
reponame=repos.reponame or '(default)'))
return
repositories = [repos]
db = self.env.get_db_cnx()
for repos in sorted(repositories, key=lambda r: r.reponame):
printout(_('Resyncing repository history for %(reponame)s... ',
reponame=repos.reponame or '(default)'))
repos.sync(self._sync_feedback, clean=clean)
cursor = db.cursor()
cursor.execute("SELECT count(rev) FROM revision WHERE repos=%s",
(repos.id,))
for cnt, in cursor:
printout(ngettext('%(num)s revision cached.',
'%(num)s revisions cached.', num=cnt))
printout(_('Done.'))
def _sync_feedback(self, rev):
sys.stdout.write(' [%s]\r' % rev)
sys.stdout.flush()
def _do_resync(self, reponame, rev=None):
self._sync(reponame, rev, clean=True)
def _do_sync(self, reponame, rev=None):
self._sync(reponame, rev, clean=False)
# IPermissionRequestor methods
def get_permission_actions(self):
return [('VERSIONCONTROL_ADMIN', ['BROWSER_VIEW', 'CHANGESET_VIEW',
'FILE_VIEW', 'LOG_VIEW'])]
class RepositoryAdminPanel(Component):
"""Web admin panel for repository administration."""
implements(IAdminPanelProvider)
allowed_repository_dir_prefixes = ListOption('versioncontrol',
'allowed_repository_dir_prefixes', '',
doc="""Comma-separated list of allowed prefixes for repository
directories when adding and editing repositories in the repository
admin panel. If the list is empty, all repository directories are
allowed. (''since 0.12.1'')""")
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'VERSIONCONTROL_ADMIN' in req.perm:
yield ('versioncontrol', _('Version Control'), 'repository',
_('Repositories'))
def render_admin_panel(self, req, category, page, path_info):
req.perm.require('VERSIONCONTROL_ADMIN')
# Retrieve info for all repositories
rm = RepositoryManager(self.env)
all_repos = rm.get_all_repositories()
db_provider = self.env[DbRepositoryProvider]
if path_info:
# Detail view
reponame = not is_default(path_info) and path_info or ''
info = all_repos.get(reponame)
if info is None:
raise TracError(_("Repository '%(repo)s' not found",
repo=path_info))
if req.method == 'POST':
cernops/CloudMan | export/export.py | Python | apache-2.0 | 11,272 | 0.011888 |
from cloudman.cloudman.models import TopLevelAllocationByZone,TopLevelAllocation,TopLevelAllocationAllowedResourceType
from cloudman.cloudman.models import ProjectAllocation,Project,ProjectMetadata,ProjectAllocationMetadata,GroupAllocation
from cloudman.cloudman.models import GroupAllocationMetadata
from cloudman.cloudman.commonFunctions import getKSI2K,getPercent,getUserListfromEgroup
from settings import *
#Will export all the allocation in cloudman starting from top level allocation
def getAllAllocationByProject(resolve):
# Create the <ALLOCATION> base element
global resolvegroup
resolvegroup = resolve
alloc = {'PROJECT_ALL': []}
prjObjList = Project.objects.all()
for prObj in prjObjList:
project_name = prObj.name
project_attr = genProject(project_name)
project_attr['TOP_LEVEL_ALLOCATION_ALL'] = []
for tpAllocNode in genTopAllocByProject(project_name):
project_attr['TOP_LEVEL_ALLOCATION_ALL'].append(tpAllocNode)
alloc['PROJECT_ALL'].append({'PROJECT':project_attr})
return alloc
#Will export all the allocation in cloudman starting from top level allocation
def getAllAllocationByTopLevel(resolve):
# Create the <ALLOCATION> base element
global resolvegroup
resolvegroup = resolve
alloc = {'TOP_LEVEL_ALLOCATION_ALL': []}
#get all top_level_allocation as list
top_alloc_node = genTopAlloc()
for node in top_alloc_node:
alloc['TOP_LEVEL_ALLOCATION_ALL'].append(node) #add the top_level_allocation to main tree
return alloc
def genTopAllocByProject(project_name):
node_list = []
tpAllocNameList = ProjectAllocation.objects.filter(project__name = project_name).values_list('top_level_allocation__name',flat =True).distinct()
for tpAllocName in tpAllocNameList:
tpAllocList = TopLevelAllocation.objects.filter(name = tpAllocName).select_related(depth=1)
for alloc in tpAllocList:
tp_alloc_name = alloc.name
attribute = {}
attribute['NAME'] = tp_alloc_name
attribute['GROUP'] = str(alloc.group.name)
attribute['KSI2K'] = str(alloc.hepspec * float(KSI2K))
attribute['HS06'] = str(alloc.hepspec)
zone_alloc_node = genZoneAlloc(tp_alloc_name)
resource_type_node = genAllowedResourceType(tp_alloc_name)
attribute['ZONE_ALLOCATION'] = zone_alloc_node
attribute['ALLOWED_RESOURCE_TYPE'] = resource_type_node
attribute['MEMORY'] = str(alloc.memory)
attribute['STORAGE'] = str(alloc.storage)
attribute['BANDWIDTH'] = str(alloc.bandwidth)
proj_alloc_list = genProjAlloc(tp_alloc_name,False)
attribute['PROJECT_ALLOCATION_ALL'] = proj_alloc_list
child = {'TOP_LEVEL_ALLOCATION':attribute}
node_list.append(child)
return node_list
#this will create the list of all the top_level_allocation node
def genTopAlloc():
node_list = []
tpAllocList = TopLevelAllocation.objects.select_related(depth=1)
for alloc in tpAllocList:
tp_alloc_name = alloc.name
attribute = {}
attribute['NAME'] = tp_alloc_name
attribute['GROUP'] = str(alloc.group.name)
attribute['KSI2K'] = str(getKSI2K(alloc.hepspec))
attribute['HS06'] = str(alloc.hepspec)
attribute['MEMORY'] = str(alloc.memory)
attribute['STORAGE'] = str(alloc.storage)
attribute['BANDWIDTH'] = str(alloc.bandwidth)
zone_alloc = genZoneAlloc(tp_alloc_name)
attribute['ZONE_ALLOCATION'] = zone_alloc
resource_type = genAllowedResourceType(tp_alloc_name)
attribute['ALLOWED_RESOURCE_TYPE'] = resource_type
proj_alloc = genProjAlloc(tp_alloc_name)
attribute['PROJECT_ALLOCATION_ALL'] = proj_alloc
child = {'TOP_LEVEL_ALLOCATION':attribute}
node_list.append(child)
return node_list
#this will create all the project_allocation for a given top_level_allocation_name
def genProjAlloc(tp_alloc_name,showprojectinfo=True):
alloc_list = ProjectAllocation.objects.filter(top_level_allocation__name = tp_alloc_name).select_related(depth=1)
prj_alloc_list = []
for alloc in alloc_list:
alloc_name = alloc.name
attribute = {}
attribute['NAME'] = alloc_name
attribute['GROUP'] = str(alloc.group.name)
project_name = alloc.project.name
if showprojectinfo:
attribute['PROJECT'] = genProject(project_name)
attribute['PROJECT_ALLOCATION_METADATA'] = genProjectAllocMetaData(alloc_name)
hepspec = alloc.hepspec
tp_alloc_hepspec = alloc.top_level_allocation.hepspec
hepspec_percent = str(getPercent(hepspec ,tp_alloc_hepspec ))
attribute['KSI2K'] = str(getKSI2K(hepspec))
attribute['HS06'] = str(hepspec)
attribute['HS06PERCENT'] = str(hepspec_percent)
attribute['MEMORY'] = str(alloc.memory)
attribute['STORAGE'] = str(alloc.storage)
attribute['BANDWIDTH'] = str(alloc.bandwidth)
child = {'PROJECT_ALLOCATION':attribute}
gp_alloc_list = genGroupAlloc(alloc_name) #get the group_allocation for this project_allocation
attribute['GROUP_ALLOCATION_ALL'] = gp_alloc_list
prj_alloc_list.append(child)
return prj_alloc_list
##will generate the Group allocation for the given project allocation it will return the list of group allocation node
def genGroupAlloc(alloc_name,top_group = True):
if top_group:
grpAllocObj_list = GroupAllocation.objects.filter(project_allocation__name = alloc_name).select_related('group')
else:
grpAllocObj_list = GroupAllocation.objects.filter(parent_group_allocation__name = alloc_name).select_related('group')
grp_alloc_list = []
for alloc in grpAllocObj_list:
grpalloc_name = alloc.name
hepspec = alloc.hepspec
if top_group:
parent_hepspec = alloc.project_allocation.hepspec
else:
parent_hepspec = alloc.parent_group_allocation.hepspec
attribute ={}
attribute['NAME'] = grpalloc_name
attribute['GROUP'] = str(alloc.group.name)
attribute['KSI2K'] = str(getKSI2K(alloc.hepspec))
attribute['HS06'] = str(alloc.hepspec)
attribute['HS06PERCENT'] = str(getPercent(hepspec,parent_hepspec))
attribute['MEMORY'] = str(alloc.memory)
attribute['STORAGE'] = str(alloc.storage)
attribute['BANDWIDTH'] = str(alloc.bandwidth)
attribute['GROUP_ALLOCATION_METADATA'] = genGroupAllocMetaData(grpalloc_name)
attribute['GROUP_ALLOCATION_ALL'] = genGroupAlloc(grpalloc_name,False)
child = {'GROUP_ALLOCATION':attribute}
grp_alloc_list.append(child)
return grp_alloc_list
##will create and return the MetaData child for the given group_allocation
def genGroupAllocMetaData(gp_alloc_name):
node = []
grpAllocMetaObj_list = GroupAllocationMetadata.objects.filter(group_allocation__name = gp_alloc_name)
for gpAllocMeta in grpAllocMetaObj_list:
attribute = {}
attrname = gpAllocMeta.attribute
value = gpAllocMeta.value
if attrname.lower() == 'EGROUP'.lower() and resolvegroup.lower() !='no':
if resolvegroup.lower() =='yes' or resolvegroup.lower() =='':
user_list = getUserListfromEgroup(value)
egrp_resolve_attr = {}
egrp_resolve_attr['NAME'] = 'USER_LIST_LDAP'
egrp_resolve_attr['VALUE'] = user_list
egroup_child = {'METADATA':egrp_resolve_attr}
node.append(egroup_child)
if resolvegroup.lower() =='':
attribute['NAME'] = attrname
attribute['VALUE'] = value
child = {'METADATA':attribute}
node.append(child)
else:
attribute['NAME'] = attrname
attribute['VALUE'] = value
child = {'METADATA':attribute}
node.append(child)
return node
##will create and return the MetaData child for the given group_allocation
def
alchemy-fr/Phraseanet-Docs | config/all.py | Python | gpl-3.0 | 7,172 | 0.007669 |
# Global configuration information used across all the
# translations of documentation.
#
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo','sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Phraseanet'
copyright = u'2004-2021, <a href="http://www.alchemy.fr" target="_blank">Alchemy</a>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.1'
# The full version, including alpha/beta/rc tags.
release = '4.1'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'phraseanet'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../_templates/']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'yes'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Phraseanetdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Phraseanet.tex', u'Phraseanet Documentation',
u'Phraseanet Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phraseanet', u'Phraseanet Documentation',
[u'Phraseanet Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Phraseanet', u'Phraseanet Documentation',
u'Phraseanet Team', 'Phraseanet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Languages available.
languages = ['fr','en']
silly-wacky-3-town-toon/SOURCE-COD | toontown/ai/DistributedWinterCarolingTargetAI.py | Python | apache-2.0 | 312 | 0.012821 |
from direct.directnotify import DirectNotifyGlobal
from toontown.ai.DistributedScavengerHuntTargetAI import DistributedScavengerHuntTargetAI
class DistributedWinterCarolingTargetAI(DistributedScavengerHuntTargetAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedWinterCarolingTargetAI")
bitmazk/django-hero-slider | hero_slider/south_migrations/0002_auto__add_field_slideritemtitle_is_published.py | Python | mit | 8,315 | 0.007937 |
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SliderItemTitle.is_published'
db.add_column('hero_slider_slideritemtitle', 'is_published',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SliderItemTitle.is_published'
db.delete_column('hero_slider_slideritemtitle', 'is_published')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'hero_slider.slideritem': {
'Meta': {'object_name': 'SliderItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'hero_slider.slideritemtitle': {
'Meta': {'object_name': 'SliderItemTitle'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'slider_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hero_slider.SliderItem']"}),
'title':
georgesterpu/pyVSR | pyVSR/ouluvs2/scripts/create_labels.py | Python | gpl-3.0 | 1,947 | 0.002054 |
from os import path
s1 = 'one seven three five one six two six six seven'
s2 = 'four zero two nine one eight five nine zero four'
s3 = 'one nine zero seven eight eight zero three two eight'
s4 = 'four nine one two one one eight five five one'
s5 = 'eight six three five four zero two one one two'
s6 = 'two three nine zero zero one six seven six four'
s7 = 'five two seven one six one three six seven zero'
s8 = 'nine seven four four four three five five eight seven'
s9 = 'six three eight five three nine eight five six five'
s10 = 'seven three two four zero one nine nine five zero'
digits = [s1, s2, s3, s4, s5, s6, s7, s8, s9, s10]
s31 = 'Excuse me'
s32 = 'Goodbye'
s33 = 'Hello'
s34 = 'How are you'
s35 = 'Nice to meet you'
s36 = 'See you'
s37 = 'I am sorry'
s38 = 'Thank you'
s39 = 'Have a good time'
s40 = 'You are welcome'
short = [s31, s32, s33, s34, s35, s36, s37, s38, s39, s40]
sentences = './splits/all.txt'
transcript_dir = '/run/media/john_tukey/download/datasets/ouluvs2/transcript_sentence/'
def get_sentence(user, sid):
with open(path.join(transcript_dir, user), 'r') as f:
contents = f.read().splitlines()
return contents[sid][:-1]
def main():
with open(sentences, 'r') as f:
contents = f.read().splitlines()
labels_dict = dict()
for line in contents:
user, sentence = line.split('_') # this looks like a neutral face. why ? <(^.^)>
key = line
sid = int(sentence[1:])
if sid <= 30:
value = digits[(sid-1)//3]
elif 30 < sid <= 60:
value = short[(sid-1)//3 - 10]
elif 60 < sid <= 70:
value = get_sentence(user, sid-61)
else:
raise Exception('Allowed sentence ids from 1 to 70')
labels_dict[key] = value
with open('labels.txt', 'w') as f:
for (k,v) in labels_dict.items():
f.write(k + ' ' + v + '\n')
if __name__ == '__main__':
main()
modelbrouwers/modelbouwdag.nl | src/modelbouwdag/wsgi/production.py | Python | mit | 1,508 | 0.000663 |
"""
WSGI config for modelbouwdag project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import site
import sys
from .base import setupenv
setupenv()
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "modelbouwdag.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "modelbouwdag.conf.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
DrewMeyersCUboulder/UPOD_Bridge | Server/AbstractDb.py | Python | mit | 383 | 0.002611 |
"""
Abstract class for Db classes
"""
from abc import ABCMeta, abstractmethod
class AbstractDb(object):
""" AbstractDb """
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def write(self, data):
pass
@abstractmethod
def query(self, condition):
pass
@abstractmethod
def read(self, query):
pass
Zouyiran/ryu | ryu/app/chapter_2/pre_install_app.py | Python | apache-2.0 | 12,687 | 0.005833 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import networkx as nx
import copy
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto.ofproto_v1_3 import OFP_DEFAULT_PRIORITY
from ryu.topology.api import get_all_switch, get_all_link, get_all_host
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet, arp, icmp
from ryu.lib.packet import ether_types
from ryu.lib import hub
'''
###For 2 chapter###
fig 2-8
pre-install flow entries for end-to-end hosts('h1' and 'h2')
----test----
Linear topology
ICMP
'''
class ProactiveApp(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(ProactiveApp, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.discover_thread = hub.spawn(self.pre_install)
# {dpid:{port:mac,port:mac,...},dpid:{port:mac,port:mac,...},...} only switches'mac
self.dpids_port_to_mac = dict()
# [dpid,dpid,...]
self.dpids = list()
# {(dpid,port):host_mac,(dpid,port):host_mac,...} only hosts'mac
self.dpids_port_to_host = dict()
#[host_mac,host_mac,host_mac,...]
self.hosts = list()
#{(src_dpid,dst_dpid):(src_port,dst_port),():(),...}
self.links_dpid_to_port = dict()
# [(src_dpid,dst_dpid),(src_dpid,dst_dpid),...]
self.links = list()
self.adjacency_matrix = dict()
self.pre_adjacency_matrix = dict()
# {
# (dpid,dpid):{xxx:[dpid,dpid,dpid],xxx:[dpid,dpid,dpid,dpid],...},
# (dpid,dpid):{xxx:[dpid,dpid,dpid],xxx:[dpid,dpid,dpid,dpid],...},
# ...}
self.path_table = dict()
self.dpid_to_dp = dict()
self.SLEEP_PERIOD = 2 #seconds
@set_ev_cls(ofp_event.EventOFPStateChange,[MAIN_DISPATCHER, DEAD_DISPATCHER])
def state_change_handler(self, ev):
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.dpid_to_dp:
self.logger.info('register datapath: %04x', datapath.id)
self.dpid_to_dp[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.dpid_to_dp:
self.logger.info('unregister datapath: %04x', datapath.id)
del self.dpid_to_dp[datapath.id]
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
def pre_install(self):
while True:
hub.sleep(self.SLEEP_PERIOD)
self.pre_adjacency_matrix = copy.deepcopy(self.adjacency_matrix)
self._update_topology()
self._update_hosts()
if self.pre_adjacency_matrix != self.adjacency_matrix:
self.logger.info('***********discover_topology thread: TOPO UPDATE***********')
self.path_table = self._get_path_table(self.adjacency_matrix)
self.pre_install_flow()
def _update_topology(self):
switch_list = get_all_switch(self)
if len(switch_list) != 0:
self.dpids_port_to_mac = self._get_dpids_port_to_mac(switch_list)
self.dpids = self._get_dpids(switch_list) #[dpid,dpid,dpid,...]
link_dict = get_all_link(self)
if len(link_dict) != 0:
self.links_dpid_to_port = self._get_links_dpid_to_port(link_dict)
self.links = self._get_links(self.links_dpid_to_port) #[(src.dpid,dst.dpid),(src.dpid,dst.dpid),...]
if self.dpids and self.links:
self.adjacency_matrix = self._get_adjacency_matrix(self.dpids, self.links)
def _get_dpids_port_to_mac(self,switch_list):
table = dict()
for switch in switch_list:
dpid = switch.dp.id
table.setdefault(dpid,{})
ports = switch.ports
for port in ports:
table[dpid][port.port_no] = port.hw_addr
return table
def _get_dpids(self,switch_list):
dpid_list = list()
for switch in switch_list:
dpid_list.append(switch.dp.id)
return dpid_list
def _get_links(self,link_ports_table):
return link_ports_table.keys()
def _get_links_dpid_to_port(self,link_dict):
table = dict()
for link in link_dict.keys():
src = link.src #ryu.topology.switches.Port
dst = link.dst
table[(src.dpid,dst.dpid)] = (src.port_no, dst.port_no)
return table
def _get_adjacency_matrix(self,dpids,links):
graph = dict()
for src in dpids:
graph[src] = dict()
for dst in dpids:
graph[src][dst] = float('inf')
if src == dst:
graph[src][dst] = 0
elif (src, dst) in links:
graph[src][dst] = 1
return graph
def _get_path_table(self, matrix):
if matrix:
dpids = matrix.keys()
g = nx.Graph()
g.add_nodes_from(dpids)
for i in dpids:
for j in dpids:
if matrix[i][j] == 1:
g.add_edge(i,j,weight=1)
return self.__graph_to_path(g)
def __graph_to_path(self,g): # {(i,j):[[],[],...],(i,j):[[],[],[],..],...}
all_shortest_paths = dict()
for i in g.nodes():
for j in g.nodes():
if i == j:
continue
all_shortest_paths[(i,j)] = list()
try:
nx.shortest_path(g,i,j)
except nx.exception.NetworkXNoPath:
continue
for each in nx.all_shortest_paths(g,i,j):
all_shortest_paths[(i,j)].append(each)
return all_shortest_paths
def _update_hosts(self):
host_list = get_all_host(self)
if host_list:
self.dpids_port_to_host = self._get_dpids_port_to_host(host_list)
self.hosts = self._get_hosts(host_list)
def _get_dpids_port_to_host(self,host_list):
table = dict()
for host in host_list:
host_mac = host.mac
host_port = host.port # Port
dpid = host_port.dpid
table[(dpid,host_port.port_no)] = host_mac
return table
def _get_hosts(self,host_list):
hosts = list()
for host in host_list:
hosts.append(host.mac)
return hosts
def pre_install_flow(self):
print("execute pre-install flow")
if len(self.hosts) == 2:
print("host num:",2)
host1 = self.hosts[0]
host2 = self.hosts[1]
self._pre_install_flow(host1,host2)
self._pre_install_flow(host2,host1)
def _pre_install_flow(self,host1,host2):
host1_dpid = None
host2_dpid = None
host1_port = None
host2_port = None
for dpid_port in self.dpids
dparks1134/STAMP | createExeWindows.py | Python | gpl-3.0 | 3,184 | 0.039259 |
from distutils.core import setup
import py2exe
import os
import matplotlib as mpl
# delete all pyc files
for dirpath, dirnames, filenames in os.walk(os.getcwd()):
for each_file in filenames:
if each_file.endswith('.pyc'):
if os.path.exists(os.path.join(dirpath, each_file)):
os.remove(os.path.join(dirpath, each_file))
# create list of plugins
plugin_directories = ['','/common','/common/multipleComparisonCorrections','/common/multipleComparisonCorrections/additional',
'/groups', '/groups/effectSizeFilters','/groups/plots','/groups/plots/configGUI','/groups/statisticalTests',
'/multiGroups', '/multiGroups/effectSizeFilters','/multiGroups/plots','/multiGroups/plots/configGUI', '/multiGroups/postHoc','/multiGroups/statisticalTests',
'/samples','/samples/confidenceIntervalMethods','/samples/effectSizeFilters','/samples/plots','/samples/plots/configGUI','/samples/statisticalTests',
'/samples/statisticalTests/additional','/samples/plots/examples']
plugin_files = []
for directory in plugin_directories:
for files in os.listdir("./stamp/plugins" + directory):
f1 = "./stamp/plugins" + directory + "/" + files
if os.path.isfile(f1): # skip directories
f2 = "library/stamp/plugins" + directory, [f1]
plugin_files.append(f2)
# grab all additional resource or data files
icon_files = []
for f in os.listdir("./stamp/icons"):
f1 = "./stamp/icons/" + f
if os.path.isfile(f1): # skip directories
f2 = "icons", [f1]
icon_files.append(f2)
example_files = []
for f in os.listdir("examples"):
f1 = "examples/" + f
if os.path.isfile(f1): # skip directories
f2 = "examples", [f1]
example_files.append(f2)
else:
if f != '.svn':
for g in os.listdir(f1):
if os.path.isfile(f1 + '/' + g): # skip directories
f2 = 'examples/' + f, [f1 + '/' + g]
example_files.append(f2)
for f in os.listdir('examples'):
f1 = 'examples/' + f
if os.path.isfile(f1): # skip directories
f2 = 'examples', [f1]
example_files.append(f2)
data_files = []
for f in os.listdir("./stamp/data"):
f1 = "./stamp/data/" + f
if os.path.isfile(f1): # skip directories
f2 = "library/stamp/data", [f1]
data_files.append(f2)
root_files = ['LICENSE.txt', './windows/STAMP.exe.log', './windows/readme.txt', 'msvcp90.dll', './manual/STAMP_Users_Guide.pdf']
mpl_data_files = mpl.get_py2exe_datafiles()
# setup configuration
setup(
name = "STAMP",
version = "2.1.1",
description = "Statistical analysis of taxonomic and functional profiles",
author = "Donovan Parks",
windows=[{"script":"STAMP.py", "icon_resources": [(1, "./stamp/icons/stamp.ico")]}],
options =
{
"py2exe":
{
"unbuffered": True,
"optimize": 2,
"skip_archive": True,
"includes": ["sip", "PyQt4", "sqlite3", "FileDialog"],
"packages": ["matplotlib","pytz","scipy","mpl_toolkits", "pyparsing", "biom", "pyqi", "h5py", "six"],
"dll_excludes": ["libgdk_pixbuf-2.0-0.dll","libgdk-win32-2.0-0.dll", "libgobject-2.0-0.dll", "tcl84.dll", "tk84.dll"],
}
},
zipfile = "library/",
data_files = icon_files + example_files + data_files + root_files + plugin_files + mpl_data_files,
)
stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/wheel/key.py | Python | apache-2.0 | 3,278 | 0.00061 |
# -*- coding: utf-8 -*-
'''
Wheel system wrapper for key system
'''
from __future__ import absolute_import
# Import python libs
import os
import hashlib
# Import salt libs
import salt.key
import salt.crypt
__func_alias__ = {
'list_': 'list'
}
def list_(match):
'''
List all the keys under a named status
'''
skey = salt.key.Key(__opts__)
return skey.list_status(match)
def list_all():
'''
List all the keys
'''
skey = salt.key.Key(__opts__)
return skey.all_keys()
def accept(match, include_rejected=False, include_denied=False):
'''
Accept keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.accept(match, include_rejected=include_rejected, include_denied=include_denied)
def accept_dict(match):
'''
Accept keys based on a dict of keys
Example to move a list of keys from the `minions_pre` (pending) directory
to the `minions` (accepted) directory:
.. code-block:: python
{
'minions_pre': [
'jerry',
'stuart',
'bob',
],
}
'''
skey = salt.key.Key(__opts__)
return skey.accept(match_dict=match)
def delete(match):
'''
Delete keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.delete_key(match)
def delete_dict(match):
'''
Delete keys based on a dict of keys
'''
skey = salt.key.Key(__opts__)
return skey.delete_key(match_dict=match)
def reject(match, include_accepted=False, include_denied=False):
'''
Reject keys based on a glob match
'''
skey = salt.key.Key(__opts__)
return skey.reject(match, include_accepted=include_accepted, include_denied=include_denied)
def reject_dict(match):
'''
Reject keys based on a dict of keys
'''
skey = salt.key.Key(__opts__)
return skey.reject(match_dict=match)
def key_str(match):
'''
Return the key strings
'''
skey = salt.key.Key(__opts__)
return skey.key_str(match)
def finger(match):
'''
Return the matching key fingerprints
'''
skey = salt.key.Key(__opts__)
return skey.finger(match)
def gen(id_=None, keysize=2048):
'''
Generate a key pair. No keys are stored on the master, a keypair is
returned as a dict containing pub and priv keys
'''
if id_ is None:
id_ = hashlib.sha512(os.urandom(32)).hexdigest()
ret = {'priv': '',
'pub': ''}
priv = salt.crypt.gen_keys(__opts__['pki_dir'], id_, keysize)
pub = '{0}.pub'.format(priv[:priv.rindex('.')])
with salt.utils.fopen(priv) as fp_:
ret['priv'] = fp_.read()
with salt.utils.fopen(pub) as fp_:
ret['pub'] = fp_.read()
os.remove(priv)
os.remove(pub)
return ret
def gen_accept(id_, keysize=2048, force=False):
'''
Generate a key pair then accept the public key. This function returns the
key pair in a dict, only the public key is preserved on the master.
'''
ret = gen(id_, keysize)
acc_path = os.path.join(__opts__['pki_dir'], 'minions', id_)
if os.path.isfile(acc_path) and not force:
return {}
with salt.utils.fopen(acc_path, 'w+') as fp_:
fp_.write(ret['pub'])
return ret
matthewearl/photo-a-day-aligner | pada/landmarks.py | Python | mit | 2,163 | 0.002774 |
# Copyright (c) 2016 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
__all__ = (
'get_face_mask',
'LandmarkFinder',
'NoFaces',
'TooManyFaces',
)
import cv2
import dlib
import numpy
class TooManyFaces(Exception):
pass
class NoFaces(Exception):
pass
class LandmarkFinder(object):
def __init__(self, predictor_path):
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor(str(predictor_path))
def get(self, im):
rects = self.detector(im, 1)
if len(rects) > 1:
raise TooManyFaces
if len(rects) == 0:
raise NoFaces
return numpy.matrix([[p.x, p.y]
for p in self.predictor(im, rects[0]).parts()])
def draw_convex_hull(im, points, color):
points = cv2.convexHull(points)
cv2.fillConvexPoly(im, points, color=color)
def get_face_mask(shape, landmarks):
im = numpy.zeros(shape[:2], dtype=numpy.float64)
draw_convex_hull(im,
landmarks,
color=1)
return im
novafloss/django-json-dbindex | json_dbindex/pgcommands.py | Python | bsd-3-clause | 3,263 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Rodolphe Quiédeville <rodolphe@quiedeville.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of django-json-dbindex nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
import logging
from django.db import connections
def index_exists(index, database='default'):
"""Execute raw sql
"""
cursor = connections[database].cursor()
qry = "SELECT COUNT(indexname) FROM pg_indexes WHERE indexname = %s"
cursor.execute(qry, [index['name']])
row = cursor.fetchone()
cursor.close()
return row[0] == 1
def execute_raw(sql, database='default', parms=None):
"""
Execute a raw SQL command
sql (string) : SQL command
database (string): the database name configured in settings
"""
try:
cursor = connections[database].cursor()
if parms is not None:
cursor.execute(sql, parms)
else:
cursor.execute(sql)
cursor.close()
return 0
except Exception, e:
logging.error('Cant execute %s -- Exception raised %s' % (sql, e))
return 1
def drop_index(index, database='default'):
"""
Check if index exists and drop it
index (dict) : index description
"""
if 'database' in index:
database = index['database']
if index_exists(index, database):
logging.info("Will drop %s" % index['name'])
res = execute_raw(index['cmd'], database)
logging.info("%s dropped" % index['name'])
else:
res = 1
logging.info("%s doesn't exist" % index['name'])
return res
def create_index(index, database='default'):
"""
Create an index
index (dict) : index description
{"name": "foo",
"database": "default",
"cmd": "CREATE INDEX foo_idx ON table (column)"
}
"""
if 'database' in index:
database = index['database']
if index_exists(index, database):
logging.info("%s still exists" % index['name'])
s = 1
else:
logging.info("Will create %s" % index['name'])
res = execute_raw(index['cmd'], database)
logging.info("%s created" % index['name'])
return res
def create_extensions(extensions, database='default'):
"""
Create all extensions
"""
for extension in extensions:
cmd = "CREATE EXTENSION IF NOT EXISTS %s" % (extension)
logging.info("Will create extension %s on database %s" % (extension,
database))
res = execute_raw(cmd,
database=database)
logging.info("%s created" % extension)
return res
e-koch/VLA_Lband | 14B-088/HI/analysis/uv_plots/channel_1000_uvplot.py | Python | mit | 813 | 0 |
import os
import matplotlib.pyplot as plt
import numpy as np
from plotting_styles import onecolumn_figure, default_figure
from paths import paper1_figures_path
'''
Make a UV plot of the 1000th HI channel.
'''
uvw = np.load("/mnt/MyRAID/M33/VLA/14B-088/HI/"
"14B-088_HI_LSRK.ms.contsub_channel_1000.uvw.npy")
onecolumn_figure()
fig = plt.figure()
ax = fig.add_subplot(111) # , rasterized=True)
# plt.hexbin(uvw[0], uvw[1], bins='log', cmap='afmhot_r')
ax.scatter(uvw[0], uvw[1], s=0.1, color='k', rasterized=True)
plt.xlabel("U (m)")
plt.ylabel("V (m)")
plt.xlim([-3200, 3500])
plt.ylim([-3200, 3200])
plt.grid()
plt.tight_layout()
plt.savefig(paper1_figures_path("m33_hi_uv_plane_chan1000.pdf"))
plt.savefig(paper1_figures_path("m33_hi_uv_plane_chan1000.png"))
plt.close()
default_figure()
nict-isp/uds-sdk | uds/utils/dict.py | Python | gpl-2.0 | 789 | 0.001267 |
# -*- coding: utf-8 -*-
"""
uds.utils.dict
~~~~~~~~~~~~~~
Utility functions to parse string and others.
:copyright: Copyright (c) 2015, National Institute of Information and Communications Technology.All rights reserved.
:license: GPL2, see LICENSE for more details.
"""
import copy
def override_dict(new, old):
"""Override old dict object with new one.
:param object new: New dict
:param object old: Old dict
:return: Overridden result
:rtype: :attr:`object`
"""
if isinstance(new, dict):
merged = copy.deepcopy(old)
for key in new.keys():
if key in old:
merged[key] = override_dict(new[key], old[key])
else:
merged[key] = new[key]
return merged
else:
return new
FOSSRIT/PyCut | game/objects/slice.py | Python | mpl-2.0 | 124 | 0.008065 |
import pygame
class Slice():
"""docstring for Slice"""
def __init__(self, context):
self.context = context
FDio/vpp | test/test_abf.py | Python | apache-2.0 | 10,057 | 0 |
#!/usr/bin/env python3
from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
import unittest
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsLabel, \
VppIpTable, FibPathProto
from vpp_acl import AclRule, VppAcl
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from ipaddress import IPv4Network, IPv6Network
from vpp_object import VppObject
NUM_PKTS = 67
def find_abf_policy(test, id):
policies = test.vapi.abf_policy_dump()
for p in policies:
if id == p.policy.policy_id:
return True
return False
def find_abf_itf_attach(test, id, sw_if_index):
attachs = test.vapi.abf_itf_attach_dump()
for a in attachs:
if id == a.attach.policy_id and \
sw_if_index == a.attach.sw_if_index:
return True
return False
class VppAbfPolicy(VppObject):
def __init__(self,
test,
policy_id,
acl,
paths):
self._test = test
self.policy_id = policy_id
self.acl = acl
self.paths = paths
self.encoded_paths = []
for path in self.paths:
self.encoded_paths.append(path.encode())
def add_vpp_config(self):
self._test.vapi.abf_policy_add_del(
1,
{'policy_id': self.policy_id,
'acl_index': self.acl.acl_index,
'n_paths': len(self.paths),
'paths': self.encoded_paths})
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.abf_policy_add_del(
0,
{'policy_id': self.policy_id,
'acl_index': self.acl.acl_index,
'n_paths': len(self.paths),
'paths': self.encoded_paths})
def query_vpp_config(self):
return find_abf_policy(self._test, self.policy_id)
def object_id(self):
return ("abf-policy-%d" % self.policy_id)
class VppAbfAttach(VppObject):
def __init__(self,
test,
policy_id,
sw_if_index,
priority,
is_ipv6=0):
self._test = test
self.policy_id = policy_id
self.sw_if_index = sw_if_index
self.priority = priority
self.is_ipv6 = is_ipv6
def add_vpp_config(self):
self._test.vapi.abf_itf_attach_add_del(
1,
{'policy_id': self.policy_id,
'sw_if_index': self.sw_if_index,
'priority': self.priority,
'is_ipv6': self.is_ipv6})
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.abf_itf_attach_add_del(
0,
{'policy_id': self.policy_id,
'sw_if_index': self.sw_if_index,
'priority': self.priority,
'is_ipv6': self.is_ipv6})
def query_vpp_config(self):
return find_abf_itf_attach(self._test,
self.policy_id,
self.sw_if_index)
def object_id(self):
return ("abf-attach-%d-%d" % (self.policy_id, self.sw_if_index))
class TestAbf(VppTestCase):
""" ABF Test Case """
@classmethod
def setUpClass(cls):
super(TestAbf, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestAbf, cls).tearDownClass()
def setUp(self):
super(TestAbf, self).setUp()
self.create_pg_interfaces(range(5))
for i in self.pg_interfaces[:4]:
i.admin_up()
i.config_ip4()
i.resolve_arp()
i.config_ip6()
i.resolve_ndp()
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.admin_down()
super(TestAbf, self).tearDown()
def test_abf4(self):
""" IPv4 ACL Based Forwarding
"""
#
# We are not testing the various matching capabilities
        # of ACLs, that's done elsewhere. Here we are testing
# the application of ACLs to a forwarding path to achieve
# ABF
# So we construct just a few ACLs to ensure the ABF policies
# are correctly constructed and used. And a few path types
# to test the API path decoding.
#
#
# Rule 1
#
rule_1 = AclRule(is_permit=1, proto=17, ports=1234,
src_prefix=IPv4Network("1.1.1.1/32"),
dst_prefix=IPv4Network("1.1.1.2/32"))
acl_1 = VppAcl(self, rules=[rule_1])
acl_1.add_vpp_config()
#
# ABF policy for ACL 1 - path via interface 1
#
abf_1 = VppAbfPolicy(self, 10, acl_1,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
abf_1.add_vpp_config()
#
# Attach the policy to input interface Pg0
#
attach_1 = VppAbfAttach(self, 10, self.pg0.sw_if_index, 50)
attach_1.add_vpp_config()
#
# fire in packet matching the ACL src,dst. If it's forwarded
# then the ABF was successful, since default routing will drop it
#
p_1 = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src="1.1.1.1", dst="1.1.1.2") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg1)
#
# Attach a 'better' priority policy to the same interface
#
abf_2 = VppAbfPolicy(self, 11, acl_1,
[VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index)])
abf_2.add_vpp_config()
attach_2 = VppAbfAttach(self, 11, self.pg0.sw_if_index, 40)
attach_2.add_vpp_config()
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg2)
#
# Attach a policy with priority in the middle
#
abf_3 = VppAbfPolicy(self, 12, acl_1,
[VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index)])
abf_3.add_vpp_config()
attach_3 = VppAbfAttach(self, 12, self.pg0.sw_if_index, 45)
attach_3.add_vpp_config()
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg2)
#
# remove the best priority
#
attach_2.remove_vpp_config()
self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg3)
#
# Attach one of the same policies to Pg1
#
attach_4 = VppAbfAttach(self, 12, self.pg1.sw_if_index, 45)
attach_4.add_vpp_config()
p_2 = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(src="1.1.1.1", dst="1.1.1.2") /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100))
self.send_and_expect(self.pg1, p_2 * NUM_PKTS, self.pg3)
#
# detach the policy from PG1, now expect traffic to be dropped
#
attach_4.remove_vpp_config()
self.send_and_assert_no_replies(self.pg1, p_2 * NUM_PKTS, "Detached")
#
# Swap to route via a next-hop in the non-default table
#
table_20 = VppIpTable(self, 20)
table_20.add_vpp_config()
self.pg4.set_table_ip4(table_20.table_id)
self.pg4.admin_up()
self.pg4.config_ip4()
self.pg4.resolve_arp()
abf_13 = VppAbfPolicy(self, 13, acl_1,
[VppRoutePath(self.pg4.remote_ip4,
0xffffffff,
nh_table_id=table_20.table_id)])
abf_13.add_vpp_config()
attach_5 = VppAbfAttach(self, 13, self.pg0.sw_if_index, 30)
attach_5.add_vpp_config()
self.send_and_expect(self.pg0, p_1
|
niwinz/django-greenqueue
|
greenqueue/scheduler/thread_scheduler.py
|
Python
|
bsd-3-clause
| 449
| 0.002227
|
# -*- coding: utf-8 -*-
from threading import Thread, Lock
from time import sleep
from .base import SchedulerMixin
class Scheduler(SchedulerMixin, Thread):
"""
Threading scheduler.
"""
def sleep(self, seconds):
if seconds == 0:
return
sleep(seconds)
def return_callback(self, *args):
with Lock():
return self.callback(*args)
def run(self):
self.start_loop()
|
zagfai/webtul
|
webtul/log.py
|
Python
|
mit
| 1,135
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Log Config
"""
__author__ = 'Zagfai'
__date__ = '2018-06'
SANIC_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format':
'%(levelname)s [%(asctime)s %(name)s:%(lineno)d] %(message)s',
            'datefmt': '%y%m%d %H:%M:%S',
},
"access": {
"format": "VISIT [%(asctime)s %(host)s]: " +
"%(request)s %(message)s
|
%(status)d %(byte)d",
'datefmt': '%y%m%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
},
"access_console": {
"class": "logging.StreamHandler",
"formatter": "access",
},
},
'loggers': {
'': {
'level': 'INFO',
'handlers': ['console'],
'propagate': True
},
'sanic.access': {
'level': 'INFO',
'handlers': ['access_console'],
'propagate': False
},
}
}
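# --- Editor's sketch (not part of the original webtul module) --------------
# SANIC_LOGGING_CONFIG follows the stdlib logging.config schema (version 1),
# so it can be activated directly with dictConfig; passing it to Sanic via its
# log_config argument is the other common route (assumed usage, not shown here).
if __name__ == "__main__":
    import logging
    import logging.config
    logging.config.dictConfig(SANIC_LOGGING_CONFIG)
    logging.getLogger("demo").info("logging configured")  # uses the 'default' formatter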
|
unreal666/outwiker
|
src/outwiker/gui/preferences/wikieditorpanel.py
|
Python
|
gpl-3.0
| 1,446
| 0
|
# -*- coding: utf-8 -*-
import wx
from outwiker.gui.editorstyleslist import EditorStylesList
from outwiker.pages.wiki.wikiconfig import WikiConfig
from outwiker.gui.preferences.baseprefpanel import BasePrefPanel
class WikiEditorPanel(BasePrefPanel):
def __init__(self, parent, application):
super(type(self), self).__init__(parent)
self._config = WikiConfig(application.config)
self.__createGui()
self.__layout()
self.SetupScrolling()
def __createGui(self):
self._stylesList = EditorStylesList(self)
def __layout(self):
mainSizer = wx.FlexGridSizer(cols=1)
mainSizer.AddGrowableCol(0)
mainSizer.AddGrowableRow(0)
mainSizer.Add(self._stylesList, flag=wx.ALL | wx.EXPAND, border=2)
self.SetSizer(mainSizer)
self.Layout()
def LoadState(self):
        self._stylesList.addStyle(_(u"Link"), self._config.link.value)
self._stylesList.addStyle(_(u"Heading"), self._config.heading.value)
self._stylesList.addStyle(_(u"Command
|
"), self._config.command.value)
self._stylesList.addStyle(_(u"Comment"), self._config.comment.value)
def Save(self):
self._config.link.value = self._stylesList.getStyle(0)
self._config.heading.value = self._stylesList.getStyle(1)
self._config.command.value = self._stylesList.getStyle(2)
self._config.comment.value = self._stylesList.getStyle(3)
|
urfonline/api
|
config/wsgi.py
|
Python
|
mit
| 1,914
| 0
|
"""
WSGI config for api project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# api directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'api'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
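# Editor's note (not part of the original file): a WSGI server is expected to
# import the ``application`` object defined above; for example, with gunicorn
# (an assumed deployment choice, not taken from this repository):
#
#     gunicorn config.wsgi:application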
|
chrisxue815/leetcode_python
|
problems/test_0211.py
|
Python
|
unlicense
| 1,179
| 0
|
import unittest
import utils
class WordDictionary:
def __init__(self):
"""
Initialize your data structure here.
"""
        self.root = {}
def addWord(self, word: str) -> None:
curr = self.root
for ch in word:
child = curr.get(ch)
if not child:
                curr[ch] = child = {}
curr = child
curr['#'] = True
def search(self, word: str) -> bool:
def search(curr, start):
for i in range(start, len(word)):
ch = word[i]
if ch == '.':
for k, v in curr.items():
if k != '#' and search(v, i + 1):
return True
return False
else:
child = curr.get(ch)
if not child:
return False
curr = child
return curr.get('#', False)
return search(self.root, 0)
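# --- Editor's sketch (not part of the original solution) -------------------
# The trie above stores one dict level per character, with '#' marking word
# ends; a '.' in the query fans out over every child at that level.
def _demo():  # hypothetical helper, added for illustration only
    wd = WordDictionary()
    wd.addWord("bad")
    wd.addWord("dad")
    assert not wd.search("pad")   # no such word
    assert wd.search("bad")       # exact match
    assert wd.search(".ad")       # '.' matches any single character
    assert wd.search("b..")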
class Test(unittest.TestCase):
def test(self):
utils.test_invocations(self, __file__, WordDictionary)
if __name__ == '__main__':
unittest.main()
|
oemof/reegis-hp
|
reegis_hp/de21/results.py
|
Python
|
gpl-3.0
| 11,607
| 0.000431
|
import easygui_qt as easy
import pandas as pd
import numpy as np
import geoplot
from matplotlib import pyplot as plt
import math
from matplotlib.colors import LinearSegmentedColormap
MTH = {'sum': np.sum, 'max': np.max, 'min': np.min, 'mean': np.mean}
class SpatialData:
def __init__(self, result_file=None):
if result_file is None:
result_file = easy.get_file_names(title="Select result file.")[0]
print(result_file)
self.results = pd.read_csv(result_file, index_col=[0, 1, 2])
self.polygons = None
self.lines = None
self.plotter = None
def add_polygon_column(self, obj=None, direction=None, bus=None,
method=None, kws=None, **kwargs):
if method is None:
method = easy.get_choice("Chose you method!",
choices=['sum', 'max', 'min', 'mean'])
if self.polygons is None:
self.polygons = load_geometry(**kwargs)
if kws is None:
kws = ['line', 'GL', 'duals']
objects = list(set([
x[5:] for x in
self.results.index.get_level_values('obj_label').unique()
if not any(y in x for y in kws)]))
reg_buses = list(set([
x[5:] for x in
self.results.index.get_level_values('bus_label').unique()
if not any(y in x for y in kws)]))
global_buses = list(set([
x for x in
self.results.index.get_level_values('bus_label').unique()
if 'GL' in x]))
buses = reg_buses + global_buses
if obj is None:
obj = easy.get_choice("What object do you want to plot?",
choices=objects)
if direction is None:
direction = easy.get_choice("From bus or to bus?",
choices=['from_bus', 'to_bus'])
if bus is None:
bus = easy.get_choice("Which bus?", choices=buses)
for r in self.polygons.index:
try:
tmp = pd.Series(self.results.loc[
'{0}_{1}'.format(r, bus), direction,
'{0}_{1}'.format(r, obj)]['val']).groupby(
level=0).agg(MTH[method])[0]
except KeyError:
tmp = float('nan')
self.polygons.loc[r, obj] = tmp
uv = unit_round(self.polygons[obj])
self.polygons[obj] = uv['series']
self.polygons[obj].prefix = uv['prefix']
self.polygons[obj].prefix_long = uv['prefix_long']
selection = {'obj': obj,
'direction': direction,
'bus': bus,
'method': method}
return selection
def add_power_lines(self, method=None, **kwargs):
if self.lines is None:
self.lines = load_geometry(region_column='name', **kwargs)
if self.plotter is None:
self.plotter = geoplot.GeoPlotter(
geoplot.postgis2shapely(self.lines.geom), (3, 16, 47, 56))
else:
self.plotter.geometries = geoplot.postgis2shapely(self.lines.geom)
if method is None:
method = easy.get_choice("Chose you method!",
choices=['sum', 'max', 'min', 'mean'])
for l in self.lines.index:
try:
r = l.split('-')
tmp = pd.Series()
tmp.set_value(1, self.results.loc[
'{0}_bus_el'.format(r[0]), 'from_bus',
'{0}_{1}_powerline'.format(*r)]['val'].groupby(
level=0).agg(MTH[method])[0])
tmp.set_value(2, self.results.loc[
'{0}_bus_el'.format(r[1]), 'from_bus',
'{1}_{0}_powerline'.format(*r)]['val'].groupby(
level=0).agg(MTH[method])[0])
self.lines.loc[l, 'trans'] = tmp.max()
except KeyError:
                self.lines.loc[l, 'trans'] = 3000000
        uv = unit_round(self.lines['trans'])
self.lines['trans'] = uv['series']
self.lines['trans'].prefix = uv['prefix']
self.lines['trans'].prefix_long = uv['prefix_long']
return method
def load_geometry(geometry_file=None, region_column='gid'):
if geometry_file is None:
geometry_file = easy.get_file_names()[0]
return pd.read_csv(geometry_file, index_col=region_column)
def show():
plt.tight_layout()
plt.box(on=None)
plt.show()
def unit_round(values, min_value=False):
longprefix = {0: '', 1: 'kilo', 2: 'Mega', 3: 'Giga', 4: 'Tera',
5: 'Exa', 6: 'Peta'}
shortprefix = {0: '', 1: 'k', 2: 'M', 3: 'G', 4: 'T',
5: 'E', 6: 'P'}
if min_value:
def_value = min(values)
a = 1
else:
def_value = max(values)
a = 0
if def_value > 0:
factor = int(int(math.log10(def_value)) / 3) + a
else:
factor = 0
values = round(values / 10 ** (factor * 3), 2)
return {'series': values, 'prefix': shortprefix[factor],
'prefix_long': longprefix[factor]}
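# Editor's sketch (not part of the original module): unit_round() picks an SI
# prefix from the series maximum and rescales the whole series, e.g.
#
#     unit_round(pd.Series([1.5e6, 250e3]))
#     # -> values 1.5 and 0.25 with 'prefix' 'M' and 'prefix_long' 'Mega'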
def add_labels(data, plotter, label=None,
coord_file='data/geometries/coord_region.csv'):
p = pd.read_csv(coord_file, index_col='name')
data.polygons['point'] = p.point
for row in data.polygons.iterrows():
if 'point' not in row[1]:
point = geoplot.postgis2shapely([row[1].geom, ])[0].centroid
else:
point = geoplot.postgis2shapely([row[1].point, ])[0]
(x, y) = plotter.basemap(point.x, point.y)
if label is None:
text = row[0][2:]
else:
text = str(round(row[1][label], 1))
if row[1].normalised < 0.3 or row[1].normalised > 0.95:
textcolour = 'white'
else:
textcolour = 'black'
plotter.ax.text(x, y, text, color=textcolour, fontsize=12)
start_line = plotter.basemap(9.7, 53.4)
end_line = plotter.basemap(10.0, 53.55)
plt.plot([start_line[0], end_line[0]], [start_line[1], end_line[1]], '-',
color='white')
def polygon_plot(l_min=None, l_max=None, setname=None, myset=None, method=None,
filename=None):
geometry = 'data/geometries/polygons_de21_simple.csv'
sets = {
'load': {
'obj': 'load',
'direction': 'from_bus',
'bus': 'bus_el'},
'pv': {
'obj': 'solar',
'direction': 'to_bus',
'bus': 'bus_el'},
}
if setname is None and myset is None:
setname = easy.get_choice("What object do you want to plot?",
choices=tuple(sets.keys()))
if setname is not None:
myset = sets[setname]
if method is None:
myset['method'] = easy.get_choice(
"Chose you method!", choices=['sum', 'max', 'min', 'mean'])
else:
myset['method'] = method
s_data = SpatialData(filename)
myset = s_data.add_polygon_column(geometry_file=geometry, **myset)
if myset['method'] == 'sum':
unit = 'Wh'
else:
unit = 'W'
unit = "[{0}]".format(s_data.polygons[myset['obj']].prefix + unit)
plotter = geoplot.GeoPlotter(geoplot.postgis2shapely(s_data.polygons.geom),
(3, 16, 47, 56))
v_min = s_data.polygons[myset['obj']].min()
v_max = s_data.polygons[myset['obj']].max()
s_data.polygons['normalised'] = ((s_data.polygons[myset['obj']] - v_min) /
(v_max - v_min))
plotter.data = s_data.polygons['normalised']
plotter.plot(facecolor='data', edgecolor='white')
add_labels(s_data, plotter, myset['obj'])
if l_min is None:
l_min = v_min
if l_max is None:
l_max = v_max
plotter.draw_legend((l_min, l_max), number_ticks=3, legendlabel=unit,
location='bottom')
show()
def powerline_plot(l_min=None, l_max=None):
s_data = SpatialData()
reg = {
'geometry_file': 'data/
|
agustinhenze/nikola.debian
|
nikola/data/themes/base/messages/messages_ur.py
|
Python
|
mit
| 1,784
| 0
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "%d منٹ کا مطالعہ باقی",
"(active)": "(فعال)",
"Also available in:": "ان زبانوں میں بھی دستیاب:",
"Archive": "آرکائیو",
"Categories": "زمرے",
"Comments": "تبصرے",
"LANGUAGE": "اردو",
"Languages:": "زبانیں:",
"More posts about %s": "%s کے بارے میں مزید تحاریر",
"Newer posts
|
": "نئی تحاریر",
"Next post": "اگلی تحریر",
"No posts found.": "کوئی تحریر نہیں مل سکی۔",
"Nothing found.": "کچھ نہیں مل سکا۔",
"Older posts": "پرانی تحاریر",
"Original site": "اصلی سائٹ",
"Posted:": "اشاعت:",
"Posts about %s": "%s کے بارے میں تحاریر",
"Posts for year %s": "سال %s کی تحاریر",
"Posts for {month} {day}, {year}": "{day} {month}، {year} کی تحاریر",
"Posts for {month} {year}": "{month} {year} کی تحاریر
|
",
"Previous post": "پچھلی تحریر",
"Publication date": "تاریخِ اشاعت",
"RSS feed": "آر ایس ایس فیڈ",
"Read in English": "اردو میں پڑھیں",
"Read more": "مزید پڑھیں",
"Skip to main content": "مرکزی متن پر جائیں",
"Source": "سورس",
"Subcategories:": "ذیلی زمرے",
"Tags and Categories": "ٹیگز اور زمرے",
"Tags": "ٹیگز",
"Write your page here.": "اپنے صفحے کا متن یہاں لکھیں۔",
"Write your post here.": "اپنی تحریر یہاں لکھیں۔",
"old posts, page %d": "پرانی تحاریر صفحہ %d",
"page %d": "صفحہ %d",
}
|
nicoddemus/pytest-xdist
|
src/xdist/scheduler/each.py
|
Python
|
mit
| 5,136
| 0
|
from py.log import Producer
from xdist.workermanage import parse_spec_config
from xdist.report import report_collection_diff
class EachScheduling:
"""Implement scheduling of test items on all nodes
If a node gets added after the test run is started then it is
assumed to replace a node which got removed before it finished
its collection. In this case it will only be used if a node
with the same spec got removed earlier.
Any nodes added after the run is started will only get items
assigned if a node with a matching spec was removed before it
    finished all its pending items. The new node will then be
assigned the remaining items from the removed node.
"""
def __init__(self, config, log=None):
self.config = config
self.numnodes = len(parse_spec_config(config))
self.node2collection = {}
self.node2pending = {}
self._started = []
self._removed2pending = {}
if log is None:
self.log = Producer("eachsched")
else:
self.log = log.eachsched
self.collection_is_completed = False
@property
def nodes(self):
"""A list of all nodes in the scheduler."""
return list(self.node2pending.keys())
@property
def tests_finished(self):
if not self.collection_is_completed:
return False
if self._removed2pending:
return False
for pending in self.node2pending.values():
if len(pending) >= 2:
return False
return True
@property
def has_pending(self):
"""Return True if there are pending test items
This indicates that collection has finished and nodes are
still processing test items, so this can be thought of as
"the scheduler is active".
"""
for pending in self.node2pending.values():
if pending:
return True
return False
def add_node(self, node):
assert node not in self.node2pending
self.node2pending[node] = []
def add_node_collection(self, node, collection):
"""Add the collected test items from a node
Collection is complete once all nodes have submitted their
collection. In this case its pending list is set to an empty
list. When the collection is already completed this
submission is from a node which was restarted to replace a
dead node. In this case we already assign the pending items
here. In either case ``.schedule()`` will instruct the
node to start running the required tests.
"""
assert node in self.node2pending
if not self.collection_is_completed:
self.node2collection[node] = list(collection)
self.node2pending[node] = []
if len(self.node2collection) >= self.numnodes:
self.collection_is_completed = True
elif self._removed2pending:
for deadnode in self._removed2pending:
if deadnode.gateway.spec == node.gateway.spec:
dead_collection = self.node2collection[deadnode]
if collection != dead_collection:
msg = report_collection_diff(
dead_collection,
collection,
deadnode.gateway.id,
node.gateway.id,
)
self.log(msg)
return
pending = self._removed2pending.pop(deadnode)
self.node2pending[node] = pending
break
def mark_test_complete(self, node, item_index, duration=0):
self.node2pending[node].remove(item_index)
def mark_test_pending(self, item):
self.pending.insert(
0,
self.collection.index(item),
)
for node in self.node2pending:
self.check_schedule(node)
def remove_node(self, node):
# KeyError if we didn't get an add_node() yet
pending = self.node2pending.pop(node)
if not pending:
return
crashitem = self.node2collection[node][pending.pop(0)]
if pending:
self._removed2pending[node] = pending
return crashitem
def schedule(self):
"""Schedule the test items on the nodes
If the node's pending list is empty it is a new node which
needs to run all the tests. If the pending list is already
populated (by ``.add_node_collection()``) then it replaces a
dead node and we only need to run those tests.
"""
assert self.collection_is_completed
for node, pending in self.node2pending.items():
if node in self._started:
continue
if not pending:
pending[:] = range(len(self.node2collection[node]))
node.send_runtest_all()
node.shutdown()
else:
node.send_runtest_some(pending)
self._started.append(node)
|
sandvine/horizon
|
openstack_dashboard/api/neutron.py
|
Python
|
apache-2.0
| 50,711
| 0.00002
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Cisco Systems, Inc.
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import copy
import logging
import netaddr
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
import six
from horizon import exceptions
from horizon import messages
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'}
OFF_STATE = 'OFF'
ON_STATE = 'ON'
ROUTER_INTERFACE_OWNERS = (
'network:router_interface',
'network:router_interface_distributed',
'network:ha_router_replicated_interface'
)
class NeutronAPIDictWrapper(base.APIDictWrapper):
def __init__(self, apidict):
if 'admin_state_up' in apidict:
if apidict['admin_state_up']:
apidict['admin_state'] = 'UP'
else:
apidict['admin_state'] = 'DOWN'
# Django cannot handle a key name with ':', so use '__'.
apidict.update({
key.replace(':', '__'): value
for key, value in apidict.items()
if ':' in key
})
super(NeutronAPIDictWrapper, self).__init__(apidict)
def set_id_as_name_if_empty(self, length=8):
try:
if not self._apidict['name'].strip():
id = self._apidict['id']
if length:
id = id[:length]
self._apidict['name'] = '(%s)' % id
except KeyError:
pass
def items(self):
return self._apidict.items()
@property
def name_or_id(self):
return (self._apidict.get('name').strip() or
'(%s)' % self._apidict['id'][:13])
class Agent(NeutronAPIDictWrapper):
"""Wrapper for neutron agents."""
class Network(NeutronAPIDictWrapper):
"""Wrapper for neutron Networks."""
def to_dict(self):
d = dict(super(NeutronAPIDictWrapper, self).to_dict())
d['subnets'] = [s.to_dict() for s in d['subnets']]
return d
class Subnet(NeutronAPIDictWrapper):
"""Wrapper for neutron subnets."""
def __init__(self, apidict):
apidict['ipver_str'] = get_ipver_str(apidict['ip_version'])
super(Subnet, self).__init__(apidict)
class SubnetPool(NeutronAPIDictWrapper):
"""Wrapper for neutron subnetpools."""
class Port(NeutronAPIDictWrapper):
"""Wrapper for neutron ports."""
def __init__(self, apidict):
if 'mac_learning_enabled' in apidict:
apidict['mac_state'] = \
ON_STATE if apidict['mac_learning_enabled'] else OFF_STATE
pairs = apidict.get('allowed_address_pairs')
if pairs:
apidict = copy.deepcopy(apidict)
wrapped_pairs = [PortAllowedAddressPair(pair) for pair in pairs]
apidict['allowed_address_pairs'] = wrapped_pairs
super(Port, self).__init__(apidict)
class PortAllowedAddressPair(NeutronAPIDictWrapper):
"""Wrapper for neutron port allowed address pairs."""
def __init__(self, addr_pair):
super(PortAllowedAddressPair, self).__init__(addr_pair)
# Horizon references id property for table operations
self.id = addr_pair['ip_address']
class Profile(NeutronAPIDictWrapper):
"""Wrapper for neutron profiles."""
_attrs = ['profile_id', 'name', 'segment_type', 'segment_range',
'sub_type', 'multicast_ip_index', 'multicast_ip_range']
class Router(NeutronAPIDictWrapper):
"""Wrapper for neutron routers."""
class RouterStaticRoute(NeutronAPIDictWrapper):
"""Wrapper for neutron routes extra route."""
def __init__(self, route):
super(RouterStaticRoute, self).__init__(route)
# Horizon references id property for table operations
self.id = route['nexthop'] + ":" + route['destination']
class SecurityGroup(NeutronAPIDictWrapper):
# Required attributes: id, name, description, tenant_id, rules
def __init__(self, sg, sg_dict=None):
if sg_dict is None:
sg_dict = {sg['id']: sg['name']}
sg['rules'] = [SecurityGroupRule(rule, sg_dict)
for rule in sg['security_group_rules']]
super(SecurityGroup, self).__init__(sg)
def to_dict(self):
return {k: self._apidict[k] for k in self._apidict if k != 'rules'}
@six.python_2_unicode_compatible
class SecurityGroupRule(NeutronAPIDictWrapper):
# Required attributes:
# id, parent_group_id
# ip_protocol, from_port, to_port, ip_range, group
# ethertype, direction (Neutron specific)
def _get_secgroup_name(self, sg_id, sg_dict):
if sg_id:
if sg_dict is None:
sg_dict = {}
# If sg name not found in sg_dict,
# first two parts of UUID is used as sg name.
return sg_dict.get(sg_id, sg_id[:13])
else:
return u''
def __init__(self, sgr, sg_dict=None):
# In Neutron, if both remote_ip_prefix and remote_group_id are None,
# it means all remote IP range is allowed, i.e., 0.0.0.0/0 or ::/0.
if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']:
if sgr['ethertype'] == 'IPv6':
sgr['remote_ip_prefix'] = '::/0'
else:
sgr['remote_ip_prefix'] = '0.0.0.0/0'
rule = {
'id': sgr['id'],
'parent_group_id': sgr['security_group_id'],
'direction': sgr['direction'],
'ethertype': sgr['ethertype'],
'ip_protocol': sgr['protocol'],
            'from_port': sgr['port_range_min'],
'to_port': sgr['port_range_max'],
}
cidr = sgr['remote_ip_prefix']
rule['ip_range'] = {'cidr': cidr} if cidr else {}
group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict)
rule['group'] = {'name': group} if group else {}
super(SecurityGroupRule, self).__init__(rule)
def __str__(self):
if 'name' in self.group:
remote = self.group['name']
elif 'cidr' in self.ip_range:
remote = self.ip_range['cidr']
else:
remote = 'ANY'
direction = 'to' if self.direction == 'egress' else 'from'
if self.from_port:
if self.from_port == self.to_port:
proto_port = ("%s/%s" %
(self.from_port, self.ip_protocol.lower()))
else:
proto_port = ("%s-%s/%s" %
(self.from_port, self.to_port,
self.ip_protocol.lower()))
elif self.ip_protocol:
try:
ip_proto = int(self.ip_protocol)
proto_port = "ip_proto=%d" % ip_proto
except Exception:
# well-defined IP protocol name like TCP, UDP, ICMP.
proto_port = self.ip_protocol
else:
proto_port = ''
return (_('ALLOW %(ethertype)s %(proto_port)s '
'%(direction)s %(remote)s') %
{'ethertype': self.ethertype,
'proto_port': proto_port,
|
samastur/flexlayout
|
flex/settings.py
|
Python
|
mit
| 5,343
| 0.00131
|
import os
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for flex project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's$r0^f&ny*lp!cqf=5l%$o3@)mkiu$=7=-b!lu+4gyv45&4vss'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
ROOT_URLCONF = 'flex.urls'
WSGI_APPLICATION = 'flex.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'flex', 'static'),
)
SITE_ID = 1
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.core.context_processors.debug',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'sekizai.context_processors.sekizai',
'django.core.context_processors.static',
'cms.context_processors.cms_settings'
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'flex', 'templates'),
)
INSTALLED_APPS = (
'djangocms_admin_style',
'djangocms_text_ckeditor',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django.contrib.messages',
'cms',
'menus',
'sekizai',
'treebeard',
'djangocms_style',
'djangocms_column',
'djangocms_file',
'djangocms_flash',
'djangocms_googlemap',
'djangocms_inherit',
'djangocms_link',
'djangocms_picture',
'djangocms_teaser',
'djangocms_video',
'reversion',
'flex',
'notcms',
'plugins_layout',
)
LANGUAGES = (
## Customize this
('en', gettext('en')),
)
'''
CMS_LANGUAGES = {
## Customize this
'default': {
'public': True,
'hide_untranslated': False,
'redirect_on_fallback': True,
},
#1: [
# {
# 'public': True,
# 'code': 'en',
# 'hide_untranslated': False,
# 'name': gettext('en'),
    #        'redirect_on_fallback': True,
# },
#],
}
'''
CMS_TEMPLATES = (
    ## Customize this
('fullwidth.html', 'Fullwidth'),
('sidebar_left.html', 'Sidebar Left'),
('sidebar_right.html', 'Sidebar Right')
)
CMS_CACHE_DURATIONS = {
'default': 1,
'menus': 1,
'permissions': 1
}
CMS_PERMISSION = False
CMS_PLACEHOLDER_CACHE = False
CMS_PLACEHOLDER_CONF = {
'page_layout': {
'plugins': ['NewColumnPlugin', 'ContainerPlugin']
}
}
DATABASES = {
'default':
{'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'project.db', 'HOST': 'localhost', 'USER': '', 'PASSWORD': '', 'PORT': ''}
}
MIGRATION_MODULES = {
'djangocms_column': 'djangocms_column.migrations_django',
'djangocms_flash': 'djangocms_flash.migrations_django',
'djangocms_googlemap': 'djangocms_googlemap.migrations_django',
'djangocms_inherit': 'djangocms_inherit.migrations_django',
'djangocms_link': 'djangocms_link.migrations_django',
'djangocms_style': 'djangocms_style.migrations_django',
'djangocms_file': 'djangocms_file.migrations_django',
'djangocms_picture': 'djangocms_picture.migrations_django',
'djangocms_teaser': 'djangocms_teaser.migrations_django',
'djangocms_video': 'djangocms_video.migrations_django'
}
|
adamredfern92/PlexDownload
|
plexapi/playqueue.py
|
Python
|
mit
| 3,815
| 0.002621
|
# -*- coding: utf-8 -*-
from plexapi import utils
from plexapi.base import PlexObject
class PlayQueue(PlexObject):
""" Control a PlayQueue.
Attributes:
key (str): This is only added to support playMedia
identifier (str): com.plexapp.plugins.library
initpath (str): Relative url where data was grabbed from.
items (list): List of :class:`~plexapi.media.Media` or class:`~plexapi.playlist.Playlist`
mediaTagPrefix (str): Fx /system/bundle/media/flags/
mediaTagVersion (str): Fx 1485957738
playQueueID (str): a id for the playqueue
playQueueSelectedItemID (str): playQueueSelectedItemID
playQueueSelectedItemOffset (str): playQueueSelectedItemOffset
playQueueSelectedMetadataItemID (<type 'str'>): 7
playQueueShuffled (bool): True if shuffled
playQueueSourceURI (str): Fx library://150425c9-0d99-4242-821e-e5ab81cd2221/item//library/metadata/7
playQueueTotalCount (str): How many items in the play queue.
playQueueVersion (str): What version the playqueue is.
server (:class:`~plexapi.server.PlexServer`): Server you are connected to.
            size (str): Seems to be an alias for playQueueTotalCount.
"""
def _loadData(self, data):
self._data = data
self.identifier = data.attrib.get('identifier')
self.mediaTagPrefix = data.attrib.get('mediaTagPrefix')
        self.mediaTagVersion = data.attrib.get('mediaTagVersion')
        self.playQueueID = data.attrib.get('playQueueID')
self.playQueueSelectedItemID = data.attrib.get('playQueueSelectedItemID')
self.playQueueSelectedItemOffset = data.attrib.get('playQueueSelectedItemOffset')
self.playQueueSelectedMetadataItemID = data.attrib.get('playQueueSelectedMetadataItemID')
self.playQueueShuffled = utils.cast(bool, data.attrib.get('playQueueShuffled', 0))
self.playQueueSourceURI = data.attrib.get('playQueueSourceURI')
self.playQueueTotalCount = data.attrib.get('playQueueTotalCount')
self.playQueueVersion = data.attrib.get('playQueueVersion')
self.size = utils.cast(int, data.attrib.get('size', 0))
self.items = self.findItems(data)
@classmethod
def create(cls, server, item, shuffle=0, repeat=0, includeChapters=1, includeRelated=1):
""" Create and returns a new :class:`~plexapi.playqueue.PlayQueue`.
Paramaters:
server (:class:`~plexapi.server.PlexServer`): Server you are connected to.
item (:class:`~plexapi.media.Media` or class:`~plexapi.playlist.Playlist`): A media or Playlist.
shuffle (int, optional): Start the playqueue shuffled.
repeat (int, optional): Start the playqueue shuffled.
includeChapters (int, optional): include Chapters.
includeRelated (int, optional): include Related.
"""
args = {}
args['includeChapters'] = includeChapters
args['includeRelated'] = includeRelated
args['repeat'] = repeat
args['shuffle'] = shuffle
if item.type == 'playlist':
args['playlistID'] = item.ratingKey
args['type'] = item.playlistType
else:
uuid = item.section().uuid
args['key'] = item.key
args['type'] = item.listType
args['uri'] = 'library://%s/item/%s' % (uuid, item.key)
path = '/playQueues%s' % utils.joinArgs(args)
data = server.query(path, method=server._session.post)
c = cls(server, data, initpath=path)
# we manually add a key so we can pass this to playMedia
        # since the data does not contain a key.
c.key = item.key
return c
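# Editor's sketch (not part of the original module): typical use, assuming an
# existing plexapi session where `plex` is a PlexServer, `movie` a library
# item and `client` a PlexClient (all three names are illustrative only):
#
#     pq = PlayQueue.create(plex, movie)
#     client.playMedia(pq)   # pq.key, set in create(), makes this possible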
|
30loops/django-sphinxdoc
|
setup.py
|
Python
|
bsd-3-clause
| 1,150
| 0
|
#! /usr/bin/env python
from distutils.core import setup
import sys
reload(sys).setdefaultencoding('Utf-8')
setup(
name='django-sphinxdoc',
version='1.0',
author='Stefan Scherfke',
author_email='stefan at sofa-rockers.org',
description='Easily integrate Sphinx documentation into your website.',
long_description=open('README.txt').read(),
url='http://stefan.sofa-rockers.org/django-sphinxdoc/',
download_url='http://bitbucket.org/scherfke/django-sphinxdoc/downloads/',
license='BSD',
packages=[
'sphinxdoc',
'sphinxdoc.management',
'sphinxdoc.management.commands',
],
package_data={
        'sphinxdoc': ['templates/sphinxdoc/*'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
ikargis/horizon_fod
|
horizon/messages.py
|
Python
|
apache-2.0
| 2,969
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drop-in replacement for django.contrib.messages which handles Horizon's
messaging needs (e.g. AJAX communication, etc.).
"""
from django.contrib import messages as _messages
from django.contrib.messages import constants
from django.utils.encoding import force_unicode # noqa
from django.utils.safestring import SafeData # noqa
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""Attempts to add a message to the request using the 'messages' app."""
if request.is_ajax():
tag = constants.DEFAULT_TAGS[level]
# if message is marked as safe, pass "safe" tag as extra_tags so that
# client can skip HTML escape for the message when rendering
if isinstance(message, SafeData):
extra_tags = extra_tags + ' safe'
request.horizon['async_messages'].append([tag,
force_unicode(message),
extra_tags])
else:
return _messages.add_message(request, level, message,
extra_tags, fail_silently)
def debug(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``DEBUG`` level."""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``INFO`` level."""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``SUCCESS`` level."""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``WARNING`` level."""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""Adds a message with the ``ERROR`` level."""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
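# Editor's sketch (not part of the original module): usage mirrors
# django.contrib.messages; `request` is assumed to be a Django HttpRequest
# that has passed through Horizon's middleware, so AJAX requests receive the
# queued async messages instead of standard message cookies.
#
#     from horizon import messages
#     messages.success(request, _("Instance launched."))
#     messages.error(request, _("Unable to retrieve instance list."))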
|
SiCKRAGETV/SickRage
|
sickrage/providers/torrent/hdtorrents.py
|
Python
|
gpl-3.0
| 6,494
| 0.002156
|
# Author: Idan Gutman
# Modified by jkaberg, https://github.com/jkaberg for SceneAccess
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
from urlparse import urljoin
from requests.utils import dict_from_cookiejar
import sickrage
from sickrage.core.caches.tv_cache import TVCache
from sickrage.core.helpers import bs4_parser, convert_size, try_int
from sickrage.providers import TorrentProvider
class HDTorrentsProvider(TorrentProvider):
def __init__(self):
super(HDTorrentsProvider, self).__init__("HDTorrents", 'https://hd-torrents.org', True)
self.urls.update({
'login': '{base_url}/login.php'.format(**self.urls),
'search': '{base_url}/torrents.php'.format(**self.urls)
})
self.username = None
self.password = None
self.minseed = None
self.minleech = None
self.freeleech = None
self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']
self.cache = TVCache(self, min_time=30)
def _check_auth(self):
if not self.username or not self.password:
sickrage.app.log.warning(
"Invalid username or password. Check your settings")
return True
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {'uid': self.username,
'pwd': self.password,
'submit': 'Confirm'}
try:
response = self.session.post(self.urls['login'], data=login_params, timeout=30).text
except Exception:
sickrage.app.log.warning("Unable to connect to provider")
return False
if re.search('You need cookies enabled to log in.', response):
sickrage.app.log.warning(
"Invalid username or password. Check your settings")
return False
return True
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
results = []
if not self.login():
return results
# Search Params
search_params = {
'search': '',
'active': 5 if self.freeleech else 1,
'options': 0,
'category[0]': 59,
'category[1]': 60,
'category[2]': 30,
'category[3]': 38,
'category[4]': 65,
}
for mode in search_strings:
sickrage.app.log.debug("Search Mode: %s" % mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
search_params['search'] = search_string
sickrage.app.log.debug("Search string: %s" % search_string)
try:
data = self.session.get(self.urls['search'], params=search_params).text
results += self.parse(data, mode)
except Exception:
sickrage.app.log.debug("No data returned from provider")
return results
def parse(self, data, mode, **kwargs):
"""
Parse search results from data
:param data: response data
:param mode: search mode
:return: search results
"""
results = []
# Search result page contains some invalid html that prevents html parser from returning all data.
# We cut everything before the table that contains the data we are interested in thus eliminating
# the invalid html portions
try:
index = data.lower().index('<table class="mainblockcontenttt"')
except ValueError:
sickrage.app.log.debug("Could not find table of torrents mainblockcontenttt")
return results
with bs4_parser(data[index:]) as html:
torrent_table = html.find('table', class_='mainblockcontenttt')
torrent_rows = torrent_table('tr') if torrent_table else []
if not torrent_rows or torrent_rows[2].find('td', class_='lista'):
sickrage.app.log.debug('Data returned from provider does not contain any torrents')
return results
# Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
labels = [label.a.get_text(strip=True) if label.a else label.get_text(strip=True) for label in
torrent_rows[0]('td')]
# Skip column headers
for row in torrent_rows[1:]:
try:
cells = row.findChildren('td')[:len(labels)]
if len(cells) < len(labels):
continue
title = cells[labels.index('Filename')].a
title = title.get_text(strip=True) if title else None
link = cells[labels.index('Dl')].a
link = link.get('href') if link else None
download_url = urljoin(self.urls['base_url'], link) if link else None
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('S')].get_text(strip=True))
leechers = try_int(cells[labels.index('L')].get_text(strip=True))
torrent_size = cells[labels.index('Size')].get_text()
size = convert_size(torrent_size, -1, ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB'])
results += [
{'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers}
]
if mode != 'RSS':
sickrage.app.log.debug("Found result: {}".format(title))
except Exception:
sickrage.app.log.error("Failed parsing provider")
return results
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/main/tests/factories/__init__.py
|
Python
|
apache-2.0
| 486
| 0
|
from .tower import (
create_instance,
create_instance_group,
create_organization,
create_job_template,
create_notification_template,
create_survey_spec,
    create_workflow_job_template,
)
from .exc import (
NotUnique,
)
__all__ = [
'create_instance',
'create_instance_group',
'create_organization',
'create_job_template',
    'create_notification_template',
'create_survey_spec',
'create_workflow_job_template',
'NotUnique',
]
|
JohnLZeller/dd-agent
|
checks.d/elastic.py
|
Python
|
bsd-3-clause
| 23,162
| 0.005008
|
# stdlib
from collections import namedtuple
import socket
import subprocess
import time
import urlparse
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers, Platform
class NodeNotFound(Exception): pass
ESInstanceConfig = namedtuple(
'ESInstanceConfig', [
'is_external',
'password',
'service_check_tags',
'tags',
'timeout',
'url',
'username',
])
class ESCheck(AgentCheck):
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
DEFAULT_TIMEOUT = 5
STATS_METRICS = { # Metrics that are common to all Elasticsearch versions
"elasticsearch.docs.count": ("gauge", "indices.docs.count"),
"elasticsearch.docs.deleted": ("gauge", "indices.docs.deleted"),
"elasticsearch.store.size": ("gauge", "indices.store.size_in_bytes"),
"elasticsearch.indexing.index.total": ("gauge", "indices.indexing.index_total"),
"elasticsearch.indexing.index.time": ("gauge", "indices.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.index.current": ("gauge", "indices.indexing.index_current"),
"elasticsearch.indexing.delete.total": ("gauge", "indices.indexing.delete_total"),
"elasticsearch.indexing.delete.time": ("gauge", "indices.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.delete.current": ("gauge", "indices.indexing.delete_current"),
"elasticsearch.get.total": ("gauge", "indices.get.total"),
"elasticsearch.get.time": ("gauge", "indices.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.current": ("gauge", "indices.get.current"),
"elasticsearch.get.exists.total": ("gauge", "indices.get.exists_total"),
"elasticsearch.get.exists.time": ("gauge", "indices.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.missing.total": ("gauge", "indices.get.missing_total"),
"elasticsearch.get.missing.time": ("gauge", "indices.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.total": ("gauge", "indices.search.query_total"),
"elasticsearch.search.query.time": ("gauge", "indices.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.current": ("gauge", "indices.search.query_current"),
"elasticsearch.search.fetch.total": ("gauge", "indices.search.fetch_total"),
"elasticsearch.search.fetch.time": ("gauge", "indices.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.fetch.current": ("gauge", "indices.search.fetch_current"),
"elasticsearch.merges.current": ("gauge", "indices.merges.current"),
"elasticsearch.merges.current.docs": ("gauge", "indices.merges.current_docs"),
"elasticsearch.merges.current.size": ("gauge", "indices.merges.current_size_in_bytes"),
"elasticsearch.merges.total": ("gauge", "indices.merges.total"),
"elasticsearch.merges.total.time": ("gauge", "indices.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.merges.total.docs": ("gauge", "indices.merges.total_docs"),
"elasticsearch.merges.total.size": ("gauge", "indices.merges.total_size_in_bytes"),
"elasticsearch.refresh.total": ("gauge", "indices.refresh.total"),
"elasticsearch.refresh.total.time": ("gauge", "indices.refresh.total_time_in_millis", lambda
|
v: float(v)/1000),
"elasticsearch.flush.total": ("gauge", "indices.flush.total"),
"elasticsearch.flush.total.time": ("gauge", "indices.flush.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.process.open_fd": ("gauge", "process.open_file_descriptors"),
"elasticsearch.transport.rx_count": ("gauge", "transport.rx_count"),
"elasticsearch.transport.tx_count": ("gauge", "transport.tx_count"),
"elasticsearch.transport.rx_size": ("gauge", "transport.rx_size_in_bytes"),
"elasticsearch.transport.tx_size": ("gauge", "transport.tx_size_in_bytes"),
"elasticsearch.transport.server_open": ("gauge", "transport.server_open"),
"elasticsearch.thread_pool.bulk.active": ("gauge", "thread_pool.bulk.active"),
"elasticsearch.thread_pool.bulk.threads": ("gauge", "thread_pool.bulk.threads"),
"elasticsearch.thread_pool.bulk.queue": ("gauge", "thread_pool.bulk.queue"),
"elasticsearch.thread_pool.flush.active": ("gauge", "thread_pool.flush.active"),
"elasticsearch.thread_pool.flush.threads": ("gauge", "thread_pool.flush.threads"),
"elasticsearch.thread_pool.flush.queue": ("gauge", "thread_pool.flush.queue"),
"elasticsearch.thread_pool.generic.active": ("gauge", "thread_pool.generic.active"),
"elasticsearch.thread_pool.generic.threads": ("gauge", "thread_pool.generic.threads"),
"elasticsearch.thread_pool.generic.queue": ("gauge", "thread_pool.generic.queue"),
"elasticsearch.thread_pool.get.active": ("gauge", "thread_pool.get.active"),
"elasticsearch.thread_pool.get.threads": ("gauge", "thread_pool.get.threads"),
"elasticsearch.thread_pool.get.queue": ("gauge", "thread_pool.get.queue"),
"elasticsearch.thread_pool.index.active": ("gauge", "thread_pool.index.active"),
"elasticsearch.thread_pool.index.threads": ("gauge", "thread_pool.index.threads"),
"elasticsearch.thread_pool.index.queue": ("gauge", "thread_pool.index.queue"),
"elasticsearch.thread_pool.management.active": ("gauge", "thread_pool.management.active"),
"elasticsearch.thread_pool.management.threads": ("gauge", "thread_pool.management.threads"),
"elasticsearch.thread_pool.management.queue": ("gauge", "thread_pool.management.queue"),
"elasticsearch.thread_pool.merge.active": ("gauge", "thread_pool.merge.active"),
"elasticsearch.thread_pool.merge.threads": ("gauge", "thread_pool.merge.threads"),
"elasticsearch.thread_pool.merge.queue": ("gauge", "thread_pool.merge.queue"),
"elasticsearch.thread_pool.percolate.active": ("gauge", "thread_pool.percolate.active"),
"elasticsearch.thread_pool.percolate.threads": ("gauge", "thread_pool.percolate.threads"),
"elasticsearch.thread_pool.percolate.queue": ("gauge", "thread_pool.percolate.queue"),
"elasticsearch.thread_pool.refresh.active": ("gauge", "thread_pool.refresh.active"),
"elasticsearch.thread_pool.refresh.threads": ("gauge", "thread_pool.refresh.threads"),
"elasticsearch.thread_pool.refresh.queue": ("gauge", "thread_pool.refresh.queue"),
"elasticsearch.thread_pool.search.active": ("gauge", "thread_pool.search.active"),
"elasticsearch.thread_pool.search.threads": ("gauge", "thread_pool.search.threads"),
"elasticsearch.thread_pool.search.queue": ("gauge", "thread_pool.search.queue"),
"elasticsearch.thread_pool.snapshot.active": ("gauge", "thread_pool.snapshot.active"),
"elasticsearch.thread_pool.snapshot.threads": ("gauge", "thread_pool.snapshot.threads"),
"elasticsearch.thread_pool.snapshot.queue": ("gauge", "thread_pool.snapshot.queue"),
"elasticsearch.http.current_open": ("gauge", "http.current_open"),
"elasticsearch.http.total_opened": ("gauge", "http.total_opened"),
"jvm.mem.heap_committed": ("gauge", "jvm.mem.heap_committed_in_bytes"),
"jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes"),
"jvm.mem.non_heap_committed": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
"jvm.mem.non_heap_used": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
"jvm.threads.count": ("gauge", "jvm.threads.count"),
"jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
}
CLUSTER_HEALTH_METRICS = {
"elasticsearch.number_of_nodes": ("gauge", "number_of_nodes"),
"elasticsearch.number_of_data_nodes": ("gauge", "number_of_data_nodes"),
"elasticsearch.active_primary_shards": ("gauge", "active_primary_shards"),
|
vlkv/reggata
|
reggata/data/commands.py
|
Python
|
gpl-3.0
| 40,730
| 0.004444
|
'''
Created on 23.07.2012
@author: vlkv
'''
from sqlalchemy.orm import contains_eager, joinedload_all
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import ResourceClosedError
import shutil
import datetime
import logging
import os.path
import reggata.errors as err
import reggata.helpers as hlp
import reggata.consts as consts
import reggata.data.db_schema as db
from reggata.user_config import UserConfig
from reggata.data import operations
from reggata.helpers import to_db_format
logger = logging.getLogger(__name__)
class AbstractCommand:
def _execute(self, unitOfWork):
raise NotImplementedError("Override this function in a subclass")
class GetExpungedItemCommand(AbstractCommand):
'''
Returns expunged (detached) object of Item class from database with given id.
'''
def __init__(self, itemId):
self.__itemId = itemId
def _execute(self, uow):
self._session = uow.session
item = self._session.query(db.Item)\
.options(joinedload_all('data_ref'))\
.options(joinedload_all('item_tags.tag'))\
.options(joinedload_all('item_fields.field'))\
.get(self.__itemId)
if item is None:
raise err.NotFoundError()
self._session.expunge(item)
return item
# TODO: Write a test for this command
class DeleteHangingTagsCommand(AbstractCommand):
'''
Deletes from database all hanging Tag objects. A hanging Tag is a Tag that
is not referenced by Items.
Returns number of deleted tags.
'''
def _execute(self, uow):
self._session = uow.session
sql = '''select tags.id, tags.name, tags.synonym_code
from tags left join items_tags on tags.id = items_tags.tag_id
where items_tags.item_id is NULL
'''
hangingTags = self._session.query(db.Tag).from_statement(sql).all()
count = len(hangingTags)
for tag in hangingTags:
self._session.delete(tag)
if count > 0:
self._session.commit()
return count
# TODO: Write a test for this command
class DeleteHangingFieldsCommand(AbstractCommand):
'''
Deletes from database all hanging Field objects. A hanging Field is a Field that
is not referenced by Items.
Returns number of deleted fields.
'''
def _execute(self, uow):
self._session = uow.session
sql = '''select fields.id, fields.name
from fields left join items_fields on fields.id = items_fields.field_id
where items_fields.item_id is NULL
'''
hangingFields = self._session.query(db.Field).from_statement(sql).all()
count = len(hangingFields)
for field in hangingFields:
self._session.delete(field)
if count > 0:
self._session.commit()
return count
class SaveThumbnailCommand(AbstractCommand):
def __init__(self, data_ref_id, thumbnail):
self.__dataRefId = data_ref_id
self.__thumbnail = thumbnail
def _execute(self, uow):
self._session = uow.session
data_ref = self._session.query(db.DataRef).get(self.__dataRefId)
self._session.refresh(data_ref) #TODO: Research if we can remove this refresh
self.__thumbnail.data_ref_id = data_ref.id
data_ref.thumbnails.append(self.__thumbnail)
self._session.add(self.__thumbnail)
self._session.commit()
self._session.refresh(self.__thumbnail)
self._session.expunge(self.__thumbnail)
self._session.expunge(data_ref)
# TODO: Write a test for this command
class GetUntaggedItems(AbstractCommand):
'''
Gets from database all alive elements without tags.
'''
def __init__(self, limit=0, page=1, order_by=""):
self.__limit = limit
self.__page = page
self.__orderBy = order_by
def _execute(self, uow):
self._session = uow.session
return self.__getUntaggedItems(self.__limit, self.__page, self.__orderBy)
def __getUntaggedItems(self, limit, page, order_by):
order_by_1 = ""
order_by_2 = ""
for col, direction in order_by:
if order_by_1:
order_by_1 += ", "
if order_by_2:
order_by_2 += ", "
order_by_2 += col + " " + direction + " "
if col == "title":
order_by_1 += col + " " + direction + " "
if order_by_1:
order_by_1 = " ORDER BY " + order_by_1
if order_by_2:
order_by_2 = " ORDER BY " + order_by_2
thumbnail_default_size = UserConfig().get("thumbnail_size", consts.THUMBNAIL_DEFAULT_SIZE)
if page < 1:
raise ValueError("Page number cannot be negative or zero.")
if limit < 0:
raise ValueError("Limit cannot be negative number.")
limit_offset = ""
if limit > 0:
offset = (page-1)*limit
limit_offset += "LIMIT {0} OFFSET {1}".format(limit, offset)
sql = '''
select sub.*, ''' + \
db.Item_Tag._sql_from() + ", " + \
db.Tag._sql_from() + ", " + \
db.Item_Field._sql_from() + ", " + \
db.Field._sql_from() + \
'''
from (select i.*, ''' + \
db.DataRef._sql_from() + ", " + \
db.Thumbnail._sql_from() + \
'''
from items i
left join items_tags it on i.id = it.item_id
left join data_refs on i.data_ref_id = data_refs.id
left join thumbnails on data_refs.id = thumbnails.data_ref_id and thumbnails.size = ''' + \
str(thumbnail_default_size) + '''
where
it.item_id is null
AND i.alive
''' + order_by_1 + " " + limit_offset + '''
) as sub
left join items_tags on sub.id = items_tags.item_id
left join tags on tags.id = items_tags.tag_id
left join items_fields on sub.id = items_fields.item_id
left join fields on fields.id = items_fields.field_id
''' + order_by_2
items = []
try:
items = self._session.query(db.Item)\
.options(contains_eager("data_ref"), \
contains_eager("data_ref.thumbnails"), \
contains_eager("item_tags"), \
contains_eager("item_tags.tag"), \
contains_eager("item_fields"),\
contains_eager("item_fields.field"))\
.from_statement(sql).all()
for item in items:
self._session.expunge(item)
except ResourceClosedError:
pass
return items
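# Worked example of the string building above (sketch): with limit=10, page=3 and
# order_by=[("title", "ASC")] both clauses become " ORDER BY title ASC " (order_by_1
# is applied inside the subquery, order_by_2 on the outer join) and limit_offset
# becomes "LIMIT 10 OFFSET 20", since offset = (page-1)*limit.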
class QueryItemsByParseTree(AbstractCommand):
'''
    Searches for items according to the given syntax parse tree (of the query language).
'''
def __init__(self, query_tree, limit=0, page=1, order_by=[]):
self.__queryTree = query_tree
self.__limit = limit
self.__page = page
self.__orderBy = order_by
def _execute(self, uow):
self._session = uow.session
return self.__queryItemsByParseTree(self.__queryTree, self.__limit, self.__page,
self.__orderBy)
def __queryIt
|
emsByParseTree(self, query_tree, limit, page, order_by):
order_by_1 = ""
order_by_2 = ""
for col, direction in order_by:
if order_by_1:
order_by_1 += ", "
if order_by_2:
order_by_2 += ", "
order_by_2 += col + " " + direction + " "
if col == "title":
order_by_1 += col + " " +
|
direction + " "
if order_by_1:
order_by_1 = " ORDER BY " + order_by_1
if order_by_2:
order_by_2 = " ORDER BY " + order_by_2
sub_sql = query_tree.interpret()
if page < 1:
raise ValueError("Page number cannot be negative or zero.")
if limit < 0:
raise ValueError("Limit cannot be negative number.")
limit_offset = ""
if limit > 0:
offset = (page-1)*limit
li
|
PyFilesystem/pyfilesystem2
|
examples/upload.py
|
Python
|
mit
| 523
| 0
|
"""
Upload a file to a server (or o
|
ther filesystem)
Usage:
|
python upload.py FILENAME <FS URL>
example:
python upload.py foo.txt ftp://example.org/uploads/
"""
import os
import sys
from fs import open_fs
_, file_path, fs_url = sys.argv
filename = os.path.basename(file_path)
with open_fs(fs_url) as fs:
if fs.exists(filename):
print("destination exists! aborting.")
else:
with open(file_path, "rb") as bin_file:
fs.upload(filename, bin_file)
print("upload successful!")
|
zhuangjun1981/retinotopic_mapping
|
retinotopic_mapping/examples/signmap_analysis/scripts/script_analysis.py
|
Python
|
gpl-3.0
| 1,015
| 0.003941
|
__author__ = 'junz'
import os
import matplotlib.pyplot as plt
import retinotopic_mapping.RetinotopicMapping as rm
from tools import FileTools as ft
trialName = "160211_M214522_Trial1.pkl"
isSave = True
params = {'phaseMapFilterSigma': 1.,
'signMapFilterSigma': 9.,
'signMapThr': 0.3,
'eccMapFilterSigma': 15.0,
'splitLocalMinCutStep': 10.,
'closeIter': 3,
'openIter': 3,
'dilationIter': 15,
'borderWidth': 1,
'smallPatchThr': 100,
'visualSpacePixelSize': 0.5,
'visualSpaceCloseIter': 15,
'splitOverlapThr': 1.1,
'mergeOverlapThr': 0.1
}
currFolder = os.path.dirname(os.path.realpath(__file__))
os.chdir(currFolder)
trial = rm.loadTrial(trialName)
trial.params=params
trial.processTrial(isPlot=True)
trialDict = trial.ge
|
nerateTrialDict()
trial.plotTrial(isSave=isSave,saveFolder=currFolder)
plt.show()
if i
|
sSave:
ft.saveFile(trial.getName()+'.pkl',trialDict)
|
dakcarto/QGIS
|
python/plugins/processing/modeler/ModelerParametersDialog.py
|
Python
|
gpl-2.0
| 27,476
| 0.001237
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ModelerParametersDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import Qt, QUrl, QMetaObject
from PyQt4.QtGui import QDialog, QDialogButtonBox, QLabel, QLineEdit, QFrame, QPushButton, QSizePolicy, QVBoxLayout, QHBoxLayout, QTabWidget, QWidget, QScrollArea, QComboBox, QTableWidgetItem, QMessageBox
from PyQt4.QtWebKit import QWebView
from processing.modeler.ModelerAlgorithm import ValueFromInput, \
ValueFromOutput, Algorithm, ModelerOutput
from processing.gui.CrsSelectionPanel import CrsSelectionPanel
from processing.gui.MultipleInputPanel import MultipleInputPanel
from processing.gui.FixedTablePanel import FixedTablePanel
from processing.gui.RangePanel import RangePanel
from processing.gui.GeometryPredicateSelectionPanel import \
GeometryPredicateSelectionPanel
from processing.modeler.MultilineTextPanel import MultilineTextPanel
from processing.core.parameters import ParameterExtent, ParameterRaster, ParameterVector, ParameterBoolean, ParameterTable, ParameterFixedTable, ParameterMultipleInput, ParameterSelection, ParameterRange, ParameterNumber, ParameterString, ParameterCrs, ParameterTableField, ParameterFile, ParameterGeometryPredicate
from processing.core.outputs import OutputRaster, OutputVector, OutputTable, OutputHTML, OutputFile, OutputDirectory, OutputNumber, OutputString, OutputExtent
class ModelerParametersDialog(QDialog):
ENTER_NAME = '[Enter name if this is a final result]'
NOT_SELECTED = '[Not selected]'
USE_MIN_COVERING_EXTENT = '[Use min covering extent]'
def __init__(self, alg, model, algName=None):
QDialog.__init__(self)
self.setModal(True)
#The algorithm to define in this dialog. It is an instance of GeoAlgorithm
self._alg = alg
#The resulting algorithm after the user clicks on OK. it is an instance of the container Algorithm class
self.alg = None
#The model this algorithm is going to be added to
self.model = model
#The name of the algorithm in the model, in case we are editing it and not defining it for the first time
self._algName = algName
self.setupUi()
self.params = None
def setupUi(self):
self.labels = {}
self.widgets = {}
self.checkBoxes = {}
self.showAdvanced = False
self.valueItems = {}
self.dependentItems = {}
self.resize(650, 450)
self.buttonBox = QDialogButtonBox()
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel
| QDialogButtonBox.Ok)
tooltips = self._alg.getParameterDescriptions()
self.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding)
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setSpacing(5)
self.verticalLayout.setMargin(20)
hLayout = QHBoxLayout()
hLayout.setSpacing(5)
hLayout.setMargin(0)
descriptionLabel = QLabel(self.tr("Description"))
self.descriptionBox = QLineEdit()
self.descriptionBox.setText(self._alg.name)
hLayout.addWidget(descriptionLabel)
hLayout.addWidget(self.descriptionBox)
self.verticalLayout.addLayout(hLayout)
line = QFrame()
line.setFrameShape(QFrame.HLine)
line.setFrameShadow(QFrame.Sunken)
self.verticalLayout.addWidget(line)
for param in self._alg.parameters:
if param.isAdvanced:
self.advancedButton = QPushButton()
self.advancedButton.setText(self.tr('Show advanced parameters'))
self.advancedButton.setMaximumWidth(150)
self.advancedButton.clicked.connect(
self.showAdvancedParametersClicked)
self.verticalLayout.addWidget(self.advancedButton)
break
for param in self._alg.parameters:
if param.hidden:
continue
desc = param.description
if isinstance(param, ParameterExtent):
desc += '(xmin, xmax, ymin, ymax)'
label = QLabel(desc)
self.labels[param.name] = label
widget = self.getWidgetFromParameter(param)
self.valueItems[param.name] = widget
if param.name in tooltips.keys():
tooltip = tooltips[param.name]
else:
tooltip = param.description
label.setToolTip(tooltip)
widget.setToolTip(tooltip)
if param.isAdvanced:
label.setVisible(self.showAdvanced)
widget.setVisible(self.showAdvanced)
self.widgets[param.name] = widget
self.verticalLayout.addWidget(label)
self.verticalLayout.addWidget(widget)
for output in self._alg.outputs:
if output.hidden:
continue
if isinstance(output, (OutputRaster, OutputVector, OutputTable,
OutputHTML, OutputFile, OutputDirectory)):
label = QLabel(output.description + '<'
+ output.__class__.__name__ + '>')
item = QLineEdit()
if hasattr(item, 'setPlaceholderText'):
item.setPlaceholderText(ModelerParametersDialog.ENTER_NAME)
self.verticalLayout.addWidget(label)
self.verticalLayout.addWidget(item)
self.valueItems[output.name] = item
label = QLabel(' ')
self.verticalLayout.addWidget(label)
label = QLabel(self.tr('Parent algorithms'))
self.dependenciesPanel = self.getDependenciesPanel()
self.verticalLayout.addWidget(label)
self.verticalLayout.addWidget(self.dependenciesPanel)
self.verticalLayout.addStretch(1000)
self.setLayout(self.verticalLayout)
self.setPreviousValues()
self.setWindowTitle(self._alg.name)
|
self.verticalLayout2 = QVBoxLayout()
self.verticalLayout2.setSpacing(2)
self.verticalLayout2.setMargin(0)
self.tabWidget = QTabWidget()
self.tabWidget.setMinimumWidth(300)
self.paramPanel = QWidget()
self.paramPanel.setLayout(sel
|
f.verticalLayout)
self.scrollArea = QScrollArea()
self.scrollArea.setWidget(self.paramPanel)
self.scrollArea.setWidgetResizable(True)
self.tabWidget.addTab(self.scrollArea, self.tr('Parameters'))
self.webView = QWebView()
html = None
url = None
isText, help = self._alg.help()
if help is not None:
if isText:
html = help
else:
url = QUrl(help)
else:
html = self.tr('<h2>Sorry, no help is available for this '
'algorithm.</h2>')
try:
if html:
self.webView.setHtml(html)
elif url:
self.webView.load(url)
except:
self.webView.setHtml(self.tr('<h2>Could not open help file :-( </h2>'))
|
sebrandon1/nova
|
nova/tests/unit/virt/xenapi/stubs.py
|
Python
|
apache-2.0
| 14,212
| 0.000141
|
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite."""
import mock
import pickle
import random
import sys
import fixtures
from oslo_serialization import jsonutils
import six
from nova import test
import nova.tests.unit.image.fake
from nova.virt.xenapi.client import session
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
def stubout_firewall_driver(stubs, conn):
def fake_none(self, *args):
return
_vmops = conn._vmops
stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
def stubout_instance_snapshot(stubs):
def fake_fetch_image(context, session, instance, name_label, image, type):
return {'root': dict(uuid=_make_fake_vdi(), file=None),
'kernel': dict(uuid=_make_fake_vdi(), file=None),
'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}
stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
def fake_wait_for_vhd_coalesce(*args):
# TODO(sirp): Should we actually fake out the data here
return "fakeparent", "fakebase"
stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
def stubout_session(stubs, cls, product_version=(5, 6, 2),
product_brand='XenServer', platform_version=(1, 9, 0),
**opt_args):
"""Stubs out methods from XenAPISession."""
stubs.Set(session.XenAPISession, '_create_session',
lambda s, url: cls(url, **opt_args))
stubs.Set(session.XenAPISession, '_get_product_version_and_brand',
lambda s: (product_version, product_brand))
stubs.Set(session.XenAPISession, '_get_platform_version',
lambda s: platform_version)
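# Rough usage sketch (hypothetical test body; FakeSessionForVMTests is defined
# further down in this module, and self.stubs would come from the test fixture):
#
#   stubout_session(self.stubs, FakeSessionForVMTests)
#   # any XenAPISession created afterwards now talks to the fake session class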
def stubout_get_this_vm_uuid(stubs):
def f(session):
vms = [rec['uuid'] for ref, rec
in six.iteritems(fake.get_all_records('VM'))
if rec['is_control_domain']]
return vms[0]
stubs.Set(vm_utils, 'get_this_vm_uuid', f)
def stubout_image_service_download(stubs):
def fake_download(*args, **kwargs):
pass
stubs.Set(nova.tests.unit.image.fake._FakeImageService,
'download', fake_download)
def stubout_stream_disk(stubs):
def fake_stream_disk(*args, **kwargs):
pass
stubs.Set(vm_utils, '_stream_disk', fake_stream_disk)
def stubout_determine_is_pv_objectstore(stubs):
"""Assumes VMs stu have PV kernels."""
def f(*args):
return False
stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
def stubout_is_snapshot(stubs):
"""Always returns true
xenapi fake driver does not create vmrefs for snapshots.
"""
def f(*args):
return True
stubs.Set(vm_utils, 'is_snapshot', f)
def stubout_lookup_image(stubs):
"""Simulates a failure in lookup image."""
def f(_1, _2, _3, _4):
raise Exception("Test Exception raised by fake lookup_image")
stubs.Set(vm_utils, 'lookup_image', f)
def stubout_fetch_disk_image(stubs, raise_failure=False):
"""Simulates a failure in fetch image_glance_disk."""
def _fake_fetch_disk_image(context, session, instance, name_label, image,
image_type):
if raise_failure:
raise fake.Failure("Test Exception raised by "
"fake fetch_image_glance_disk")
elif image_type == vm_utils.ImageType.KERNEL:
filename = "kernel"
elif image_type == vm_utils.ImageType.RAMDISK:
filename = "ramdisk"
else:
filename = "unknown"
vdi_type = vm_utils.ImageType.to_string(image_type)
return {vdi_type: dict(uuid=None, file=filename)}
stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image)
def stubout_create_vm(stubs):
"""Simulates a failure in create_vm."""
def f(*args):
raise fake.Failure("Test Exception raised by fake create_vm")
stubs.Set(vm_utils, 'create_vm', f)
def stubout_attach_disks(stubs):
"""Simulates a failure in _attach_disks."""
def f(*args):
raise fake.Failure("Test E
|
xception raised by fake _attach_disks")
stubs.Set(vmops.VMOps, '_attach_disks', f)
def _make_fake_vdi():
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', sr_ref)
vdi_rec = fake.get_record('VDI', vdi_ref)
return
|
vdi_rec['uuid']
class FakeSessionForVMTests(fake.SessionBase):
"""Stubs out a XenAPISession for VM tests."""
_fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
"Sun Nov 6 22:49:02 2011\n"
"*filter\n"
":INPUT ACCEPT [0:0]\n"
":FORWARD ACCEPT [0:0]\n"
":OUTPUT ACCEPT [0:0]\n"
"COMMIT\n"
"# Completed on Sun Nov 6 22:49:02 2011\n")
def host_call_plugin(self, _1, _2, plugin, method, _5):
plugin = plugin.rstrip('.py')
        if plugin == 'glance' and method in ('download_vhd2',):
root_uuid = _make_fake_vdi()
return pickle.dumps(dict(root=dict(uuid=root_uuid)))
elif (plugin, method) == ('xenhost', 'iptables_config'):
return fake.as_json(out=self._fake_iptables_save_output,
err='')
else:
return (super(FakeSessionForVMTests, self).
host_call_plugin(_1, _2, plugin, method, _5))
def VM_start(self, _1, ref, _2, _3):
vm = fake.get_record('VM', ref)
if vm['power_state'] != 'Halted':
raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
vm['power_state']])
vm['power_state'] = 'Running'
vm['is_a_template'] = False
vm['is_control_domain'] = False
vm['domid'] = random.randrange(1, 1 << 16)
return vm
def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
vm_rec = self.VM_start(_1, vm_ref, _2, _3)
vm_rec['resident_on'] = host_ref
def VDI_snapshot(self, session_ref, vm_ref, _1):
sr_ref = "fakesr"
return fake.create_vdi('fakelabel', sr_ref, read_only=True)
def SR_scan(self, session_ref, sr_ref):
pass
class FakeSessionForFirewallTests(FakeSessionForVMTests):
"""Stubs out a XenApi Session for doing IPTable Firewall tests."""
def __init__(self, uri, test_case=None):
super(FakeSessionForFirewallTests, self).__init__(uri)
if hasattr(test_case, '_in_rules'):
self._in_rules = test_case._in_rules
if hasattr(test_case, '_in6_filter_rules'):
self._in6_filter_rules = test_case._in6_filter_rules
self._test_case = test_case
def host_call_plugin(self, _1, _2, plugin, method, args):
"""Mock method for host_call_plugin to be used in unit tests
for the dom0 iptables Firewall drivers for XenAPI
"""
plugin = plugin.rstrip('.py')
if plugin == 'xenhost' and method == 'iptables_config':
# The command to execute is a json-encoded list
cmd_args = args.get('cmd_args', None)
cmd = jsonutils.loads(cmd_args)
if not cmd:
ret_str = ''
else:
output = ''
process_input = args.get('process_input', None)
if cmd == ['ip6tables-save', '-
|
INCF/pybids
|
bids/conftest.py
|
Python
|
mit
| 1,063
| 0.003763
|
"""
This module allows you to mock the config file as needed.
A default fixture that simply returns a safe-to-modify copy of
the default value is provided.
This can be overridden by parametrizing over the option you wish to
mock.
e.g.
>>> @pytest.mark.parametrize("extension_initial_dot", (True, False))
... def test_fixture(mock_config, extension_initial_dot):
... import bids
... assert bids.config.get_option("extension_initial_dot") == extension_initial_dot
"""
from unittest.mock import patch
import pytest
@pytest.fixture
def config_paths():
import bids.config
return bids.config.get_option('config_paths').copy()
@pytest.fixture
def extension_initial_dot():
import bids.config
return bids.config.get_option('extension_initial_dot')
@pytest.fixture
def mock_config(config_paths, extension_initial_dot):
import bids.config
with patch.
|
dict('bids.config._settings'):
bids.config._settings['co
|
nfig_paths'] = config_paths
bids.config._settings['extension_initial_dot'] = extension_initial_dot
yield
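# A minimal test sketch using the fixture with its default settings (hypothetical):
#
#   def test_defaults(mock_config):
#       import bids
#       assert isinstance(bids.config.get_option("config_paths"), list)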
|
karimbahgat/PyAgg
|
__private__/temp_dev/rendererclass3orrevert.py
|
Python
|
mit
| 12,788
| 0.014154
|
# Check dependencies
import PIL, PIL.Image, PIL.ImageTk
import aggdraw
import affine
import itertools
def grouper(iterable, n):
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=None, *args)
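# e.g. list(grouper([x1, y1, x2, y2, x3, y3], 2)) -> [(x1, y1), (x2, y2), (x3, y3)];
# a flat coordinate list is walked pairwise, padded with None when the length is odd.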
# Main class
class AggRenderer:
"""
This class is used to draw each feature with aggdraw as long as
it is given instructions via a color/size/options dictionary.
"""
def __init__(self):
self.sysfontfolders = dict([("windows","C:/Windows/Fonts/")])
self.fontfilenames = dict([("default", "TIMES.TTF"),
("times new roman","TIMES.TTF"),
("arial","ARIAL.TTF")])
def new_image(self, width, height, background=None, mode="RGBA"):
"""
This must be called before doing any rendering.
        Note: this replaces any previously drawn image, so be sure to
        retrieve the old image before calling it again to avoid losing work.
"""
self.img = PIL.Image.new(mode, (width, height), background)
self.width,self.height = width,height
self.drawer = aggdraw.Draw(self.img)
# Image operations
def resize(self, width, height):
pass
def rotate(self, angle):
pass
def flip():
pass
def move():
pass
def crop():
pass
def skew():
pass
def tilt_right(self, angle):
# use pyeuclid to get the Matrix4 coefficients for the PIL perspective function
pass
def tilt_top(self, angle):
# use pyeuclid to get the Matrix4 coefficients for the PIL perspective function
pass
# Color quality
def brightness():
pass
def contrast():
pass
def color_tint():
|
# add rgb color to each pixel
pass
# Drawing
def coordinate_space(self, bbox=None, center=None, width=None, height=None, lock_ratio=False):
"""
Defines which areas of the screen represent which areas in the
|
given drawing coordinates. Default is to draw directly with
screen pixel coordinates. Call this method with no arguments to
reset to this default. The screen coordinates can be specified in
two ways: with a bounding box, or with a coordinate point to center
the view on. WARNING: Currently, if using the center approach you
cannot specify the direction of the axes, so the zero-point is simply
assumed to be in the topleft.
- bbox: If supplied, each corner of the bbox will be mapped to each corner of the screen.
- center: The coordinate point to focus on at the center of the screen.
- width: Width to see around the center point.
- height: Height to see around the center point.
- lock_ratio: Set to True if wanting to constrain the coordinate space to have the same width/height ratio as the image, in order to avoid distortion. Default is False.
"""
# default to pixel coords if empty
        if bbox == center == width == height == None:
            self.drawer.settransform()
            return
        # calculate bbox from center if provided
        if not bbox and center:
            midx,midy = center
            halfwidth = width / 2.0
            halfheight = height / 2.0
            bbox = [midx - halfwidth, midy - halfheight,
                    midx + halfwidth, midy + halfheight]
        # basic info
        xleft,ytop,xright,ybottom = bbox
        x2x = (xleft,xright)
        y2y = (ybottom,ytop)
        xwidth = max(x2x)-min(x2x)
        yheight = max(y2y)-min(y2y)
# maybe constrain the view ratio
# NOT WORKING
if lock_ratio:
print self.width,self.height
coordratio = xwidth/float(yheight)
screenratio = self.width/float(self.height)
if screenratio < 1:
yheight /= screenratio
print screenratio, xwidth, yheight
## wratio = self.width/float(xwidth)
## hratio = self.height/float(yheight)
## ratio = max(wratio,hratio)
## xwidth = ratio*xwidth
## yheight = ratio*yheight
# works
## aspect = self.width/float(self.height) # multiply this with longest side
## print xwidth,yheight,self.width,self.height,aspect
##
## if xwidth < yheight:
## #xwidth = self.width/float(xwidth)
## yheight = xwidth * aspect
## elif yheight < xwidth:
## #yheight = self.height/float(yheight)
## xwidth = yheight * aspect
## print xwidth,yheight
## print (xwidth > yheight),(self.width > self.height)
## screen_ratio = self.height / float(self.width)
## coord_ratio = yheight / float(xwidth)
## if coord_ratio != screen_ratio:
## if coord_ratio > screen_ratio:
## use_ratio = screen_ratio
## elif screen_ratio > coord_ratio:
## use_ratio = coord_ratio
## # coords too wide, constrain x based on y
## if yheight < xwidth: xwidth = yheight * use_ratio
## # coords too thin, constrain y based on x
## if xwidth < yheight: yheight = xwidth * use_ratio
# Note: The sequence of matrix multiplication is important and sensitive.
# scale ie resize world to screen coords
scalex = self.width / float(xwidth)
scaley = self.height / float(yheight)
scaled = affine.Affine.scale(scalex,scaley)
scaled *= affine.Affine.translate(-xleft,-ytop) # to force anchor upperleft world coords to upper left screen coords
# flip world coords if axes run in different direction than screen axes
xflip = xright < xleft
yflip = ybottom < ytop
if xflip: xflipoff = xwidth
else: xflipoff = 0
if yflip: yflipoff = yheight
else: yflipoff = 0
flipped = affine.Affine.translate(xflipoff,yflipoff) # to make the flipping stay in same place
flipped *= affine.Affine.flip(xflip,yflip)
# calculate the final coefficients and set as the drawtransform
transcoeffs = (scaled * flipped).coefficients
a,b,c,d,e,f = transcoeffs
self.drawer.settransform((a,b,c,d,e,f))
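        # Worked example (sketch): mapping bbox=(0, 0, 100, 50) onto a 200x100 image
        # gives scalex = 200/100 = 2 and scaley = 100/50 = 2, so world point (10, 5)
        # lands on screen pixel (20, 10); the flip matrix only contributes when the
        # bbox axes run opposite to the screen axes (e.g. y increasing upwards).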
def draw_point(self, xy, symbol="circle", **options):
"""
Draw a point as one of several symbol types.
"""
args = []
fillsize = options["fillsize"]
if options["outlinecolor"]:
pen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
args.append(pen)
if options["fillcolor"]:
brush = aggdraw.Brush(options["fillcolor"])
args.append(brush)
x,y = xy
bbox = [x-fillsize, y-fillsize, x+fillsize, y+fillsize]
if symbol == "circle":
self.drawer.ellipse(bbox, *args)
elif symbol == "square":
self.drawer.rectangle(bbox, *args)
def draw_lines(self, coords, **options):
"""
Connect a series of flattened coordinate points with one or more lines.
"""
path = aggdraw.Path()
def traverse_ring(coords):
# begin
coords = grouper(coords, 2)
startx,starty = next(coords)
path.moveto(startx, starty)
# connect to each successive point
for nextx,nexty in coords:
path.lineto(nextx, nexty)
# get drawing tools from options
args = []
if options["outlinecolor"]:
pen = aggdraw.Pen(options["outlinecolor"], options["outlinewidth"])
args.append(pen)
if options["fillcolor"]:
brush = aggdraw.Brush(options["fillcolor"])
args.append(brush)
# draw the constructed path
self.drawer.path((0,0), path, *args)
def draw_polygon(self, coords, holes=[], **options):
"""
Draw polygon and holes with color fill.
Note: holes must be cou
|
baverman/cachel
|
cachel/offload.py
|
Python
|
mit
| 8,287
| 0.000724
|
import logging
from functools import wraps
from collections import deque
from threading import Thread
from time import time, sleep
from .base import make_key_func, get_serializer, get_expire
from .compat import PY2, listitems
log = logging.getLogger('cachel')
class OffloadCacheWrapper(object):
def __init__(self, func, keyfunc, serializer, cache1,
cache2, ttl1, ttl2, expire, offload):
self.id = '{}.{}'.format(func.__module__, func.__name__)
self.func = func
self.cache1 = cache1
self.cache2 = cache2
self.keyfunc = keyfunc
self.dumps, self.loads = serializer
self.ttl1 = ttl1
self.ttl2 = ttl2
self.expire = expire or ttl1
self.offload = offload
def __call__(self, *args, **kwargs):
k = self.keyfunc(*args, **kwargs)
result = self.cache1.get(k)
if result is None:
result = self.cache2.get(k)
if result is None:
result = self.func(*args, **kwargs)
sdata = self.dumps(result)
self.cache1.set(k, sdata, self.ttl1)
self.cache2.set(k, self.dumps2(sdata), self.ttl2)
else:
expire, result = self.loads2(result)
self.cache1.set(k, result, self.ttl1)
if time() > expire:
self.offload(self, k, args, kwargs)
return self.loads(result)
return result
else:
return self.loads(result)
def loads2(self, data):
expire, _, data = data.partition(b':')
return int(expire), data
def dumps2(self, data, now=None): # pragma: nocover
expire = int((now or time()) + self.expire)
if PY2:
return '{}:{}'.format(expire, data)
else:
return str(expire).encode() + b':' + data
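    # Cache2 wire format (sketch): dumps2 prefixes the serialized payload with a unix
    # "soft expiry" timestamp, e.g. b"1700000042:<payload>"; loads2 splits on the first
    # b':' so __call__ can serve the stale payload immediately and schedule a refresh
    # through offload() once time() passes that prefix.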
def get(self, *args, **kwargs):
k = self.keyfunc(*args, **kwargs)
result = self.cache2.get(k)
if result:
return self.loads(self.loads2(result)[1])
def set(self, value, *args, **kwargs):
k = self.keyfunc(*args, **kwargs)
self.cache2.set(k, self.dumps2(self.dumps(value)), self.ttl2)
def invalidate(self, *args, **kwargs):
key = self.keyfunc(*args, **kwargs)
self.cache2.delete(key)
class OffloadObjectsCacheWrapper(OffloadCacheWrapper):
def __call__(self, ids, *args, **kwargs):
if not isinstance(ids, (list, tuple)):
ids = list(ids)
loads = self.loads
loads2 = self.loads2
now = time()
keys = self.keyfunc(ids, *args, **kwargs)
cresult = {}
if keys:
for oid, value in zip(ids, self.cache1.mget(keys)):
if value is not None:
cresult[oid] = loads(value)
c2_ids_to_fetch = list(set(ids) - set(cresult))
c2_keys = self.keyfunc(c2_ids_to_fetch, *args, **kwargs)
c2_result = {}
offload_ids = []
update_data = []
if c2_ids_to_fetch:
for key, oid, value in zip(c2_keys, c2_ids_to_fetch, self.cache2.mget(c2_keys)):
if value is not None:
expire, data = loads2(value)
update_data.append((key, data))
if now > expire:
offload_ids.append(oid)
c2_result[oid] = loads(data)
cresult.update(c2_result)
if update_data:
self.cache1.mset(update_data, self.ttl1)
if offload_ids:
self.offload(self, offload_ids, args, kwargs, multi=True)
ids_to_fetch = set(c2_ids_to_fetch) - set(c2_result)
if ids_to_fetch:
fresult = self._get_func_result(ids_to_fetch, args, kwargs, now)
cresult.update(fresult)
return cresult
def _get_func_result(self, ids, args, kwargs, now=None):
now = now or time()
|
dumps = self.dumps
dumps2 = self.dumps2
fresult = self.func(ids, *args, **kwargs)
if fresult:
to_cache_pairs = listitems(fresult)
to_cache_ids, to_cache_values = zip(*to_cache_pairs)
keys = self.
|
keyfunc(to_cache_ids, *args, **kwargs)
values = [dumps(r) for r in to_cache_values]
evalues = [dumps2(r, now) for r in values]
self.cache1.mset(zip(keys, values), self.ttl1)
self.cache2.mset(zip(keys, evalues), self.ttl2)
nonexisting_ids = set(ids) - set(fresult)
if nonexisting_ids:
self.invalidate(nonexisting_ids, *args, **kwargs)
return fresult
def one(self, id, *args, **kwargs):
default = kwargs.pop('_default', None)
return self([id], *args, **kwargs).get(id, default)
def invalidate(self, ids, *args, **kwargs):
keys = self.keyfunc(ids, *args, **kwargs)
self.cache2.mdelete(keys)
def default_offload(cache, key, args, kwargs, multi=False):
try:
if multi:
cache._get_func_result(key, args, kwargs)
else:
result = cache.func(*args, **kwargs)
sdata = cache.dumps(result)
cache.cache1.set(key, sdata, cache.ttl1)
cache.cache2.set(key, cache.dumps2(sdata), cache.ttl2)
except Exception:
log.exception('Error refreshing offload cache key')
def offloader(func):
@wraps(func)
def inner(cache, key, args, kwargs, multi=False):
params = {'cache_id': cache.id, 'key': key, 'args': args,
'kwargs': kwargs, 'multi': multi}
func(params)
return inner
class make_offload_cache(object):
def __init__(self, cache1, cache2, ttl1=600, ttl2=None, expire=None, fmt='msgpack',
fuzzy_ttl=True, offload=None):
self.caches = {}
self.cache1 = cache1
self.cache2 = cache2
self.ttl1 = ttl1
self.ttl2 = ttl2
self.expire = expire
self.fmt = fmt
self.fuzzy_ttl = fuzzy_ttl
self.offload = offload
def _wrapper(self, cls, tpl, ttl1, ttl2, expire, fmt, fuzzy_ttl, multi=False):
def decorator(func):
ttl = ttl1 or self.ttl1
fttl = self.fuzzy_ttl if fuzzy_ttl is None else fuzzy_ttl
cache = cls(
func,
make_key_func(tpl, func, multi),
get_serializer(fmt or self.fmt),
self.cache1,
self.cache2,
get_expire(ttl, fttl),
ttl2 or self.ttl2 or ttl * 2,
expire or self.expire,
self.offload or default_offload
)
self.caches[cache.id] = cache
return wraps(func)(cache)
return decorator
def __call__(self, tpl, ttl1=None, ttl2=None, expire=None, fmt=None, fuzzy_ttl=None):
return self._wrapper(OffloadCacheWrapper, tpl, ttl1,
ttl2, expire, fmt, fuzzy_ttl)
def objects(self, tpl, ttl1=None, ttl2=None, expire=None, fmt=None, fuzzy_ttl=None):
return self._wrapper(OffloadObjectsCacheWrapper, tpl, ttl1, ttl2,
expire,fmt, fuzzy_ttl, multi=True)
def offload_helper(self, params):
cache_id = params.pop('cache_id')
cache = self.caches.get(cache_id)
if not cache: # pragma: no cover
log.error('Cache not found: %s', cache_id)
return
default_offload(cache, **params)
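# Rough usage sketch (hypothetical backends and key template; see make_key_func
# for the exact template syntax supported):
#
#   cache = make_offload_cache(fast_backend, slow_backend, ttl1=60, ttl2=3600)
#
#   @cache('user:{}')
#   def get_user(user_id):
#       return load_user(user_id)      # hypothetical loader
#
#   get_user(42)             # fills cache1 (ttl1) and cache2 (ttl2) on a miss
#   get_user.invalidate(42)  # drops the cache2 entry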
class ThreadOffloader(object):
def __init__(self, size=1000):
self.queue = deque(maxlen=size)
def __call__(self, cache, key, args, kwargs, multi=False):
self.queue.appendleft((cache, key, args, kwargs, multi))
def pop(self):
try:
return self.queue.pop()
except IndexError:
pass
def worker(self):
pop = self.pop
wait = None
while True:
item = pop()
if item:
default_offload(*item)
wait = None
else:
if not wait:
wait = iter([0.001, 0.01, 0.1, 0.5])
sleep(next(wait, 1))
def ru
|
kephale/TuftsCOMP135_Spring2016
|
Lecture18/notebooks/Lecture18.py
|
Python
|
apache-2.0
| 9,270
| 0.01068
|
%matplotlib inline
import itertools
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy
import scipy.spatial
from scipy import linalg
from sklearn.cluster import KMeans
from sklearn import mixture
from scipy.io import arff
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
from sklearn import preprocessing
import os
import os.path
import random
import pickle
rng = np.random.RandomState(0)
image_directory = '/Users/kyle/git/TuftsCOMP135_Spring2016/Lecture18/images/'
# https://archive.ics.uci.edu/ml/datasets/Iris
iris = datasets.load_iris()
fullX = iris.data[:, :2]
fullX1 = iris.data[:, 0]
fullX2 = iris.data[:, 1]
fully = iris.target
X = []
X1 = []
X2 = []
y = []
for k in range(len(fully)):
if fully[k] != 2:
X += [ fullX[k] ]
y += [ fully[k] ]
X1 += [ fullX1[k] ]
X2 += [ fullX2[k] ]
colors = []
color_map = [ (1,0,0), (0,0,1) ]
for c in y:
if c == 0:
colors += [ color_map[0] ]
else:
colors += [ color_map[1] ]
areas = [ 80 for _ in range(len(X)) ]
plt.scatter( X1, X2, c=colors, s=areas )
ax=plt.gca()
ax.set_xlabel('sepal length')
ax.set_ylabel('sepal width')
handles, labels = ax.get_legend_handles_labels()
#plt.show()
plt.savefig(image_directory+'iris_setosa_versicolor.png')
# SVM: Plot the margin
nX = preprocessing.scale(X)
nX1 = [ r[0] for r in nX ]
nX2 = [ r[1] for r in nX ]
clf = svm.SVC(kernel='linear')
clf.fit(nX, y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
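# Why this is the boundary (sketch): the fitted linear SVM satisfies
# w[0]*x + w[1]*y + intercept = 0 on the decision line, i.e.
# y = -(w[0]/w[1])*x - intercept/w[1], which is exactly a*xx - intercept/w[1];
# the dashed margin lines below reuse the same slope and just pass through the
# outermost support vectors.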
# plot the parallels to the
|
separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[
|
:, 1],
s=150, facecolors='none')
#plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.scatter( nX1, nX2, c=colors, s=areas )
ax=plt.gca()
ax.set_xlabel('sepal length')
ax.set_ylabel('sepal width')
plt.axis('tight')
plt.show()
# Plotting kernels for Iris
# Our dataset and targets
nX = preprocessing.scale(X)
nX1 = [ r[0] for r in nX ]
nX2 = [ r[1] for r in nX ]
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(nX, y)
# plot the line, the points, and the nearest vectors to the plane
#plt.figure(fignum, figsize=(4, 3))
plt.figure(fignum)
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(nX1, nX2, c=y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
#plt.figure(fignum, figsize=(4, 3))
plt.figure(fignum)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.savefig(image_directory+'svm_kernelexample_' + kernel + '_iris.svg')
# Noisy version of the data
rng = np.random.RandomState(0)
noise_level = 0.9
noisyX1 = [ x + noise_level * np.random.rand() for x in X1 ]
noisyX2 = [ x + noise_level * np.random.rand() for x in X2 ]
areas = [ 80 for _ in range(len(X)) ]
plt.scatter( noisyX1, noisyX2, c=colors, s=areas )
ax=plt.gca()
ax.set_xlabel('sepal length')
ax.set_ylabel('sepal width')
handles, labels = ax.get_legend_handles_labels()
#plt.show()
plt.savefig(image_directory+'iris_setosa_versicolor_noisy.svg')
# SVM: Plot the margin
nX = preprocessing.scale( np.array( zip( noisyX1, noisyX2 )) )
nX1 = [ r[0] for r in nX ]
nX2 = [ r[1] for r in nX ]
clf = svm.SVC(kernel='linear')
clf.fit(nX, y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-2.5, 2.5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=150, facecolors='none')
#plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.scatter( nX1, nX2, c=colors, s=areas )
ax=plt.gca()
ax.set_xlabel('sepal length')
ax.set_ylabel('sepal width')
plt.axis('tight')
#plt.show()
plt.savefig(image_directory+'iris_setosa_versicolor_noisy_svm.svg')
# Scikit version
# we create 40 separable points
np.random.seed(0)
skX = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
skY = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(skX, skY)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k--')
plt.plot(xx, yy_down, 'k-')
plt.plot(xx, yy_up, 'k-')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(skX[:, 0], skX[:, 1], c=skY, cmap=plt.cm.Paired)
plt.axis('tight')
#plt.show()
#plt.savefig(image_directory+'random_data_margins.svg')
#plt.savefig(image_directory+'svm_decisionrule.png')
#plt.savefig(image_directory+'svm_constraints.png')
plt.savefig(image_directory+'svm_vector_within_margin.png')
# Scikit Learn's SVM kernels
# Our dataset and targets
skX = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
skY = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(skX, skY)
# plot the line, the points, and the nearest vectors to the plane
#plt.figure(fignum, figsize=(4, 3))
plt.figure(fignum)
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(skX[:, 0], skX[:, 1], c=skY, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
#plt.figure(fignum, figsize=(4, 3))
plt.figure(fignum)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.savefig(image_directory+'svm_kernelexample_' + kernel + '_sk.svg')
#plt.show()
### Draw multiple steps of SVM
num_steps = 20
for steps in range( 1, num_steps ):
# fit the model
clf = svm.SVC(kernel='linear',max_iter=steps)
clf.fit(skX, skY)
|
peterwilletts24/Monsoon-Python-Scripts
|
rain/land_sea_diurnal/rain_mask_save_lat_lon_west_southern_indian_ocean.py
|
Python
|
mit
| 7,070
| 0.019378
|
import os, sys
import datetime
import iris
import iris.unit as unit
import iris.analysis.cartography
import numpy as np
from iris.coord_categorisation import add_categorised_coord
diag = 'avg.5216'
cube_name_explicit='stratiform_rainfall_rate'
cube_name_param='convective_rainfall_rate'
pp_file_path='/projects/cascade/pwille/moose_retrievals/'
experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzns', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ]
#experiment_ids = [ 'dklwu', 'dklzq', 'dklyu', 'dkmbq', 'dkbhu', 'djznu', 'dkhgu', 'djzns' ]
#experiment_ids = ['djznu', 'dkhgu' ] # High Res
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dkjxq']
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dkmbq', 'dklzq', 'dkjxq' ] # Params
# Load global LAM
dtmindt = datetime.datetime(2011,8,19,0,0,0)
dtmaxdt = datetime.datetime(2011,9,7,23,0,0)
dtmin = unit.date2num(dtmindt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
dtmax = unit.date2num(dtmaxdt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
time_constraint = iris.Constraint(time= lambda t: dtmin <= t.point <= dtmax)
# Min and max lats lons from smallest model domain (dkbhu) - see spreadsheet
latmin=-10
latmax=5
lonmin=64.115
lonmax=80
lat_constraint=iris.Constraint(grid_latitude= lambda la: latmin <= la.point <= latmax)
lon_constraint=iris.Constraint(grid_longitude= lambda lo: lonmin <= lo.point <= lonmax)
fg = '%sdjzn/djznw/%s.pp' % (pp_file_path, diag)
glob_load = iris.load_cube(fg, ('%s' % cube_name_param) & time_constraint)
## Get time points from global LAM to use as time constraint when loading other runs
time_list = glob_load.coord('time').points
glob_tc = iris.Constraint(time=time_list)
del glob_load
def unrotate_pole_update_cube(cube):
lat
|
= cube.coord('grid_latitude').points
lon = cube.coord('grid_longitude').points
|
cs = cube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' %s - %s - Unrotate pole %s' % (diag, experiment_id, cs)
lons, lats = np.meshgrid(lon, lat)
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
for i, coord in enumerate (cube.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_cube = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_cube = i
csur=cs.ellipsoid
cube.remove_coord('grid_latitude')
cube.remove_coord('grid_longitude')
cube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord_cube)
cube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord_cube)
return cube
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
fu = '%s%s/%s/%s.pp' % (pp_file_path, expmin1, experiment_id, diag)
flsm = '%s%s/%s/30.pp' % (pp_file_path, expmin1, experiment_id)
print experiment_id
sys.stdout.flush()
try:
#cube_names = ['%s' % cube_name_param, '%s' % cube_name_explicit]
cubeconv = iris.load_cube(fu,'%s' % cube_name_param & glob_tc)
cubeconv= unrotate_pole_update_cube(cubeconv)
cubestrat = iris.load_cube(fu,'%s' % cube_name_explicit & glob_tc)
cubestrat= unrotate_pole_update_cube(cubestrat)
print cubestrat
cube=cubeconv.extract(lat_constraint & lon_constraint) + cubestrat.extract(lat_constraint & lon_constraint)
cube.rename('total_precipitation_rate')
except iris.exceptions.ConstraintMismatchError:
cube = iris.load_cube(fu, ('%s' % cube_name_explicit) & glob_tc)
cube= unrotate_pole_update_cube(cube)
cube = cube.extract(lat_constraint & lon_constraint)
# Mean at each grid point by hour of day and save
add_categorised_coord(cube, 'hour', 'time',lambda coord, x: coord.units.num2date(x).hour)
diurnal_mean_cube = cube.aggregated_by('hour', iris.analysis.MEAN)
del cube
#try:
# iris.save(diurnal_mean_cube, '%s%s/%s/%s_rainfall_hourly_mean.pp' % (pp_file_path, expmin1, experiment_id, diag))
#except Exception, e:
# print e
# pass
# Load land/sea mask
lsm = iris.load_cube(flsm, ('land_binary_mask' ) )
lsm = unrotate_pole_update_cube(lsm)
lsm=lsm.extract(lat_constraint & lon_constraint)
print lsm
sys.stdout.flush()
# For Sea and Land, mask area and calculate mean of each hour for sea/land and SAVE as numpy array
#tdmc= diurnal_mean_cube.collapsed(['grid_latitude', 'grid_longitude'], iris.analysis.MEAN)
#total_diurnal_mean_cube=[tdmc.data.data, diurnal_mean_cube.coord('hour').points+0.5]
#print total_diurnal_mean_cube
#np.save('%s%s/%s/%s_total_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), total_diurnal_mean_cube)
for s in ([0]):
nancube = np.where(lsm.data==s, diurnal_mean_cube.data, np.NaN)
maskedcube = np.ma.masked_array(nancube,np.isnan(nancube))
total_rainfall = np.mean(maskedcube.reshape(maskedcube.shape[0], (maskedcube.shape[1]*maskedcube.shape[2])), axis=1)
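        # Shape bookkeeping (sketch): maskedcube is (hour, lat, lon); the reshape
        # flattens lat/lon into a single axis so the masked mean over axis=1 gives
        # one area-mean rain rate per hour of day, ignoring the NaN (masked) cells.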
trnp =[total_rainfall.data, diurnal_mean_cube.coord('hour').points+0.5]
if s == 0:
# Areas of ocean
print total_rainfall
np.save('%s%s/%s/%s_sea_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), trnp)
#np.save('%s%s/%s/%s_sea_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_MASKED_ARRAY' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), maskedcube)
if s == 1:
# Areas of land
np.save('%s%s/%s/%s_land_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), trnp)
#np.save('%s%s/%s/%s_land_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_MASKED_ARRAY' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), maskedcube)
del lsm
#tdmc= diurnal_mean_cube.collapsed(['grid_latitude', 'grid_longitude'], iris.analysis.MEAN)
#total_diurnal_mean_cube=tdmc
#np.save('%s%s/%s/%s_total_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), tdmc.data.data)
#np.save('%s%s/%s/%s_total_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_MASKED_ARRAY' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), ma)
|
meyersj/geotweet
|
geotweet/tests/unit/mapreduce/utils/lookup_tests.py
|
Python
|
mit
| 3,949
| 0.003798
|
import unittest
import os
from os.path import dirname
import sys
import json
from rtree import index
from . import ROOT
from geotweet.mapreduce.utils.lookup import project, SpatialLookup
testdata = os.path.join(dirname(os.path.abspath(__file__)), 'testdata')
def read(geojson):
return json.loads(open(os.path.join(testdata, geojson), 'r').read())
"""
P53000
______
| |
| |
|______| Polygon 2
P3200
______
| |
| P |
|______| Polygon 1
"""
POLYGON_1 = read('polygon_102500_1.geojson')
POLYGON_2 = read('polygon_102500_2.geojson')
POINT_WITHIN = read('point_within.geojson')
POINT_53000M = read('point_53000m.geojson')
POINT_3200M = read('point_3200m.geojson')
def init_polygon_1_index():
location = SpatialLookup()
idx = index.Rtree()
polygon = location._build_obj(POLYGON_1)
location.data_store[1] = polygon
idx.insert(1, polygon['geometry'].bounds)
location.idx = idx
return location
def init_polygon_2_index():
location = init_polygon_1_index()
polygon = location._build_obj(POLYGON_2)
location.data_store[2] = polygon
location.idx.insert(2, polygon['geometry'].bounds)
return location
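# How these helpers fit together (sketch): the rtree index stores only integer ids
# keyed by each polygon's bounding box, while data_store maps the same id back to
# the parsed feature, so get_object presumably queries the index first and then
# checks the actual geometry (optionally buffered) against the candidates.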
class GetObjectBasic(unittest.TestCase):
def setUp(self):
self.location = init_polygon_1_index()
def assert_found(self, point):
found = self.location.get_object(point)
error = "get_object failed to return object"
self.assertIsNotNone(found, error)
def assert_none(self, point):
found = self.location.get_object(point)
error = "get_object should return None: Actual < {0} >".format(found)
self.assertIsNone(found, error)
def test_basic(self):
self.assert_found(project(POINT_WITHIN['geometry']['coordinates']))
self.assert_none(project(POINT_3200M['geometry']['coordinates']))
self.assert_none(project(POINT_53000M['geometry']['coordinates']))
def test_buffer_none(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=0)
self.assertIsNone(found)
def test_buffer_outside_buffer(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=3000)
self.assertIsNone(found)
def test_buffer_within_buffer(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=4000)
self.assertIsNotNone(found)
class GetObjectOrder(unittest.TestCase):
def setUp(self):
self.location = init_polygon_2_index()
def assert_found(self, point):
found = self.location.get_object(point)
error = "get_object failed to return object"
self.assertIsNotNone(found, error)
def assert_none(self, point):
found = self.location.get_object(point)
error = "get_object should return None: Actual < {0} >".format(found)
self.assertIsNone(found, error)
def test_buffer_nearest1(self):
point = project(POINT_WITHIN['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=100000)
self.assertIsNotNone(found, "get_object failed to return object")
error = "get_object failed to return object with id=polygon1: Actual < {0} >"
self.assertEqual('polygon1', found['id'], error.format(found['id']))
def test_buffer_nearest2(self):
point = project(POINT_3200M['geometry']['coordinates'])
found =
|
self.location.get_object(point, buffer_size=100000)
self.assertIsNotNone(found, "get_object failed to return object")
error = "get_object failed to return object with id=polygon1: Actual
|
< {0} >"
self.assertEqual('polygon1', found['id'], error.format(found['id']))
if __name__ == "__main__":
unittest.main()
|
naver/hubblemon
|
redis_mon/redis_view.py
|
Python
|
apache-2.0
| 2,771
| 0.024901
|
#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations un
|
der the License.
#
import os, socket, sys, time
import data_loader
from datetime import datetime
hubblemon_path = os.path.join(os.
|
path.dirname(__file__), '..')
sys.path.append(hubblemon_path)
import common.core
redis_preset = [['memory', 'memory_human', 'memory_lua', 'memory_rss'], 'mem_frag', ['cpu_user', 'cpu_sys', 'cpu_user_children', 'cpu_sys_children'],
'connections', (lambda x : x['keyspace_hits'] / (x['keyspace_hits'] + x['keyspace_misses']) * 100, 'hit ratio'), 'expired_keys', 'evicted_keys', 'cmds_processed',
['cmd_get', 'cmd_set', 'cmd_mget', 'cmd_mset'], ['cmd_del', 'cmd_expire', 'cmd_checkpoint'],
['cmd_linsert', 'cmd_lpush', 'cmd_lpop', 'cmd_llen'], ['cmd_lindex', 'cmd_lrange'],
['cmd_sadd', 'cmd_scard', 'cmd_set', 'cmd_srem'], ['cmd_sismember', 'cmd_smembers'],
['cmd_zadd', 'cmd_zcard', 'cmd_zrem'], ['cmd_zrange', 'cmd_zrank', 'cmd_zscore']]
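# The preset entries above come in three shapes (sketch of the convention): a bare
# string plots a single stat, a list groups several stats on one chart, and a
# (callable, label) tuple plots a derived series - here keyspace_hits over total
# lookups scaled to a percentage, labelled 'hit ratio'.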
def redis_view(path, title = ''):
return common.core.loader(path, redis_preset, title)
#
# chart list
#
redis_cloud_map = {}
last_ts = 0
def init_plugin():
print('#### redis init ########')
ret = get_chart_list({})
print(ret)
def get_chart_data(param):
#print(param)
global redis_cloud_map
type = 'redis_stat'
if 'type' in param:
type = param['type']
if 'instance' not in param or 'server' not in param:
return None
instance_name = param['instance']
server_name = param['server']
if type == 'redis_stat':
for node in redis_cloud_map[server_name]:
if node.startswith(instance_name):
results = common.core.loader(server_name + '/' + node, redis_preset, title=node)
break
return results
def get_chart_list(param):
#print(param)
global redis_cloud_map
global last_ts
ts = time.time()
if ts - last_ts >= 300:
redis_cloud_map_tmp = {}
entity_list = common.core.get_entity_list()
for entity in entity_list:
instance_list = common.core.get_table_list_of_entity(entity, 'redis_')
if len(instance_list) > 0:
redis_cloud_map_tmp[entity] = instance_list
redis_cloud_map = redis_cloud_map_tmp
last_ts = ts
if 'type' in param:
type = param['type']
return (['server', 'instance'], redis_cloud_map)
|
Nexenta/s3-tests
|
virtualenv/lib/python2.7/site-packages/isodate/tests/test_time.py
|
Python
|
mit
| 6,554
| 0.001984
|
##############################################################################
# Copyright 2009, Gerhard Weis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
'''
Test cases for the isotime module.
'''
import unittest
from datetime import time
from isodate import parse_time, UTC, FixedOffset, ISO8601Error, time_isoformat
from isodate import TIME_BAS_COMPLETE, TIME_BAS_MINUTE
from isodate import TIME_EXT_COMPLETE, TIME_EXT_MINUTE
from isodate import TIME_HOUR
from isodate import TZ_BAS, TZ_EXT, TZ_HOUR
# the following list contains tuples of ISO time strings and the expected
# result from the parse_time method. A result of None means an ISO8601Error
# is expected.
TEST_CASES = [('232050', time(23, 20, 50), TIME_BAS_COMPLETE + TZ_BAS),
('23:20:50', time(23, 20, 50), TIME_EXT_COMPLETE + TZ_EXT),
('2320', time(23, 20), TIME_BAS_MINUTE),
('23:20', time(23, 20), TIME_EXT_MINUTE),
('23', time(23), TIME_HOUR),
('232050,5', time(23, 20, 50, 500000), None),
('23:20:50.5', time(23, 20, 50, 500000), None),
# test precision
('15:33:42.123456', time(15, 33, 42, 123456), None),
('15:33:42.1234564', time(15, 33, 42, 123456), None),
('15:33:42.1234557', time(15, 33, 42, 123456), None),
('2320,8', time(23, 20, 48), None),
('23:20,8', time(23, 20, 48), None),
('23,3', time(23, 18), None),
('232030Z', time(23, 20, 30, tzinfo=UTC),
TIME_BAS_COMPLETE + TZ_BAS),
('2320Z', time(23, 20, tzinfo=UTC), TIME_BAS_MINUTE + TZ_BAS),
('23Z', time(23, tzinfo=UTC), TIME_HOUR + TZ_BAS),
('23:20:30Z', time(23, 20, 30, tzinfo=UTC),
TIME_EXT_COMPLETE + TZ_EXT),
('23:20Z', time(23, 20, tzinfo=UTC), TIME_EXT_MINUTE + TZ_EXT),
('152746+0100', time(15, 27, 46,
tzinfo=FixedOffset(1, 0, '+0100')),
TIME_BAS_COMPLETE + TZ_BAS),
('152746-0500', time(15, 27, 46,
tzinfo=FixedOffset(-5, 0, '-0500')),
TIME_BAS_COMPLETE + TZ_BAS),
('152746+01', time(15, 27, 46,
tzinfo=FixedOffset(1, 0, '+01:00')),
TIME_BAS_COMPLETE + TZ_HOUR),
('152746-05', time(15, 27, 46,
tzinfo=FixedOffset(-5, -0, '-05:00')),
TIME_BAS_COMPLETE + TZ_HOUR),
('15:27:46+01:00', time(15, 27, 46,
tzinfo=FixedOffset(1, 0, '+01:00')),
TIME_EXT_COMPLETE + TZ_EXT),
('15:27:46-05:00', time(15, 27, 46,
tzinfo=FixedOffset(-5, -0, '-05:00')),
TIME_EXT_COMPLETE + TZ_EXT),
('15:27:46+01', time(15, 27, 46,
tzinfo=FixedOffset(1, 0, '+01:00')),
TIME_EXT_COMPLETE + TZ_HOUR),
('15:27:46-05', time(15, 27, 46,
tzinfo=FixedOffset(-5, -0, '-05:00')),
TIME_EXT_COMPLETE + TZ_HOUR),
('1:17:30', None, TIME_EXT_COMPLETE)]
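# For example, the first tuple above is exercised in both directions (sketch):
#   parse_time('232050') == time(23, 20, 50)
#   time_isoformat(time(23, 20, 50), TIME_BAS_COMPLETE + TZ_BAS) == '232050'
# while an expectation of None (e.g. '1:17:30') means parse_time must raise ISO8601Error.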
def create_testcase(timestring, expectation, format):
"""
Create a TestCase class for a specific test.
This allows having a separate TestCase for each test tuple from the
TEST_CASES list, so that a failed test won't stop other tests.
"""
class TestTime(unittest.TestCase):
'''
A test case template to parse an ISO time string into a time
object.
'''
def test_parse(self):
'''
Parse an ISO time string and compare it to the expected value.
'''
if expectation is None:
self.assertRaises(ISO8601Error, parse_time, timestring)
else:
result = parse_time(timestring)
self.assertEqual(result, ex
|
pectation)
def test_format(self):
'''
Take time object and create ISO string from it.
This is the reverse test to test_parse.
'''
if expectation is None:
self.assertRaises(AttributeError,
                                  time_isoformat, expectation, format)
elif format is not None:
self.assertEqual(time_isoformat(expectation, format),
timestring)
return unittest.TestLoader().loadTestsFromTestCase(TestTime)
def test_suite():
'''
Construct a TestSuite instance for all test cases.
'''
suite = unittest.TestSuite()
for timestring, expectation, format in TEST_CASES:
suite.addTest(create_testcase(timestring, expectation, format))
return suite
# load_tests Protocol
def load_tests(loader, tests, pattern):
return test_suite()
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
miptliot/edx-platform
|
lms/djangoapps/course_api/tests/test_views.py
|
Python
|
agpl-3.0
| 8,978
| 0.002673
|
"""
Tests for Course API views.
"""
from hashlib import md5
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from nose.plugins.attrib import attr
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from ..views import CourseDetailView
from .mixins import TEST_PASSWORD, CourseApiFactoryMixin
class CourseApiTestViewMixin(CourseApiFactoryMixin):
"""
Mixin class for test helpers for Course API views
"""
def setup_user(self, requesting_user, make_inactive=False):
"""
log in the specified user and set its is_active field
"""
self.assertTrue(self.client.login(username=requesting_user.username, password=TEST_PASSWORD))
if make_inactive:
requesting_user.is_active = False
requesting_user.save()
def verify_response(self, expected_status_code=200, params=None, url=None):
"""
Ensure that sending a GET request to self.url returns the expected
status code (200 by default).
Arguments:
expected_status_code: (default 200)
params:
query parameters to include in the request. Can include
`username`.
Returns:
response: (HttpResponse) The response returned by the request
"""
query_params = {}
query_params.update(params or {})
response = self.client.get(url or self.url, data=query_params)
self.assertEqual(response.status_code, expected_status_code)
return response
@attr(shard=3)
class CourseListViewTestCase(CourseApiTestViewMixin, SharedModuleStoreTestCase):
"""
Test responses returned from CourseListView.
"""
@classmethod
def setUpClass(cls):
super(CourseListViewTestCase, cls).setUpClass()
cls.course = cls.create_course()
cls.url = reverse('course-list')
cls.staff_user = cls.create_user(username='staff', is_staff=True)
cls.honor_user = cls.create_user(username='honor', is_staff=False)
def test_as_staff(self):
self.setup_user(self.staff_user)
self.verify_response(params={'username': self.staff_user.username})
def test_as_staff_for_honor(self):
self.setup_user(self.staff_user)
self.verify_response(params={'username': self.honor_user.username})
def test_as_honor(self):
self.setup_user(self.honor_user)
self.verify_response(params={'username': self.honor_user.username})
def test_as_honor_for_explicit_self(self):
self.setup_user(self.honor_user)
self.verify_response(params={'username': self.honor_user.username})
def test_as_honor_for_staff(self):
self.setup_user(self.honor_user)
self.verify_response(expected_status_code=403, params={'username': self.staff_user.username})
def test_as_inactive_user(self):
inactive_user = self.create_user(username='inactive', is_staff=False)
self.setup_user(inactive_user, make_inactive=True)
self.verify_response(params={'username': inactive_user.username})
def test_missing_username(self):
self.setup_user(self.honor_user)
response_to_missing_username = self.verify_response(expected_status_code=200)
self.assertIsNotNone(response_to_missing_username.data) # pylint: disable=no-member
def test_not_logged_in(self):
self.client.logout()
self.verify_response()
class CourseListViewTestCaseMultipleCourses(CourseApiTestViewMixin, ModuleStoreTestCase):
"""
Test responses returned from CourseListView (with tests that modify the
courseware).
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(CourseListViewTestCaseMultipleCourses, self).setUp()
self.course = self.create_course()
self.url = reverse('course-list')
self.staff_user = self.create_user(username='staff', is_staff=True)
self.honor_user = self.create_user(username='honor', is_staff=False)
def test_filter_by_org(self):
"""Verify that CourseOverviews are filtered by the provided org key."""
self.setup_user(self.staff_user)
# Create a second course to be filtered out of queries.
alternate_course = self.create_course(
org=md5(self.course.org).hexdigest()
)
self.assertNotEqual(alternate_course.org, self.course.org)
# No filtering.
unfiltered_response = self.verify_response(params={'username': self.staff_user.username})
for org in [self.course.org, alternate_course.org]:
self.assertTrue(
any(course['org'] == org for course in unfiltered_response.data['results']) # pylint: disable=no-member
)
# With filtering.
filtered_response = self.verify_response(params={'org': self.course.org, 'username': self.staff_user.username})
self.assertTrue(
all(course['org'] == self.course.org for course in filtered_response.data['results']) # pylint: disable=no-member
)
def test_filter(self):
self.setup_user(self.staff_user)
# Create a second course to be filtered out of queries.
alternate_course = self.create_course(course='mobile', mobile_available=True)
test_cases = [
(None, [alternate_course, self.course]),
(dict(mobile=True), [alternate_course]),
(dict(mobile=False), [self.course]),
]
for filter_, expected_courses in test_cases:
params = {'username': self.staff_user.username}
if filter_:
params.update(filter_)
response = self.verify_response(params=params)
self.assertEquals(
{course['course_id'] for course in response.data['results']}, # pylint: disable=no-member
{unicode(course.id) for course in expected_courses},
"testing course_api.views.CourseListView with filter_={}".format(filter_),
)
class CourseDetailViewTestCase(CourseApiTestViewMixin, SharedModuleStoreTestCase):
"""
Test responses returned from CourseDetailView.
"""
@classmethod
def setUpClass(cls):
super(CourseDetailViewTestCase, cls).setUpClass()
cls.course = cls.create_course()
cls.hidden_course = cls.create_course(course=u'hidden', visible_to_staff_only=True)
cls.url = reverse('course-detail', kwargs={'course_key_string': cls.course.id})
cls.hidden_url = reverse('course-detail', kwargs={'course_key_string': cls.hidden_course.id})
cls.nonexistent_url = reverse('course-detail', kwargs={'course_key_string': 'edX/nope/Fall_2014'})
cls.staff_user = cls.create_user(username='staff', is_staff=True)
        cls.honor_user = cls.create_user(username='honor', is_staff=False)
def test_as_honor(self):
self.setup_user(self.honor_user)
self.verify_response(params={'username': self.honor_user.username})
def test_as_honor_for_staff(self):
self.setup_user(self.honor_user)
self.verify_response(expected_status_code=403, params={'username': self.staff_user.username})
def test_as_staff(self):
self.setup_user(self.staff_user)
        self.verify_response(params={'username': self.staff_user.username})
def test_as_staff_for_honor(self):
self.setup_user(self.staff_user)
self.verify_response(params={'username': self.honor_user.username})
def test_as_anonymous_user(self):
self.verify_response(expected_status_code=200)
def test_as_inactive_user(self):
inactive_user = self.create_user(username='inactive', is_staff=False)
self.setup_user(inactive_user, make_inactive=True)
self.verify_response(params={'username': inactive_user.username})
def test_hidden_course_as_honor(self):
self.setup_user(self.honor_user)
self.verify_response(
expected_status_code=404, url=self.hidden_url, params={'username': self.honor_user.username}
)
def test_hidden_course_as_staff(self):
        self.setup_user(self.staff_user)
|
spikeekips/ampy
|
ampy/async.py
|
Python
|
mit
| 7,935
| 0.004789
|
"""
Asyncore-based implementation of the AMP protocol.
"""
import socket
import threading
import asyncore, asynchat, socket, struct, sys
import defer, ampy
class AMP_Server(asyncore.dispatcher):
def __init__(self, port, bindHost="0.0.0.0"):
self.port = port
self.bindHost = bindHost
asyncore.dispatcher.__init__(self) # we get added to the global asyncore "map" here
def start_listening(self):
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.bind((self.bindHost, self.port))
self.listen(10)
def loop(self):
asyncore.loop()
def stop(self):
"""
I don't think this will actually cause any existing connections to be closed, and if it doesn't
then the asyncore loop won't terminate. Kinda lame.
"""
self.close()
def handle_accept(self):
conn, addr = self.accept()
self.buildProtocol(conn, addr)
def buildProtocol(self, conn, addr):
"""
Override this to instantiate your own AMP_Protocol subclass
"""
pass
def synchronized(func):
"""
Decorator to synchronize access to a method through self.lock
"""
def _synchronized(self, *args, **kw):
self.lock.acquire()
try:
return func(self, *args, **kw)
finally:
self.lock.release()
return _synchronized
KEY_LEN_READ, KEY_DATA_READ, VAL_LEN_READ, VAL_DATA_READ = range(4)
class AMP_Protocol(asynchat.async_chat):
current_key = None
counter = 0
box = None
responders = {}
def __init__(self, conn, addr):
asynchat.async_chat.__init__(self, conn)
self.addr = addr
self.ibuffer = []
self.obuffer = ""
self.set_terminator(2)
self.state = KEY_LEN_READ
self.box = {}
self.awaitingResponse = {}
self.lock = threading.Lock()
    def registerResponder(self, command, responder):
self.responders[command.commandName] = (command, responder)
def collect_incoming_data(self, data):
"""Buffer the data"""
self.ibuffer.append(data)
def found_terminator(self):
# handle buffered data and transition state
if self.state == KEY_LEN_READ:
            # keys should never be longer than 255 bytes, but we aren't actually
# enforcing that here. todo?
key_len = struct.unpack('!H', "".join(self.ibuffer))[0]
self.ibuffer = []
if key_len == 0: # a null key length (two NULL bytes) indicates the termination of this AMP box
box = self.box
self.current_key = None # no need to keep this around until the next box overwrites it
self.val_data = None # no need to keep this around until the next box overwrites it
self.box = {} # new data belongs to a new AMP box
self.processFullMessage(box)
self.set_terminator(2)
else:
self.set_terminator(key_len)
self.state = KEY_DATA_READ
elif self.state == KEY_DATA_READ:
self.current_key = "".join(self.ibuffer)
self.ibuffer = []
            self.set_terminator(2) # collect 2 bytes (length of the value that this key corresponds to)
self.state = VAL_LEN_READ
elif self.state == VAL_LEN_READ:
val_len = struct.unpack('!H', "".join(self.ibuffer))[0]
self.ibuffer = []
self.set_terminator(val_len)
self.state = VAL_DATA_READ
elif self.state == VAL_DATA_READ:
val_data = "".join(self.ibuffer)
self.ibuffer = []
self.box[self.current_key] = val_data
self.state = KEY_LEN_READ # start over again
self.set_terminator(2)
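    # Illustrative wire-format sketch (not part of the original module): an AMP
    # box is a sequence of length-prefixed key/value pairs, so the pair
    # ('key', 'value') travels as
    #     struct.pack('!H', 3) + 'key' + struct.pack('!H', 5) + 'value'
    # and a two-byte zero key length ('\x00\x00') terminates the box, which is
    # exactly the condition the KEY_LEN_READ branch above checks for.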
def processFullMessage(self, box):
if ampy.COMMAND in box:
cmdName = box[ampy.COMMAND]
command, handler = self.responders.get(cmdName, (None, None))
askKey = box.get(ampy.ASK, None)
if not handler:
if askKey:
resp = [ampy.ERROR, askKey,
ampy.ERROR_CODE, ampy.UNHANDLED_ERROR_CODE,
ampy.ERROR_DESCRIPTION, "No handler for command"]
self.push(''.join(ampy.insertPrefixes((resp))))
else:
kw = command.deserializeRequest(box)
if askKey:
defer.maybeDeferred(handler, **kw).addCallbacks(self._cb_gotResponse, self._eb_gotResponseError,
callbackArgs=(command, askKey), errbackArgs=(command, askKey))
else:
handler(**kw)
elif ampy.ANSWER in box:
answerKey = box[ampy.ANSWER]
command, deferred = self.awaitingResponse.pop(answerKey, (None,None) )
if command:
kw = command.deserializeResponse(box)
deferred.callback(kw)
else:
sys.stderr.write("Got answer key %s, but we weren't waiting for it! weird!\n" % (answerKey,))
elif ampy.ERROR in box:
errorKey = box[ampy.ERROR]
command, deferred = self.awaitingResponse.pop(errorKey, (None,None) )
if command:
e = ampy.AMPError(box[ampy.ERROR_CODE], box[ampy.ERROR_DESCRIPTION])
deferred.errback(e)
else:
sys.stderr.write("Got error key %s, but we weren't waiting for it! weird!\n" % (errorKey,))
else:
sys.stderr.write("Insane AMP packet!\n")
def _cb_gotResponse(self, kw, command, askKey):
dataList = [ampy.ANSWER, askKey]
command.serializeResponse(dataList, kw)
self.push(''.join(ampy.insertPrefixes(dataList)))
def _eb_gotResponseError(self, f, command, askKey):
key = f.check(*command.errors.keys())
if key:
code = command.errors[key]
descr = "" # TODO what should go here?
else:
sys.stderr.write("Unhandled exception raised in AMP Command handler:\n")
f.printTraceback()
code = ampy.UNKNOWN_ERROR_CODE
descr = "Unknown Error"
resp = [ampy.ERROR, askKey,
ampy.ERROR_CODE, code,
ampy.ERROR_DESCRIPTION, descr]
self.push(''.join(ampy.insertPrefixes(resp)))
def callRemote(self, command, **kw):
"""
        Asynchronously call a remote AMP command.
"""
# compose packet
dataList = [ampy.COMMAND, command.commandName]
retVal = None
# remember that we sent this command if a response is expected
if command.requiresAnswer:
# XXX TODO a race condition exists between the two lines below
askKey = str(self.counter)
self.counter += 1
retVal = defer.Deferred()
self.awaitingResponse[askKey] = (command, retVal)
dataList.extend( [ampy.ASK, askKey] )
#for kv in COMMAND, command.commandName, ASK, askKey:
# dataList.append(struct.pack('!H', len(kv)))
# dataList.append(kv)
command.serializeRequest(dataList, kw)
ampy.insertPrefixes(dataList)
data = ''.join(dataList)
# write packet
self.push(data)
return retVal
callRemote = synchronized(callRemote)
# synchronize access to these methods since calls to this singleton object
# can come from the main asyncore thread and also from the FXCM API thread(s)
def handle_read(self):
return asynchat.async_chat.handle_read(self)
handle_read = synchronized(handle_read)
def initiate_send(self):
return asynchat.async_chat.initiate_send(self)
initiate_send = synchronized(initiate_send)
|
vanceeasleaf/aces
|
aces/runners/phonopy.py
|
Python
|
gpl-2.0
| 19,132
| 0
|
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-16 20:09:09
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-27 16:02:34
from aces.tools import mkdir, mv, cd, cp, mkcd, shell_exec,\
exists, write, passthru, toString, pwd, debug, ls, parseyaml
import aces.config as config
from aces.binary import pr
from aces.runners import Runner
from aces.graph import plot, series, pl, fig
from aces.script.vasprun import exe as lammpsvasprun
import aces.script.vasprun as vasprun
import time
import numpy as np
from aces.io.phonopy.bandplot import plotband, plotbanddos
from aces.io.phonopy.meshyaml import meshyaml
from aces.io.phonopy.fc import readfc2
from aces.pbs.jobManager import jobManager, th, pbs
from aces.io.vasp import writePOTCAR, writevasp, parseVasprun
from ase import io
from lxml import etree
from scanf import sscanf
class runner(Runner):
def minimizePOSCAR(self):
m = self.m
if m.engine == "lammps":
m.dump2POSCAR(m.home + '/minimize/range', rotate=True)
elif m.engine == "vasp":
cp(m.home + '/minimize/CONTCAR', 'POSCAR')
def optimize(self):
mkcd('optimize')
cp('../minimize/POSCAR', '.')
atoms = io.read('POSCAR')
for i in range(100):
dir = "%i" % i
mkcd(dir)
writevasp(atoms)
forces, stress, energy = self.energyForce()
pos = atoms.get_scaled_positions()
pos += forces * 0.01
def energyForce(self):
self.getVaspRun_vasp()
forces = parseVasprun('forces')
stress = parseVasprun('stress')
c = shell_exec("grep TOTEN OUTCAR|tail -1")
energy = sscanf(c, "free energy TOTEN = %f eV")[0]
return forces, stress, energy
def cs(self):
from aces.cs import runner
runner(NAH=2).run()
self.check('csfc2')
def check1(self, filename='FORCE_CONSTANTS'):
ref = io.read('SPOSCAR')
fc2 = readfc2(filename)
np.set_printoptions(precision=2, suppress=True)
files = ['dir_POSCAR-001']
vasprunxml = "dir_SPOSCAR/vasprun.xml"
if exists(vasprunxml):
vasprun = etree.iterparse(vasprunxml, tag='varray')
forces0 = parseVasprun(vasprun, 'forces')
print(forces0.max())
else:
forces0 = 0.0
for file in files:
print(file)
POSCAR = 'dirs/%s/POSCAR' % file
vasprunxml = "dirs/%s/vasprun.xml" % file
atoms = io.read(POSCAR)
u = atoms.positions - ref.positions
f = -np.einsum('ijkl,jl', fc2, u)
vasprun = etree.iterparse(vasprunxml, tag='varray')
forces = parseVasprun(vasprun, 'forces') - forces0
print(np.abs(f).max(), "\n")
print(np.abs(forces - f).max())
print(np.allclose(f, forces, atol=1e-2))
def check(self, filename='FORCE_CONSTANTS'):
ref = io.read('SPOSCAR')
files = shell_exec("ls dirs").split('\n')
fc2 = readfc2(filename)
np.set_printoptions(precision=2, suppress=True)
vasprunxml = "dir_SPOSCAR/vasprun.xml"
if exists(vasprunxml):
vasprun = etree.iterparse(vasprunxml, tag='varray')
forces0 = parseVasprun(vasprun, 'forces')
print(forces0.max())
else:
forces0 = 0.0
for file in files:
print(file)
POSCAR = 'dirs/%s/POSCAR' % file
vasprunxml = "dirs/%s/vasprun.xml" % file
atoms = io.read(POSCAR)
u = atoms.positions - ref.positions
f = -np.einsum('ijkl,jl', fc2, u)
vasprun = etree.iterparse(vasprunxml, tag='varray')
forces = parseVasprun(vasprun, 'forces') - forces0
print(np.abs(f).max(), "\n")
print(np.abs(forces - f).max())
print(np.allclose(f, forces, atol=1e-2))
def stub(self):
files = shell_exec("ls dirs").split('\n')
files = map(lambda x: x.replace('dir_', ''), files)
fc2 = readfc2('fc2')
for file in files:
ref = io.read('SPOSCAR')
a = 'dirs/dir_' + str(file)
atoms = io.read(a + "/POSCAR")
u = atoms.positions - ref.positions
f = -np.einsum('ijkl,jl', fc2, u)
forces = ""
for force in f:
forces += "<v> %f %f %f </v>\n" % tuple(force)
vasprun = '<root><calculation><varray name="forces" >\n'
vasprun += forces
vasprun += '</varray></calculation></root>\n'
write(vasprun, a + "/vasprun.xml")
def force_constant(self, files):
cmd = config.phonopy + "-f "
if exists("dir_SPOSCAR/vasprun.xml"):
cmd = config.phonopy + "--fz dir_SPOSCAR/vasprun.xml "
for file in files:
dir = "dirs/dir_" + file
cmd += dir + '/vasprun.xml '
# generate FORCE_SETS
passthru(cmd)
m = self.m
# Create FORCE_CONSTANTS
passthru(config.phonopy + "--tolerance=1e-4 --writefc --dim='%s'" %
(m.dim))
def fc2(self):
files = shell_exec("ls dirs").split('\n')
files = map(lambda x: x.replace('dir_', ''), files)
        # when the number of files is > 1000, the lexicographic order is wrong:
        # POSCAR-001, POSCAR-1500, POSCAR-159
files.sort(lambda x, y: int(x.split('-')[1]) - int(y.split('-')[1]))
self.force_constant(files)
def generate_meshconf(self):
# generate mesh.conf
m = self.m
mesh = """DIM = %s
ATOM_NAME = %s
MP = %s
EIGENVECTORS=.TRUE.
FORCE_CONSTANTS = READ
MESH_SYMMETRY = .FALSE.
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements), ' '.join(map(str, m.kpoints)),
toString(m.premitive.flatten()))
mesh = mesh.replace(r'^\s+', '')
write(mesh, 'mesh.conf')
def generate_vconf(self):
# generate v.conf
m = self.m
mesh = """DIM = %s
ATOM_NAME = %s
MP = %s
FORCE_CONSTANTS = READ
MESH_SYMMETRY = .FALSE.
GROUP_VELOCITY=.TRUE.
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements), ' '.join(map(str, m.kpoi
|
nts)),
toString(m.premitive.flatten()))
mesh = mesh.replace(r'^\s+', '')
write(mesh, 'v.conf')
def generate_qconf(self, q):
        # generate q.conf
m = self.m
mesh = """DIM = %s
ATOM_NAME = %s
FORCE_CONSTANTS = READ
EIGENVECTORS=.TRUE.
QPOINTS=.TRUE.
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements), toString(m.premitive.flatten()))
mesh = mesh.replace(r'^\s+', '')
write(mesh, 'q.conf')
s = "%s\n" % len(q)
for qq in q:
s += "%s\n" % toString(qq)
write(s, 'QPOINTS')
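        # Illustrative output sketch (not part of the original code): for
        # q = [[0, 0, 0], [0.5, 0, 0]] the QPOINTS file written above would read
        #     2
        #     0 0 0
        #     0.5 0 0
        # assuming toString() joins the components of a q-point with spaces.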
def generate_vqconf(self, q):
# generate q.conf
m = self.m
mesh = """DIM = %s
ATOM_NAME = %s
FORCE_CONSTANTS = READ
GROUP_VELOCITY=.TRUE.
QPOINTS=.TRUE.
PRIMITIVE_AXIS = %s
""" % (m.dim, ' '.join(m.elements), toString(m.premitive.flatten()))
mesh = mesh.replace(r'^\s+', '')
write(mesh, 'q.conf')
s = "%s\n" % len(q)
for qq in q:
s += "%s\n" % toString(qq)
write(s, 'QPOINTS')
def generate_supercells(self):
m = self.m
# generate supercells
passthru(config.phonopy + "--tolerance=1e-4 -d --dim='%s'" % (m.dim))
def writeINCAR(self):
m = self.m
npar = 1
for i in range(1, int(np.sqrt(m.cores)) + 1):
if m.cores % i == 0:
npar = i
if m.ispin:
ispin = "ISPIN=2"
else:
ispin = ""
if m.soc:
soc = "LSORBIT=T"
else:
soc = ""
if m.isym:
sym = "ISYM = 1"
else:
sym = "ISYM = 0"
s = """SYSTEM=calculate energy
PREC = High
IBRION = -1
ENCUT = %f
EDIFF = 1.0e-8
|
eigoshimizu/Genomon
|
scripts/genomon_pipeline/dna_resource/markduplicates.py
|
Python
|
gpl-2.0
| 742
| 0.004043
|
#! /usr/bin/env python
from genomon_pipeline.stage_task import *
class Markduplicates(Stage_task):
task_name = "markduplicates"
script_template = """
#!/bin/bash
#
# Set SGE
#
#$ -S /bin/bash # set shell in UGE
#$ -cwd # execute at the submitted dir
pwd # print current working directory
hostname # print hostname
date # print date
set -xv
set -o pipefail
{biobambam}/bammarkduplicates M={out_prefix}.metrics tmpfile={out_prefix}.tmp markthreads=2 rewritebam=1 rewritebamlevel=1 index=1 md5=1 {input_bam_files} O={out_bam}
"""
def __init__(self, qsub_option, script_dir):
super(Markduplicates, self).__init__(qsub_option, script_dir)
|
avinash-n/work-1
|
python_program/day_3/tic_tac_toe.py
|
Python
|
gpl-3.0
| 2,433
| 0.085902
|
import os
print "Tic Tac Toe".center(50,'-')
l = [['_','_','_'],['_','_','_'],['_','_','_']]
def show(l) :
os.system("clear")
for i in range(0,3) :
for j in range(0,3) :
print l[i][j]," ",
print "\n"
def base() :
print "provided numbers for provided positions"
num = 0
for i in range (0,3) :
for j in range(0,3) :
print num," ",
num += 1
print "\n"
count = 1
while count<=9 :
count += 1
show(l)
base()
print "Its player 1's turn"
pos = input("Enter required position you want to place")
i = pos/3
j = pos%3
while l[i][j] != '_' :
print "
|
Its already filled"
pos = input("Enter required position you want to place")
i = pos/3
j = pos%3
l[i][j] = 1
check = 0
for t in range(0,3) :
if l[t][j] == 1 :
check+=1
if check == 3 :
show(l)
print "player 1 won"
exit()
check = 0
for t in range(0,3) :
if l[i][t] == 1 :
check+=1
if check == 3 :
show(l)
print "player 1 won "
exit()
if i-j == 1 or j-i == 1 :
pass
else :
if (i == j) or (i == 1 and j == 1 ) :
check = 0
for t in range(0,3) :
if l[t][t] == 1 :
check+=1
if check == 3 :
show(l)
print "player 1 won"
exit()
if ( i == 1 and j == 1 ) or (i == 0 and j==2) or (j == 0 and i == 2) :
check = 0
for t in range(0,3) :
if l[t][2-t] == 1 :
check += 1
if check == 3 :
show(l)
print "player 1 won"
exit()
if count >= 9 :
break
count += 1
show(l)
print "Its player 2's turn"
base()
pos = input("Enter the required position you want to place")
i = pos/3
j = pos%3
while l[i][j] != '_' :
print "Its already filled"
pos = input("Enter required position you want to place")
i = pos/3
j = pos%3
l[i][j] = 0
check = 0
for t in range(0,3) :
if l[t][j] == 0 :
check += 1
if check == 3 :
show(l)
print "Player 2 won"
exit()
check = 0
for t in range(0,3) :
if l[i][t] == 0 :
check += 1
if check == 3 :
show(l)
print "player 2 won "
exit()
if i-j == 1 or j-i == 1 :
pass
else :
if (i == j) or (i == 1 and j == 1 ) :
check = 0
for t in range(0,3) :
if l[t][t] == 0 :
check += 1
if check == 3 :
show(l)
print "player 2 won"
exit()
if ( i == 1 and j == 1 ) or ( (i == 0 and j==2) or (j == 0 and i==2 )) :
check = 0
for t in range(0,3) :
if l[t][2-t] == 0 :
check += 1
if check == 3 :
show(l)
print "player 2 won"
exit()
show(l)
print "Match draw"
|
velp/nocexec
|
setup.py
|
Python
|
mit
| 1,793
| 0.001115
|
# -*- coding: utf-8 -*-
"""
NOCExec
-----------------------
Library for automation of management and configuration of network devices
"""
import sys
import os
from setuptools import setup
if sys.version_info < (2, 7):
raise Exception("NOCExec requires Python 2.7 or higher.")
# Hard linking doesn't work inside VirtualBox shared folders. This means that
# you can't use tox in a directory that is being shared with Vagrant,
# since tox relies on `python setup.py sdist` which uses hard links. As a
# workaround, disable hard-linking if setup.py is a descendant of /vagrant.
# See
# https://stackoverflow.com/questions/7719380/python-setup-py-sdist-error-operation-not-permitted
# for more details.
if os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':
del os.link
setup(
name="NOCExec",
version="0.2a",
packages=["nocexec"],
author="Vadim Ponomarev",
author_email="velizarx@gmail.com",
url='https://github.com/velp/nocexec',
license="MIT",
description='Library for automation of management and configuration of network devices.',
long_description=__doc__,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Environment :: Plugins',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Software Development :: Libraries :: Python Modules'
],
install_requires=["pexpect", "ncclient>=0.5"],
tests_require=["mock>=1.0"],
    extras_require={'docs': ["Sphinx>=1.2.3", "alabaster>=0.6.3"]}
)
|
unaguil/hyperion-ns2
|
experiments/measures/graphsearch/StartedCompositions.py
|
Python
|
apache-2.0
| 448
| 0.029018
|
from measures.generic.GenericMeasure import GenericMeasure
import measures.generic.Units as Units
class StartedCompositions(GenericMeasure):
"""Total number of started searches"""
def __init__(self, period, simulationTime):
        GenericMeasure.__init__(self, r'DEBUG .*? - Peer [0-9]+ started composition search \(.*?\).*?([0-9]+\,[0-9]+).*?', period, simulationTime, Units.COMPOSITIONS)
def parseLine(self, line):
self.parseInc(line)
|
catholabs/esengine
|
esengine/mapping.py
|
Python
|
mit
| 4,930
| 0
|
import collections
class Mapping(object):
"""
    Used to generate mappings based on document field definitions
>>> class Obj(Document):
... name = StringField()
And you can use a Mapping to refresh mappings
(use in cron jobs or call periodically)
obj_mapping = Mapping(Obj)
obj_mapping.save()
    Additionally, this class handles index settings configuration. However, this
    operation must be done at elasticsearch index creation.
"""
def __init__(self, document_class=None, enable_all=True):
self.document_class = document_class
self.enable_all = enable_all
def _generate(self, doc_class):
"""
        Generate the mapping according to doc_class.
Args:
doc_class: esengine.Document object containing the model to be
mapped to elasticsearch.
"""
m = {
doc_class._doctype: {
"_all": {"enabled": self.enable_all},
"properties": {
field_name: field_instance.mapping
for field_name, field_instance in doc_class._fields.items()
if field_name != "id"
}
}
}
return m
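    # Illustrative result sketch (not part of the original module): for the Obj
    # document from the class docstring, _generate would return roughly
    #     {'obj': {'_all': {'enabled': True},
    #              'properties': {'name': <StringField().mapping>}}}
    # where the doctype key and the per-field mapping are assumptions taken from
    # Obj._doctype and each field's .mapping property.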
def generate(self):
return self._generate(self.document_class)
def save(self, es=None):
"""
Save the mapping to index.
Args:
            es: elasticsearch client instance.
"""
es = self.document_class.get_es(es)
if not es.indices.exists(index=self.document_class._index):
return es.indices.create(
index=self.document_class._index,
body={"mappings": self.generate()}
)
else:
return es.indices.put_mappi
|
ng(
doc_type=self.document_class._doctype,
index=self.document_class._index,
body=self.generate()
)
def build_configuration(self, models_to_mapping, custom_settings, es=None):
"""
        Build the request body that adds custom settings, like filters and
        analyzers, to the index.
        Args:
models_to_mapping: A list with the esengine.Document objects that
we want generate mapping.
custom_settings: a dict containing the configuration that will be
sent to elasticsearch/_settings (www.elastic.co/guide/en/
elasticsearch/reference/current/indices-update-settings.html)
            es: elasticsearch client instance.
""" # noqa
indexes = set()
configuration = {}
mapped_models = [x for x in models_to_mapping]
for model in mapped_models:
indexes.add(model._index)
es = model.get_es(es)
for index in indexes:
if es.indices.exists(index=index):
msg = 'Settings are supported only on index creation'
raise ValueError(msg)
mappings_by_index = collections.defaultdict(dict)
for model in mapped_models:
mapping = self._generate(model)
mappings_by_index[model._index].update(mapping)
for index, mappings in mappings_by_index.items():
settings = {
"settings": custom_settings,
"mappings": mappings
}
configuration[index] = settings
return configuration
def configure(self, models_to_mapping, custom_settings=None, es=None):
"""
        Add custom settings, like filters and analyzers, to the index. Be aware
        that elasticsearch only allows this operation on index creation.
Args:
models_to_mapping: A list with the esengine.Document objects that
we want generate mapping.
custom_settings: a dict containing the configuration that will be
sent to elasticsearch/_settings (www.elastic.co/guide/en/
elasticsearch/reference/current/indices-update-settings.html)
            es: elasticsearch client instance.
"""
if not isinstance(models_to_mapping, collections.Iterable):
raise AttributeError('models_to_mapping must be iterable')
if custom_settings:
for model in models_to_mapping:
es = model.get_es(es)
if es:
break
configurations = self.build_configuration(
models_to_mapping,
custom_settings,
es
)
for index, settings in configurations.items():
es.indices.create(index=index, body=settings)
else:
mapped_models = [x for x in models_to_mapping]
for model in mapped_models:
model.put_mapping()
|
VerticalMediaProjects/cmsplugin-photologue-pro
|
cmsplugin_photologue_pro/sitemaps.py
|
Python
|
bsd-3-clause
| 464
| 0
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.contrib.sitemaps import Sitemap
from photologue.models import Gallery
class AlbumSitemap(Sitemap):
changefreq = "monthly"
priority = 0.5
def items(self):
return Gallery.objects.filter(is_public=True)
def lastmod(self, obj):
return obj.date_added
def location(self, obj):
return reverse('photologue_album', kwargs={'album': obj.pk})
|
makielab/django-oscar
|
sites/sandbox/apps/offers.py
|
Python
|
bsd-3-clause
| 584
| 0
|
from oscar.apps.offer import models
class ChangesOwnerName(models.Benefit):
class Meta:
proxy = True
def apply(self, basket, condition, offer=None):
condition.consume_items(basket, ())
return models.PostOrderAction(
"You will have your name changed to Barry!")
def apply_deferred(self, basket):
if basket.owner:
basket.owner.first_name = "Barry"
basket.owner.save()
return "Your name has bee
|
n changed to Barry!"
@property
def description(self):
return "Changes owners name"
|
concentricsky/badgecheck
|
openbadges/verifier/actions/utils.py
|
Python
|
apache-2.0
| 67
| 0
|
import uuid
def generate_task_key():
    return uuid.uuid4().hex
|
srivatsan-ramesh/HDMI-Source-Sink-Modules
|
hdmi/cores/primitives/dram16xn.py
|
Python
|
mit
| 2,112
| 0.00142
|
from myhdl import block
from hdmi.cores.primitives import ram16x1d
inst_count = 0
@block
def dram16xn(data_in, address, address_dp, write_enable, clock,
o_data_out, o_data_out_dp, data_width=30):
"""
Implements a Distributed SelectRAM with Dual port 16 x N-bit.
It will be replaced by a verilog code which makes use of the xilinx primitive
RAM16X1D during conversion.
Args:
data_in: The input data
address: Single port address
        address_dp: Dual port address
write_enable: write enable for the RAM16X1D
clock: write clock
o_data_out: single port output
o_data_out_dp: dual port output
data_width: number of words stored in the DRAM
Returns:
myhdl.instances() : A list of myhdl instances.
"""
global inst_count
ram16x1d_inst = ram16x1d(data_in, write_enable, clock, address, address_dp, o_data_out, o_data_out_dp, data_width)
    inst_count += 1
return ram16x1d_inst
dram16xn.verilog_code = """
genvar i_$inst_count;
generate
for(i_$inst_count = 0 ; i_$inst_count < $data_width ; i_$inst_count = i_$inst_count + 1) begin : dram16s_$inst_count
RAM16X1D i_RAM16X1D_U_$inst_count(
.D($data_in[i_$inst_count]), //insert input signal
.WE($write_enable), //insert Write Enable signal
.WCLK($clock), //insert Write Clock signal
.A0($address[0]), //insert Address 0 signal port SPO
.A1($address[1]), //insert Address 1 signal port SPO
.A2($address[2]), //insert Address 2 signal port SPO
.A3($address[3]), //insert Address 3 signal port SPO
.DPRA0($address_dp[0]), //insert Address 0 signal dual port DPO
.DPRA1($address_dp[1]), //insert Address 1 signal dual port DPO
.DPRA2($address_dp[2]), //insert Address 2 signal dual port DPO
.DPRA3($address_dp[3]), //insert Address 3 signal dual port DPO
.SPO($o_data_out[i_$inst_count]), //insert output signal SPO
.DPO($o_data_out_dp[i_$inst_count]) //insert output signal DPO
);
end
endgenerate
"""
|
dorey/pyxform
|
pyxform/instance.py
|
Python
|
bsd-2-clause
| 3,121
| 0.00769
|
from xform_instance_parser import parse_xform_instance
class SurveyInstance(object):
def __init__(self, survey_object, **kwargs):
self._survey = survey_object
self.kwargs = kwargs #not sure what might be passed to this
#does the survey object provide a way to get the key dicts?
self._keys = [c.name for c in self._survey.children]
self._name = self._survey.name
self._id = self._survey.id_string
# get xpaths
# - prep for xpaths.
self._survey.xml()
self._xpaths = self._survey._xpath.values()
#see "answers(self):" below for explanation of this dict
self._answers = {}
self._orphan_answers = {}
def keys(self):
return self._keys
def xpaths(self):
#originally thought of having this method get the xpath stuff
#but survey doesn't like when xml() is called multiple times.
return self._xpaths
def answer(self, name=None, value=None):
if name is None:
raise Exception("In answering, name must be given")
#ahh. this is horrible, but we need the xpath dict in survey to be up-to-date
#...maybe
# self._survey.xml()
if name in self._survey._xpath.keys():
self._answers[name] = value
else:
self._orphan_answers[name] = value
def to_json_dict(self):
children = []
for k, v in self._answers.items():
children.append({'node_name':k, 'value':v})
return {
            'node_name': self._name,
'id': self._id,
            'children': children
}
def to_xml(self):
"""
A horrible way to do this, but it works (until we need the attributes pumped out in order, etc)
"""
open_str = """<?xml version='1.0' ?><%s id="%s">""" % (self._name, self._id)
close_str = """</%s>""" % self._name
vals = ""
for k, v in self._answers.items():
vals += "<%s>%s</%s>" % (k, str(v), k)
output = open_str + vals + close_str
return output
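    # Illustrative output sketch (not part of the original module): with
    # self._name == 'household', self._id == 'household_v1' and a single answer
    # {'age': 30}, to_xml() returns
    #     <?xml version='1.0' ?><household id="household_v1"><age>30</age></household>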
def answers(self):
"""
This returns "_answers", which is a dict with the key-value
responses for this given instance. This could be pumped to xml
or returned as a dict for maximum convenience (i.e. testing.)
"""
return self._answers
def import_from_xml(self, xml_string_or_filename):
import os.path
if os.path.isfile(xml_string_or_filename):
xml_str = open(xml_string_or_filename).read()
else:
xml_str = xml_string_or_filename
key_val_dict = parse_xform_instance(xml_str)
for k, v in key_val_dict.items():
self.answer(name=k, value=v)
def __unicode__(self):
orphan_count = len(self._orphan_answers.keys())
placed_count = len(self._answers.keys())
answer_count = orphan_count + placed_count
return "<Instance (%d answers: %d placed. %d orphans)>" % (answer_count, placed_count, orphan_count)
|
Julian/giraffe
|
giraffe/tests/test_core.py
|
Python
|
mit
| 13,956
| 0.00172
|
import unittest
import giraffe as c
import giraffe.exceptions as exc
def make_graph_tst(cls):
class Test(unittest.TestCase):
def test_eq_ne(self):
g = cls()
g2 = cls()
self.assertEqual(g, g2)
self.assertEqual(g2, g)
g.add_vertex(1)
self.assertNotEqual(g, g2)
g2.add_vertex(1)
self.assertEqual(g, g2)
self.assertEqual(g2, g)
g.add_edge(1, 2)
self.assertNotEqual(g, g2)
            g2.add_edge(1, 2)
self.assertEqual(g, g2)
self.assertEqual(g2, g)
        def test_le_lt_gt_ge(self):
g = cls()
g2 = cls()
self.assertLessEqual(g, g2)
self.assertLessEqual(g2, g)
self.assertGreaterEqual(g, g2)
self.assertGreaterEqual(g2, g)
g.add_vertices({1, 2, 3})
g.add_edges({(1, 2), (2, 3)})
self.assertLess(g2, g)
self.assertGreater(g, g2)
g2.add_vertices({1, 2, 3})
g2.add_edges({(2, 3)})
self.assertLess(g2, g)
self.assertGreater(g, g2)
g2.add_vertex(4)
self.assertFalse(g <= g2)
self.assertFalse(g >= g2)
def test_getitem(self):
o = object()
g = cls(edges=[(0, o)])
self.assertIn(o, g[0])
def test_iter(self):
v = {object() for _ in range(5)}
g = cls(v)
self.assertEqual(set(iter(g)), v)
def test_str(self):
g = cls()
g.name = "Test"
self.assertEqual(g.name, "Test")
self.assertEqual(str(g), "Test")
def test_from_graph(self):
g = cls(edges={(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)})
self.assertEqual(g, cls.from_graph(g))
def test_copy(self):
g = cls(edges={(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)})
copy = g.copy()
self.assertEqual(g, copy)
self.assertIsNot(g, copy)
def test_no_such_vertex(self):
g = cls()
self.assertRaises(exc.NoSuchVertex, g.__getitem__, 0)
def test_add_vertex(self):
g = cls()
self.assertFalse(g)
self.assertEqual(len(g), 0)
self.assertEqual(g.order, 0)
self.assertFalse(g.has_vertex(1))
self.assertNotIn(1, g)
g.add_vertex(1)
self.assertTrue(g)
self.assertEqual(g.order, 1)
self.assertEqual(len(g), 1)
self.assertTrue(g.has_vertex(1))
self.assertIn(1, g)
def test_add_vertices(self):
g = cls()
g.add_vertices(range(5))
for i in range(5):
self.assertIn(i, g)
def add_vertex_existing(self):
o, p = object(), object()
g = cls((o, p))
g.add_vertices([o, p])
self.assertIn(o, g)
self.assertIn(p, g)
def test_union(self):
v, e = set(range(1, 4)), {(1, 3), (2, 3)}
v2, e2 = set(range(2, 5)), {(2, 3), (2, 4), (1, 4)}
g = cls(v, e)
g2 = cls(v2, e2)
u = g | g2
u2 = g.union(g2)
u3 = g.union(g2, g2)
self.assertTrue(u == u2 == u3)
self.assertEqual(u.vertices, v | v2)
self.assertEqual(u.edges, e | e2)
def test_intersection(self):
v, e = set(range(1, 4)), {(1, 3), (2, 3)}
v2, e2 = set(range(2, 5)), {(2, 3), (2, 4), (3, 4)}
g = cls(v, e)
g2 = cls(v2, e2)
u = g & g2
u2 = g.intersection(g2)
u3 = g.intersection(g2, g2)
self.assertTrue(u == u2 == u3)
self.assertEqual(u.vertices, v & v2)
self.assertEqual(u.edges, e & e2)
def test_difference(self):
v, e = set(range(1, 5)), {(1, 3), (1, 4), (2, 3), (2, 4), (3, 4)}
g = cls(v, e)
u = g - {2, 4}
u2 = g.difference({2, 4})
u3 = g.difference({2}, {4})
self.assertTrue(u == u2 == u3)
self.assertEqual(u.vertices, {1, 3})
self.assertEqual(u.edges, {(1, 3)})
def test_get_induced_subgraph(self):
v, e = set(range(1, 4)), {(1, 3), (1, 4), (2, 4), (2, 3)}
g = cls(v, e)
i = g.get_subgraph_on((1, 2, 4))
self.assertEqual(i.vertices, {1, 2, 4})
self.assertEqual(i.edges, {(1, 4), (2, 4)})
def test_remove_vertex(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = cls(range(6), e)
g.remove_vertex(5)
self.assertEqual(g.order, 5)
self.assertEqual(g.edges, e)
g.remove_vertex(1)
self.assertEqual(g.order, 4)
self.assertEqual(g.edges, {(2, 3), (3, 4)})
def test_remove_vertices(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = cls(range(6), e)
g.remove_vertices((2, 3))
self.assertEqual(g.edges, {(0, 1), (1, 4)})
def test_remove_vertex_nonexistent(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = cls(range(6), e)
self.assertRaises(exc.NoSuchVertex, g.remove_vertex, 10)
self.assertRaises(exc.NoSuchVertex, g.remove_vertices, (8, 10))
def test_remove_vertex_atomic(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = cls(range(6), e)
with self.assertRaises(exc.NoSuchVertex):
g.remove_vertices((2, 10))
self.assertIn(2, g.vertices)
self.assertEqual(g.edges, e)
def test_remove_edge(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = cls(range(6), e)
g.remove_edge(2, 3)
self.assertEqual(g.edges, e - {(2, 3)})
self.assertEqual(g.size, 4)
def test_remove_edges(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = cls(range(6), e)
g.remove_edges([(2, 3), (3, 4)])
self.assertEqual(g.edges, {(0, 1), (1, 2), (1, 4)})
def test_remove_edge_nonexistent(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = cls(range(6), e)
self.assertRaises(exc.NoSuchEdge, g.remove_edge, 1, 5)
self.assertRaises(exc.NoSuchEdge, g.remove_edges, [(0, 1), (1, 3)])
def test_remove_edge_atomic(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = cls(range(6), e)
with self.assertRaises(exc.NoSuchEdge):
g.remove_edges([(0, 1), (2, 10)])
self.assertEqual(g.edges, e)
return Test
class TestGraph(make_graph_tst(c.Graph)):
@unittest.expectedFailure
def test_is_simple(self):
g = c.Graph()
self.assertFalse(g.is_directed)
self.assertFalse(g.is_multigraph)
def test_from_adjacency_map(self):
g = c.Graph.from_adjacency_map({1 : [2, 3], 2 : [1, 4], 4 : [5]})
self.assertEqual(g.vertices, set(range(1, 6)))
e = g.edges
f = e - {(1, 2), (2, 1)}
self.assertTrue((1, 2) in e or (2, 1) in e)
self.assertEqual(f, {(1, 3), (2, 4), (4, 5)})
def test_add_edge(self):
o, p = object(), object()
g = c.Graph((o, p))
self.assertEqual(g.size, 0)
self.assertFalse(g.has_edge(o, p))
self.assertFalse(g.has_edge(p, o))
g.add_edge(o, p)
self.assertTrue(g.has_edge(o, p))
self.assertTrue(g.has_edge(p, o))
self.assertEqual(g.size, 1)
e = g.edges
self.assertTrue((o, p) in e or (p, o) in e, "Undirected should have "
"edge in both directions but only appear once in .edges")
self.assertIn(p, g[o])
def test_add_edges(self):
e = {(0, 1), (1, 2), (2, 3), (3, 4), (1, 4)}
g = c.Graph(range(6), e)
for u, v in e:
            self.assertTrue(g.has_edge(u, v))
|
SeerLabs/PDFMEF
|
src/extractor/run_extraction.py
|
Python
|
apache-2.0
| 2,199
| 0.003183
|
from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import extractor.csxextract.extractors.grobid as grobid
import extractor.csxextract.extractors.pdfbox as pdfbox
import extractor.csxextract.extractors.tei as tei
import extractor.csxextract.extractors.parscit as parscit
import extractor.csxextract.extractors.figures as figures
import extractor.csxextract.extractors.algorithms as algorithms
import extractor.csxextract.filters as filters
def get_extraction_runner():
runner = ExtractionRunner()
runner.enable_logging('~/logs/results', '~/logs/runnables')
runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
runner.add_runnable(filters.AcademicPaperFilter)
runner.add_runnable(grobid.GrobidHeaderTEIExtractor)
runner.add_runnable(tei.TEItoHeaderExtractor)
runner.add_runnable(parscit.ParsCitCitationExtractor)
runner.add_runnable(figures.PDFFiguresExtractor)
runner.add_runnable(algorithms.AlgorithmsExtractor)
return runner
if __name__ == '__main__':
runner = get_extraction_runner()
path = '/data/huy138/citeseerx-crawl-labeled-sample-b/pdf/'
outputDir = '/data/huy138/extraction_on_sample_b/'
listing = os.listdir(path)
folders = []
files = []
prefixes = []
for file in listing:
"
|
""folders = []
files = []
prefixes = []"""
if file[-4:] == '.pdf':
files.append(path + file)
folders.append(outputDir + file[:-4])
prefixes.append(file[:-4])
#print dir
print file
runner.run_from_file_batch(files, folders, num_processes=8, file_prefixes=prefixes)
print 'done'
"""argc = len(sys.argv)
if argc == 2:
file_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
runner.run_from_file(sys.argv[1], file_prefix=file_name)
elif argc == 3:
file_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
runner.run_from_file(sys.argv[1], output_dir = sys.argv[2], file_prefix=file_name)
else:
print("USAGE: python {0} path_to_pdf [output_directory]".format(sys.argv[0]))"""
|
sserrot/champion_relationships
|
venv/Lib/site-packages/cffi/__init__.py
|
Python
|
mit
| 513
| 0
|
__all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError',
'FFIError']
from .api import FFI
from .error import CDefError, FFIError, VerificationError, VerificationMissing
from .error import PkgConfigError
__version__ = "1.14.2"
__version_info__ = (1, 14, 2)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
# if nothing is clearly incompatible.
__version_verifier_modules__ = "0.8.6"
|
BlackHole/enigma2-obh10
|
lib/python/Screens/SoftwareUpdate.py
|
Python
|
gpl-2.0
| 19,545
| 0.021949
|
from boxbranding import getImageVersion, getImageBuild, getImageDevBuild, getImageType, getImageDistro, getMachineBrand, getMachineName, getMachineBuild
from os import path
from gettext import dgettext
from enigma import eTimer, eDVBDB
import Components.Task
from Components.OnlineUpdateCheck import feedsstatuscheck, kernelMismatch, statusMessage
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.ParentalControlSetup import ProtectedScreen
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.GitCommitInfo import CommitInfo, gitcommitinfo
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Button import Button
from Components.config import config
from Components.Console import Console
from Components.Ipkg import IpkgComponent
from Components.Pixmap import Pixmap
from Components.Label import Label
from Components.ScrollLabel import ScrollLabel
from Components.Sources.StaticText import StaticText
from Components.Slider import Slider
ocram = ''
class SoftwareUpdateChanges(CommitInfo):
def __init__(self, session):
CommitInfo.__init__(self, session)
self["actions"] = ActionMap(["SetupActions", "DirectionActions"],
{
'cancel': self.closeRecursive,
"red": self.closeRecursive,
"up": self["AboutScrollLabel"].pageUp,
"down": self["AboutScrollLabel"].pageDown,
"left": self.left,
"right": self.right
}, -1)
self["key_red"] = Button(_("Close"))
def readGithubCommitLogs(self):
self.setTitle(gitcommitinfo.getScreenTitle())
self["AboutScrollLabel"].setText(gitcommitinfo.readGithubCommitLogsSoftwareUpdate())
class UpdateChoices(ChoiceBox):
def __init__(self, session, title="", list=None, keys=None, selection=0, skin_name=None, text="", reorderConfig="", var=""):
print 'title:', title
ChoiceBox.__init__(self, session, title, list, keys, selection, skin_name, text, reorderConfig, var)
print 'title:', title
if var and var in ('unstable', 'updating', 'stable', 'unknown'):
self.var = var
self['feedStatusMSG'] = Label()
self['tl_off'] = Pixmap()
self['tl_red'] = Pixmap()
self['tl_yellow'] = Pixmap()
self['tl_green'] = Pixmap()
self["menuActions"] = NumberActionMap(["MenuActions"],
{
"menu": self.opensettings
}, prio=-3) # Override ChoiceBox "menu" action
self.onShown.append(self.onshow)
def onshow(self):
if self.var:
from Components.OnlineUpdateCheck import feedsstatuscheck
if self.var in feedsstatuscheck.feed_status_msgs:
status_text = feedsstatuscheck.feed_status_msgs[self.var]
else:
status_text = _('Feeds status: Unexpected')
self['feedStatusMSG'].setText(status_text)
self['tl_off'].hide()
self['tl_red'].hide()
self['tl_yellow'].hide()
self['tl_green'].hide()
if self.var == 'unstable':
self['tl_red'].show()
elif self.var == 'updating':
self['tl_yellow'].show()
elif self.var == 'stable':
self['tl_green'].show()
else:
self['tl_off'].show()
def opensettings(self):
from Screens.Setup import Setup
self.session.open(Setup, "softwareupdate")
def cancelClick(self, dummy=False):
self.close()
class UpdatePlugin(Screen, ProtectedScreen):
def __init__(self, session, parent=None):
Screen.__init__(self, session, parent=parent)
ProtectedScreen.__init__(self)
self.setTitle(_("Software update"))
self["actions"] = ActionMap(["WizardActions"],
{
"ok": self.exit,
"back": self.exit
}, -1)
self['actions'].csel = self
self["actions"].setEnabled(False)
self.sliderPackages = {"dreambox-dvb-modules": 1, "enigma2": 2, "tuxbox-image-info": 3}
self.slider = Slider(0, 4)
self["slider"] = self.slider
self.activityslider = Slider(0, 100)
self["activityslider"] = self.activityslider
self.status = StaticText(_("Please wait..."))
self["status"] = self.status
self.package = StaticText(_("Package list update"))
self["package"] = self.package
self.oktext = _("Press OK on your remote control to continue.")
self['tl_off'] = Pixmap()
self['tl_red'] = Pixmap()
self['tl_yellow'] = Pixmap()
self['tl_green'] = Pixmap()
self['feedStatusMSG'] = Label()
self.channellist_only = 0
self.channellist_name = ''
self.SettingsBackupDone = False
self.ImageBackupDone = False
self.autobackuprunning = False
self.updating = False
self.packages = 0
self.error = 0
self.processed_packages = []
self.total_packages = None
self.onFirstExecBegin.append(self.checkNetworkState)
def checkNetworkState(self):
self['tl_red'].hide()
self['tl_yellow'].hide()
self['tl_green'].hide()
self['tl_off'].hide()
self.trafficLight = feedsstatuscheck.getFeedsBool()
if self.trafficLight in feedsstatuscheck.feed_status_msgs:
status_text = feedsstatuscheck.feed_status_msgs[self.trafficLight]
else:
status_text = _('Feeds status: Unexpected')
if self.trafficLight:
self['feedStatusMSG'].setText(status_text)
if self.trafficLight == 'stable':
self['tl_green'].show()
elif self.trafficLight == 'unstable':
self['tl_red'].show()
elif self.trafficLight == 'updating':
self['tl_yellow'].show()
else:
self['tl_off'].show()
if (getImageType() != 'release' and self.trafficLight != 'unknown') or (getImageType() == 'release' and self.trafficLight not in ('stable', 'unstable')):
self.session.openWithCallback(self.close, MessageBox, feedsstatuscheck.getFeedsErrorMessage(), type=MessageBox.TYPE_INFO, timeout=30, close_on_any_key=True)
return
else:
if getImageType() != 'release' or (config.softwareupdate.updateisunstable.value == '1' and config.softwareupdate.updatebeta.value) or config.softwareupdate.updateisunstable.value == '0':
if kernelMismatch():
self.session.openWithCallback(self.close, MessageBox, _("The Linux kernel has changed, an update is not permitted. \nInstall latest image using USB stick or Image Manager."), type=MessageBox.TYPE_INFO, timeout=30, close_on_any_key=True)
return
message = statusMessage()
if message:
message += "\nDo you want to continue?"
self.session.openWithCallback(self.statusMessageCallback, MessageBox, message, type=MessageBox.TYPE_YESNO, default=False)
else:
self.startCheck()
else:
self.session.openWithCallback(self.close, MessageBox, _("Sorry the feeds seem to be in an unstable state, if you wish to use them please enable 'Allow unstable (experimental) updates' in \"Software update settings\"."), type=MessageBox.TYPE_INFO, timeout=10, close_on_any_key=True)
def statusMessageCallback(self, answer):
if answer:
self.startCheck()
else:
self.close()
def startCheck(self):
self.updating = True
self.activity = 0
self.activityTimer = eTimer()
self.activityTimer.callback.append(self.doActivityTimer)
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.activityTimer.start(100, False)
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
# We've just manually checked for an update, so note this time as the last
# check time, then update the next aut
|
ocheck time too.
#
from time import time
config.softwareupdate.updatelastcheck.setValue(int(time()))
config.softwareupdate.updatelastcheck.save()
from Components.OnlineUpdateCheck import onlineupdatecheckpoller
onlineupdatecheckpoller.start()
def isProtected(self):
return config.ParentalControl.setuppinactive.value and\
(not config.ParentalControl.config_sections.main_menu.value and
not config.ParentalControl.config_sections.configuration.value or hasattr(self.session, 'infobar') and self.session.infobar is None) and\
config.ParentalControl.config_sections.software_update.value
def doActivityTimer(self):
self.activity += 1
if self.activity == 100:
self.activity = 0
self.activityslider.setValue(self.activity)
def showUpdateCompletedMessage(self):
self.setEndMessage(ngettext("Update completed, %d package was installed.", "Update completed, %d packages were installed.", self.packages) % self.packages)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_DOWNLOAD:
self.status.setText(_("Downloading"))
elif event == IpkgComponent.EVENT_UPGRADE
|
samuelcolvin/aiohttp-devtools
|
aiohttp_devtools/runserver/__init__.py
|
Python
|
mit
| 97
| 0
|
# flake8: noqa
from .config import INFER_HOST
from .main import run_app, runserver, serve_static
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/components/component/transceiver/physical_channels/channel/state/__init__.py
|
Python
|
apache-2.0
| 34,214
| 0.001257
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import output_power
from . import input_power
from . import laser_bias_current
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-platform - based on the path /components/component/transceiver/physical-channels/channel/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for channels
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__index",
"__description",
"__tx_laser",
"__target_output_power",
"__output_frequency",
"__output_power",
"__input_power",
"__laser_bias_current",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__index = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["0..max"]},
),
is_leaf=True,
yang_name="index",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/platform/transceiver",
defining_module="openconfig-platform-transceiver",
yang_type="uint16",
is_config=False,
)
self.__description = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="description",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/platform/transceiver",
defining_module="openconfig-platform-transceiver",
yang_type="string",
is_config=False,
)
self.__tx_laser = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="tx-laser",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/platform/transceiver",
defining_module="openconfig-platform-transceiver",
yang_type="boolea
|
n",
is_config=False,
)
self.__target_output_power = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="target-output-power",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/platform/transceiver",
defining_module="openconfig-platform-transceiver",
yang_type="decimal64",
is_config=False,
)
self.__output_frequency = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="output-frequency",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/platform/transceiver",
defining_module="openconfig-platform-transceiver",
yang_type="oc-opt-types:frequency-type",
is_config=False,
)
self.__output_power = YANGDynClass(
base=output_power.output_power,
is_container="container",
yang_name="output-power",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/platform/transceiver",
defining_module="openconfig-platform-transceiver",
yang_type="container",
is_config=False,
)
self.__input_power = YANGDynClass(
base=input_power.input_power,
is_container="container",
yang_name="input-power",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/platform/transceiver",
defining_module="openconfig-platform-transceiver",
yang_type="container",
is_config=False,
)
self.__laser_bias_current = YANGDynClass(
base=laser_bias_current.laser_bias_current,
is_container="container",
yang_name="laser-bias-current",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/platform/transceiver",
defining_module="openconfig-platform-transceiver",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"components",
"component",
"transceiver",
"physical-channels",
"channel",
"state",
]
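    # Illustrative helper (not part of the generated bindings; the name is made up): the list
    # returned by _path() above can be joined into the container's YANG data path, e.g.
    # '/components/component/transceiver/physical-channels/channel/state'.
    def _illustrative_xpath(self):
        return "/" + "/".join(self._path())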
def _get_index(self):
"""
Getter method for index, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/index (uint16)
    YANG Description: Index of the physical channel or lane within a physical
client port
"""
return self.__index
def _set_index(self, v, load=False):
"""
Setter method for index, mapped from YANG variable /components/component/transceiver/physical_channels/channel/state/index (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_index is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_index() directly.
YANG Description: Index of the physica
|
EthanBlackburn/flanker
|
flanker/mime/message/headers/headers.py
|
Python
|
apache-2.0
| 4,810
| 0.000208
|
from webob.multidict import MultiDict
from flanker.mime.message.headers import encodedword
from flanker.mime.message.headers.parsing import normalize, parse_stream
from flanker.mime.message.headers.encoding import to_mime
from flanker.mime.message.errors import EncodingError
class MimeHeaders(object):
"""Dictionary-like object that preserves the order and
supports multiple values for the same key, knows
whether it has been changed after the creation
"""
def __init__(self, items=()):
self._v = MultiDict([(normalize(key), remove_newlines(val))
for (key, val) in items])
self.changed = False
self.num_prepends = 0
def __getitem__(self, key):
v = self._v.get(normalize(key), None)
if v is not None:
return encodedword.decode(v)
return None
def __len__(self):
return len(self._v)
def __iter__(self):
return iter(self._v)
def __contains__(self, key):
return normalize(key) in self._v
def __setitem__(self, key, value):
self._v[normalize(key)] = remove_newlines(value)
self.changed = True
def __delitem__(self, key):
del self._v[normalize(key)]
self.changed = True
def __nonzero__(self):
return len(self._v) > 0
def prepend(self, key, value):
self._v._items.insert(0, (normalize(key), remove_newlines(value)))
self.num_prepends += 1
def add(self, key, value):
"""Adds header without changing the
existing headers with same name"""
self._v.add(normalize(key), remove_newlines(value))
self.changed = True
def keys(self):
"""
        Returns the keys (message header names).
        It remembers the order in which they were added, which
        is really important.
"""
return self._v.keys()
def transform(self, fn, decode=False):
"""Accepts a function, getting a key, val and returning
a new pair of key, val and applies the function to all
header, value pairs in the message.
"""
changed = [False]
def tracking_fn(key, val):
new_key, new_val = fn(key, val)
if new_val != val or new_key != key:
changed[0] = True
return new_key, new_val
v = MultiDict(tracking_fn(key, val) for key, val in self.iteritems(raw=not decode))
if changed[0]:
self._v = v
self.changed = True
def items(self):
"""
Returns header,val pairs in the preserved order.
"""
return list(self.iteritems())
def iteritems(self, raw=False):
"""
        Returns an iterator over header, val pairs in the preserved order.
"""
if raw:
return self._v.iteritems()
return iter([(x[0], encodedword.decode(x[1]))
for x in self._v.iteritems()])
def get(self, key, default=None):
"""
Returns header value (case-insensitive).
"""
v = self._v.get(normalize(key), default)
if v is not None:
return encodedword.decode(v)
return None
def getraw(self, key, default=None):
"""
        Returns the raw header value (case-insensitive, non-decoded).
"""
return self._v.get(normalize(key), default)
def getall(self, key):
"""
Returns all header values by the given header name (case-insensitive).
"""
v = self._v.getall(normalize(key))
return [encodedword.decode(x) for x in v]
def have_changed(self, ignore_prepends=False):
"""
Tells whether someone has altered the headers after creation.
"""
return self.changed or (self.num_prepends > 0 and not ignore_prepends)
def __str__(self):
return str(self._v)
@classmethod
def from_stream(cls, stream):
"""
Takes a stream and reads the headers, decodes headers to unicode dict
like object.
"""
return cls(parse_stream(stream))
def
|
to_stream(self, stream, prepends_only=False):
"""
|
Takes a stream and serializes headers in a mime format.
"""
i = 0
for h, v in self.iteritems(raw=True):
if prepends_only and i == self.num_prepends:
break
i += 1
try:
h = h.encode('ascii')
except UnicodeDecodeError:
raise EncodingError("Non-ascii header name")
stream.write("{0}: {1}\r\n".format(h, to_mime(h, v)))
def remove_newlines(value):
if not value:
return ''
elif isinstance(value, (str, unicode)):
return value.replace('\r', '').replace('\n', '')
else:
return value
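# Illustrative usage sketch of MimeHeaders above (header values are made up; Python 2 era API).
if __name__ == '__main__':
    from StringIO import StringIO
    headers = MimeHeaders([('Subject', 'Hello'), ('To', 'alice@example.com')])
    headers.add('Received', 'from localhost')   # repeated keys keep every value
    assert headers.get('subject') == 'Hello'    # lookups are case-insensitive
    out = StringIO()
    headers.to_stream(out)                      # writes "Subject: Hello\r\n..." in MIME format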
|
shlomimatichin/workflow
|
workflow/ticket/views.py
|
Python
|
gpl-3.0
| 10,924
| 0.065727
|
from django.template import Context, loader
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.views.decorators import cache
from workflow.ticket import forms
from workflow.ticket import models
from workflow.ticket import tickettree
import pprint
from django import shortcuts
import json
from django.views.decorators import csrf
from workflow import debug
from workflow import render
from workflow.customworkflow import customworkflow
from django.conf import settings
from workflow.timemachine import timemachine
def _lastViewedTickets( request ):
result = []
if request.COOKIES.has_key( 'lastViewedTickets' ):
for id in json.loads( request.COOKIES[ 'lastViewedTickets' ] ):
|
try:
result.append( models.Ticket.objects.get( id = id ) )
except:
pass
return result
def
|
_render( request, template, lastViewedTickets = None, ** kwargs ):
if lastViewedTickets is None:
lastViewedTickets = _lastViewedTickets( request )
kwargs[ 'lastViewedTickets' ] = reversed( lastViewedTickets )
return render.render( request, "ticket/" + template, ** kwargs )
@login_required
@cache.never_cache
@debug.PrintException()
@timemachine.decorators.TimeTravel()
def index(request):
return _render( request, 'index.html' )
@login_required
@debug.PrintException()
@timemachine.decorators.TimeTravel()
def viewHistory( request ):
return _render( request, 'viewhistory.html',
limitToUser = request.REQUEST.get( 'user', None ) )
@login_required
@debug.PrintException()
@timemachine.decorators.TimeTravel( allowedInTimeTravel = False )
def newTicket( request ):
if request.method == 'POST':
form = forms.NewTicket( request.POST )
if form.is_valid():
ticket = models.Ticket.create()
ticket.setProperty( name = 'Title', value = form.cleaned_data[ 'title' ], user = request.user )
ticket.setProperty( name = 'State', value = form.cleaned_data[ 'state' ], user = request.user )
return HttpResponseRedirect( 'viewTicket?ticket=%s' % ticket.id )
else:
form = forms.NewTicket()
return _render( request, 'newticket.html',
form = form,
tickettypes = [ s for s in customworkflow.ALL_STATES if s.showAsNew ] )
@login_required
@debug.PrintException()
@timemachine.decorators.TimeTravel()
def viewTicket( request ):
ticket = models.Ticket.objects.get( id = request.REQUEST[ 'ticket' ] )
if ticket.state() == "No state":
return _render( request, 'viewticketfromthefuture.html', ticket = ticket )
transitions = [ '<li><a href="doTransition?ticket=%d&transition=%s">%s</a></li>' % ( ticket.id, t.name, t.name )
for t in customworkflow.STATE_MAP[ ticket.state() ].transitions() ]
spawnChilds = [ '<li><a href="doSpawnChild?ticket=%d&spawnchild=%s">%s</a></li>' % ( ticket.id, s.name, s.name )
for s in customworkflow.STATE_MAP[ ticket.state() ].spawnChilds ]
    divider = [ '<li class="divider"></li>' ]
discard = [ '<li><a href="doTransition?ticket=%d&transition=Discard">Discard</a></li>' % ticket.id ]
    actions = "\n".join( transitions + spawnChilds + divider + discard )
titleProperty = [ '<li><a href="setProperty?ticket=%d&property=Title">Title</a></li>' % ticket.id ]
stateProperty = [ '<li><a href="setProperty?ticket=%d&property=State">State</a></li>' % ticket.id ]
customProperties = [ '<li><a href="setProperty?ticket=%d&property=%s">%s</a></li>' % (
ticket.id, p.name, p.name ) for p in customworkflow.PROPERTIES ]
    properties = "\n".join( titleProperty + stateProperty + divider + customProperties )
lastViewedTickets = _lastViewedTickets( request )
if ticket in lastViewedTickets:
lastViewedTickets.remove( ticket )
lastViewedTickets.append( ticket )
lastViewedTickets = lastViewedTickets[ -10 : ]
response = _render( request, 'viewticket.html',
ticket = ticket,
actions = actions,
properties = properties,
lastViewedTickets = lastViewedTickets )
response.set_cookie( 'lastViewedTickets', json.dumps( [ t.id for t in lastViewedTickets ] ) )
return response
@login_required
@debug.PrintException()
@timemachine.decorators.TimeTravel()
def getHistory( request ):
ticketFilters = {}
if 'ticket' in request.REQUEST:
ticketFilters[ 'ticket' ] = models.Ticket.objects.get( id = request.REQUEST[ 'ticket' ] )
if 'user' in request.REQUEST:
ticketFilters[ 'user' ] = models.User.objects.get( id = request.REQUEST[ 'user' ] )
properties = list( timemachine.filter( models.Property.objects.filter( ** ticketFilters ) ) )
if 'ticket' in request.REQUEST:
relationsTo = timemachine.filter( models.Relation.objects.filter( ticket = request.REQUEST[ 'ticket' ] ) )
relationsFrom = timemachine.filter( models.Relation.objects.filter( relatedTo = request.REQUEST[ 'ticket' ] ) )
relations = list( relationsTo ) + list( relationsFrom )
else:
relations = list( timemachine.filter( models.Relation.objects.all() ) )
events = properties + relations
events.sort( key = lambda x: x.when, reverse = True )
page = int( request.REQUEST.get( 'page', 0 ) )
PAGE_SIZE = 100
first = page * PAGE_SIZE
bound = first + PAGE_SIZE
moreHistoryData = json.dumps( dict( request.REQUEST, page = page + 1 ) ) if bound < len( events ) else ""
paged = events[ first : bound ]
return _render( request, 'history.html', events = paged, moreHistoryData = moreHistoryData )
@login_required
@debug.PrintException()
@timemachine.decorators.TimeTravel( allowedInTimeTravel = False )
def doTransition( request ):
ticket = models.Ticket.objects.get( id = request.REQUEST[ 'ticket' ] )
transitionName = str( request.REQUEST[ 'transition' ] )
if transitionName == "Discard":
targetState = "Discarded"
else:
transition = customworkflow.STATE_MAP[ ticket.state() ].transitionByName( request.REQUEST[ 'transition' ] )
targetState = transition.targetState
ticket.setProperty( name = 'State', value = targetState, user = request.user )
return HttpResponseRedirect( 'viewTicket?ticket=%s' % ticket.id )
@login_required
@cache.never_cache
@debug.PrintException()
def findTicket( request ):
return _render( request, 'findticket.html' )
@csrf.csrf_exempt
@login_required
@cache.never_cache
@debug.PrintException()
@timemachine.decorators.TimeTravel()
def searchTicketByFreeText( request ):
term = request.REQUEST[ 'term' ]
properties = list( timemachine.filter( models.Property.objects.filter( value__contains = term ) ) )
result = [ dict( label = "%s: %s" %( p.name, p.value ),
value = "viewTicket?ticket=%d" % p.ticket.id )
for p in properties[ -10 : ] ]
return HttpResponse( json.dumps( result ), mimetype = "application/json" )
@login_required
@cache.never_cache
@debug.PrintException()
@timemachine.decorators.TimeTravel()
def ticketTree( request ):
if 'path' in request.REQUEST:
result = tickettree.TicketTree( request.REQUEST[ 'path' ] ).nodes()
return HttpResponse( json.dumps( result ), mimetype = "application/json" )
else:
return _render( request, 'tickettree.html', user = request.user )
@login_required
@cache.never_cache
@debug.PrintException()
@timemachine.decorators.TimeTravel( allowedInTimeTravel = False )
def setRelation( request ):
name = request.REQUEST[ 'name' ]
assert name in [ 'Parent Of', 'Not Parent Of' ]
ticket = models.Ticket.objects.get( id = request.REQUEST[ 'ticket' ] )
relatedTo = models.Ticket.objects.get( id = request.REQUEST[ 'relatedTo' ] )
assert ticket != relatedTo
assert name != 'Parent Of' or relatedTo not in ticket.children()
assert name != 'Not Parent Of' or relatedTo in ticket.children()
ticket.addRelationAtEnd( name, relatedTo, request.user )
if 'redirectToViewTicket' in request.REQUEST:
return HttpResponseRedirect( 'viewTicket?ticket=%s' % request.REQUEST[ 'redirectToViewTicket' ] )
else:
return HttpResponse( '' )
@login_required
@cache.never_cache
@debug.PrintException()
@timemachine.decorators.TimeTravel( allowedInTimeTravel = False )
def reorderRelation( request ):
name = request.REQUEST[ 'name' ]
assert not name.startswith( 'Not ' )
ticket = models.Ticket.objects.get( id = request.REQUEST[ 'ticket' ] )
relations = ticket.relations( request.REQUEST[ 'name' ] )
order = [ int( o ) for o in request.REQUEST.getlist( 'order[]' ) ]
assert len(
|
Kozea/CairoSVG
|
test_non_regression/__init__.py
|
Python
|
lgpl-3.0
| 796
| 0
|
"""
Cairo test suite.
"""
import imp
import os
impor
|
t cairosvg
reference_cairosvg = imp.load_source(
'cairosvg_reference', pathname=os.path.join(
os.path.dirname(__file__), 'cairosvg_reference', 'cairosvg',
'__init__.py'))
cairosvg.features.LOCALE = reference_cairosvg.features.LOCALE = 'en_US'
TEST_FOLDER = os.path.join(os.path.dirname(__file__), 'svg')
os.chdir(TEST_FOLDER) # relative image urls
if os.environ.get('CAIROSVG_TEST_FILES'): # pragma: no cover
ALL_FILES = os.en
|
viron['CAIROSVG_TEST_FILES'].split(',')
else:
ALL_FILES = os.listdir(TEST_FOLDER)
ALL_FILES.sort(key=lambda name: name.lower())
FILES = [
os.path.join(
os.path.dirname(TEST_FOLDER) if name.startswith('fail')
else TEST_FOLDER, name)
for name in ALL_FILES]
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/pylint/checkers/python3.py
|
Python
|
apache-2.0
| 38,460
| 0.00169
|
# Copyright (c) 2014-2015 Brett Cannon <brett@python.org>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Pavel Roskin <proski@gnu.org>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Check Python 2 code for Python 2/3 source-compatible issues."""
from __future__ import absolute_import, print_function
import re
import sys
import tokenize
from collections import namedtuple
import six
import astroid
from astroid import bases
from pylint import checkers, interfaces
from pylint.interfaces import INFERENCE_FAILURE, INFERENCE
from pylint.utils import WarningScope
from pylint.checkers import utils
_ZERO = re.compile("^0+$")
def _is_old_octal(literal):
if _ZERO.match(literal):
return False
if re.match(r'0\d+', literal):
try:
int(literal, 8)
except ValueError:
return False
return True
def _check_dict_node(node):
inferred_types = set()
try:
inferred = node.infer()
for inferred_node in inferred:
inferred_types.add(inferred_node)
except astroid.InferenceError:
pass
return (not inferred_types
or any(isinstance(x, astroid.Dict) for x in inferred_types))
def _is_builtin(node):
return getattr(node, 'name', None) in ('__builtin__', 'builtins')
_ACCEPTS_ITERATOR = {'iter', 'list', 'tuple', 'sorted', 'set', 'sum', 'any',
'all', 'enumerate', 'dict'}
def _in_iterating_context(node):
"""Check if the node is being used as an iterator.
Definition is taken from lib2to3.fixer_util.in_special_context().
"""
parent = node.parent
# Since a call can't be the loop variant we only need to know if the node's
# parent is a 'for' loop to know it's being used as the iterator for the
# loop.
if isinstance(parent, astroid.For):
return True
# Need to make sure the use of the node is in the iterator part of the
# comprehension.
elif isinstance(parent, astroid.Comprehension):
if parent.iter == node:
return True
# Various built-ins can take in an iterable or list and lead to the same
# value.
elif isinstance(parent, astroid.Call):
if isinstance(parent.func, astroid.Name):
parent_scope = parent.func.lookup(parent.func.name)[0]
if _is_builtin(parent_scope) and parent.func.name in _ACCEPTS_ITERATOR:
return True
elif isinstance(parent.func, astroid.Attribute):
if parent.func.attrname == 'join':
return True
# If the call is in an unpacking, there's no need to warn,
# since it can be considered iterating.
elif (isinstance(parent, astroid.Assign) and
isinstance(parent.targets[0], (astroid.List, astroid.Tuple))):
if len(parent.targets[0].elts) > 1:
return True
return False
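# Illustrative Python 2 snippets (for orientation only, not used by the checker) that the helper
# above treats as iterating contexts, i.e. places where an iterator is as good as a list:
#
#     for key in d.iterkeys(): ...        # parent is a For node
#     sorted(d.iterkeys())                # builtin listed in _ACCEPTS_ITERATOR
#     ', '.join(d.itervalues())           # .join() attribute call
#     first, second = d.iteritems()       # unpacking into more than one target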
def _is_conditional_import(node):
"""Checks if a import node is in the context of a conditional.
"""
parent = node.parent
return isinstance(parent, (astroid.TryExcept, astroid.ExceptHandler,
astroid.If, astroid.IfExp))
Branch = namedtuple('Branch', ['node', 'is_py2_only'])
class Python3Checker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
enabled = False
name = 'python3'
msgs = {
# Errors for what will syntactically break in Python 3, warnings for
# everything else.
'E1601': ('print statement used',
'print-statement',
'Used when a print statement is used '
'(`print` is a function in Python 3)',
{'maxversion': (3, 0)}),
'E1602': ('Parameter unpacking specified',
'parameter-unpacking',
'Used when parameter unpacking is specified for a function'
"(Python 3 doesn't allow it)",
{'maxversion': (3, 0)}),
'E1603': ('Implicit unpacking of exceptions is not supported '
'in Python 3',
'unpacking-in-except',
'Python3 will not allow implicit unpacking of '
'exceptions in except clauses. '
'See http://www.python.org/dev/peps/pep-3110/',
{'maxversion': (3, 0),
'old_names': [('W0712', 'unpacking-in-except')]}),
'E1604': ('Use raise ErrorClass(args) instead of '
'raise ErrorClass, args.',
'old-raise-syntax',
"Used when the alternate raise syntax "
"'raise foo, bar' is used "
"instead of 'raise foo(bar)'.",
{'maxversion': (3, 0),
'old_names': [('W0121', 'old-raise-syntax')]}),
'E1605': ('Use of the `` operator',
'backtick',
'Used when the deprecated "``" (backtick) operator is used '
'instead of the str() function.',
{'scope': WarningScope.NODE,
'maxversion': (3, 0),
'old_names': [('W0333', 'backtick')]}),
'E1609': ('Import * only allowed at module level',
'import-star-module-level',
'Used when the import star syntax is used somewhere '
'else than the module level.',
{'maxversion': (3, 0)}),
'W1601': ('apply built-in referenced',
'apply-builtin',
'Used when the apply built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1602': ('basestring built-in referenced',
'basestring-builtin',
'Used when the basestring built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1603': ('buffer built-in referenced',
'buffer-builtin',
'Used when the buffer built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1604': ('cmp built-in referenced',
'cmp-builtin',
'Used when the cmp built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1605': ('coerce built-in referenced',
'coerce-builtin',
'Used when the coerce built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1606': ('execfile built-in referenced',
'execfile-builtin',
'Used when the execfile built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1607': ('file built-in referenced',
'file-builtin',
'Used when the file built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1608': ('long built-in referenced',
'long-builtin',
'Used when the long built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'W1609': ('raw_input built-in referenced',
'raw_input-builtin',
'Used when the raw_input built-in function is referenced '
'(missing from Python 3)',
{'maxversion': (3, 0)}),
'
|
W1610': ('reduce built-in referenced',
'reduce-builtin',
'Used when the reduce built-in function is referenced '
'(missing from
|
Python 3)',
{'maxversion': (3, 0)}),
'W1611': ('StandardError built-in referenced',
'standarderror-builtin',
'Used when the StandardError built-in function is referenced '
|
old-paoolo/hokuyo-python-lib
|
setup.py
|
Python
|
mit
| 1,228
| 0
|
# coding=utf-8
# !/usr/bin/env python
try:
from setuptools import setup
except ImportError:
print('No setuptools installed, use distutils')
from distutils.core import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='hokuyo-python-lib',
packages=[
'hokuyo',
'hokuyo.driver',
'hokuyo.tools',
'hokuyo.tests'
],
package_dir={
'hokuyo': 'src/hokuyo',
'hokuyo.driver': 'src/hokuyo/driver',
'hokuyo.tools': 'src/hokuyo/tools',
'hokuyo.tests': 'src/hokuyo/tests'
},
install_requires=required,
version='1.3',
description='Hokuyo driver in python',
author=u'Paweł Suder',
author_email='pawel@suder.info',
url='http://project-capo.github.io/',
download_url='http://github.com/ol
|
d-paoolo/hokuyo-python-lib',
keywords=[
'hokuyo'
],
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: Other/Proprietary License',
|
'Operating System :: OS Independent',
],
long_description='''\
'''
)
|
BielBdeLuna/idTechX_Python_API
|
text_file.py
|
Python
|
gpl-3.0
| 1,232
| 0.016234
|
import utils
class text_file:
    """Reads a text file and discards '//' line comments from its contents."""
    def __init__(self, filename):
        self.lines = []
        self.gather_lines_from_file(filename)
        self.discard_lines_from_file()
    def gather_lines_from_file(self, filename):
        with open(filename) as f:
            self.lines = f.readlines()
    def discard_lines_from_file(self):
        # NOTE: only '//' line comments are handled; '/* ... */' block comments are not.
        correct_lines = []
        for l in self.lines:
            comment_start = l.find("//")
            if comment_start == 0:
                continue                    # the whole line is a comment, nothing to keep
            if comment_start > 0:
                l = l[:comment_start]       # keep only the data before the comment
            if l.strip():
                correct_lines.append(l)
        self.lines = correct_lines
|
IPMITMO/statan
|
coala-bears/bears/latex/LatexLintBear.py
|
Python
|
mit
| 886
| 0
|
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
@li
|
nter(executable='chktex',
output_format='regex',
output_regex=r'(?P<severity>Error|Warning) \d+ in .+ line '
r'(?P<line>\d+): (?P<message>.*)')
class LatexLintBear:
"""
Ch
|
ecks the code with ``chktex``.
"""
LANGUAGES = {'Tex'}
REQUIREMENTS = {DistributionRequirement('chktex', zypper='texlive-chktex')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Formatting'}
@staticmethod
def create_arguments(filename, file, config_file):
return (
'--format',
'%k %n in {0} line %l: %m!n'.format(filename),
filename,
)
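# Illustrative self-check (not part of the bear; the sample line is made up): the output_regex
# above matches lines in the shape produced by the --format string in create_arguments.
if __name__ == '__main__':
    import re
    sample = 'Warning 24 in paper.tex line 12: Delete this space to maintain correct pagereferences.'
    match = re.match(r'(?P<severity>Error|Warning) \d+ in .+ line '
                     r'(?P<line>\d+): (?P<message>.*)', sample)
    assert match.group('severity') == 'Warning'
    assert match.group('line') == '12'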
|
brownian/frescobaldi
|
frescobaldi_app/hyphenator.py
|
Python
|
gpl-2.0
| 8,884
| 0.001126
|
"""
This is a pure-Python module to hyphenate text.
It is inspired by Ruby's Text::Hyphen, but currently reads standard *.dic files,
which must be installed separately.
In the future it would be nice if dictionaries could be distributed together with
this module, in a slightly prepared form, as in Ruby's Text::Hyphen.
Wilbert Berendsen, March 2008
info@wilbertberendsen.nl
License: LGPL. More info: http://python-hyphenator.googlecode.com/
"""
try:
chr = unichr
except NameError:
pass
import codecs
import re
__all__ = ["Hyphenator"]
# cache of per-file HyphenationDictionary objects
_hdcache = {}
# precompile some regular expressions
parse = re.compile(r'(\d?)(\D?)').findall
# Match ^^xx where xx is a two-digit hexadecimal value
_hex_re = re.compile(r'\^{2}([0-9a-f]{2})')
# replace the matched hex string with the corresponding unicode character
_hex_repl = lambda matchObj: chr(int(matchObj.group(1), 16))
def replace_hex(text):
"""Replaces ^^xx (where xx is a two-digit hexadecimal value) occurrences
by the corresponding unicode character.
"""
return _hex_re.sub(_hex_repl, text)
class ParsedAlternative(object):
"""Parse nonstandard hyphen pattern alternative.
when called with an odd value, the instance returns an integer with data
attribute (DataInt) about the current position in the pattern.
"""
def __init__(self, pat, alt):
alt = alt.split(',')
self.change = alt[0]
if len(alt) > 2:
self.index = int(alt[1])
self.cut = int(alt[2]) + 1
else:
self.index = 1
self.cut = len(re.sub(r'[\d\.]', '', pat)) + 1
if pat.startswith('.'):
self.index += 1
def __call__(self, val):
self.index -= 1
val = int(val)
if val & 1:
return DataInt(val, (self.change, self.index, self.cut))
else:
return val
class DataInt(int):
"""An integer with a data attribute.
    Just an int that some other data can be attached to via its data attribute.
Instantiate with ref=other to use the data from the other DataInt.
"""
def __new__(cls, value, data=None, ref=None):
obj = int.__new__(cls, value)
if ref and type(ref) is DataInt:
obj.data = ref.data
else:
obj.data = data
return obj
class HyphenationDictionary(object):
"""Reads a hyph_*.dic file and stores the hyphenation patterns.
Parameters:
filename : filename of hyph_*.dic pattern file to read
"""
def __init__(self, filename):
self.patterns = {}
with open(filename, 'rb') as f:
# use correct encoding, specified in first line
for encoding in f.readline().split():
if encoding != b"charset":
try:
decoder = codecs.getreader(encoding.decode('ascii'))
break
except LookupError:
pass
else:
decoder = codecs.getreader('latin1')
for pat in decoder(f):
pat = pat.strip()
if not pat or pat[0] == '%':
continue
# replace ^^hh with the real character
pat = replace_hex(pat)
# read nonstandard hyphen alternatives
if '/' in pat:
pat, alt = pat.split('/', 1)
factory = ParsedAlternative(pat, alt)
else:
factory = int
tag, values = zip(*[(s, factory(i or "0"))
for i, s in parse(pat)])
# if only zeros, skip this pattern
if any(values):
# strip zeros and store start offset.
start, end = 0, len(values)
while not values[start]:
start += 1
while not values[end-1]:
end -= 1
self.patterns[''.join(tag)] = start, values[start:end]
self.cache = {}
self.maxlen = max(map(len, self.patterns))
def positions(self, word):
"""Returns a list of positions where the word can be hyphenated.
E.g. for the dutch word 'lettergrepen' this method returns
the list [3, 6, 9].
Each position is a 'data int' (DataInt) with a data attribute.
If the data attribute is not None, it contains a tuple with
information about nonstandard hyphenation at that point:
(change, index, cut)
change: is a string like 'ff=f', that describes how hyphenation
should take place.
index: where to substitute the change, counting from the current
point
cut: how many characters to remove while substituting the nonstandard
hyphenation
"""
word = word.lower()
try:
return self.cache[word]
except KeyError:
pass
prepWord = '.' + word + '.'
res = [0] * (len(prepWord) + 1)
for i in range(len(prepWord) - 1):
for j in range(i + 1, min(i + self.maxlen, len(prepWord)) + 1):
p =
|
self.patterns.get(prepWord[i:j])
if p:
offset, values = p
s = slice(i + offset, i + offset + len(values))
res[s] = map(max, values, res[s])
positions = [DataInt(i - 1, ref=r) for i, r in enumerate(res) if r % 2]
|
self.cache[word] = positions
return positions
class Hyphenator(object):
"""Reads a hyph_*.dic file and stores the hyphenation patterns.
Provides methods to hyphenate strings in various ways.
Parameters:
-filename : filename of hyph_*.dic to read
-left: make the first syllable not shorter than this
-right: make the last syllable not shorter than this
-cache: if true (default), use a cached copy of the dic file, if possible
left and right may also later be changed:
h = Hyphenator(file)
h.left = 1
"""
def __init__(self, filename, left=2, right=2, cache=True):
self.left = left
self.right = right
if not cache or filename not in _hdcache:
_hdcache[filename] = HyphenationDictionary(filename)
self.hd = _hdcache[filename]
def positions(self, word):
"""Returns a list of positions where the word can be hyphenated.
See also HyphenationDictionary.positions. The points that are too far to
the left or right are removed.
"""
right = len(word) - self.right
return [i for i in self.hd.positions(word) if self.left <= i <= right]
def iterate(self, word):
"""Iterate over all hyphenation possibilities, the longest first."""
for p in reversed(self.positions(word)):
if p.data:
# get the nonstandard hyphenation data
change, index, cut = p.data
if word.isupper():
change = change.upper()
c1, c2 = change.split('=')
yield word[:p+index] + c1, c2 + word[p+index+cut:]
else:
yield word[:p], word[p:]
def wrap(self, word, width, hyphen='-'):
"""Returns the longest possible first part and the last part of the
hyphenated word.
The first part has the hyphen already attached. Returns None, if there
is no hyphenation point before width, or if the word could not be
hyphenated.
"""
width -= len(hyphen)
for w1, w2 in self.iterate(word):
if len(w1) <= width:
return w1 + hyphen, w2
def inserted(self, word, hyphen='-'):
"""Returns the word as a string with all the possible hyphens inserted.
E.g. for the dutch word 'lettergrepen' this method returns the string
'let-ter-gre-pen'. The hyphen string to use can be given as the second
parameter, that defaults to '-'.
"""
l = list(word)
for p in reversed(self.positions(w
|
state-hiu/rogue_geonode
|
geoshape/views.py
|
Python
|
gpl-3.0
| 3,021
| 0.001655
|
import logging
from django.conf import settings
from django.http import HttpResponse
from django.http.request import validate_host
from django.utils.http import is_safe_url
from httplib import HTTPConnection, HTTPSConnection
from urlparse import urlsplit
from geonode.geoserver.helpers import ogc_server_settings
logger = logging.getLogger(__name__)
def proxy(request):
PROXY_ALLOWED_HOSTS = getattr(settings, 'PROXY_ALLOWED_HOSTS', ())
hostname = (ogc_server_settings.hostname,) if ogc_server_settings else ()
PROXY_ALLOWED_HOSTS += hostname
if 'url' not in request.GET:
return HttpResponse("The proxy service requires a URL-encoded URL as a parameter.",
status=400,
|
content_type="text/plain"
)
raw_url = request.GET['url']
url = urlsplit(raw_url)
locator = url.path
if url.query != "":
locator += '?' + url.query
if url.fragment != "":
locator += '#' + url.fragment
logger.debug('Incoming headers: {0}'.format(request.META))
if not settings.DEBUG:
if not validate_host(url.hostname, PROXY_ALLOWED_HOSTS):
return HttpResponse("DEBUG is set to F
|
alse but the host of the path provided "
"to the proxy service is not in the "
"PROXY_ALLOWED_HOSTS setting.",
status=403,
content_type="text/plain"
)
headers = {}
if settings.SESSION_COOKIE_NAME in request.COOKIES and is_safe_url(url=raw_url, host=ogc_server_settings.netloc):
headers["Cookie"] = request.META["HTTP_COOKIE"]
if request.META.get('HTTP_AUTHORIZATION'):
headers['AUTHORIZATION'] = request.META.get('HTTP_AUTHORIZATION')
if request.method in ("POST", "PUT") and "CONTENT_TYPE" in request.META:
headers["Content-Type"] = request.META["CONTENT_TYPE"]
if request.META.get('HTTP_ACCEPT'):
headers['ACCEPT'] = request.META['HTTP_ACCEPT']
logger.debug('Outgoing request method: {0}'.format(request.method))
logger.debug('Outgoing request locator: {0}{1}'.format(url.hostname, locator))
logger.debug('Outgoing request headers: {0}'.format(headers))
if url.scheme == 'https':
conn = HTTPSConnection(url.hostname, url.port)
else:
conn = HTTPConnection(url.hostname, url.port)
conn.request(request.method, locator, request.body, headers)
result = conn.getresponse()
logger.debug('Response headers: {0}'.format(result.getheaders()))
logger.debug('Response status: {0}'.format(result.status))
response = HttpResponse(result.read(),
status=result.status,
content_type=result.getheader("Content-Type", "text/plain"),
)
if result.getheader('www-authenticate'):
response['www-authenticate'] = "GeoNode"
return response
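# Illustrative wiring sketch (module path and URL name are assumptions): the proxy view above is
# typically exposed from a urls.py so clients can call /proxy/?url=<urlencoded target>, e.g.
#
#     from django.conf.urls import url
#     from geoshape import views
#
#     urlpatterns = [
#         url(r'^proxy/$', views.proxy, name='geoshape_proxy'),
#     ]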
|
vicnet/weboob
|
modules/regionsjob/__init__.py
|
Python
|
lgpl-3.0
| 847
| 0
|
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/
|
or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for mor
|
e details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from .module import RegionsjobModule
__all__ = ['RegionsjobModule']
|
openstack/smaug
|
karbor/services/protection/client_factory.py
|
Python
|
apache-2.0
| 3,806
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import service_token
from keystoneauth1 import session as keystone_session
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from karbor.common import karbor_keystone_plugin
from karbor import exception
from karbor.i18n import _
LOG = logging.getLogger(__name__)
class ClientFactory(object):
_factory = None
_keystone_plugin = None
@staticmethod
def _list_clients():
clients_dir = os.path.join(os.path.dirname(__file__), 'clients')
if not os.path.isdir(clients_dir):
LOG.error('clients directory "%s" not found', clients_dir)
return
for file in os.listdir(clients_dir):
name, ext = os.path.splitext(file)
if name != '__init__' and name != 'utils' and ext == '.py':
LOG.debug('Found client "%s"', name)
yield '%s.clients.%s' % (__package__, name)
@classmethod
def _generate_session(cls, context, service, privileged_user=False):
LOG.debug("Generate an auth session. privileged_user: %s",
privileged_user)
plugin = cls.get_keystone_plugin()
try:
if privileged_user is True:
auth_plugin = service_token.ServiceTokenAuthWrapper(
plugin.service_auth_plugin,
plugin.service_auth_plugin)
else:
auth_plugin = service_token.ServiceTokenAuthWrapper(
plugin.create_user_auth_plugin(context),
plugin.service_auth_plugin)
except Exception:
return None
try:
client_conf = cfg.CONF['%s_client' % service]
auth_insecure = client_conf['%s_auth_insecure' % service]
ca_file = client_conf['%s_ca_cert_file' % service]
verify = False if auth_insecure else (ca_file or True)
except Exception:
verify = True
return keystone_session.Session(auth=auth_plugin, verify=verify)
@classmethod
def get_keystone_plugin(cls):
if not cls._keystone_plugin:
cls._keystone_plugin = \
karbor_keystone_plugin.KarborKeystonePlugin()
return cls._k
|
eystone_plugin
|
@classmethod
def get_client_module(cls, service):
if not cls._factory:
cls._factory = {}
for module in cls._list_clients():
module = importutils.import_module(module)
cls._factory[module.SERVICE] = module
return cls._factory.get(service)
@classmethod
def create_client(cls, service, context, conf=cfg.CONF,
privileged_user=False, **kwargs):
module = cls.get_client_module(service)
if module is None:
raise exception.KarborException(_('Unknown service(%s)') % service)
kwargs['privileged_user'] = privileged_user
kwargs['keystone_plugin'] = cls.get_keystone_plugin()
if context or privileged_user:
kwargs['session'] = cls._generate_session(context, service,
privileged_user)
return module.create(context, conf, **kwargs)
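# Illustrative usage sketch (service name and context are assumptions, not defined in this file):
# protection plugins typically obtain an OpenStack client through the factory above, e.g.
#
#     cinder_client = ClientFactory.create_client('cinder', context)
#
# where 'cinder' must match a module under the clients/ directory that defines SERVICE = 'cinder'
# and a create() function.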
|
MSFTOSSMgmt/WPSDSCLinux
|
Providers/nxOMSAutomationWorker/automationworker/3.x/worker/urllib2httpclient.py
|
Python
|
mit
| 9,775
| 0.003581
|
#!/usr/bin/env python3
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
"""Urllib2 HttpClient."""
import httplib
import socket
import time
import traceback
import urllib2
from httpclient import *
from workerexception import *
PY_MAJOR_VERSION = 0
PY_MINOR_VERSION = 1
PY_MICRO_VERSION = 2
SSL_MODULE_NAME = "ssl"
# On some system the ssl module might be missing
try:
import ssl
except ImportError:
ssl = None
class HttpsClientHandler(urllib2.HTTPSHandler):
"""Https handler to enable attaching cert/key to request. Also used to disable strict cert verification for
testing.
"""
def __init__(self, cert_path, key_path, insecure=False):
self.cert_path = cert_path
self.key_path = key_path
ssl_context = None
if insecure and SSL_MODULE_NAME in sys.modules and (sys.version_info[PY_MAJOR_VERSION] == 2 and
sys.version_info[PY_MINOR_VERSION] >= 7 and
sys.version_info[PY_MICRO_VERSION] >= 9):
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
urllib2.HTTPSHandler.__init__(self, context=ssl_context) # Context can be None here
def https_open(self, req):
return self.do_open(self.get_https_connection, req, context=self._context)
def get_https_connection(self, host, context=None, timeout=180):
"""urllib2's AbstractHttpHandler will invoke this method with the host/timeout parameter. See urllib2's
AbstractHttpHandler for more details.
Args:
host : string , the host.
context : ssl_context , the ssl context.
timeout : int , the timeout value in seconds.
Returns:
An HttpsConnection
"""
socket.setdefaulttimeout(180)
if self.cert_path is None or self.key_path is None:
return httplib.HTTPSConnection(host, timeout=timeout, context=context)
else:
return httplib.HTTPSConnection(host, cert_file=self.cert_path, key_file=self.key_path, timeout=timeout,
context=context)
def request_retry_handler(func):
def decorated_func(*args, **kwargs):
max_retry_count = 3
for iteration in range(0, max_retry_count, 1):
try:
ret = func(*args, **kwargs)
return ret
except Exception as exception:
if iteration >= max_retry_count - 1:
raise RetryAt
|
temptExceededException(traceback.format_exc())
elif SSL_MODULE_NAME in sys.modules:
if type(exception).__name__ == 'SSLError':
time.sleep(5 + iteration)
|
continue
elif isinstance(exception, urllib2.URLError):
if "name resolution" in exception.reason:
time.sleep(5 + iteration)
continue
raise exception
return decorated_func
class Urllib2HttpClient(HttpClient):
"""Urllib2 http client. Inherits from HttpClient.
Targets:
[2.7.9 - 2.7.9+] only due to the lack of strict certificate verification prior to this version.
Implements the following method common to all classes inheriting HttpClient.
get (url, headers)
post (url, headers, data)
"""
def __init__(self, cert_path, key_path, insecure=False, proxy_configuration=None):
HttpClient.__init__(self, cert_path, key_path, insecure, proxy_configuration)
@request_retry_handler
def issue_request(self, url, headers, method=None, data=None):
"""Issues a GET request to the provided url and using the provided headers.
Args:
url : string , the url.
headers : dictionary, contains the headers key value pair.
data : string , contains the serialized request body.
Returns:
A RequestResponse
:param method:
"""
https_handler = HttpsClientHandler(self.cert_path, self.key_path, self.insecure)
opener = urllib2.build_opener(https_handler)
if self.proxy_configuration is not None:
proxy_handler = urllib2.ProxyHandler({'http': self.proxy_configuration,
'https': self.proxy_configuration})
opener.add_handler(proxy_handler)
req = urllib2.Request(url, data=data, headers=headers)
req.get_method = lambda: method
response = opener.open(req, timeout=30)
opener.close()
https_handler.close()
return response
def get(self, url, headers=None):
"""Issues a GET request to the provided url and using the provided headers.
Args:
url : string , the url.
headers : dictionary, contains the headers key value pair.
Returns:
An http_response
"""
headers = self.merge_headers(self.default_headers, headers)
try:
response = self.issue_request(url, headers=headers, method=self.GET)
except urllib2.HTTPError as e:
if e is not None and e.code is not None:
return RequestResponse(e.code)
else:
exception_type, error = sys.exc_info()[:2]
return RequestResponse(error.code)
        except RetryAttemptExceededException:
# return an http timeout status code when all retries fail due to timeout
return RequestResponse(408)
return RequestResponse(response.getcode(), response.read())
def post(self, url, headers=None, data=None):
"""Issues a POST request to the provided url and using the provided headers.
Args:
url : string , the url.
headers : dictionary, contains the headers key value pair.
data : dictionary, contains the non-serialized request body.
Returns:
A RequestResponse
"""
headers = self.merge_headers(self.default_headers, headers)
if data is None:
serial_data = ""
else:
serial_data = self.json.dumps(data)
headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})
try:
response = self.issue_request(url, headers=headers, method=self.POST, data=serial_data)
except urllib2.HTTPError as e:
if e is not None and e.code is not None:
return RequestResponse(e.code)
else:
exception_type, error = sys.exc_info()[:2]
return RequestResponse(error.code)
except RetryAttemptExceededException:
# return an http timeout status code when all retries fail due to timeout
return RequestResponse(408)
return RequestResponse(response.getcode(), response.read())
def put(self, url, headers=None, data=None):
"""Issues a PUT request to the provided url and using the provided headers.
Args:
url : string , the url.
headers : dictionary, contains the headers key value pair.
data : dictionary, contains the non-serialized request body.
Returns:
A RequestResponse
"""
headers = self.merge_headers(self.default_headers, headers)
if data is None:
serial_data = ""
else:
serial_data = self.json.dumps(data)
headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})
try:
response = self.issue_request(url, headers=headers, method=self.PUT, data=serial_data)
except urllib2.HTTPError as e:
if e is not None and e.code is not None:
return RequestResponse(e.code)
else:
exception_type, error = sys.exc_i
|
urisimchoni/samba
|
third_party/waf/wafadmin/Logs.py
|
Python
|
gpl-3.0
| 2,875
| 0.041739
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
import ansiterm
import os, re, logging, traceback, sys
from Constants import *
zones = ''
verbose = 0
colors_lst = {
'USE' : True,
'BOLD' :'\x1b[01;1m',
'RED' :'\x1b[01;31m',
'GREEN' :'\x1b[32m',
'YELLOW':'\x1b[33m',
'PINK' :'\x1b[35m',
'BLUE' :'\x1b[01;34m',
'CYAN' :'\x1b[36m',
'NORMAL':'\x1b[0m',
'cursor_on' :'\x1b[?25h',
'cursor_off' :'\x1b[?25l',
}
got_tty = False
term = os.environ.get('TERM', 'dumb')
if not term in ['dumb', 'emacs']:
try:
got_tty = sys.stderr.isatty() or (sys.platform == 'win32' and term in ['xterm', 'msys'])
except AttributeError:
pass
import Utils
if not got_tty or 'NOCOLOR' in os.environ:
colors_lst['USE'] = False
# test
#if sys.platform == 'win32':
# colors_lst['USE'] = True
def get_color(cl):
if not colors_lst['USE']: return ''
return colors_lst.get(cl, '')
class foo(object):
def __getattr__(self, a):
return get_color(a)
def __call__(self, a):
return get_color(a)
colors = foo()
re_log = re.compile(r'(\w+): (.*)', re.M)
class log_filter(logging.Filter):
def __init__(self, name=None):
pass
def filter(self, rec):
rec.c1 = colors.PINK
rec.c2 = colors.NORMAL
rec.zone = rec.module
if rec.levelno >= logging.INFO:
if rec.levelno >= logging.ERROR:
rec.c1 = colors.RED
elif rec.levelno >= logging.WARNING:
rec.c1 = colors.YELLOW
else:
rec.c1 = colors.GREEN
return True
zone = ''
m = re_log.match(rec.msg)
if m:
zone = rec.zone = m.group(1)
rec.msg = m.group(2)
if zones:
return getattr(rec, 'zone', '') in zones or '*' in
|
zones
elif not verbose > 2:
return False
return True
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT)
def format(self, rec):
if rec.levelno >=
|
logging.WARNING or rec.levelno == logging.INFO:
try:
return '%s%s%s' % (rec.c1, rec.msg.decode('utf-8'), rec.c2)
except:
return rec.c1+rec.msg+rec.c2
return logging.Formatter.format(self, rec)
def debug(*k, **kw):
if verbose:
k = list(k)
k[0] = k[0].replace('\n', ' ')
logging.debug(*k, **kw)
def error(*k, **kw):
logging.error(*k, **kw)
if verbose > 1:
if isinstance(k[0], Utils.WafError):
st = k[0].stack
else:
st = traceback.extract_stack()
if st:
st = st[:-1]
buf = []
for filename, lineno, name, line in st:
buf.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
buf.append(' %s' % line.strip())
if buf: logging.error("\n".join(buf))
warn = logging.warn
info = logging.info
def init_log():
log = logging.getLogger()
log.handlers = []
log.filters = []
hdlr = logging.StreamHandler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
# may be initialized more than once
init_log()
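# Illustrative usage sketch (zone name and messages are made up): how build code drives this module.
if __name__ == '__main__':
    verbose = 1
    zones = ['runner']                       # only debug records tagged "runner: ..." get through
    debug('runner: spawning compiler job')   # the "zone:" prefix is parsed by log_filter
    error('link failed')                     # rendered in red when the terminal supports colours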
|
petertodd/timelock
|
lib/python-bitcoinlib/bitcoin/rpc.py
|
Python
|
gpl-3.0
| 16,944
| 0.002833
|
# Copyright 2011 Jeff Garzik
#
# RawProxy has the following improvements over python-jsonrpc's ServiceProxy
# class:
#
# - HTTP connections persist for the life of the RawProxy object (if server
# supports HTTP/1.1)
# - sends protocol 'version', per JSON-RPC 1.1
# - sends proper, incrementing 'id'
# - sends Basic HTTP authentication headers
# - parses all JSON numbers that look like floats as Decimal
# - uses standard Python json lib
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Bitcoin Core RPC support"""
from __future__ import absolute_import, division, print_function, unicode_literals
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import binascii
import decimal
import json
import os
import platform
import sys
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
import bitcoin
from bitcoin.core import COIN, lx, b2lx, CBlock, CTransaction, COutPoint, CTxOut
from bitcoin.core.script import CScript
from bitcoin.wallet import CBitcoinAddress
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
# (un)hexlify to/from unicode, needed for Python3
unhexlify = binascii.unhexlify
hexlify = binascii.hexlify
if sys.version > '3':
unhexlify = lambda h: binascii.unhexlify(h.encode('utf8'))
hexlify = lambda b: binascii.hexlify(b).decode('utf8')
class JSONRPCException(Exception):
def __init__(self, rpc_error):
super(JSONRPCException, self).__init__('msg: %r code: %r' %
(rpc_error['message'], rpc_error['code']))
self.error = rpc_error
class RawProxy(object):
# FIXME: need a CChainParams rather than hard-coded service_port
def __init__(self, service_url=None,
service_port=None,
btc_conf_file=None,
timeout=HTTP_TIMEOUT,
_connection=None):
"""Low-level JSON-RPC proxy
Unlike Proxy no conversion is done from the raw JSON objects.
"""
if service_url is None:
# Figure out the path to the bitcoin.conf file
if btc_conf_file is None:
if platform.system() == 'Darwin':
btc_conf_file = os.path.expanduser('~/Library/Application Support/Bitcoin/')
elif platform.system() == 'Windows':
btc_conf_file = os.path.join(os.environ['APPDATA'], 'Bitcoin')
else:
btc_conf_file = os.path.expanduser('~/.bitcoin')
btc_conf_file = os.path.join(btc_conf_file, 'bitcoin.conf')
# Extract contents of bitcoin.conf to build service_url
with open(btc_conf_file, 'r') as fd:
conf = {}
for line in fd.readlines():
if '#' in line:
line = line[:line.index('#')]
if '=' not in line:
continue
k, v = line.split('=', 1)
conf[k.strip()] = v.strip()
if service_port is None:
service_port = bitcoin.params.RPC_PORT
conf['rpcport'] = int(conf.get('rpcport', service_port))
conf['rpcssl'] = conf.get('rpcssl', '0')
if conf['rpcssl'].lower() in ('0', 'false'):
conf['rpcssl'] = False
elif conf['rpcssl'].lower() in ('1', 'true'):
conf['rpcssl'] = True
else:
raise ValueError('Unknown rpcssl value %r' % conf['rpcssl'])
service_url = ('%s://%s:%s@localhost:%d' %
('https' if conf['rpcssl'] else 'http',
conf['rpcuser'], conf['rpcpassword'],
conf['rpcport']))
self.__service_url = service_url
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
self.__id_count = 0
authpair = "%s:%s" % (self.__url.username, self.__url.password)
authpair = authpair.encode('utf8')
self.__auth_header = b"Basic " + base64.b64encode(authpair)
if _connection:
# Callables re-use the connection of the original proxy
self.__conn = _connection
elif self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port=port,
key_file=None, cert_file=None,
timeout=timeout)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port=port,
timeout=timeout)
def _call(self, service_name, *args):
self.__id_count += 1
postdata = json.dumps({'version': '1.1',
'method': service_name,
'params': args,
'id': self.__id_count})
self.__conn.request('POST', self.__url.path, postdata,
{'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'})
response = self._get_response()
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
f = lambda *args: self._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list))
self.__conn.request('POST', self.__url.path, postdata,
{'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'})
return self._get_response()
def _get_response(self):
http_response = self.__conn.getresponse()
if http_response is None:
raise JSONRPCException({
'
|
code': -342, 'message': 'missing HTTP response from server'})
return json.loads(http_response.read().decode('utf8'),
parse_float=decimal.Decimal)
class Proxy(RawProxy):
def __init__(self, service_url=None,
service_port=None,
btc_conf_file=None,
timeout=HTTP_TIMEOUT,
|
**kwargs):
"""Create a proxy to a bitcoin RPC service
Unlike RawProxy data is passed as objects, rather than JSON. (not yet
fully implemented) Assumes Bitcoin Core version >= 0.9; older versions
mostly work, but there are a
|
mitsuhiko/celery
|
celery/serialization.py
|
Python
|
bsd-3-clause
| 4,714
| 0.000636
|
import inspect
import sys
import types
from copy import deepcopy
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError:
cpickle = None
if sys.version_info < (2, 6):
# cPickle is broken in Python <= 2.5.
# It unsafely and incorrectly uses relative instead of absolute imports,
# so e.g.:
# exceptions.KeyError
# becomes:
# celery.exceptions.KeyError
#
# Your best choice is to upgrade to Python 2.6,
# as while the pure pickle version has worse performance,
# it is the only safe option for older Python versions.
pickle = pypickle
else:
pickle = cpickle or pypickle
# BaseException was introduced in Python 2.5.
try:
_error_bases = (BaseException, )
except NameError:
_error_bases = (SystemExit, KeyboardInterrupt)
unwanted_base_classes = (StandardError, Exception) + _error_bases + (object, )
if sys.version_info < (2, 5):
# Prior to Python 2.5, Exception was an old-style class
def subclass_exception(name, parent, unused):
ret
|
urn types.ClassType(name, (parent,), {})
else:
def subclass_exception(name, parent, module):
return type(name, (parent,), {'__m
|
odule__': module})
def find_nearest_pickleable_exception(exc):
"""With an exception instance, iterate over its super classes (by mro)
and find the first super exception that is pickleable. It does
not go below :exc:`Exception` (i.e. it skips :exc:`Exception`,
:class:`BaseException` and :class:`object`). If that happens
you should use :exc:`UnpickleableException` instead.
:param exc: An exception instance.
:returns: the nearest exception if it's not :exc:`Exception` or below,
if it is it returns ``None``.
:rtype: :exc:`Exception`
"""
cls = exc.__class__
getmro_ = getattr(cls, "mro", None)
    # old-style classes don't have mro()
if not getmro_:
        # all Py2.4 exceptions have a baseclass.
if not getattr(cls, "__bases__", ()):
return
# Use inspect.getmro() to traverse bases instead.
getmro_ = lambda: inspect.getmro(cls)
for supercls in getmro_():
if supercls in unwanted_base_classes:
# only BaseException and object, from here on down,
# we don't care about these.
return
try:
exc_args = getattr(exc, "args", [])
superexc = supercls(*exc_args)
pickle.dumps(superexc)
except:
pass
else:
return superexc
return
def create_exception_cls(name, module, parent=None):
"""Dynamically create an exception class."""
if not parent:
parent = Exception
return subclass_exception(name, parent, module)
class UnpickleableExceptionWrapper(Exception):
"""Wraps unpickleable exceptions.
:param exc_module: see :attr:`exc_module`.
:param exc_cls_name: see :attr:`exc_cls_name`.
:param exc_args: see :attr:`exc_args`
.. attribute:: exc_module
The module of the original exception.
.. attribute:: exc_cls_name
The name of the original exception class.
.. attribute:: exc_args
The arguments for the original exception.
Example
        >>> try:
        ...     something_raising_unpickleable_exc()
        ... except Exception, e:
        ...     exc = UnpickleableExceptionWrapper(e.__class__.__module__,
        ...                                        e.__class__.__name__,
        ...                                        e.args)
        ...     pickle.dumps(exc)  # Works fine.
"""
def __init__(self, exc_module, exc_cls_name, exc_args):
self.exc_module = exc_module
self.exc_cls_name = exc_cls_name
self.exc_args = exc_args
Exception.__init__(self, exc_module, exc_cls_name, exc_args)
@classmethod
def from_exception(cls, exc):
return cls(exc.__class__.__module__,
exc.__class__.__name__,
getattr(exc, "args", []))
def restore(self):
return create_exception_cls(self.exc_cls_name,
self.exc_module)(*self.exc_args)
def get_pickleable_exception(exc):
"""Make sure exception is pickleable."""
nearest = find_nearest_pickleable_exception(exc)
if nearest:
return nearest
try:
pickle.dumps(deepcopy(exc))
except Exception:
return UnpickleableExceptionWrapper.from_exception(exc)
return exc
def get_pickled_exception(exc):
"""Get original exception from exception pickled using
:meth:`get_pickleable_exception`."""
if isinstance(exc, UnpickleableExceptionWrapper):
return exc.restore()
return exc
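# Illustrative sketch of the helpers above (the locally defined exception class is hypothetical).
if __name__ == "__main__":
    def _broken_task():
        class MissingSetting(KeyError):   # local class, so pickle cannot find it by module path
            pass
        raise MissingSetting("broker_url")
    try:
        _broken_task()
    except Exception, exc:
        safe = get_pickleable_exception(exc)   # -> KeyError("broker_url"), nearest pickleable ancestor
        pickle.dumps(safe)                     # now safe to send between worker processes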
|
twisted/quotient
|
xquotient/exmess.py
|
Python
|
mit
| 95,030
| 0.002041
|
# -*- test-case-name: xquotient.test.test_workflow -*-
"""
This module contains the core messaging abstractions for Quotient. L{Message},
a transport-agnostic message metadata representation, and L{MailboxSelector}, a
tool for specifying constraints for and iterating sets of messages.
"""
from os import path
import re
import pytz
import zipfile
import urllib
from StringIO import StringIO
from datetime import timedelta
from zope.interface import implements
from twisted.python.components import registerAdapter
from twisted.python.util import sibpath
from twisted.web import microdom
from twisted.web.sux import ParseError
from epsilon.extime import Time
from nevow import rend, inevow, athena, static, loaders, tags, page
from nevow.athena import expose
from axiom.tags import Catalog, Tag
from axiom import item, attributes, batch
from axiom.iaxiom import IScheduler
from axiom.upgrade import registerAttributeCopyingUpgrader, registerUpgrader
from xmantissa import ixmantissa, people, webapp, liveform, webnav
from xmantissa.prefs import PreferenceCollectionMixin
from xmantissa.publicresource import getLoader
from xmantissa.fragmentutils import PatternDictionary, dictFillSlots
from xmantissa.webtheme import ThemedElement
from xquotient import gallery, equotient, scrubber, mimeutil, mimepart, renderers
from xquotient.actions import SenderPersonFragment
from xquotient.renderers import replaceIllegalChars, ButtonRenderingMixin
LOCAL_ICON_PATH = sibpath(__file__, path.join('static', 'images', 'attachment-types'))
MAX_SENDER_LEN = 128
senderRE = re.compile('\\w{1,%i}' % MAX_SENDER_LEN, re.U)
def mimeTypeToIcon(mtype,
webIconPath='/static/Quotient/images/attachment-types',
localIconPath=LOCAL_ICON_PATH,
extension='png',
defaultIconPath='/static/Quotient/images/attachment-types/generic.png'):
lastpart = mtype.replace('/', '-') + '.' + extension
localpath = path.join(localIconPath, lastpart)
if path.exists(localpath):
return webIconPath + '/' + lastpart
return defaultIconPath
def formatSize(size, step=1024.0):
suffixes = ['bytes', 'K', 'M', 'G']
while step <= size:
size /= step
suffixes.pop(0)
if suffixes:
return '%d%s' % (size, suffixes[0])
return 'huge'
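# Editor's note: illustrative values for formatSize() above (a sketch, not part of the
# original module); the size is divided by 1024 until it fits the next suffix.
#     formatSize(512)           -> '512bytes'
#     formatSize(2048)          -> '2K'
#     formatSize(3 * 1024 ** 2) -> '3M'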
def splitAddress(emailString):
"""
    Split an email address on the non-alphanumeric characters.
e.g. foo@bar.com => ['foo', 'bar', 'com']
"""
return senderRE.findall(emailString)
class _TrainingInstruction(item.Item):
"""
Represents a single user-supplied instruction to teach the spam classifier
something.
"""
message = attributes.reference()
spam = attributes.boolean()
_TrainingInstructionSource = batch.processor(_TrainingInstruction)
class _DistinctMessageSourceValue(item.Item):
"""
Stateful tracking of distinct values for L{_MessageSourceValue.value}.
"""
value = attributes.text(doc="""
The distinct value this item represents.
""", indexed=True, allowNone=False)
class _MessageSourceValue(item.Item):
"""
Value of the message 'source' attribute.
This class is an unfortunate consequence of SQLite's query optimizer
limitations. In particular, if the 'source' field of the message could be
indexed, this would not be necessary.
"""
value = attributes.text(
doc="""
The name of the message source.
""")
message = attributes.reference(
doc="""
A reference to a L{Message} object.
""")
attributes.compoundIndex(value, message)
def _addMessageSource(store, source):
"""
Register a message source. Distinct values passed to this function will be
available from the L{getMessageSources} function.
@type source: C{unicode}
@param source: A short string describing the origin of a message. This is
typically a value from the L{Message.source} attribute.
"""
store.findOrCreate(_DistinctMessageSourceValue, value=source)
def _associateMessageSource(message, source):
"""
Associate a message object with a source.
"""
_addMessageSource(message.store, source)
_MessageSourceValue(value=source,
message=message,
store=message.store)
def getMessageSources(store):
"""
Retrieve distinct message sources known by the given database.
@return: A L{axiom.store.ColumnQuery} sorted lexicographically ascending of
message sources. No message source will appear more than once.
"""
return store.query(
_DistinctMessageSourceValue,
sort=_DistinctMessageSourceValue.value.ascending).getColumn("value")
class MailboxSelector(object):
"""
A mailbox selector is a view onto a user's mailbox.
It is a wrapper around a store which contains some Message objects. In
order to get a set of messages similar to that which can be selected in the
Quotient inbox view, create one of these objects and iterate it.
The default mailbox selector will yield an iterator of all messages (up to
the number specified by its limit), like so:
for messageObject in MailboxSelector(store):
...
However, that's not a very interesting query. You can 'refine' a mailbox
selector to provide only messages which meet certain criteria. For
example, to iterate the 20 oldest unread messages tagged 'ninja' in the
inbox:
ms = MailboxSelector(store)
ms.setOldestFirst()
ms.refineByStatus(UNREAD_STATUS)
ms.refineByTag(u'ninja')
ms.refineByStatus(INBOX_STATUS)
for messageObject in ms:
...
MailboxSelector objects may be made more specific through the various
"refine" methods, but may not be made more general; if you need a more
general view, just create another one. There is no special overhead to
doing so (e.g. they do not have a database-persistent component).
"""
def __init__(self, store):
"""
Create a MailboxSelector.
@param store: an axiom L{Store}, to query for messages.
"""
self.store = store
self.statuses = []
self.addresses = []
self.earlyOut = False
self.source = None
self.tag = None
self.limit = 100
self.setOldestFirst()
def setOldestFirst(self):
"""
Change this status query to provide the oldest messages, by received
time, first.
@return: None
"""
self.ascending = True
def setNewestFirst(self):
"""
        Change this status query to provide the newest messages, by received
time, first.
@return: None
"""
self.ascending = False
def setLimit(self, limit):
"""
Set the limit of the number of messages that will be returned from the
query that is performed by this MailboxSelector.
@param limit: an integer describing the maximum number of desired
results.
"""
self.limit = limit
def refineByStatus(self, statusName):
"""
Refine this query by a particular status name. This query's results will
henceforth only contain messages with the given status.
A status is a system-defined name for a particular state in the
workflow. Various statuses are defined by the *_STATUS constants in
this module.
@param statusName: a unicode string naming the status to retrieve
messages from.
"""
if isinstance(statusName, str):
statusName = statusName.decode('ascii')
self.statuses.append(statusName)
def refineByPerson(self, person):
"""
Refine this query so that it only includes messages from the given Person.
@param person: a L{xmantissa.people.Person} instance which is
associated with messages by way of one of its email addresses.
"""
for addr in person.getEmailAddresses():
self.addresses.append(addr)
if not self.addresses:
# this per
|
tfmorris/dedupe
|
dedupe/variables/exists.py
|
Python
|
mit
| 1,117
| 0.016115
|
from .base import DerivedType
from categorical import CategoricalComparator
from .categorical_type import CategoricalType
class ExistsType(CategoricalType) :
type = "Exists"
_predicate_functions = []
def __init__(self, definition) :
super(CategoricalType, self ).__init__(definition)
self.cat_comparator = CategoricalComparator([0,1])
self.higher_vars = []
for higher_var in self.cat_comparator.dummy_names :
dummy_var = DerivedType({'name' : higher_var,
'type' : 'Dummy',
'has missing' : self.has_missing})
self.higher_vars.append(dummy_var)
def comparator(self, field_1, field_2) :
if field_1 and field_2 :
return self.cat_comparator(1, 1)
elif field_1 or field_2 :
return self.cat_comparator(0, 1)
else :
return self.cat_comparator(0, 0)
# This flag tells fieldDistances in dedupe.core to pass
# missing values (None) into the comparator
comparator.missing = True
|
cyandterry/Python-Study
|
Ninja/Leetcode/12_Integer_to_Roman.py
|
Python
|
mit
| 614
| 0.003257
|
"""
Given an integer, convert it to a roman numeral.
Input is guaranteed to be within the range from 1 to 3999.
"""
class Solution:
# @return a string
def intToRoman(self, num):
digits = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD' ),
(100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
                  (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
result = ""
for digit in digits:
while num >= digit[0]:
result += digit[1]
num -= digit[0]
if num == 0:
break
return result
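# Editor's note: illustrative calls for the solution above (a sketch, not part of the
# original file).
#     Solution().intToRoman(9)     -> 'IX'
#     Solution().intToRoman(1994)  -> 'MCMXCIV'
#     Solution().intToRoman(3999)  -> 'MMMCMXCIX'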
|
GirlsCodePy/girlscode-coursebuilder
|
modules/search/search.py
|
Python
|
gpl-3.0
| 20,482
| 0.000244
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Search module that uses Google App Engine's full text search."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import gettext
import logging
import math
import mimetypes
import os
import time
import traceback
import jinja2
import messages
import resources
import webapp2
import appengine_config
from common import crypto
from common import safe_dom
from common import schema_fields
from controllers import sites
from controllers import utils
from models import config
from models import counters
from models import courses
from models import custom_modules
from models import jobs
from models import services
from models import transforms
from modules.dashboard import dashboard
from google.appengine.api import namespace_manager
from google.appengine.api import search
from google.appengine.ext import db
MODULE_NAME = 'Full Text Search'
DEPRECATED = config.ConfigProperty(
'gcb_can_index_automatically', bool, safe_dom.Text(
'This property has been deprecated; it is retained so that we '
'will not generate no-such-variable error messages for existing '
'installations that have this property set.'),
default_value=False, label='Automatically index search', deprecated=True)
SEARCH_QUERIES_MADE = counters.PerfCounter(
'gcb-search-queries-made',
'The number of student queries made to the search module.')
SEARCH_RESULTS_RETURNED = counters.PerfCounter(
'gcb-search-results-returned',
'The number of search results returned across all student queries.')
SEARCH_FAILURES = counters.PerfCounter(
'gcb-search-failures',
'The number of search failure messages returned across all student '
'queries.')
INDEX_NAME = 'gcb_search_index_loc_%s'
RESULTS_LIMIT = 10
GCB_SEARCH_FOLDER_NAME = os.path.normpath('/modules/search/')
MAX_RETRIES = 5
# Name of a per-course setting determining whether automatic indexing is enabled
AUTO_INDEX_SETTING = 'auto_index'
# I18N: Message displayed on search results page when error occurs.
SEARCH_ERROR_TEXT = gettext.gettext('Search is currently unavailable.')
class ModuleDisabledException(Exception):
"""Exception thrown when the search module is disabled."""
pass
def get_index(namespace, locale):
assert locale, 'Must have a non-null locale'
return search.Index(name=INDEX_NAME % locale, namespace=namespace)
def index_all_docs(course, incremental):
"""Index all of the docs for a given models.Course object.
Args:
course: models.courses.Course. the course to index.
incremental: boolean. whether or not to index only new or out-of-date
items.
Returns:
A dict with three keys.
'num_indexed_docs' maps to an int, the number of documents added to the
index.
    'doc_types' maps to a counter with resource types as keys mapping to the
number of that resource added to the index.
'indexing_time_secs' maps to a float representing the number of seconds
the indexing job took.
Raises:
ModuleDisabledException: The search module is currently disabled.
"""
if not custom_module.enabled:
raise ModuleDisabledException('The search module is disabled.')
start_time = time.time()
index = get_index(
course.app_context.get_namespace_name(),
course.app_context.get_current_locale())
timestamps, doc_types = (_get_index_metadata(index) if incremental
else ({}, {}))
for doc in resources.generate_all_documents(course, timestamps):
retry_count = 0
while retry_count < MAX_RETRIES:
try:
index.put(doc)
timestamps[doc.doc_id] = doc['date'][0].value
doc_types[doc.doc_id] = doc['type'][0].value
break
except search.Error, e:
if e.results[0].code == search.OperationResult.TRANSIENT_ERROR:
retry_count += 1
if retry_count >= MAX_RETRIES:
logging.error(
'Multiple transient errors indexing doc_id: %s',
doc.doc_id)
else:
logging.error('Failed to index doc_id: %s', doc.doc_id)
break
indexed_doc_types = collections.Counter()
for type_name in doc_types.values():
indexed_doc_types[type_name] += 1
return {'num_indexed_docs': len(timestamps),
'doc_types': indexed_doc_types,
'indexing_time_secs': time.time() - start_time}
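# Editor's note: a sketch (not part of the original module) of the shape of the dict
# returned by index_all_docs(); the counts shown below are hypothetical.
#     {'num_indexed_docs': 3,
#      'doc_types': collections.Counter({'lesson': 2, 'announcement': 1}),
#      'indexing_time_secs': 0.42}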
def clear_index(namespace, locale):
"""Delete all docs in the index for a given models.Course object."""
if not custom_module.enabled:
raise ModuleDisabledException('The search module is disabled.')
index = get_index(namespace, locale)
doc_ids = [document.doc_id for document in index.get_range(ids_only=True)]
total_docs = len(doc_ids)
while doc_ids:
index.delete(doc_ids)
doc_ids = [document.doc_id
for document in index.get_range(ids_only=True)]
return {'deleted_docs': total_docs}
def _get_index_metadata(index):
"""Returns dict from doc_id to timestamp and one from doc_id to doc_type."""
timestamps = []
doc_types = []
cursor = search.Cursor()
while cursor:
options = search.QueryOptions(
limit=1000,
cursor=cursor,
returned_fields=['date', 'type'])
query = search.Query(query_string='', options=options)
current_docs = index.search(query)
cursor = current_docs.cursor
for doc in current_docs:
timestamps.append((doc.doc_id, doc['date'][0].value))
doc_types.append((doc.doc_id, doc['type'][0].value))
return dict(timestamps), dict(doc_types)
def fetch(course, query_string, offset=0, limit=RESULTS_LIMIT):
"""Return an HTML fragment with the results of a search for query_string.
Args:
course: models.courses.Course. the course to search.
query_string: str. the user's specified query.
offset: int. the number of results to skip.
limit: int. the number of results to return.
Returns:
A dict with two keys.
'results' maps to an ordered list of resources.Result objects.
'total_found' maps to the total number of results in the index which
match query_string.
Raises:
ModuleDisabledException: The search module is currently disabled.
"""
if not custom_module.enabled:
raise ModuleDisabledException('The search module is disabled.')
index = get_index(
course.app_context.get_namespace_name(),
course.app_context.get_current_locale())
try:
# TODO(emichael): Don't compute these for every query
returned_fields = resources.get_returned_fields()
snippeted_fields = resources.get_snippeted_fields()
options = search.QueryOptions(
limit=limit,
offset=offset,
returned_fields=returned_fields,
number_found_accuracy=100,
snippeted_fields=snippeted_fields)
query = search.Query(query_string=query_string, options=options)
results = index.search(query)
except search.Error:
logging.info('Failed searching for: %s', query_string)
return {'results': None, 'total_found': 0}
processed_results = resources.process_results(results)
return {'results': processed_results, 'total_found': results.number_found}
class SearchHan
|
tristeen/RTS-Simulator
|
rts/mMap.py
|
Python
|
mit
| 7,527
| 0.031088
|
import mUnit
from mUnit import UNIT_NONE, UNIT_SOLDIER, UNIT_BASE, UNIT_WORKER, UNITS, UNIT_RES
from mUnit import NoneUnit
from misc import mTimer
from misc.mLogger import log
from mCamp import Camp
LENGTH, HEIGTH = 20, 20
POPULATION_LIMIT = 20
class mArray(object):
def __init__(self, length, heigth):
self.length_ = length
self.heigth_ = heigth
self.data_ = [[NoneUnit for i in xrange(length)] for j in xrange(heigth)]
def __getitem__(self, pos):
return self.data_[pos]
def to_dict(self):
_data = {}
for i in xrange(self.heigth_):
for j in xrange(self.length_):
if self.data_[i][j] != NoneUnit:
_data[(i, j)] = self.data_[i][j].to_dict()
return dict(length=self.length_, heigth=self.heigth_, data=_data)
def __len__(self):
return self.length_ * self.heigth_
class mMap(object):
def __init__(self, d):
self.camps_ = {0: Camp(0, (255, 0, 0)), 1: Camp(1, (0, 0, 255))}
self.index_ = {}
self.from_dict(d)
def from_dict(self, d):
for k, v in d.get('camps', {}).iteritems():
self.camps_[k].from_dict(v)
self.data_ = mArray(d.get('length', LENGTH), d.get('heigth', HEIGTH))
for (x, y), u in d.get('data', {}).iteritems():
_unit = mUnit.Unit(u)
self.data_[x][y] = _unit
self.index_[_unit.id] = (_unit, (x, y))
_unit.set_map(self)
def to_dict(self):
d = self.data_.to_dict()
d.update({'camps': dict((k, v.to_dict()) for k, v in self.camps_.iteritems())})
return d
def add(self, (x, y), u):
self.data_[x][y] = u
#if hasattr(u, 'camp_') and u.type not in (0, 4):
# self.camps_[u.camp_].add_unit(u)
self.index_[u.id] = (u, (x, y))
def swap(self, (src_x, src_y), (dst_x, dst_y)):
        self.data_[src_x][src_y], self.data_[dst_x][dst_y] = self.data_[dst_x][dst_y], self.data_[src_x][src_y]
self.index_[self.data_[src_x][src_y].id] = (self.data_[src_x][src_y], (src_x, src_y))
self.index_[self.data_[dst_x][dst_y].id] = (self.data_[dst_x][dst_y], (dst_x, dst_y))
def delete(self, (x, y)):
u = self.data_[x][y]
        #if hasattr(u, 'camp_') and u.type not in (0, 4):
# self.camps_[u.camp_].del_unit(u)
self.index_.pop(u.id)
self.data_[x][y] = NoneUnit
#self.data_[x][y].map_ = u.map_
def get_unit_pos(self, _unit):
if _unit.id in self.index_:
return self.index_[_unit.id][1]
log.warning('get_unit_pos out of index_')
for u, i, j in self:
if u.id == _unit.id:
return i, j
log.warning('get_unit_pos return None')
def find_empty_pos(self, (x, y), r):
empty_pos = []
for i in xrange(max(0, x - r), min(self.data_.heigth_, x + r + 1)):
for j in xrange(max(0, y - r), min(self.data_.length_, y + r + 1)):
if self.data_[i][j] == NoneUnit and (x, y) != (i, j) and self.distance((x, y), (i, j)) <= r:
empty_pos.append((i, j))
return empty_pos and empty_pos[0] or None, empty_pos
def find_nearby_enemy(self, (x, y), r, _camp):
unit_pos_ = []
for i in xrange(max(0, x - r), min(self.data_.heigth_, x + r + 1)):
for j in xrange(max(0, y - r), min(self.data_.length_, y + r + 1)):
if self.data_[i][j] != NoneUnit and (i, j) != (x, y) and self.data_[i][j].camp_ != _camp and self.distance((x, y), (i, j)) <= r:
unit_pos_.append((i, j))
return unit_pos_ and unit_pos_[0] or None, unit_pos_
def find_nearby_res(self, (x, y), r):
unit_pos_ = []
for i in xrange(max(0, x - r), min(self.data_.heigth_, x + r + 1)):
for j in xrange(max(0, y - r), min(self.data_.length_, y + r + 1)):
if self.data_[i][j].name == 'UNIT_RES' and self.data_[i][j] != NoneUnit and (i, j) != (x, y) and self.distance((x, y), (i, j)) <= r:
unit_pos_.append((i, j))
return unit_pos_ and unit_pos_[0] or None, unit_pos_
def find_nearby_base(self, (x, y), r):
unit_pos_ = []
for i in xrange(max(0, x - r), min(self.data_.heigth_, x + r + 1)):
for j in xrange(max(0, y - r), min(self.data_.length_, y + r + 1)):
                if self.data_[i][j].name == 'UNIT_BASE' and self.data_[i][j] != NoneUnit and (i, j) != (x, y) and self.distance((x, y), (i, j)) <= r:
unit_pos_.append((i, j))
return unit_pos_ and unit_pos_[0] or None, unit_pos_
def distance(self, src_pos, dst_pos):
return abs(src_pos[0] - dst_pos[0]) + abs(src_pos[1] - dst_pos[1])
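    # Editor's note (illustrative, not part of the original file): distance() is the
    # Manhattan metric used by the find_nearby_* searches above, e.g.
    #     self.distance((0, 0), (2, 3)) == 5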
def get_unit(self, _id):
if _id in self.index_:
return self.index_[_id][0]
log.warning('get_unit out of index_')
for u, x, y in self:
if u.id == _id:
return u
log.warning('get_unit return None')
def get_unit_num(self):
num = [0, 0]
for u, _, _ in self:
if u.type in (0, 4):
continue
num[u.camp_] += 1
return num
def get_population_num(self):
num = [0, 0]
for u, _, _ in self:
if u.type in (0, 4):
continue
num[u.camp_] += u.__dict__.get('population', 0)
return num
def population_check(self, unit_type, camp):
unit = mUnit.Unit(unit_type)
population_num = self.get_population_num()[camp]
if population_num + unit.__dict__.get('population', 0) >= POPULATION_LIMIT:
return False
return True
def desc(self):
desc = []
scores = {}
for camp_id, camp in self.camps_.iteritems():
desc.append('CAMP ID: %d' % camp_id)
score, (res_score, hp_score, attacker_score) = self.calc_score(camp_id)
scores[camp_id] = score
desc.append('SCORE: %.1f, (%.2f, %.0f, %.0f)' % (score, res_score, hp_score, attacker_score))
desc.append('RES NUM: %d' % camp.res_)
stat = dict((i['name'], 0) for _, i in mUnit.UNITS.iteritems() if i['type'])
for u, _, _ in self:
if u.camp_ != camp_id:
continue
if u.name not in stat:
continue
stat[u.name] += 1
for name, num in stat.iteritems():
desc.append('%s: %d' % (name, num))
desc.append('')
desc.append('SCORE DIFF: %.1f' % (scores[0] - 1.5 * scores[1]))
return desc
def score(self):
scores = [0.0, 0.0]
#cfg = {1: 2, 2: 10, 3: 1}
for u, _, _ in self:
if u.type in (0, 4):
continue
#scores[u.camp_] += cfg[u.type]
scores[u.camp_] += u.price
for camp_id, camp in self.camps_.iteritems():
scores[camp_id] += camp.res_
return scores
def calc_score(self, camp):
#res
res = self.camps_[camp].res_
unit_res, unit_price = 0, 0
#res inc
#1. harvestable unit num
#2. returnbackable unit num
harvestable_num = 0
returnbackable_num = 0
#attack
attacker_score = 0
attack_power_sum = 0
#hp
hp_score = 0
#unit score
unit_score = 0
for u, _, _ in self:
if u.camp_ != camp:
continue
if u.can_harvest():
harvestable_num += u.harvest_count
if u.can_return():
returnbackable_num += u.harvest_count
if u.can_attack():
attacker_score += u.attack_power
attack_power_sum += u.attack_power
elif hasattr(u, 'attack_power'):
attack_power_sum += 0.5 * u.attack_power
#if u.type not in (0, 4):
if u.type not in (0, 2, 4):
hp_score += u.hp_
unit_price += u.price
if u.res_:
unit_res += u.res_
res_score = 1.2 * res + 0.8 * unit_res + 0.05 * (harvestable_num + returnbackable_num)
hp_score = 1.5 * hp_score + 1.2 * unit_price
attacker_score = 0.4 * attacker_score + attack_power_sum
return res_score + hp_score + attacker_score, (res_score, hp_score, attacker_score)
def __iter__(self):
for i in xrange(self.data_.heigth_):
for j in xrange(self.data_.length_):
yield self.data_[i][j], i, j
def __getitem__(self, pos):
return self.data_[pos]
def __repr__(self):
rows = []
for i in xrange(self.data_.heigth_):
rows.append(''.join(map(lambda x: x.to_str(), self.data_[i])))
return '\n'.join(rows)
def cleanup(self):
for u, i, j in self:
if u.dying_:
self.delete((i, j))
|
tomkralidis/geonode
|
geonode/base/migrations/0040_merge_20200321_2245.py
|
Python
|
gpl-3.0
| 263
| 0
|
# Generated by Django 2.2.11 on 2020-03-21 22:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
        ('base', '0038_delete_backup'),
('base', '0039_auto_20200321_1338'),
]
operations = [
]
|
ysig/BioClassSim
|
source/graph/__init__.py
|
Python
|
apache-2.0
| 29
| 0
|
from proximityGraph import *
| |
fozzysec/tieba-keyword-spider
|
tieba/items.py
|
Python
|
bsd-3-clause
| 730
| 0.00274
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class TiebaItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
    pass
class ThreadItem(scrapy.Item):
url = scrapy.Field()
title = scrapy.Field()
preview = scrapy.Field()
author = scrapy.Field()
tieba = scrapy.Field()
    date = scrapy.Field()
keywords = scrapy.Field()
class NoneItem(scrapy.Item):
url = scrapy.Field()
title = scrapy.Field()
preview = scrapy.Field()
author = scrapy.Field()
tieba = scrapy.Field()
date = scrapy.Field()
keywords = scrapy.Field()
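# Editor's note: an illustrative sketch, not part of the original project, showing how
# a spider callback might populate ThreadItem; all field values below are hypothetical.
def _example_thread_item():
    item = ThreadItem()
    item['url'] = 'http://tieba.baidu.com/p/123456'
    item['title'] = 'example thread title'
    item['preview'] = 'first few words of the opening post'
    item['author'] = 'example_user'
    item['tieba'] = 'python'
    item['date'] = '2017-01-01 12:00'
    item['keywords'] = ['python']
    return item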
|
gibiansky/tensorflow
|
tensorflow/contrib/rnn/python/ops/rnn_cell.py
|
Python
|
apache-2.0
| 52,642
| 0.005813
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat_v2(sharded_variable, 0, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
dtype=dtype))
return shards
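# Editor's note: a worked sketch of the sharding arithmetic above (not part of the
# original file). Splitting a [10, 8] weight matrix into num_shards=3 gives
# unit_shard_size = floor(10 / 3) = 3 and remaining_rows = 1, so the shard shapes are
# [4, 8], [3, 8] and [3, 8]; _get_concat_variable() later concatenates them on axis 0.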
class CoupledInputForgetGateLSTMCell(rnn_cell.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The coupling of input and forget gate is based on:
http://arxiv.org/pdf/1503.04069.pdf
Greff et al. "LSTM: A Search Space Odyssey"
The class uses optional peep-hole connections, and an optional projection
layer.
"""
def __init__(self, num_units, use_peepholes=False,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=1, num_proj_shards=1,
forget_bias=1.0, state_is_tuple=False,
activation=math_ops.tanh):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
if num_proj:
self._state_size = (
rnn_cell.LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
rnn_cell.LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
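  # Editor's note: illustrative construction (a sketch, not part of the original file).
  #     cell = CoupledInputForgetGateLSTMCell(num_units=128, num_proj=64,
  #                                           use_peepholes=True, state_is_tuple=True)
  # With num_proj set, output_size is 64 and state_size is LSTMStateTuple(128, 64).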
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
scope: VariableScope for the created subgraph; defaults to "LSTMCell".
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(scope or "coupled_input_forget_gate_lstm_cell",
initializer=self._initializer):
concat_w = _get_concat_variable(
"W", [input_size.value + num_proj, 3 * self._num_units],
dtype, self._num_unit_
|
Mause/resumable
|
main.py
|
Python
|
mit
| 673
| 0
|
from resumable import split, rebuild
import requests
def get(s):
return s
@rebuild
def example(_):
print('this is a good start')
value = split(requests.get, 'first')('http://ms.mause.me')
    print(value.text)
value = split(lambda: 'hello', 'second')()
print('hello', value)
split(print, 'derp')()
a, b, c = split(get, 'multiple')('abc')
print(a, b, c)
return split(get)('otherworldly')
def main():
arg = None
for name, func in example.items():
if func.__code__.co_argcount == 0:
arg = func()
else:
arg = func(arg)
print(arg)
if __name__ == '__main__':
main()
|
dcdanko/pyarchy
|
py_archy/py_archy.py
|
Python
|
mit
| 1,433
| 0.010468
|
# -*- coding: utf-8 -*-
def archy( obj, prefix='', unicode=True):
def chr(s):
chars = {'\u000d' : '\n',
'\u2502' : '|',
'\u2514' : '`',
'\u251c' : '+',
'\u2500' : '-',
'\u252c' : '-'}
if not unicode:
return chars[s]
return s
if type(obj) == str:
obj = { 'label': obj}
nodes = []
if 'nodes' in obj:
nodes = obj['nodes']
lines = ''
if 'label' in obj:
lines = obj['label']
lines = lines.split('\n')
splitter = '\n' + prefix + ' '
if len(nodes) > 0:
        splitter = '\n' + prefix + chr('\u2502') + ' '
def mapNodes(nodes):
        out = []
for i, node in enumerate(nodes):
last = i == len(nodes) -1
more = (type(node) != str) and ('nodes' in node) and len(node['nodes'])
prefix_ = prefix + chr('\u2502') + ' '
if last:
prefix_ = prefix + ' '
outs = (prefix +
(chr('\u2514') if last else chr('\u251c')) + chr('\u2500') +
(chr('\u252c') if more else chr('\u2500')) + ' ' +
archy(node, prefix=prefix_, unicode=unicode)[len(prefix) + 2:])
out.append(outs)
return ''.join(out)
out = prefix + splitter.join(lines) + '\n' + mapNodes(nodes)
return out
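# Editor's note: illustrative call (a sketch, not part of the original module).
#     print(archy({'label': 'root',
#                  'nodes': ['leaf one',
#                            {'label': 'branch', 'nodes': ['leaf two']}]}))
# would print a box-drawing tree along the lines of:
#     root
#     ├── leaf one
#     └─┬ branch
#       └── leaf two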
|
turbokongen/home-assistant
|
tests/components/philips_js/conftest.py
|
Python
|
apache-2.0
| 1,756
| 0.001708
|
"""Standard setup for tests."""
from unittest.mock import Mock, patch
from pytest import fixture
from homeassistant import setup
from homeassistant.components.philips_js.const import DOMAIN
from . import MOCK_CONFIG, MOCK_ENTITY_ID, MOCK_NAME, MOCK_SERIAL_NO, MOCK_SYSTEM
from tests.common import MockConfigEntry, mock_device_registry
@fixture(autouse=True)
async def setup_notification(hass):
    """Configure notification system."""
    await setup.async_setup_component(hass, "persistent_notification", {})
@fixture(autouse=True)
def mock_tv():
"""Disable component actual use."""
tv = Mock(autospec="philips_js.PhilipsTV")
tv.sources = {}
tv.channels = {}
tv.system = MOCK_SYSTEM
with patch(
"homeassistant.components.philips_js.config_flow.PhilipsTV", return_value=tv
), patch("homeassistant.components.philips_js.PhilipsTV", return_value=tv):
yield tv
@fixture
async def mock_config_entry(hass):
"""Get standard player."""
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, title=MOCK_NAME)
config_entry.add_to_hass(hass)
return config_entry
@fixture
def mock_device_reg(hass):
"""Get standard device."""
return mock_device_registry(hass)
@fixture
async def mock_entity(hass, mock_device_reg, mock_config_entry):
"""Get standard player."""
assert await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
return MOCK_ENTITY_ID
@fixture
def mock_device(hass, mock_device_reg, mock_entity, mock_config_entry):
"""Get standard device."""
return mock_device_reg.async_get_or_create(
config_entry_id=mock_config_entry.entry_id,
identifiers={(DOMAIN, MOCK_SERIAL_NO)},
)
|
marcgibbons/drf_signed_auth
|
tests/settings.py
|
Python
|
bsd-2-clause
| 275
| 0
|
SECRET_KEY = 'fake-key'
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
    'django.contrib.sessions',
'tests',
'tests.unit'
]
ROOT_URLCONF = 'tests.integration.urls'
DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}}
|
seekheart/coder_api
|
tests/app_test.py
|
Python
|
mit
| 5,800
| 0
|
import base64
import json
import unittest
from app import app
class AppTest(unittest.TestCase):
def setUp(self):
"""Setup method for spinning up a test instance of app"""
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
self.app = app.test_client()
self.app.testing = True
self.authorization = {
'Authorization': "Basic {user}".format(
user=base64.b64encode(b"test:asdf").decode("ascii")
)
}
self.content_type = 'application/json'
self.dummy_name = 'dummy'
self.dummy_user = json.dumps(
{'username': 'dummy', 'languages': ['testLang']}
)
self.dummy_lang = json.dumps(
{'name': 'dummy', 'users': ['No one']}
)
def tearDown(self):
"""Teardown method to cleanup after each test"""
self.app.delete('/users/dummy', headers=self.authorization)
self.app.delete('/languages/dummy', headers=self.authorization)
# Control test
def test_home(self):
"""Unit test for home"""
result = self.app.get('/')
self.assertEquals(result.status_code, 200)
# Authorization test
def test_auth_401(self):
"""Unit test for forbidden access"""
result = self.app.get('/users')
self.assertEquals(result.status_code, 401)
# Users tests
def test_users(self):
"""Unit test for getting all users"""
result = self.app.get('/users', headers=self.authorization)
self.assertEquals(result.status_code, 200)
def test_get_single_user(self):
"""Unit test for getting a user"""
result = self.app.get('/users/seekheart', headers=self.authorization)
self.assertEquals(result.status_code, 200)
def test_post_single_user(self):
"""Unit test for adding users"""
result = self.app.post('/users',
data=self.dummy_user,
headers=self.authorization,
content_type=self.content_type)
self.assertEquals(result.status_code, 201)
def test_post_bad_user(self):
"""Unit test for adding an existing user"""
bad_user = {'usr': self.dummy_name, 'lang': 'x'}
result = self.app.post('/users', data=bad_user,
headers=self.authorization,
content_type=self.content_type)
self.assertEquals(result.status_code, 400)
def test_post_duplicate_user(self):
"""Unit test for adding a duplicate user"""
self.app.post('/users',
data=self.dummy_user,
headers=self.authorization,
content_type=self.content_type)
result = self.app.post('/users',
data=self.dummy_user,
headers=self.authorization,
                               content_type=self.content_type)
self.assertEquals(result.status_code, 409)
    def test_patch_single_user(self):
"""Unit test for editing a user"""
self.app.post('/users', data=self.dummy_user,
headers=self.authorization,
content_type=self.content_type)
result = self.app.patch('/users/dummy', data=self.dummy_user,
headers=self.authorization,
content_type=self.content_type)
self.assertEquals(result.status_code, 204)
# Language tests
def test_languages(self):
"""Unit test for getting all languages"""
result = self.app.get('/languages', headers=self.authorization)
self.assertEquals(result.status_code, 200)
    def test_get_single_language(self):
        """Unit test for getting a language"""
result = self.app.get('/languages/python', headers=self.authorization)
self.assertEquals(result.status_code, 200)
def test_post_single_language(self):
"""Unit test for adding languages"""
result = self.app.post('/languages',
headers=self.authorization,
data=self.dummy_lang,
content_type=self.content_type)
self.assertEquals(result.status_code, 201)
def test_post_bad_language(self):
"""Unit test for adding an existing language"""
bad_language = {'usr': self.dummy_name, 'lang': 'x'}
result = self.app.post('/languages', data=bad_language,
headers=self.authorization,
content_type=self.content_type)
self.assertEquals(result.status_code, 400)
def test_post_duplicate_language(self):
"""Unit test for adding a duplicate language"""
self.app.post('/languages',
headers=self.authorization,
data=self.dummy_lang,
content_type=self.content_type)
result = self.app.post('/languages',
headers=self.authorization,
data=self.dummy_lang,
content_type=self.content_type)
self.assertEquals(result.status_code, 409)
def test_patch_single_language(self):
"""Unit test for editing a language"""
self.app.post('/languages', data=self.dummy_lang,
headers=self.authorization,
content_type=self.content_type)
result = self.app.patch('/languages/dummy', data=self.dummy_lang,
headers=self.authorization,
content_type=self.content_type)
self.assertEquals(result.status_code, 204)
|
jamestwhedbee/diogenes
|
tests/test_modify.py
|
Python
|
mit
| 8,979
| 0.012585
|
import unittest
import numpy as np
from collections import Counter
from diogenes.utils import remove_cols,cast_list_of_list_to_sa
import utils_for_tests
import unittest
import numpy as np
from numpy.random import rand
import diogenes.read
import diogenes.utils
from diogenes.modify import remove_cols_where
from diogenes.modify import col_val_eq
from diogenes.modify import col_val_eq_any
from diogenes.modify import col_fewer_than_n_nonzero
from diogenes.modify import where_all_are_true
from diogenes.modify import choose_rows_where
from diogenes.modify import remove_rows_where
from diogenes.modify import row_val_eq
from diogenes.modify import row_val_lt
from diogenes.modify import row_val_between
from diogenes.modify import combine_cols
from diogenes.modify import combine_sum
from diogenes.modify import combine_mean
from diogenes.modify import label_encode
from diogenes.modify import generate_bin
from diogenes.modify import normalize
from diogenes.modify import replace_missing_vals
from diogenes.modify import distance_from_point
class TestModify(unittest.TestCase):
def test_col_val_eq(self):
M = cast_list_of_list_to_sa(
[[1,2,3], [1,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_val_eq, 'vals': 1}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_col_val_eq_any(self):
M = cast_list_of_list_to_sa(
[[1,2,3], [1,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_val_eq_any, 'vals': None}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_col_fewer_than_n_nonzero(self):
M = cast_list_of_list_to_sa(
[[0,2,3], [0,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_fewer_than_n_nonzero, 'vals': 2}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_label_encoding(self):
M = np.array(
[('a', 0, 'Martin'),
('b', 1, 'Tim'),
('b', 2, 'Martin'),
('c', 3, 'Martin')],
dtype=[('letter', 'O'), ('idx', int), ('name', 'O')])
ctrl = np.array(
[(0, 0, 0),
(1, 1, 1),
(1, 2, 0),
(2, 3, 0)],
dtype=[('letter', int), ('idx', int), ('name', int)])
ctrl_classes = {'letter': np.array(['a', 'b', 'c']),
'name': np.array(['Martin', 'Tim'])}
new_M, classes = label_encode(M)
self.assertTrue(np.array_equal(ctrl, new_M))
self.assertEqual(ctrl_classes.keys(), classes.keys())
for key in ctrl_classes:
self.assertTrue(np.array_equal(ctrl_classes[key], classes[key]))
def test_replace_missing_vals(self):
M = np.array([('a', 0, 0.0, 0.1),
('b', 1, 1.0, np.nan),
('', -999, np.nan, 0.0),
('d', 1, np.nan, 0.2),
('', -999, 2.0, np.nan)],
dtype=[('str', 'O'), ('int', int), ('float1', float),
('float2', float)])
ctrl = M.copy()
ctrl['float1'] = np.array([0.0, 1.0, -1.0, -1.0, 2.0])
ctrl['float2'] = np.array([0.1, -1.0, 0.0, 0.2, -1.0])
        res = replace_missing_vals(M, 'constant', constant=-1.0)
        self.assertTrue(np.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['int'] = np.array([100, 1, -999, 1, -999])
ctrl['float1'] = np.array([100, 1.0, np.nan, np.nan, 2.0])
ctrl['float2'] = np.array([0.1, np.nan, 100, 0.2, np.nan])
res = replace_missing_vals(M, 'constant', missing_val=0, constant=100)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['int'] = np.array([0, 1, 1, 1, 1])
res = replace_missing_vals(M, 'most_frequent', missing_val=-999)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['float1'] = np.array([0.0, 1.0, 1.0, 1.0, 2.0])
ctrl['float2'] = np.array([0.1, 0.1, 0.0, 0.2, 0.1])
res = replace_missing_vals(M, 'mean', missing_val=np.nan)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
def test_generate_bin(self):
M = [1, 1, 1, 3, 3, 3, 5, 5, 5, 5, 2, 6]
ctrl = [0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 0, 3]
self.assertTrue(np.array_equal(ctrl, generate_bin(M, 3)))
M = np.array([0.1, 3.0, 0.0, 1.2, 2.5, 1.7, 2])
ctrl = [0, 3, 0, 1, 2, 1, 2]
self.assertTrue(np.array_equal(ctrl, generate_bin(M, 3)))
def test_where_all_are_true(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
col_names = ['heigh','weight', 'age']
lables= [0,0,1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
arguments = [{'func': row_val_eq, 'col_name': 'heigh', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = where_all_are_true(
M,
arguments)
ctrl = np.array([True, False, False])
self.assertTrue(np.array_equal(res, ctrl))
def test_choose_rows_where(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
col_names = ['heigh','weight', 'age']
lables= [0,0,1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
arguments = [{'func': row_val_eq, 'col_name': 'heigh', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = choose_rows_where(
M,
arguments)
ctrl = cast_list_of_list_to_sa([[1,2,3]],col_names=['heigh','weight', 'age'])
self.assertTrue(np.array_equal(res, ctrl))
def test_remove_rows_where(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
col_names = ['heigh','weight', 'age']
lables= [0,0,1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
arguments = [{'func': row_val_eq, 'col_name': 'heigh', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = remove_rows_where(
M,
arguments)
ctrl = cast_list_of_list_to_sa([[2,3,4],[3,4,5]],col_names=['heigh','weight', 'age'])
self.assertTrue(np.array_equal(res, ctrl))
def test_combine_cols(self):
M = np.array(
[(0, 1, 2), (3, 4, 5), (6, 7, 8)],
dtype=[('f0', float), ('f1', float), ('f2', float)])
ctrl_sum = np.array([1, 7, 13])
ctrl_mean = np.array([1.5, 4.5, 7.5])
res_sum = combine_cols(M, combine_sum, ('f0', 'f1'))
res_mean = combine_cols(M, combine_mean, ('f1', 'f2'))
self.assertTrue(np.array_equal(res_sum, ctrl_sum))
self.assertTrue(np.array_equal(res_mean, ctrl_mean))
def test_normalize(self):
col = np.array([-2, -1, 0, 1, 2])
res, mean, stddev = normalize(col, return_fit=True)
self.assertTrue(np.allclose(np.std(res), 1.0))
self.assertTrue(np.allclose(np.mean(res), 0.0))
col = np.arange(10)
res
|
marcelometal/holmes-api
|
holmes/search_providers/noexternal.py
|
Python
|
mit
| 2,143
| 0.0014
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from holmes.search_providers import SearchProvider
from holmes.models.review import Review
from tornado.concurrent import return_future
class NoExternalSearchProvider(SearchProvider):
def __init__(self, config, db, authnz_wrapper=None, io_loop=None):
self.db = db
def index_review(self, review):
pass
@return_future
def get_by_violation_key_name(self, key_id, current_page=1, page_size=10, domain=None, page_filter=None, callback=None):
reviews = Review.get_by_violation_key_name(
db=self.db,
key_id=key_id,
current_page=current_page,
page_size=page_size,
domain_filter=domain.name if domain else None,
page_filter=page_filter,
)
reviews_data = []
for item in reviews:
reviews_data.append({
'uuid': item.review_uuid,
'page': {
                    'uuid': item.page_uuid,
'url': item.url,
'completedAt': item.completed_date
},
'domain': item.domain_name,
})
callback({
'reviews': reviews_data
})
@return_future
def get_domain_active_reviews(self, domain, current_page=1, page_size=10, page_filter=None, callback=None):
reviews = domain.get_active_reviews(
db=self.db,
page_filter=page_filter,
current_page=current_page,
page_size=page_size,
)
pages = []
for page in reviews:
pages.append({
'url': page.url,
'uuid': str(page.uuid),
'violationCount': page.violations_count,
'completedAt': page.last_review_date,
'reviewId': str(page.last_review_uuid)
})
callback({'pages': pages})
@classmethod
def new_instance(cls, config):
return NoExternalSearchProvider(config)
@classmethod
def main(cls):
pass
if __name__ == '__main__':
NoExternalSearchProvider.main()
|
SwissTPH/openhds-sim
|
submission.py
|
Python
|
gpl-2.0
| 13,836
| 0.006577
|
#!/usr/bin/env python
"""Test form submission"""
__email__ = "nicolas.maire@unibas.ch"
__status__ = "Alpha"
from lxml import etree
import urllib2
import uuid
import logging
DEVICE_ID = "8d:77:12:5b:c1:3c"
def submit_data(data, url):
"""Submit an instance to ODKAggregate"""
r = urllib2.Request(url, data=data, headers={'Content-Type': 'application/xml'})
try:
u = urllib2.urlopen(r)
response = u.read()
return response
except urllib2.HTTPError as e:
print(e.read())
print(e.code)
print(e.info())
print(data)
def submit_from_instance_file(filename, aggregate_url):
"""Read an instance from a file and submit to ODKAggregate"""
f = open(filename, 'r')
data = f.read()
f.close()
submit_data(data, aggregate_url)
def submit_from_dict(form_dict, aggregate_url):
"""Create an instance from a dict and submit to ODKAggregate"""
root = etree.Element(form_dict["id"], id=form_dict["id"])
#TODO: deviceid should be added here, but what spelling , Id or id?
dev_id = etree.SubElement(root, "deviceid")
dev_id.text = DEVICE_ID
meta = etree.SubElement(root, "meta")
inst_id = etree.SubElement(meta, "instanceID")
inst_id.text = str(uuid.uuid1())
p_b_m = etree.SubElement(root, "processedByMirth")
p_b_m.text = '0'
etree.SubElement(root, "start")
for field in form_dict["fields"]:
if type(field[1]) == list:
el_par = etree.SubElement(root, field[0])
for sub_field in field[1]:
el = etree.SubElement(el_par, sub_field[0])
el.text = sub_field[1]
else:
el = etree.SubElement(root, field[0])
el.text = field[1]
logging.debug(form_dict)
submit_data(etree.tostring(root), aggregate_url)
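# Editor's note: a sketch (not part of the original module) of the dict shape consumed
# by submit_from_dict(); plain pairs become leaf elements and nested lists become
# nested elements. All values shown below are hypothetical.
#     form_dict = {"id": "example_form",
#                  "fields": [["visitDate", "2014-01-01"],
#                             ["openhds", [["fieldWorkerId", "FW1"],
#                                          ["locationId", "LOC1"]]]]}
# serialises (besides the generated deviceid/meta/processedByMirth/start elements) to:
#     <example_form id="example_form">
#       <visitDate>2014-01-01</visitDate>
#       <openhds><fieldWorkerId>FW1</fieldWorkerId><locationId>LOC1</locationId></openhds>
#     </example_form>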
def submit_baseline_individual(start, end, location_id, visit_id, fieldworker_id, individual_id, mother_id, father_id,
first_name, middle_name, last_name, gender, date_of_birth, partial_date,
date_of_visit, aggregate_url):
"""Register an individual during baseline"""
# dateOfMigration is date of visit by definition
form_dict = {"id": "baseline",
"fields": [["start", start], ["end", end],
["openhds", [["migrationType", "BASELINE"], ["locationId", location_id],
["visitId", visit_id], ["fieldWorkerId", fieldworker_id]]],
["individualInfo", [["individualId", individual_id], ["motherId", mother_id],
["fatherId", father_id], ["firstName", first_name],
["middleName", middle_name], ["lastName", last_name],
["gender", gender], ["religion", "unk"], ["dateOfBirth", date_of_birth],
["partialDate", partial_date]]],
["dateOfMigration", date_of_visit], ["warning", ""], ["visitDate", date_of_visit],
["majo4mo", "yes"], ["spelasni", "yes"]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_in_migration(start, end, migration_type, location_id, visit_id, fieldworker_id, individual_id, mother_id,
father_id, first_name, middle_name, last_name, gender, date_of_birth, partial_date,
date_of_migration, aggregate_url):
"""Register an inmigration"""
form_dict = {"id": "in_migration",
"fields": [["start", start], ["end", end],
["openhds", [["visitId", visit_id], ["fieldWorkerId", fieldworker_id],
["migrationType", migration_type], ["locationId", location_id]]],
["individualInfo", [["individualId", individual_id], ["motherId", mother_id],
["fatherId", father_id], ["firstName", first_name],
["middleName", middle_name], ["lastName", last_name],
["gender", gender], ["dateOfBirth", date_of_birth],
["partialDate", partial_date]]],
["dateOfMigration", date_of_migration], ["warning", ""], ["origin", "other"],
["reason", "NA"], ["maritalChange", "NA"], ["reasonOther", "NA"], ["movedfrom", "NA"],
["shortorlongstay", "NA"]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_death_registration(start, individual_id, first_name, last_name, field_worker_id, visit_id, date_of_death,
place_of_death, place_of_death_other, end, aggregate_url):
form_dict = {"id": "death_registration",
"fields": [["start", start], ["end", end],
["openhds", [["fieldWorkerId", field_worker_id], ["visitId", visit_id],
["individualId", individual_id], ["firstName", first_name],
["lastName", last_name]]],
["dateOfDeath", date_of_death], ["diagnoseddeath", ''], ["whom", ''],
["causeofdeathdiagnosed", ''], ["causofdeathnotdiagnosed", ''],
["placeOfDeath", place_of_death], ["placeOfDeathOther", place_of_death_other],
["causeOfDeath", '']
]}
return submit_from_dict(form_dict, aggregate_url)
def submit_death_of_hoh_registration(start, end, individual_id, household_id, new_hoh_id, field_worker_id, gender,
death_within_dss, death_village, have_death_certificate, visit_id, cause_of_death,
date_of_death, place_of_death, place_of_death_other, aggregate_url):
#TODO: update form fields to lastest
form_dict = {"id": "DEATHTOHOH",
"fields": [["start", start], ["end", end],
["openhds", [["visitId", visit_id], ["fieldWorkerId", field_worker_id],
["householdId", household_id], ["individualId", individual_id],
["firstName", "first"], ["lastName", "last"], ["new_hoh_id", new_hoh_id]]],
["gender", gender], ["deathWithinDSS", death_within_dss], ["deathVillage", death_village],
["haveDeathCertificate", have_death_certificate],
                            ["causeOfDeath", cause_of_death], ["dateOfDeath", date_of_death],
["placeOfDeath", place_of_death], ["placeOfDeathOther", place_of_death_other],
]}
return submit_from_dict(form_dict, aggregate_url)
def submit_location_registration(start, hierarchy_id, fieldworker_id, location_id, location_name, ten_cell_leader,
location_type, geopoint, end, aggregate_url):
form_dict = {"id": "location_registration",
"fields": [["start", start], ["end", end],
["openhds", [["fieldWorkerId", fieldworker_id], ["hierarchyId", hierarchy_id],
["locationId", location_id]]],
["locationName", location_name], ["tenCellLeader", ten_cell_leader],
["locationType", location_type], ["geopoint", geopoint]]}
return submit_from_dict(form_dict, aggregate_url)
def submit_membership(start, individual_id, household_id, fieldworker_id, relationship_to_group_head, start_date, end,
aggregate_url):
form_dict = {"id": "membership",
"fields": [["start", start], ["end", end],
["openhds", [["householdId", household_id], ["fieldWorkerId", fieldworker_id],
["individualId", individual_id]]],
["relation
|
Azure/azure-sdk-for-python
|
sdk/notificationhubs/azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/aio/_configuration.py
|
Python
|
mit
| 3,400
| 0.004706
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class NotificationHubsManagementClientConfiguration(Configuration):
"""Configuration for NotificationHubsManagementClient.
    Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(NotificationHubsManagementClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2017-04-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-notificationhubs/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
|