text stringlengths 4 1.02M | meta dict |
|---|---|
from __future__ import absolute_import
import weakref
from defcon.objects.base import BaseDictCompareObject
from defcon.objects.color import Color
from defcon.tools.identifiers import makeRandomIdentifier
class Guideline(BaseDictCompareObject):
    """
    This object represents a guideline.

    **This object posts the following notifications:**

    - Guideline.Changed
    - Guideline.XChanged
    - Guideline.YChanged
    - Guideline.AngleChanged
    - Guideline.NameChanged
    - Guideline.ColorChanged
    - Guideline.IdentifierChanged

    During initialization a guideline dictionary, following the format defined
    in the UFO spec, can be passed. If so, the new object will be populated
    with the data from the dictionary.
    """

    changeNotificationName = "Guideline.Changed"
    representationFactories = {}

    def __init__(self, font=None, glyph=None, guidelineDict=None):
        # Parent references are cached as weakrefs (or left as None) so the
        # guideline never keeps its font/glyph alive; see the properties below.
        self._font = None
        self._layerSet = None
        self._layer = None
        self._glyph = None
        if font is not None:
            self.font = font
        if glyph is not None:
            self.glyph = glyph
        super(Guideline, self).__init__()
        self.beginSelfNotificationObservation()
        self._dirty = False
        if guidelineDict is not None:
            # Populate from a UFO-style guideline dict. Each assignment goes
            # through the corresponding property, so the per-attribute
            # notifications fire during initialization as well.
            self.x = guidelineDict.get("x")
            self.y = guidelineDict.get("y")
            self.angle = guidelineDict.get("angle")
            self.name = guidelineDict.get("name")
            self.color = guidelineDict.get("color")
            self.identifier = guidelineDict.get("identifier")

    # --------------
    # Parent Objects
    # --------------

    def getParent(self):
        # Return the immediate parent: the glyph for a glyph guideline,
        # otherwise the font for a font-info guideline, else None.
        if self._glyph is not None:
            return self.glyph
        elif self._font is not None:
            return self.font
        return None

    def _get_font(self):
        # Resolve lazily: a glyph guideline reaches its font through the
        # layer set; the result is cached as a weakref for later calls.
        font = None
        if self._font is None:
            layerSet = self.layerSet
            if layerSet is not None:
                font = layerSet.font
                if font is not None:
                    self._font = weakref.ref(font)
        else:
            font = self._font()
        return font

    def _set_font(self, font):
        # A parent may be assigned only once, and font/glyph are exclusive.
        assert self._font is None
        assert self._glyph is None
        if font is not None:
            font = weakref.ref(font)
        self._font = font

    font = property(_get_font, _set_font, doc="The :class:`Font` that this object belongs to.")

    def _get_layerSet(self):
        # Lazily derived from the layer, cached as a weakref.
        layerSet = None
        if self._layerSet is None:
            layer = self.layer
            if layer is not None:
                layerSet = layer.layerSet
                if layerSet is not None:
                    self._layerSet = weakref.ref(layerSet)
        else:
            layerSet = self._layerSet()
        return layerSet

    layerSet = property(_get_layerSet, doc="The :class:`LayerSet` that this object belongs to (if it isn't a font info guideline).")

    def _get_layer(self):
        # Lazily derived from the glyph, cached as a weakref.
        layer = None
        if self._layer is None:
            glyph = self.glyph
            if glyph is not None:
                layer = glyph.layer
                if layer is not None:
                    self._layer = weakref.ref(layer)
        else:
            layer = self._layer()
        return layer

    layer = property(_get_layer, doc="The :class:`Layer` that this object belongs to (if it isn't a font info guideline).")

    def _get_glyph(self):
        if self._glyph is not None:
            return self._glyph()
        return None

    def _set_glyph(self, glyph):
        # A parent may be assigned only once, and font/glyph are exclusive.
        assert self._font is None
        assert self._glyph is None
        if glyph is not None:
            glyph = weakref.ref(glyph)
        self._glyph = glyph

    glyph = property(_get_glyph, _set_glyph, doc="The :class:`Glyph` that this object belongs to (if it isn't a font info guideline). This should not be set externally.")

    # ----------
    # Attributes
    # ----------

    # x

    def _get_x(self):
        return self.get("x")

    def _set_x(self, value):
        # No-op (and no notification) when the value is unchanged.
        old = self.get("x")
        if value == old:
            return
        self["x"] = value
        self.postNotification("Guideline.XChanged", data=dict(oldValue=old, newValue=value))

    x = property(_get_x, _set_x, doc="The x coordinate. Setting this will post *Guideline.XChanged* and *Guideline.Changed* notifications.")

    # y

    def _get_y(self):
        return self.get("y")

    def _set_y(self, value):
        # No-op (and no notification) when the value is unchanged.
        old = self.get("y")
        if value == old:
            return
        self["y"] = value
        self.postNotification("Guideline.YChanged", data=dict(oldValue=old, newValue=value))

    y = property(_get_y, _set_y, doc="The y coordinate. Setting this will post *Guideline.YChanged* and *Guideline.Changed* notifications.")

    # angle

    def _get_angle(self):
        return self.get("angle")

    def _set_angle(self, value):
        # No-op (and no notification) when the value is unchanged.
        old = self.get("angle")
        if value == old:
            return
        self["angle"] = value
        self.postNotification("Guideline.AngleChanged", data=dict(oldValue=old, newValue=value))

    angle = property(_get_angle, _set_angle, doc="The angle. Setting this will post *Guideline.AngleChanged* and *Guideline.Changed* notifications.")

    # name

    def _get_name(self):
        return self.get("name")

    def _set_name(self, value):
        # No-op (and no notification) when the value is unchanged.
        old = self.get("name")
        if value == old:
            return
        self["name"] = value
        self.postNotification("Guideline.NameChanged", data=dict(oldValue=old, newValue=value))

    name = property(_get_name, _set_name, doc="The name. Setting this will post *Guideline.NameChanged* and *Guideline.Changed* notifications.")

    # color

    def _get_color(self):
        return self.get("color")

    def _set_color(self, color):
        # Normalize the incoming value to a Color object (None stays None)
        # so that equal colors in different input forms compare equal.
        if color is None:
            newColor = None
        else:
            newColor = Color(color)
        oldColor = self.get("color")
        if newColor == oldColor:
            return
        self["color"] = newColor
        self.postNotification("Guideline.ColorChanged", data=dict(oldValue=oldColor, newValue=newColor))

    color = property(_get_color, _set_color, doc="The guideline's :class:`Color` object. When setting, the value can be a UFO color string, a sequence of (r, g, b, a) or a :class:`Color` object. Setting this posts *Guideline.ColorChanged* and *Guideline.Changed* notifications.")

    # ----------
    # Identifier
    # ----------

    def _get_identifiers(self):
        # The identifier registry lives on the owning glyph (preferred) or
        # font; fall back to an empty, unregistered set when orphaned.
        identifiers = None
        parent = self.glyph
        if parent is None:
            parent = self.font
        if parent is not None:
            identifiers = parent.identifiers
        if identifiers is None:
            identifiers = set()
        return identifiers

    identifiers = property(_get_identifiers, doc="Set of identifiers for the object that this guideline belongs to. This is primarily for internal use.")

    def _get_identifier(self):
        return self.get("identifier")

    def _set_identifier(self, value):
        # don't allow overwriting an existing identifier
        if self.identifier is not None:
            return
        # NOTE(review): because of the early return above, oldIdentifier is
        # always None past this point, so the "free the old identifier"
        # branch below can never fire.
        oldIdentifier = self.identifier
        if value == oldIdentifier:
            return
        # don't allow a duplicate
        identifiers = self.identifiers
        assert value not in identifiers
        # free the old identifier
        if oldIdentifier in identifiers:
            identifiers.remove(oldIdentifier)
        # store
        self["identifier"] = value
        if value is not None:
            identifiers.add(value)
        # post notifications
        self.postNotification("Guideline.IdentifierChanged", data=dict(oldValue=oldIdentifier, newValue=value))

    identifier = property(_get_identifier, _set_identifier, doc="The identifier. Setting this will post *Guideline.IdentifierChanged* and *Guideline.Changed* notifications.")

    def generateIdentifier(self):
        """
        Create a new, unique identifier and assign it to the guideline.
        This will post *Guideline.IdentifierChanged* and *Guideline.Changed* notifications.
        If an identifier is already assigned, it is kept and returned as-is.
        """
        if self.identifier is None:
            identifier = makeRandomIdentifier(existing=self.identifiers)
            self.identifier = identifier
        return self.identifier

    # ------------------------
    # Notification Observation
    # ------------------------

    def endSelfNotificationObservation(self):
        # Drop all cached parent weakrefs so the guideline releases its
        # parents once observation ends.
        # NOTE(review): _fontInfo is cleared here but never set in __init__
        # or elsewhere in this class -- confirm whether it is vestigial.
        super(Guideline, self).endSelfNotificationObservation()
        self._font = None
        self._fontInfo = None
        self._layerSet = None
        self._layer = None
        self._glyph = None
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| {
"content_hash": "b07c3b13a5eebe6eb256cbb8ba94e113",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 279,
"avg_line_length": 32.520446096654275,
"alnum_prop": 0.5983081847279378,
"repo_name": "moyogo/defcon",
"id": "2caa18c7fdd78a9bc2ef9eb4a17b34c3ac5f8753",
"size": "8748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/defcon/objects/guideline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "629945"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the "dic" project documentation.
# Commented-out settings keep their Sphinx defaults; only the values that
# differ from the defaults are assigned below.
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'dic'
copyright = u'2014, Zachary Sims'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5.2b1'
# The full version, including alpha/beta/rc tags.
release = '1.5.2b1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'dicdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'dic.tex', u'dic Documentation',
   u'Zachary Sims', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'dic', u'dic Documentation',
     [u'Zachary Sims'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'dic', u'dic Documentation',
   u'Zachary Sims', 'dic', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "f8db084e7f7c40e4b141cc60e6f11f52",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 79,
"avg_line_length": 31.602459016393443,
"alnum_prop": 0.7039294514330178,
"repo_name": "zsims/dic",
"id": "e9e7bded838e062533a632c1a339419453d578d5",
"size": "8127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "27565"
}
],
"symlink_target": ""
} |
from axiom.test.historic import stubloader
from xquotient.test.historic.stub_message1to2 import attrs
from xquotient.exmess import Message, EVER_DEFERRED_STATUS
from xquotient.mimestorage import Part
# Sentinel used to distinguish "attribute absent on the object" from a stored
# None value in getattr() below.
ignored = object()


class MessageUpgradeTest(stubloader.StubbedTest):
    """Verify the stored Message upgrade against the recorded stub data.

    NOTE(review): Python 2 code base (``iteritems``, ``assertEquals``,
    ``failIf``).
    """

    def testUpgrade(self):
        m = self.store.findUnique(Message)
        # Compare every attribute recorded in the 1->2 stub with the value on
        # the upgraded message; attributes that no longer exist are skipped.
        for (k, v) in attrs.iteritems():
            newv = getattr(m, k, ignored)
            if newv is not ignored:
                self.assertEquals(v, newv)
        # The message implementation must still be the stored MIME Part.
        self.assertIdentical(self.store.findUnique(Part), m.impl)
        # The upgraded message must not carry the "ever deferred" status.
        self.failIf(m.hasStatus(EVER_DEFERRED_STATUS))
| {
"content_hash": "b80a99aef5d7073b41e4d9b50e80379f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 39,
"alnum_prop": 0.7035256410256411,
"repo_name": "twisted/quotient",
"id": "223b0cdd95bfacd2e88d23da11a153565f1d3cbd",
"size": "624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xquotient/test/historic/test_message2to3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13968"
},
{
"name": "JavaScript",
"bytes": "354447"
},
{
"name": "Python",
"bytes": "890995"
}
],
"symlink_target": ""
} |
from networkapiclient.ApiGenericClient import ApiGenericClient
from networkapiclient.utils import build_uri_with_ids
class ApiEquipment(ApiGenericClient):
    """Client facade for the networkAPI ``api/v3/equipment`` endpoints."""

    def __init__(self, networkapi_url, user, password, user_ldap=None, log_level='INFO', request_context=None):
        """Class constructor receives parameters to connect to the networkAPI.

        :param networkapi_url: URL to access the network API.
        :param user: User for authentication.
        :param password: Password for authentication.
        """
        super(ApiEquipment, self).__init__(
            networkapi_url, user, password, user_ldap, request_context)

    def get_equipment(self, **kwargs):
        """List equipments, forwarding any query parameters to the API."""
        uri = self.prepare_url('api/v3/equipment/', kwargs)
        return super(ApiEquipment, self).get(uri)

    def search(self, **kwargs):
        """Method to search equipments based on extends search.

        :param search: Dict containing QuerySets to find equipments.
        :param include: Array containing fields to include on response.
        :param exclude: Array containing fields to exclude on response.
        :param fields: Array containing fields to override default fields.
        :param kind: Determine if result will be detailed ('detail') or basic ('basic').
        :return: Dict containing equipments
        """
        uri = self.prepare_url('api/v3/equipment/', kwargs)
        return super(ApiEquipment, self).get(uri)

    def get(self, ids, **kwargs):
        """Method to get equipments by their ids.

        :param ids: List containing identifiers of equipments
        :param include: Array containing fields to include on response.
        :param exclude: Array containing fields to exclude on response.
        :param fields: Array containing fields to override default fields.
        :param kind: Determine if result will be detailed ('detail') or basic ('basic').
        :return: Dict containing equipments
        """
        uri = build_uri_with_ids('api/v3/equipment/%s/', ids)
        return super(ApiEquipment, self).get(self.prepare_url(uri, kwargs))

    def delete(self, ids):
        """Method to delete equipments by their id's.

        :param ids: Identifiers of equipments
        :return: None
        """
        uri = build_uri_with_ids('api/v3/equipment/%s/', ids)
        return super(ApiEquipment, self).delete(uri)

    def update(self, equipments):
        """Method to update equipments.

        :param equipments: List containing equipments desired to updated
        :return: None
        """
        # The PUT URL carries the ids of every equipment in the payload,
        # joined by ';'.
        target_ids = [str(equipment.get('id')) for equipment in equipments]
        payload = {'equipments': equipments}
        return super(ApiEquipment, self).put('api/v3/equipment/%s/' %
                                             ';'.join(target_ids), payload)

    def create(self, equipments):
        """Method to create equipments.

        :param equipments: List containing equipments desired to be created on database
        :return: None
        """
        return super(ApiEquipment, self).post('api/v3/equipment/',
                                              {'equipments': equipments})
| {
"content_hash": "5724d88fe24883fcb9a99344c90305b2",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 111,
"avg_line_length": 35.797872340425535,
"alnum_prop": 0.6127786032689451,
"repo_name": "globocom/GloboNetworkAPI-client-python",
"id": "1980227b5c58eb327c8b42ea4f12c3961aefb302",
"size": "4170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networkapiclient/ApiEquipment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "186"
},
{
"name": "Makefile",
"bytes": "2840"
},
{
"name": "Python",
"bytes": "767058"
}
],
"symlink_target": ""
} |
import unittest
from command import check_parent_request
from command import check_child_id_request
from command import CheckType
import config
import mac802154
import message
import mle
import node
# Node identifiers for the simulated three-node topology used below.
LEADER = 1  # becomes the Thread leader
REED = 2    # router-eligible node (upgrades to router during the test)
SED = 3     # sleepy child (created with mode 's' and a child timeout)
class Cert_6_1_2_REEDAttach_SED(unittest.TestCase):
    """Certification test 6.1.2: a sleepy end device attaches through a REED."""

    def setUp(self):
        # Build the simulated topology; only the SED node is created in
        # sleepy mode (the second Node argument is True when i == SED).
        self.simulator = config.create_default_simulator()
        self.nodes = {}
        for i in range(1,4):
            self.nodes[i] = node.Node(i, (i == SED), simulator=self.simulator)

        # Leader: full device, can only hear the REED.
        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[REED].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        # REED: bridges LEADER and SED; router upgrade threshold 0 keeps it
        # from self-upgrading before the SED attaches.
        self.nodes[REED].set_panid(0xface)
        self.nodes[REED].set_mode('rsdn')
        self.nodes[REED].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[REED].add_whitelist(self.nodes[SED].get_addr64())
        self.nodes[REED].enable_whitelist()
        self.nodes[REED].set_router_upgrade_threshold(0)

        # SED: sleepy child that can only hear the REED.
        self.nodes[SED].set_panid(0xface)
        self.nodes[SED].set_mode('s')
        self.nodes[SED].add_whitelist(self.nodes[REED].get_addr64())
        self.nodes[SED].enable_whitelist()
        self.nodes[SED].set_timeout(config.DEFAULT_CHILD_TIMEOUT)

    def tearDown(self):
        # Stop and destroy every simulated node, then shut the simulator down.
        for node in list(self.nodes.values()):
            node.stop()
            node.destroy()
        self.simulator.stop()

    def test(self):
        # Bring the network up: leader first, then the REED, then the SED.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[REED].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED].get_state(), 'child')

        self.nodes[SED].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[SED].get_state(), 'child')
        # Accepting the SED as a child triggers the REED's router upgrade.
        self.assertEqual(self.nodes[REED].get_state(), 'router')

        sed_messages = self.simulator.get_messages_sent_by(SED)

        # Step 2 - DUT sends MLE Parent Request
        msg = sed_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
        check_parent_request(msg, is_first_request=True)

        # Step 4 - DUT sends MLE Parent Request again
        msg = sed_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
        check_parent_request(msg, is_first_request=False)

        # Step 6 - DUT sends Child ID Request
        msg = sed_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST, sent_to_node=self.nodes[REED])
        check_child_id_request(msg, address_registration=CheckType.CONTAIN,
                               tlv_request=CheckType.CONTAIN, mle_frame_counter=CheckType.OPTIONAL,
                               route64=CheckType.OPTIONAL)

        # Wait DEFAULT_CHILD_TIMEOUT seconds,
        # ensure SED has received the CHILD_ID_RESPONSE,
        # and the next data requests would be keep-alive messages
        self.simulator.go(config.DEFAULT_CHILD_TIMEOUT)
        sed_messages = self.simulator.get_messages_sent_by(SED)

        # Step 11 - SED sends periodic 802.15.4 Data Request messages
        msg = sed_messages.next_message()
        self.assertEqual(False, msg.isMacAddressTypeLong())  # Extra check, keep-alive messages are of short types of mac address
        self.assertEqual(msg.type, message.MessageType.COMMAND)
        self.assertEqual(msg.mac_header.command_type, mac802154.MacHeader.CommandIdentifier.DATA_REQUEST)

        # Step 12 - REED sends ICMPv6 echo request, to DUT link local address
        sed_addrs = self.nodes[SED].get_addrs()
        for addr in sed_addrs:
            if addr[0:4] == 'fe80':
                self.assertTrue(self.nodes[REED].ping(addr))
if __name__ == '__main__':
    # Run the certification test via the standard unittest runner.
    unittest.main()
| {
"content_hash": "982b32e0b5dead1e67186544fc816e6a",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 131,
"avg_line_length": 38.24742268041237,
"alnum_prop": 0.6539083557951483,
"repo_name": "erja-gp/openthread",
"id": "9ddedb1042102801b55c52339955ba8d56ccbe95",
"size": "5314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_6_1_02_REEDAttach_SED.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "15850"
},
{
"name": "C",
"bytes": "940119"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "4306681"
},
{
"name": "Dockerfile",
"bytes": "6256"
},
{
"name": "M4",
"bytes": "63303"
},
{
"name": "Makefile",
"bytes": "133368"
},
{
"name": "Python",
"bytes": "2012919"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "74907"
}
],
"symlink_target": ""
} |
import logging
from getgauge.python import Messages
from kylin_utils import util
# Container categories that steer the structural comparison below.
_array_types = (list, tuple, set)
_object_types = (dict, )


def api_response_equals(actual, expected, ignore=None):
    """Recursively assert that ``actual`` structurally matches ``expected``.

    Dicts are compared key-by-key (keys listed in ``ignore`` only need to be
    present; their values are not compared), sequences are matched by finding
    for each expected item *some* actual item that satisfies it, and scalars
    are compared with ``!=``. Raises ``AssertionError`` on any mismatch.

    :param actual: the response value under test.
    :param expected: the expected structure (may be a partial description).
    :param ignore: keys whose values should not be compared.
    """
    if ignore is None:
        ignore = []

    def _get_value(ignore):
        # Closure so that recursive lookups keep the same ignore list.
        def get_value(key, container):
            if isinstance(container, _object_types):
                return container.get(key)
            if isinstance(container, _array_types):
                # Sequence lookup: return the first item that matches the
                # expected "key"; if none matches, raise with every per-item
                # failure message accumulated.
                errmsg = ''
                for item in container:
                    try:
                        api_response_equals(item, key, ignore=ignore)
                        return item
                    except AssertionError as e:
                        errmsg += str(e) + '\n'
                raise AssertionError(errmsg)
            # Scalars have no members to look up.
            return None
        return get_value

    getvalue = _get_value(ignore)
    assert_failed = AssertionError(
        f'assert json failed, expected: [{expected}], actual: [{actual}]')
    if isinstance(expected, _array_types):
        if not isinstance(actual, _array_types):
            raise assert_failed
        for item in expected:
            api_response_equals(getvalue(item, actual), item, ignore=ignore)
    elif isinstance(expected, _object_types):
        if not isinstance(actual, _object_types):
            raise assert_failed
        for key, value in expected.items():
            if key not in ignore:
                api_response_equals(getvalue(key, actual),
                                    value,
                                    ignore=ignore)
            else:
                # Ignored keys must still exist in the actual response.
                if key not in actual:
                    raise assert_failed
    else:
        # Leaf comparison.
        if actual != expected:
            raise assert_failed
INTEGER_FAMILY = ['TINYINT', 'SMALLINT', 'INTEGER', 'BIGINT', 'INT']
FRACTION_FAMILY = ['DECIMAL', 'DOUBLE', 'FLOAT']
STRING_FAMILY = ['CHAR', 'VARCHAR', 'STRING']
def _is_family(datatype1, datatype2):
if datatype1 in STRING_FAMILY and datatype2 in STRING_FAMILY:
return True
if datatype1 in FRACTION_FAMILY and datatype2 in FRACTION_FAMILY:
return True
if datatype1 in INTEGER_FAMILY and datatype2 in INTEGER_FAMILY:
return True
return datatype1 == datatype2
class _Row(tuple):
    """One query-result row with type-aware, tolerant equality.

    Rows compare cell-by-cell; cells in fractional (DECIMAL/DOUBLE/FLOAT)
    columns are compared with a ~1% relative tolerance instead of exact
    string equality. ``__hash__`` is constant so that set/dict membership
    always falls back to this ``__eq__``.
    """

    def __init__(self, values, types, type_nums):  # pylint: disable=unused-argument
        """
        :param values: results of query response
        :param types: columnTypeName of query response
        :param type_nums: columnType of query response. check columnType equal when columnTypeName is not family
        :raises ValueError: when values and types have different lengths.
        """
        tuple.__init__(self)
        if len(values) != len(types):
            # Was `ValueError('???')` -- give the caller an actionable message.
            raise ValueError('row has %d values but %d column types'
                             % (len(values), len(types)))
        self._types = types
        self._type_nums = type_nums
        # Tolerant comparison in __eq__ is only needed when at least one
        # column holds fractional data.
        self._has_fraction = any(datatype in FRACTION_FAMILY
                                 for datatype in self._types)

    def __new__(cls, values, types, type_nums):  # pylint: disable=unused-argument
        # tuple is immutable: the cell values are fixed at construction time.
        return tuple.__new__(cls, values)

    def __eq__(self, other):
        # Fast path: without fractional columns on both sides, plain tuple
        # equality is sufficient.
        if not self._has_fraction or not other._has_fraction:
            return tuple.__eq__(self, other)
        if len(self._types) != len(other._types):
            return False
        for i in range(len(self._types)):
            stype = self._types[i]
            otype = other._types[i]
            if not _is_family(stype, otype):
                # Type names differ and are not interchangeable: fall back to
                # the numeric type codes when both sides have them.
                if not self._type_nums or not other._type_nums:
                    return False
                if self._type_nums[i] != other._type_nums[i]:
                    return False
            svalue = self[i]
            ovalue = other[i]
            if svalue is None or ovalue is None:
                # NULL cells: equal only when both are NULL.
                if svalue == ovalue:
                    continue
                else:
                    return False
            if stype in FRACTION_FAMILY:
                # Fractional cells: relative tolerance of 1% (absolute
                # difference when either side is zero).
                fsvalue = float(svalue)
                fovalue = float(ovalue)
                diff = abs(fsvalue - fovalue)
                rate = diff / min(fsvalue, fovalue) if fsvalue != 0 and fovalue != 0 else diff
                if abs(rate) > 0.01:
                    return False
            else:
                if svalue != ovalue:
                    return False
        return True

    def __hash__(self):
        # Always use __eq__ to compare: a constant hash puts every row into
        # the same bucket so set operations rely on the tolerant equality.
        return 0
def query_result_equals(expect_resp, actual_resp, compare_level="data_set"):
    """Compare two query responses.

    :param expect_resp: expected query response (columnMetas + results).
    :param actual_resp: actual query response.
    :param compare_level: "data_set" compares full result sets,
        "row_count" compares only the number of rows.
    :return: True/False for the supported levels; None for any other level.
    """
    def _extract(resp):
        # Pull the column type names, numeric type codes and the
        # whitespace-stripped result rows out of a response dict.
        metas = resp['columnMetas']
        type_names = [meta['columnTypeName'] for meta in metas]
        type_codes = [meta['columnType'] for meta in metas]
        rows = [[cell.strip() if cell else cell for cell in row]
                for row in resp['results']]
        return type_names, type_codes, rows

    expect_types, expect_codes, expect_rows = _extract(expect_resp)
    actual_types, actual_codes, actual_rows = _extract(actual_resp)

    if len(expect_types) != len(actual_types):
        Messages.write_message('column count assert failed [{0},{1}]'.format(len(expect_types), len(actual_types)))
        logging.error('column count assert failed [%s,%s]', len(expect_types),
                      len(actual_types))
        return False

    if compare_level == "data_set":
        return dataset_equals(
            expect_rows,
            actual_rows,
            expect_types,
            actual_types,
            expect_codes,
            actual_codes
        )
    if compare_level == "row_count":
        return row_count_equals(expect_rows, actual_rows)
def row_count_equals(expect_result, actual_result):
    """Return True when both result sets contain the same number of rows;
    otherwise report the mismatch and return False."""
    expected_rows = len(expect_result)
    actual_rows = len(actual_result)
    if expected_rows == actual_rows:
        return True
    Messages.write_message('row count assert failed [{0},{1}]'.format(expected_rows, actual_rows))
    logging.error('row count assert failed [%s,%s]', expected_rows, actual_rows)
    return False
def dataset_equals(expect, actual, expect_col_types=None, actual_col_types=None, expect_col_nums=None,
                   actual_col_nums=None):
    """Order-insensitive comparison of two result sets.

    Rows are wrapped in ``_Row`` (tolerant, type-aware equality) and compared
    as sets via symmetric difference. Missing column types default to
    VARCHAR. Returns True on match; reports the diff and returns False
    otherwise.
    """
    if len(expect) != len(actual):
        Messages.write_message('row count assert failed [{0},{1}]'.format(len(expect), len(actual)))
        logging.error('row count assert failed [%s,%s]', len(expect), len(actual))
        return False

    if expect_col_types is None:
        expect_col_types = ['VARCHAR'] * len(expect[0])
    expect_set = {_Row(row, expect_col_types, expect_col_nums) for row in expect}

    if actual_col_types is None:
        actual_col_types = expect_col_types if expect_col_types else ['VARCHAR'] * len(actual[0])
    actual_set = {_Row(row, actual_col_types, actual_col_nums) for row in actual}

    # Rows present on only one side.
    mismatch = expect_set ^ actual_set
    if not mismatch:
        return True
    logging.error('diff[%s]', len(mismatch))
    print(mismatch)
    Messages.write_message("\nDiff {0}".format(mismatch))
    return False
def compare_sql_result(sql, project, kylin_client, compare_level="data_set", cube=None, expected_result=None):
    """Run *sql* both against the cube (project) and via pushdown, then assert
    the two answers agree at *compare_level* ("data_set" or "row_count").

    Side effects: lazily creates the pushdown project and loads the source
    Hive tables into it. When *expected_result* is given, additionally
    asserts cube name, cuboidIds, scan counts and pushDown flag against it.
    NOTE(review): the assert messages call Messages.write_message, which
    presumably returns None — the side effect (writing the message) is the
    point; confirm against Messages' implementation.
    """
    pushdown_project = kylin_client.pushdown_project
    # Ensure the pushdown project exists before querying it.
    if not util.if_project_exists(kylin_client=kylin_client, project=pushdown_project):
        kylin_client.create_project(project_name=pushdown_project)
    hive_tables = kylin_client.list_hive_tables(project_name=project)
    if hive_tables is not None:
        # Mirror every Hive-sourced table (source_type == 0) into the
        # pushdown project so the same SQL is valid there.
        for table_info in kylin_client.list_hive_tables(project_name=project):
            if table_info.get('source_type') == 0:
                kylin_client.load_table(project_name=pushdown_project,
                                        tables='{database}.{table}'.format(
                                            database=table_info.get('database'),
                                            table=table_info.get('name')))
    kylin_resp = kylin_client.execute_query(cube_name=cube,
                                            project_name=project,
                                            sql=sql)
    assert kylin_resp.get('isException') is False, 'Thrown Exception when execute ' + sql
    pushdown_resp = kylin_client.execute_query(project_name=pushdown_project, sql=sql)
    assert pushdown_resp.get('isException') is False
    assert query_result_equals(pushdown_resp, kylin_resp, compare_level=compare_level), Messages.write_message("Query result is different with pushdown query result {0}, \n------------------------------------\n Actual result is {1} \n\n Expected result is {2}".format(sql, kylin_resp.get('results'), pushdown_resp.get('results')))
    # Optional metadata-level assertions against a recorded expectation.
    if expected_result is not None:
        assert expected_result.get("cube") == kylin_resp.get("cube"), Messages.write_message("Sql {0} \n------------------------------------\n Query cube is different with json file, actual cube is {1}, expected cube is {2}".format(sql, kylin_resp.get("cube"), expected_result.get("cube")))
        if kylin_resp.get("cuboidIds") is not None:
            assert expected_result.get("cuboidIds") == kylin_resp.get("cuboidIds"), Messages.write_message("Sql {0} \n------------------------------------\n query cuboidIds is different with json file, actual cuboidIds is {1}, expected cuboidIds is {2}".format(sql, kylin_resp.get("cuboidIds"), expected_result.get("cuboidIds")))
        assert expected_result.get("totalScanCount") == kylin_resp.get("totalScanCount"), Messages.write_message("Sql {0} \n------------------------------------\n query totalScanCount is different with json file, actual totalScanCount is {1}, expected totalScanCount is {2}".format(sql, kylin_resp.get("totalScanCount"), expected_result.get("totalScanCount")))
        assert expected_result.get("totalScanFiles") == kylin_resp.get("totalScanFiles"), Messages.write_message("Sql {0} \n------------------------------------\n query totalScanFiles is different with json file, actual totalScanFiles is {1}, expected totalScanFiles is {2}".format(sql, kylin_resp.get("totalScanFiles"), expected_result.get("totalScanFiles")))
        assert expected_result.get("pushDown") == kylin_resp.get("pushDown"), Messages.write_message("Sql {0} \n------------------------------------\n query pushDown is different with json file, actual pushDown is {1}, expected pushDown is {2}".format(sql, kylin_resp.get("pushDown"), expected_result.get("pushDown"))) | {
"content_hash": "007562836be9b3b77e0fcdbd9482af45",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 360,
"avg_line_length": 43.729166666666664,
"alnum_prop": 0.5940924249642687,
"repo_name": "apache/kylin",
"id": "e352736d95498eda7670d4ef35c8a7e1cd467880",
"size": "11296",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build/CI/kylin-system-testing/kylin_utils/equals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32751"
},
{
"name": "C++",
"bytes": "594708"
},
{
"name": "CSS",
"bytes": "98255"
},
{
"name": "Dockerfile",
"bytes": "28814"
},
{
"name": "FreeMarker",
"bytes": "49504"
},
{
"name": "HTML",
"bytes": "511983"
},
{
"name": "Inno Setup",
"bytes": "1219521"
},
{
"name": "Java",
"bytes": "7840516"
},
{
"name": "JavaScript",
"bytes": "1838206"
},
{
"name": "Less",
"bytes": "51933"
},
{
"name": "Python",
"bytes": "113668"
},
{
"name": "Scala",
"bytes": "704894"
},
{
"name": "Shell",
"bytes": "215104"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Package manifest for parallelpandas (PyPI metadata).
setup(
    name="parallelpandas",
    version="0.1",
    packages=find_packages(),
    scripts=[],
    install_requires=[
        'dill>=0.2',
        'pandas>=0.14',
    ],
    # metadata for upload to PyPI
    author="Philipp Metzner",
    author_email="philipp.metzner@gameduell.de",
    description="parallel version of some pandas function",
    license="MIT",
    # fixed typo: "mutliprocessing" -> "multiprocessing"
    keywords="pandas multiprocessing parallel",
    url="https://github.com/GameDuell/parallelpandas",
)
| {
"content_hash": "b47c65a73272eacfd4d4ccf894c39597",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 61,
"avg_line_length": 31.470588235294116,
"alnum_prop": 0.6317757009345795,
"repo_name": "gameduell/parallelpandas",
"id": "4a0a87ce73525dc27ce39c2c7848b0da6fa2e080",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4654"
}
],
"symlink_target": ""
} |
"""Resource Open Service Interface Definitions
resource version 3.0.0
The Resource OSID defines a service to access and manage a directory of
objects.
Resources
``Resources`` may represent people, places or a set of arbitrary
entities that are used throughout the OSIDs as references to indirect
objects. In core OSID, ``Resources`` have no other meaning other than to
provide an identifier and a relation to an authentication principal.
``Resource`` ``Types`` may define extra data to define an employee,
organizational unit or an inventory item.
``Resources`` are referenced throughout the OSIDs, and the abstraction
level of this service provides a consistent interface with which to
access and manage object references not directly pertinent to the
service in play. For example, a Repository OSID may reference
``Resources`` as authors or a Course OSID may reference ``Resources``
for students and instructors. Each of these OSIDs may orchestrate a
Resource OSID to provide management of the set of referenced resources.
A ``Resource`` genus Type may be used to provide a label the kind of
resource. This service offers the flexibility that the producer of a
film may be a person, a production company, or a fire hydrant. While
genus ``Types`` may be used to constrain the kinds of ``Resources`` that
may be related to various ``OsidObjects`` if necessary ``,`` OSID
Consumers are expected to simply use the Resource as a reference. If an
OSID Consumer wishes to provide a mechanism for updating a ``Resource``
referenced, the OSID Consumer should use an orchestrated Resource OSID.
Agents
A ``Resource`` also provides the mapping between an authentication
``Agent`` and the entity on whose behalf the agent is acting. An
``Agent`` can only map to a single ``Resource`` while a ``Resource`` can
have multiple ``Agents``. An agent that represents the unix login of
"vijay" on server due.mit.edu can map to a ``Resource`` representing
Vijay Kumar, who may also have a campus agent of "vkumar@mit.edu."
Group
When a ``Resource`` is referenced in another OSID, it is a singular
entity. To provide groupings of multiple people or things, a
``Resource`` can also be defined as a hierarchical group of other
resources. Whether a resource is a single entity or a group is an
attribute of the ``Resource`` itself. If a ``Resource`` is a group, then
its membership can be queried or managed in one of the group sessions.
This overloading of the object definition serves to keep the nature of
the resource separate from the other OSIDs such that a message to a
"group", for example, is referenced as a single resource recipient.
Other OSIDs are blind to whether or not a referenced ``Resource`` is a
group or a singular entity.
Resource Relationships
For kicks, ``Resources`` may have arbitrary relationships to other
``Resources`` using the ``ResourceRelationship`` interface. Resource
relationships may also be used to provide a place to describe in more
detail, or hang data, on a member to group relationship.
Bin Cataloging
``Resources`` may be mapped into hierarchical ``Bins`` for the purpose of
cataloging or federation.
Sub Packages
The Resource OSID includes a Resource Demographic OSID for managing
dynamically generated populations of ``Resources`` and a Resource Batch
OSID for managing ``Resources`` in bulk.
"""
| {
"content_hash": "583b5bca61ae3a0efef56966a64845c2",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 72,
"avg_line_length": 45.148648648648646,
"alnum_prop": 0.7818018557318168,
"repo_name": "birdland/dlkit-doc",
"id": "ae642c533fa3800b06a8e9529f4ce1f2b1656232",
"size": "3365",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dlkit/mongo/resource/summary_doc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12458859"
}
],
"symlink_target": ""
} |
import abc
import numpy as np
from scipy._lib._util import check_random_state
from ..utils import (_chol, _chol_downdate, _chol_update, _chol_logdet,
_chol_solve, _normal_rvs, _normal_logpdf, _wishart_rvs,
_wishart_logpdf, _t_logpdf)
class GenericMixture(object):
    """
    Class which encapsulates common functionality between all mixture models.
    """
    # NOTE(review): ``__metaclass__`` is the Python 2 spelling and is ignored
    # under Python 3, so ABCMeta is not actually applied there; the abstract
    # methods still guard via ``raise NotImplementedError``. Confirm intended
    # Python version before changing to ``metaclass=``.
    __metaclass__ = abc.ABCMeta
    def __init__(self):
        super(GenericMixture, self).__init__()
    class _Param(object):
        # Common base for per-component parameter holders; stores the RNG.
        __metaclass__ = abc.ABCMeta
        def __init__(self, random_state):
            # Normalize seed/None/Generator into a usable random state.
            self._random_state = check_random_state(random_state)
        @abc.abstractmethod
        def dump(self):
            # Return the internal state for serialization/inspection.
            raise NotImplementedError
    class DrawParam(_Param):
        # Parameters used when *sampling* synthetic data from the model.
        __metaclass__ = abc.ABCMeta
        @abc.abstractmethod
        def draw_x_n(self):
            # Draw one observation from this component.
            raise NotImplementedError
    class InferParam(_Param):
        # Parameters used during *inference*; supports incremental
        # update/downdate as observations join/leave the component.
        __metaclass__ = abc.ABCMeta
        @abc.abstractmethod
        def update(self, x):
            # Incorporate observation x into the sufficient statistics.
            raise NotImplementedError
        @abc.abstractmethod
        def downdate(self, x):
            # Remove observation x from the sufficient statistics.
            raise NotImplementedError
        @abc.abstractmethod
        def iterate(self, compute_log_likelihood=False):
            # Resample the component parameters; optionally return their
            # log-likelihood contribution.
            raise NotImplementedError
        @abc.abstractmethod
        def iterate_to(self, mixture_param, compute_log_likelihood=False):
            # Adopt another component's parameters; optionally return their
            # log-likelihood under this component's posterior.
            raise NotImplementedError
        @abc.abstractmethod
        def log_likelihood(self, x):
            # Log-likelihood of observation x under this component.
            raise NotImplementedError
class CollapsedConjugateGaussianMixture(GenericMixture):
"""
Collapsed conjugate Gaussian mixture model. Parametrization according to
Görür and Rasmussen (2010).
Parameters
----------
xi : None or array-like, optional
xi hyperparameter
rho : None or float, optional
rho hyperparameter
beta : None or float, optional
beta hyperparameter. Must be larger than the dimension of xi minus one
W : None or array-like, optional
W hyperparameter
References
----------
Görür, D. and Rasmussen, C. E. (2010). Dirichlet process Gaussian mixture
models: Choice of the base distribution. Journal of Computer Science
and Technology, 25(4): 615-626.
"""
def __init__(self, xi=None, rho=1.0, beta=1.0, W=1.0):
super(CollapsedConjugateGaussianMixture, self).__init__()
self.dim, self.xi, self.rho, self.beta, self.W = \
self._check_parameters(None, xi, rho, beta, W)
self._rho_xi = self.rho * self.xi
self._beta_W_chol = _chol(self.dim, self.beta*self.W)
@staticmethod
def _check_parameters(dim, xi, rho, beta, W):
# Try to infer dimensionality
if dim is None:
if xi is None:
if W is None:
dim = 1
else:
W = np.asarray(W, dtype=float)
if W.ndim < 2:
dim = 1
else:
dim = W.shape[0]
else:
xi = np.asarray(xi, dtype=float)
dim = xi.size
else:
if not np.isscalar(dim):
msg = ("Dimension of random variable must be a scalar.")
raise ValueError(msg)
# Check input sizes and return full arrays for xi and W if necessary
if xi is None:
xi = np.zeros(dim, dtype=float)
xi = np.asarray(xi, dtype=float)
if W is None:
W = 1.0
W = np.asarray(W, dtype=float)
if dim == 1:
xi.shape = (1,)
W.shape = (1, 1)
if xi.ndim != 1 or xi.shape[0] != dim:
msg = ("Array 'xi' must be a vector of length %d." % dim)
raise ValueError(msg)
if W.ndim == 0:
W = W * np.eye(dim)
elif W.ndim == 1:
W = np.diag(W)
elif W.ndim == 2 and W.shape != (dim, dim):
rows, cols = W.shape
if rows != cols:
msg = ("Array 'W' must be square if it is two-dimensional,"
" but W.shape = %s." % str(W.shape))
else:
msg = ("Dimension mismatch: array 'W' is of shape %s,"
" but 'xi' is a vector of length %d.")
msg = msg % (str(W.shape), len(xi))
raise ValueError(msg)
elif W.ndim > 2:
raise ValueError("Array 'W' must be at most two-dimensional,"
" but W.ndim = %d" % W.ndim)
if rho is None:
rho = 1.0
elif not np.isscalar(rho):
raise ValueError("Float 'rho' must be a scalar.")
elif rho <= 0.0:
raise ValueError("Float 'rho' must be larger than zero, but"
" rho = %f" % rho)
if beta is None:
beta = dim
elif not np.isscalar(beta):
raise ValueError("Float 'beta' must be a scalar.")
elif beta <= dim - 1:
raise ValueError("Float 'beta' must be larger than the dimension"
" minus one, but beta = %f" % beta)
return dim, xi, rho, beta, W
@classmethod
def _check_mixture_model(cls, mixture_model):
if not isinstance(mixture_model, cls):
raise ValueError("'mixture_model' must be a collapsed conjugate"
" Gaussian mixture model."
" Got mixture_model = %r" % mixture_model)
return mixture_model
def _ms_log_prior(self, mixture_param):
return 0.0
def _ms_log_likelihood(self, x_n, inv_c, mixture_param, random_state):
"""
Logarithm of the likelihood appearing in the M-H acceptance ratio used
by the merge-split samplers.
"""
ret = 0.0
mixture_param = self.InferParam(self, random_state)
# The order in which the for loop is enumerated does not have an
# influence on the result.
for index, l in enumerate(inv_c):
ret += mixture_param.log_likelihood(x_n[l])
if index < len(inv_c)-1:
mixture_param.update(x_n[l])
return ret
class DrawParam(GenericMixture.DrawParam):
def __init__(self, mixture_model, random_state):
super(CollapsedConjugateGaussianMixture.DrawParam, self).__init__(
random_state)
self._mixture_model = \
CollapsedConjugateGaussianMixture._check_mixture_model(
mixture_model)
self._mu_c = None
self._S_c = None
@property
def mu_c(self):
if self._mu_c is None:
mm = self._mixture_model
_, S_c_chol = self.S_c
self._mu_c = _normal_rvs(mm.dim, mm.xi,
np.sqrt(mm.rho)*S_c_chol, self._random_state)
return self._mu_c
@property
def S_c(self):
if self._S_c is None:
mm = self._mixture_model
S_c = _wishart_rvs(mm.dim, mm.beta, mm._beta_W_chol,
self._random_state)
S_c_chol = _chol(mm.dim, S_c)
self._S_c = (S_c, S_c_chol)
return self._S_c
def draw_x_n(self):
mm = self._mixture_model
_, S_c_chol = self.S_c
x_n = _normal_rvs(mm.dim, self.mu_c, S_c_chol, self._random_state)
return x_n
def phi_c(self):
S_c, _ = self.S_c
return {'mean': self.mu_c, 'precision': S_c}
def dump(self):
return self._mu_c, self._S_c
class InferParam(GenericMixture.InferParam):
def __init__(self, mixture_model, random_state):
super(CollapsedConjugateGaussianMixture.InferParam,
self).__init__(random_state)
self._mixture_model = \
CollapsedConjugateGaussianMixture._check_mixture_model(
mixture_model)
self._rho_c = None
self._beta_c = None
self._xsum_c = None
self._xi_c = None
self._beta_W_c_chol = None
self._df = None
self._scale = None
@property
def rho_c(self):
if self._rho_c is None:
return self._mixture_model.rho
else:
return self._rho_c
@property
def beta_c(self):
if self._beta_c is None:
return self._mixture_model.beta
else:
return self._beta_c
@property
def xi_c(self):
if self._xi_c is None:
return self._mixture_model.xi
else:
return self._xi_c
@property
def beta_W_c_chol(self):
if self._beta_W_c_chol is None:
return self._mixture_model._beta_W_chol
else:
return self._beta_W_c_chol
@property
def df(self):
if self._df is None:
self._df = self.beta_c - self._mixture_model.dim + 1.0
return self._df
@property
def scale(self):
if self._scale is None:
scale_chol = np.sqrt((self.rho_c+1.0) / \
(self.rho_c*self.df)) * self.beta_W_c_chol
scale_logdet = _chol_logdet(self._mixture_model.dim,
scale_chol)
self._scale = (scale_chol, scale_logdet)
return self._scale
def update(self, x):
mm = self._mixture_model
if self._rho_c is None:
self._rho_c = mm.rho + 1.0
else:
self._rho_c += 1.0
if self._beta_c is None:
self._beta_c = mm.beta + 1.0
else:
self._beta_c += 1.0
if self._xsum_c is None:
self._xsum_c = np.array(x, copy=True)
else:
self._xsum_c += x
self._xi_c = (mm._rho_xi + self._xsum_c) / self._rho_c
if self._beta_W_c_chol is None:
self._beta_W_c_chol = np.array(mm._beta_W_chol, copy=True)
_chol_update(mm.dim, self._beta_W_c_chol,
np.sqrt(self._rho_c/(self._rho_c-1.0)) * (x-self._xi_c))
# TODO: Find better way to do this
self._df = None
self._scale = None
return self
def downdate(self, x):
mm = self._mixture_model
if self._rho_c is None:
raise ValueError('rho_c must be updated before it can be'
' downdated')
else:
self._rho_c -= 1.0
if self._beta_c is None:
raise ValueError('beta_c must be updated before it can be'
' downdated')
else:
self._beta_c -= 1.0
if self._xsum_c is None:
raise ValueError('xsum_c must be updated before it can be'
' downdated')
else:
self._xsum_c -= x
self._xi_c = (mm._rho_xi + self._xsum_c) / self._rho_c
if self._beta_W_c_chol is None:
raise ValueError('beta_W_c_chol must be updated before it can'
' be downdated')
_chol_downdate(mm.dim, self._beta_W_c_chol,
np.sqrt(self._rho_c/(self._rho_c+1.0)) * (x-self._xi_c))
# TODO: Find better way to do this
self._df = None
self._scale = None
return self
def iterate(self, compute_log_likelihood=False):
# TODO: Could this be a better solution?
# self._df = None
# self._scale = None
return 0.0
def iterate_to(self, mixture_param, compute_log_likelihood=False):
# TODO: Could this be a better solution?
# self._df = None
# self._scale = None
return 0.0
def log_likelihood(self, x):
scale_chol, scale_logdet = self.scale
ret = _t_logpdf(x, self._mixture_model.dim, self.xi_c,
self.df, scale_chol, scale_logdet)
return ret
def phi_c(self):
# TODO: Draw component parameters on demand from conditional
# posterior?
return None
def dump(self):
return self._rho_c, self._beta_c, self._xsum_c, self._xi_c, \
self._beta_W_c_chol, self._df, self._scale
class ConjugateGaussianMixture(GenericMixture):
    """
    Conjugate Gaussian mixture model. Parametrization according to
    Görür and Rasmussen (2010).
    Parameters
    ----------
    xi : None or array-like, optional
        xi hyperparameter
    rho : None or float, optional
        rho hyperparameter
    beta : None or float, optional
        beta hyperparameter. Must be larger than the dimension of xi minus one
    W : None or array-like, optional
        W hyperparameter
    References
    ----------
    Görür, D. and Rasmussen, C. E. (2010). Dirichlet process Gaussian mixture
        models: Choice of the base distribution. Journal of Computer Science
        and Technology, 25(4): 615-626.
    """
    def __init__(self, xi=None, rho=1.0, beta=1.0, W=1.0):
        super(ConjugateGaussianMixture, self).__init__()
        self.dim, self.xi, self.rho, self.beta, self.W = \
            self._check_parameters(None, xi, rho, beta, W)
        # Cached products reused by the incremental posterior updates.
        self._rho_xi = self.rho * self.xi
        self._beta_W_chol = _chol(self.dim, self.beta*self.W)
    @staticmethod
    def _check_parameters(dim, xi, rho, beta, W):
        """Validate hyperparameters; return canonical (dim, xi, rho, beta, W)."""
        # Try to infer dimensionality
        if dim is None:
            if xi is None:
                if W is None:
                    dim = 1
                else:
                    W = np.asarray(W, dtype=float)
                    if W.ndim < 2:
                        dim = 1
                    else:
                        dim = W.shape[0]
            else:
                xi = np.asarray(xi, dtype=float)
                dim = xi.size
        else:
            if not np.isscalar(dim):
                msg = ("Dimension of random variable must be a scalar.")
                raise ValueError(msg)
        # Check input sizes and return full arrays for xi and W if necessary
        if xi is None:
            xi = np.zeros(dim, dtype=float)
        xi = np.asarray(xi, dtype=float)
        if W is None:
            W = 1.0
        W = np.asarray(W, dtype=float)
        if dim == 1:
            xi.shape = (1,)
            W.shape = (1, 1)
        if xi.ndim != 1 or xi.shape[0] != dim:
            msg = ("Array 'xi' must be a vector of length %d." % dim)
            raise ValueError(msg)
        # Scalar/diagonal W are promoted to a full (dim, dim) matrix.
        if W.ndim == 0:
            W = W * np.eye(dim)
        elif W.ndim == 1:
            W = np.diag(W)
        elif W.ndim == 2 and W.shape != (dim, dim):
            rows, cols = W.shape
            if rows != cols:
                msg = ("Array 'W' must be square if it is two-dimensional,"
                       " but W.shape = %s." % str(W.shape))
            else:
                msg = ("Dimension mismatch: array 'W' is of shape %s,"
                       " but 'xi' is a vector of length %d.")
                msg = msg % (str(W.shape), len(xi))
            raise ValueError(msg)
        elif W.ndim > 2:
            raise ValueError("Array 'W' must be at most two-dimensional,"
                             " but W.ndim = %d" % W.ndim)
        if rho is None:
            rho = 1.0
        elif not np.isscalar(rho):
            raise ValueError("Float 'rho' must be a scalar.")
        elif rho <= 0.0:
            raise ValueError("Float 'rho' must be larger than zero, but"
                             " rho = %f" % rho)
        if beta is None:
            beta = dim
        elif not np.isscalar(beta):
            raise ValueError("Float 'beta' must be a scalar.")
        elif beta <= dim - 1:
            raise ValueError("Float 'beta' must be larger than the dimension"
                             " minus one, but beta = %f" % beta)
        return dim, xi, rho, beta, W
    @classmethod
    def _check_mixture_model(cls, mixture_model):
        """Ensure *mixture_model* is an instance of this model class."""
        if not isinstance(mixture_model, cls):
            raise ValueError("'mixture_model' must be a conjugate"
                             " Gaussian mixture model."
                             " Got mixture_model = %r" % mixture_model)
        return mixture_model
    def _ms_log_prior(self, mixture_param):
        """Log prior density of the component parameters (mu_c, S_c)."""
        ret = 0.0
        _, S_c_chol, S_c_logdet = mixture_param.S_c
        # TODO: Cache rho_S_c_chol and rho_S_c_logdet
        rho_S_c_chol = np.sqrt(self.rho) * S_c_chol
        rho_S_c_logdet = self.dim * np.log(self.rho) + S_c_logdet
        ret += _normal_logpdf(mixture_param.mu_c, self.dim, self.xi,
                rho_S_c_chol, rho_S_c_logdet)
        # TODO: Cache beta_W_logdet
        beta_W_logdet = _chol_logdet(self.dim, self._beta_W_chol)
        ret += _wishart_logpdf(S_c_chol, S_c_logdet, self.dim, self.beta,
                self._beta_W_chol, beta_W_logdet)
        return ret
    def _ms_log_likelihood(self, x_n, inv_c, mixture_param, random_state):
        """
        Logarithm of the likelihood appearing in the M-H acceptance ratio used
        by the merge-split samplers.
        """
        ret = 0.0
        for l in inv_c:
            ret += mixture_param.log_likelihood(x_n[l])
        return ret
    class DrawParam(GenericMixture.DrawParam):
        # Lazily drawn (mu_c, S_c) for generating synthetic data.
        def __init__(self, mixture_model, random_state):
            super(ConjugateGaussianMixture.DrawParam, self).__init__(
                    random_state)
            self._mixture_model = \
                ConjugateGaussianMixture._check_mixture_model(
                        mixture_model)
            self._mu_c = None
            self._S_c = None
        @property
        def mu_c(self):
            # Component mean; drawn after S_c since it conditions on it.
            if self._mu_c is None:
                mm = self._mixture_model
                _, S_c_chol = self.S_c
                self._mu_c = _normal_rvs(mm.dim, mm.xi,
                        np.sqrt(mm.rho)*S_c_chol, self._random_state)
            return self._mu_c
        @property
        def S_c(self):
            # Component precision and its Cholesky factor, drawn on demand.
            if self._S_c is None:
                mm = self._mixture_model
                S_c = _wishart_rvs(mm.dim, mm.beta, mm._beta_W_chol,
                        self._random_state)
                S_c_chol = _chol(mm.dim, S_c)
                self._S_c = (S_c, S_c_chol)
            return self._S_c
        def draw_x_n(self):
            """Draw a single observation from this component."""
            mm = self._mixture_model
            _, S_c_chol = self.S_c
            x_n = _normal_rvs(mm.dim, self.mu_c, S_c_chol, self._random_state)
            return x_n
        def phi_c(self):
            """Return the component parameters as a dict."""
            S_c, _ = self.S_c
            return {'mean': self.mu_c, 'precision': S_c}
        def dump(self):
            return self._mu_c, self._S_c
    class InferParam(GenericMixture.InferParam):
        # Per-component posterior state; sufficient statistics are kept
        # incrementally and (mu_c, S_c) are sampled from the conditionals.
        def __init__(self, mixture_model, random_state):
            super(ConjugateGaussianMixture.InferParam, self).__init__(
                    random_state)
            self._mixture_model = \
                ConjugateGaussianMixture._check_mixture_model(
                        mixture_model)
            self._n_c = None
            self._rho_c = None
            self._beta_c = None
            self._xsum_c = None
            self._xi_c = None
            self._beta_W_help_c_chol = None
            self._mu_c = None
            self._S_c = None
        @property
        def n_c(self):
            # Number of observations currently assigned to the component.
            if self._n_c is None:
                return 0
            else:
                return self._n_c
        @property
        def rho_c(self):
            if self._rho_c is None:
                return self._mixture_model.rho
            else:
                return self._rho_c
        @property
        def beta_c(self):
            if self._beta_c is None:
                return self._mixture_model.beta
            else:
                return self._beta_c
        @property
        def xsum_c(self):
            if self._xsum_c is None:
                return np.zeros(self._mixture_model.dim, dtype=float)
            else:
                return self._xsum_c
        @property
        def xi_c(self):
            if self._xi_c is None:
                return self._mixture_model.xi
            else:
                return self._xi_c
        @property
        def beta_W_help_c_chol(self):
            if self._beta_W_help_c_chol is None:
                return self._mixture_model._beta_W_chol
            else:
                return self._beta_W_help_c_chol
        @property
        def mu_c(self):
            if self._mu_c is None:
                mm = self._mixture_model
                # We need to draw `S_c` first, hence `self.S_c` instead of
                # `self._S_c`
                self._mu_c, _ = self._draw_mu_c(mm.dim, self.rho_c, self.S_c,
                        self.xi_c, self._random_state)
            return self._mu_c
        @staticmethod
        def _prepare_mu_c(dim, rho_c, S_c):
            """Cholesky factor and log-det of the conditional precision of mu_c."""
            # That is why we have to draw `S_c` first
            if S_c is None:
                raise ValueError
            _, S_c_chol, S_c_logdet = S_c
            rho_S_c_chol = np.sqrt(rho_c) * S_c_chol
            rho_S_c_logdet = dim * np.log(rho_c) + S_c_logdet
            return rho_S_c_chol, rho_S_c_logdet
        @staticmethod
        def _log_likelihood_mu_c(dim, rho_S_c_chol, rho_S_c_logdet, xi_c,
                                 mu_c):
            """Log-density of mu_c under its conditional posterior."""
            ret = _normal_logpdf(mu_c, dim, xi_c, rho_S_c_chol,
                    rho_S_c_logdet)
            return ret
        @classmethod
        def _draw_mu_c(cls, dim, rho_c, S_c, xi_c, random_state,
                       compute_log_likelihood=False):
            """Sample mu_c from its conditional; optionally score it."""
            # Note that `rho_S_c_logdet` is computed as well although it is
            # used only when `compute_log_likelihood` is `True`. It is much
            # more efficient to compute it from `S_c_logdet` than from
            # `rho_S_c_chol`
            rho_S_c_chol, rho_S_c_logdet = cls._prepare_mu_c(dim, rho_c, S_c)
            mu_c = _normal_rvs(dim, xi_c, rho_S_c_chol, random_state)
            log_likelihood = 0.0
            if compute_log_likelihood:
                log_likelihood += cls._log_likelihood_mu_c(dim, rho_S_c_chol,
                        rho_S_c_logdet, xi_c, mu_c)
            return mu_c, log_likelihood
        @property
        def S_c(self):
            if self._S_c is None:
                mm = self._mixture_model
                self._S_c, _ = self._draw_S_c(mm.dim, self.n_c,
                        self.beta_W_help_c_chol, self.xsum_c, self._mu_c,
                        self.beta_c, self._random_state)
            return self._S_c
        @staticmethod
        def _prepare_S_c(dim, n_c, beta_W_help_c_chol, xsum_c, mu_c):
            """Cholesky factor of the conditional Wishart scale for S_c."""
            if n_c > 0:
                if mu_c is None:
                    raise ValueError
                beta_W_c_chol = np.array(beta_W_help_c_chol, copy=True)
                _chol_update(dim, beta_W_c_chol,
                        np.sqrt(n_c) * (xsum_c/n_c - mu_c))
            else:
                beta_W_c_chol = beta_W_help_c_chol
            return beta_W_c_chol
        @staticmethod
        def _log_likelihood_S_c(dim, beta_c, beta_W_c_chol, S_c_chol,
                                S_c_logdet):
            """Log-density of S_c under its conditional Wishart posterior."""
            beta_W_c_logdet = _chol_logdet(dim, beta_W_c_chol)
            ret = _wishart_logpdf(S_c_chol, S_c_logdet, dim, beta_c,
                    beta_W_c_chol, beta_W_c_logdet)
            return ret
        @classmethod
        def _draw_S_c(cls, dim, n_c, beta_W_help_c_chol, xsum_c, mu_c, beta_c,
                      random_state, compute_log_likelihood=False):
            """Sample S_c from its conditional; optionally score it."""
            beta_W_c_chol = cls._prepare_S_c(dim, n_c, beta_W_help_c_chol,
                    xsum_c, mu_c)
            S_c = _wishart_rvs(dim, beta_c, beta_W_c_chol, random_state)
            S_c_chol = _chol(dim, S_c)
            S_c_logdet = _chol_logdet(dim, S_c_chol)
            log_likelihood = 0.0
            if compute_log_likelihood:
                log_likelihood += cls._log_likelihood_S_c(dim, beta_c,
                        beta_W_c_chol, S_c_chol, S_c_logdet)
            return (S_c, S_c_chol, S_c_logdet), log_likelihood
        def update(self, x):
            """Add observation x to the component's sufficient statistics."""
            mm = self._mixture_model
            # The Cholesky helper must be updated with the *old* counts,
            # so it is handled before n_c/rho_c/... are incremented.
            if self._beta_W_help_c_chol is None:
                self._beta_W_help_c_chol = np.array(mm._beta_W_chol,
                        copy=True)
            else:
                _chol_update(mm.dim, self._beta_W_help_c_chol,
                        np.sqrt(self._n_c / float(self._n_c+1)) * \
                                (x - self._xsum_c/self._n_c))
            if self._n_c is None:
                self._n_c = 1
            else:
                self._n_c += 1
            if self._rho_c is None:
                self._rho_c = mm.rho + 1.0
            else:
                self._rho_c += 1.0
            if self._beta_c is None:
                self._beta_c = mm.beta + 1.0
            else:
                self._beta_c += 1.0
            if self._xsum_c is None:
                self._xsum_c = np.array(x, copy=True)
            else:
                self._xsum_c += x
            self._xi_c = (mm._rho_xi + self._xsum_c) / self._rho_c
            return self
        def downdate(self, x):
            """Remove observation x; resets state when the component empties."""
            mm = self._mixture_model
            if self._beta_W_help_c_chol is None:
                raise ValueError('beta_W_help_c must be updated before it can'
                                 ' be downdated')
            elif self._n_c > 1:
                _chol_downdate(mm.dim, self._beta_W_help_c_chol,
                        np.sqrt(self._n_c / float(self._n_c-1)) * \
                                (x - self._xsum_c/self._n_c))
            else:
                self._beta_W_help_c_chol = None
            if self._n_c is None:
                raise ValueError('n_c must be updated before it can be'
                                 ' downdated')
            elif self._n_c > 1:
                self._n_c -= 1
            else:
                self._n_c = None
            if self._rho_c is None:
                raise ValueError('rho_c must be updated before it can be'
                                 ' downdated')
            elif self._n_c is not None:
                self._rho_c -= 1.0
            else:
                self._rho_c = None
            if self._beta_c is None:
                raise ValueError('beta_c must be updated before it can be'
                                 ' downdated')
            elif self._n_c is not None:
                self._beta_c -= 1.0
            else:
                self._beta_c = None
            if self._xsum_c is None:
                raise ValueError('xsum_c must be updated before it can be'
                                 ' downdated')
            elif self._n_c is not None:
                self._xsum_c -= x
            else:
                self._xsum_c = None
            if self._n_c is not None:
                self._xi_c = (mm._rho_xi + self._xsum_c) / self._rho_c
            else:
                self._xi_c = None
            return self
        def iterate(self, compute_log_likelihood=False):
            """Resample (S_c, mu_c) from their conditional posteriors."""
            mm = self._mixture_model
            dim = mm.dim
            # We have to sample `S_c` first
            self._S_c, log_likelihood_S_c = self._draw_S_c(dim, self.n_c,
                    self.beta_W_help_c_chol, self.xsum_c, self._mu_c,
                    self.beta_c, self._random_state, compute_log_likelihood)
            self._mu_c, log_likelihood_mu_c = self._draw_mu_c(dim, self.rho_c,
                    self._S_c, self.xi_c, self._random_state,
                    compute_log_likelihood)
            return log_likelihood_mu_c + log_likelihood_S_c
        def iterate_to(self, mixture_param, compute_log_likelihood=False):
            """Adopt (S_c, mu_c) from *mixture_param*; optionally score them."""
            mm = self._mixture_model
            dim = mm.dim
            log_likelihood = 0.0
            # We have to do `S_c` first
            self._S_c = mixture_param._S_c
            if compute_log_likelihood:
                _, S_c_chol, S_c_logdet = self._S_c
                beta_W_c_chol = self._prepare_S_c(dim, self.n_c,
                        self.beta_W_help_c_chol, self.xsum_c, self._mu_c)
                log_likelihood += self._log_likelihood_S_c(dim, self.beta_c,
                        beta_W_c_chol, S_c_chol, S_c_logdet)
            self._mu_c = mixture_param._mu_c
            if compute_log_likelihood:
                rho_S_c_chol, rho_S_c_logdet = self._prepare_mu_c(dim,
                        self.rho_c, self._S_c)
                # BUG FIX: `xi_c` was referenced as a bare (undefined) name
                # here, raising NameError whenever compute_log_likelihood
                # was True; it must be the `xi_c` property of this object.
                log_likelihood += self._log_likelihood_mu_c(dim, rho_S_c_chol,
                        rho_S_c_logdet, self.xi_c, self._mu_c)
            return log_likelihood
        def log_likelihood(self, x):
            """Gaussian log-density of x under the sampled (mu_c, S_c)."""
            # We have to do `S_c` first
            _, S_c_chol, S_c_logdet = self.S_c
            ret = _normal_logpdf(x, self._mixture_model.dim, self.mu_c,
                    S_c_chol, S_c_logdet)
            return ret
        def phi_c(self):
            """Return the component parameters as a dict."""
            # We have to do `S_c` first
            S_c, _, _ = self.S_c
            return {'mean': self.mu_c, 'precision': S_c}
        def dump(self):
            # BUG FIX: previously returned `self._R_c_chol`, an attribute
            # that is never defined on this class (copy-paste from the
            # non-conjugate model) and raised AttributeError. `_rho_c` is
            # the state actually tracked here.
            return self._n_c, self._rho_c, self._beta_c, self._xsum_c, \
                    self._xi_c, self._beta_W_help_c_chol, self._mu_c, \
                    self._S_c
class NonconjugateGaussianMixture(GenericMixture):
"""
Conditionally conjugate Gaussian mixture model. Parametrization according
to Görür and Rasmussen (2010).
Parameters
----------
xi : None or array-like, optional
xi hyperparameter
R : None or array-like, optional
R hyperparameter
beta : None or float, optional
beta hyperparameter
W : None or array-like, optional
W hyperparameter
References
----------
Görür, D. and Rasmussen, C. E. (2010). Dirichlet process Gaussian mixture
models: Choice of the base distribution. Journal of Computer Science
and Technology, 25(4): 615-626.
"""
    def __init__(self, xi=None, R=1.0, beta=1.0, W=1.0):
        """Validate hyperparameters and pre-factor the matrices used later."""
        super(NonconjugateGaussianMixture, self).__init__()
        self.dim, self.xi, self.R, self.beta, self.W = \
            self._check_parameters(None, xi, R, beta, W)
        # Cached products/factors reused by the inference routines.
        self._R_xi = np.dot(self.R, self.xi)
        self._R_chol = _chol(self.dim, self.R)
        self._beta_W_chol = _chol(self.dim, self.beta * self.W)
    @staticmethod
    def _check_parameters(dim, xi, R, beta, W):
        """Validate hyperparameters; return canonical (dim, xi, R, beta, W)."""
        # Try to infer the dimensionality from xi, then R, then W.
        if dim is None:
            if xi is None:
                if R is None:
                    if W is None:
                        dim = 1
                    else:
                        W = np.asarray(W, dtype=float)
                        if W.ndim < 2:
                            dim = 1
                        else:
                            dim = W.shape[0]
                else:
                    R = np.asarray(R, dtype=float)
                    if R.ndim < 2:
                        dim = 1
                    else:
                        dim = R.shape[0]
            else:
                xi = np.asarray(xi, dtype=float)
                dim = xi.size
        else:
            if not np.isscalar(dim):
                msg = ("Dimension of random variable must be a scalar.")
                raise ValueError(msg)
        # Fill defaults and coerce to float arrays.
        if xi is None:
            xi = np.zeros(dim, dtype=float)
        xi = np.asarray(xi, dtype=float)
        if R is None:
            R = 1.0
        R = np.asarray(R, dtype=float)
        if W is None:
            W = 1.0
        W = np.asarray(W, dtype=float)
        if dim == 1:
            xi.shape = (1,)
            R.shape = (1, 1)
            W.shape = (1, 1)
        if xi.ndim != 1 or xi.shape[0] != dim:
            msg = ("Array 'xi' must be a vector of length %d." % dim)
            raise ValueError(msg)
        # Scalar/diagonal R are promoted to a full (dim, dim) matrix.
        if R.ndim == 0:
            R = R * np.eye(dim)
        elif R.ndim == 1:
            R = np.diag(R)
        elif R.ndim == 2 and R.shape != (dim, dim):
            rows, cols = R.shape
            if rows != cols:
                msg = ("Array 'R' must be square if it is two-dimensional,"
                       " but R.shape = %s." % str(R.shape))
            else:
                msg = ("Dimension mismatch: array 'R' is of shape %s,"
                       " but 'xi' is a vector of length %d.")
                msg = msg % (str(R.shape), len(xi))
            raise ValueError(msg)
        elif R.ndim > 2:
            raise ValueError("Array 'R' must be at most two-dimensional,"
                             " but R.ndim = %d" % R.ndim)
        # Same promotion/validation for W.
        if W.ndim == 0:
            W = W * np.eye(dim)
        elif W.ndim == 1:
            W = np.diag(W)
        elif W.ndim == 2 and W.shape != (dim, dim):
            rows, cols = W.shape
            if rows != cols:
                msg = ("Array 'W' must be square if it is two-dimensional,"
                       " but W.shape = %s." % str(W.shape))
            else:
                msg = ("Dimension mismatch: array 'W' is of shape %s,"
                       " but 'xi' is a vector of length %d.")
                msg = msg % (str(W.shape), len(xi))
            raise ValueError(msg)
        elif W.ndim > 2:
            raise ValueError("Array 'W' must be at most two-dimensional,"
                             " but W.ndim = %d" % W.ndim)
        if beta is None:
            beta = dim
        elif not np.isscalar(beta):
            raise ValueError("Float 'beta' must be a scalar.")
        elif beta <= dim - 1:
            raise ValueError("Float 'beta' must be larger than the dimension"
                             " minus one, but beta = %f" % beta)
        return dim, xi, R, beta, W
@classmethod
def _check_mixture_model(cls, mixture_model):
if not isinstance(mixture_model, cls):
raise ValueError("'mixture_model' must be a non-conjugate"
" Gaussian mixture model."
" Got mixture_model = %r" % mixture_model)
return mixture_model
    def _ms_log_prior(self, mixture_param):
        """Log prior density of one component's parameters.

        Sums the normal log-density of the component mean ``mu_c``
        (evaluated with the model's ``xi`` and ``_R_chol``) and the Wishart
        log-density of the component precision triple ``S_c`` (evaluated
        with ``beta`` and ``_beta_W_chol``).  Used by the merge-split
        (``_ms_``) samplers.
        """
        ret = 0.0
        # TODO: Cache R_logdet
        R_logdet = _chol_logdet(self.dim, self._R_chol)
        ret += _normal_logpdf(mixture_param.mu_c, self.dim, self.xi,
                              self._R_chol, R_logdet)
        # S_c is stored as a (matrix, cholesky factor, log-determinant) triple.
        _, S_c_chol, S_c_logdet = mixture_param.S_c
        # TODO: Cache beta_W_logdet
        beta_W_logdet = _chol_logdet(self.dim, self._beta_W_chol)
        ret += _wishart_logpdf(S_c_chol, S_c_logdet, self.dim, self.beta,
                               self._beta_W_chol, beta_W_logdet)
        return ret
def _ms_log_likelihood(self, x_n, inv_c, mixture_param, random_state):
"""
Logarithm of the likelihood appearing in the M-H acceptance ratio used
by the merge-split samplers.
"""
ret = 0.0
for _, l in enumerate(inv_c):
ret += mixture_param.log_likelihood(x_n[l])
return ret
    class DrawParam(GenericMixture.DrawParam):
        """Component parameters used when drawing data from the model.

        ``mu_c`` (mean) and ``S_c`` (precision triple) are drawn lazily on
        first access and then cached on the instance.
        """
        def __init__(self, mixture_model, random_state):
            super(NonconjugateGaussianMixture.DrawParam, self).__init__(
                    random_state)
            self._mixture_model = \
                NonconjugateGaussianMixture._check_mixture_model(
                        mixture_model)
            # None means "not drawn yet"; see the properties below.
            self._mu_c = None
            self._S_c = None

        @property
        def mu_c(self):
            # Component mean; drawn once via _normal_rvs from the model's
            # xi / _R_chol parameters.
            if self._mu_c is None:
                mm = self._mixture_model
                self._mu_c = _normal_rvs(mm.dim, mm.xi, mm._R_chol,
                                         self._random_state)
            return self._mu_c

        @property
        def S_c(self):
            # Component precision, cached as a
            # (matrix, cholesky factor, log-determinant) triple; drawn once
            # via _wishart_rvs from the model's beta / _beta_W_chol.
            if self._S_c is None:
                mm = self._mixture_model
                S_c = _wishart_rvs(mm.dim, mm.beta, mm._beta_W_chol,
                                   self._random_state)
                S_c_chol = _chol(mm.dim, S_c)
                S_c_logdet = _chol_logdet(mm.dim, S_c_chol)
                self._S_c = (S_c, S_c_chol, S_c_logdet)
            return self._S_c

        def draw_x_n(self):
            """Draw a single observation from this component."""
            _, S_c_chol, _ = self.S_c
            x_n = _normal_rvs(self._mixture_model.dim, self.mu_c, S_c_chol,
                              self._random_state)
            return x_n

        def phi_c(self):
            """Return the component parameters as a plain dict."""
            S_c, _, _ = self.S_c
            return {'mean': self.mu_c, 'precision': S_c}

        def dump(self):
            """Return the raw cached state (entries are None if never drawn)."""
            return self._mu_c, self._S_c
class InferParam(GenericMixture.InferParam):
def __init__(self, mixture_model, random_state):
super(NonconjugateGaussianMixture.InferParam, self).__init__(
random_state)
self._mixture_model = \
NonconjugateGaussianMixture._check_mixture_model(
mixture_model)
self._n_c = None
self._beta_c = None
self._xsum_c = None
self._beta_W_help_c_chol = None
self._mu_c = None
self._S_c = None
@property
def n_c(self):
if self._n_c is None:
return 0
else:
return self._n_c
@property
def beta_c(self):
if self._beta_c is None:
return self._mixture_model.beta
else:
return self._beta_c
@property
def xsum_c(self):
if self._xsum_c is None:
return np.zeros(self._mixture_model.dim, dtype=float)
else:
return self._xsum_c
@property
def beta_W_help_c_chol(self):
if self._beta_W_help_c_chol is None:
return self._mixture_model._beta_W_chol
else:
return self._beta_W_help_c_chol
@property
def mu_c(self):
if self._mu_c is None:
mm = self._mixture_model
self._mu_c, _ = self._draw_mu_c(mm.dim, self.n_c, self.xsum_c,
self._S_c, mm.R, mm._R_chol, mm.xi, mm._R_xi,
self._random_state)
return self._mu_c
@staticmethod
def _prepare_mu_c(dim, n_c, xsum_c, S_c, R, R_chol, xi, R_xi):
if n_c > 0:
if S_c is None:
raise ValueError
S_c, _, _ = S_c
R_c_chol = _chol(dim, R + n_c*S_c)
xi_c = _chol_solve(dim, R_c_chol, np.dot(S_c, xsum_c) + R_xi)
else:
R_c_chol = R_chol
xi_c = xi
return R_c_chol, xi_c
@staticmethod
def _log_likelihood_mu_c(dim, R_c_chol, xi_c, mu_c):
R_c_logdet = _chol_logdet(dim, R_c_chol)
ret = _normal_logpdf(mu_c, dim, xi_c, R_c_chol, R_c_logdet)
return ret
@classmethod
def _draw_mu_c(cls, dim, n_c, xsum_c, S_c, R, R_chol, xi, R_xi,
random_state, compute_log_likelihood=False):
R_c_chol, xi_c = cls._prepare_mu_c(dim, n_c, xsum_c, S_c, R,
R_chol, xi, R_xi)
mu_c = _normal_rvs(dim, xi_c, R_c_chol, random_state)
log_likelihood = 0.0
if compute_log_likelihood:
log_likelihood += cls._log_likelihood_mu_c(dim, R_c_chol,
xi_c, mu_c)
return mu_c, log_likelihood
@property
def S_c(self):
if self._S_c is None:
self._S_c, _ = self._draw_S_c(self._mixture_model.dim,
self.n_c, self.beta_W_help_c_chol, self.xsum_c,
self._mu_c, self.beta_c, self._random_state)
return self._S_c
@staticmethod
def _prepare_S_c(dim, n_c, beta_W_help_c_chol, xsum_c, mu_c):
if n_c > 0:
if mu_c is None:
raise ValueError
beta_W_c_chol = np.array(beta_W_help_c_chol, copy=True)
_chol_update(dim, beta_W_c_chol,
np.sqrt(n_c) * (xsum_c/n_c - mu_c))
else:
beta_W_c_chol = beta_W_help_c_chol
return beta_W_c_chol
@staticmethod
def _log_likelihood_S_c(dim, beta_c, beta_W_c_chol, S_c_chol,
S_c_logdet):
beta_W_c_logdet = _chol_logdet(dim, beta_W_c_chol)
ret = _wishart_logpdf(S_c_chol, S_c_logdet, dim, beta_c,
beta_W_c_chol, beta_W_c_logdet)
return ret
@classmethod
def _draw_S_c(cls, dim, n_c, beta_W_help_c_chol, xsum_c, mu_c, beta_c,
random_state, compute_log_likelihood=False):
beta_W_c_chol = cls._prepare_S_c(dim, n_c, beta_W_help_c_chol,
xsum_c, mu_c)
S_c = _wishart_rvs(dim, beta_c, beta_W_c_chol, random_state)
S_c_chol = _chol(dim, S_c)
S_c_logdet = _chol_logdet(dim, S_c_chol)
log_likelihood = 0.0
if compute_log_likelihood:
log_likelihood += cls._log_likelihood_S_c(dim, beta_c,
beta_W_c_chol, S_c_chol, S_c_logdet)
return (S_c, S_c_chol, S_c_logdet), log_likelihood
def update(self, x):
mm = self._mixture_model
if self._beta_W_help_c_chol is None:
self._beta_W_help_c_chol = np.array(mm._beta_W_chol,
copy=True)
else:
_chol_update(mm.dim, self._beta_W_help_c_chol,
np.sqrt(self._n_c / float(self._n_c+1)) * \
(x - self._xsum_c/self._n_c))
if self._n_c is None:
self._n_c = 1
else:
self._n_c += 1
if self._xsum_c is None:
self._xsum_c = np.array(x, copy=True)
else:
self._xsum_c += x
if self._beta_c is None:
self._beta_c = mm.beta + 1.0
else:
self._beta_c += 1.0
return self
def downdate(self, x):
mm = self._mixture_model
if self._beta_W_help_c_chol is None:
raise ValueError('beta_W_help_c must be updated before it can'
' be downdated')
elif self._n_c > 1:
_chol_downdate(mm.dim, self._beta_W_help_c_chol,
np.sqrt(self._n_c / float(self._n_c-1)) * \
(x - self._xsum_c/self._n_c))
else:
self._beta_W_help_c_chol = None
if self._n_c is None:
raise ValueError('n_c must be updated before it can be'
' downdated')
elif self._n_c > 1:
self._n_c -= 1
else:
self._n_c = None
if self._xsum_c is None:
raise ValueError('xsum_c must be updated before it can be'
' downdated')
elif self._n_c is not None:
self._xsum_c -= x
else:
self._xsum_c = None
if self._beta_c is None:
raise ValueError('beta_c must be updated before it can be'
' downdated')
elif self._n_c is not None:
self._beta_c -= 1.0
else:
self._beta_c = None
return self
def iterate(self, compute_log_likelihood=False):
mm = self._mixture_model
dim = mm.dim
self._mu_c, log_likelihood_mu_c = self._draw_mu_c(dim, self.n_c,
self.xsum_c, self._S_c, mm.R, mm._R_chol, mm.xi, mm._R_xi,
self._random_state, compute_log_likelihood)
self._S_c, log_likelihood_S_c = self._draw_S_c(dim, self.n_c,
self.beta_W_help_c_chol, self.xsum_c, self._mu_c,
self.beta_c, self._random_state, compute_log_likelihood)
return log_likelihood_mu_c + log_likelihood_S_c
def iterate_to(self, mixture_param, compute_log_likelihood=False):
mm = self._mixture_model
dim = mm.dim
log_likelihood = 0.0
self._mu_c = mixture_param._mu_c
if compute_log_likelihood:
R_c_chol, xi_c = self._prepare_mu_c(dim, self.n_c,
self.xsum_c, self._S_c, mm.R, mm._R_chol, mm.xi,
mm._R_xi)
log_likelihood += self._log_likelihood_mu_c(dim, R_c_chol,
xi_c, self._mu_c)
self._S_c = mixture_param._S_c
if compute_log_likelihood:
_, S_c_chol, S_c_logdet = self._S_c
beta_W_c_chol = self._prepare_S_c(dim, self.n_c,
self.beta_W_help_c_chol, self.xsum_c, self._mu_c)
log_likelihood += self._log_likelihood_S_c(dim, self.beta_c,
beta_W_c_chol, S_c_chol, S_c_logdet)
return log_likelihood
def log_likelihood(self, x):
_, S_c_chol, S_c_logdet = self.S_c
ret = _normal_logpdf(x, self._mixture_model.dim, self.mu_c,
S_c_chol, S_c_logdet)
return ret
def phi_c(self):
S_c, _, _ = self.S_c
return {'mean': self.mu_c, 'precision': S_c}
def dump(self):
return self._n_c, self._R_c_chol, self._beta_c, self._xsum_c, \
self._xi_c, self._beta_W_help_c_chol, self._mu_c, \
self._S_c
| {
"content_hash": "632b46aa06455be0e5d4a1a27a5c29dd",
"timestamp": "",
"source": "github",
"line_count": 1515,
"max_line_length": 78,
"avg_line_length": 30.51815181518152,
"alnum_prop": 0.4742727371039256,
"repo_name": "tscholak/imm",
"id": "7da7cea2181083791f1c14ef59d84e636e0fa423",
"size": "46272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imm/models/mixtures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "150341"
}
],
"symlink_target": ""
} |
from api.decorators import api_view, request_data_defaultdc
from api.permissions import IsSuperAdmin
from api.utils.db import get_object
from api.node.image.api_views import NodeImageView
from vms.models import NodeStorage, Image
__all__ = ('node_image_list', 'node_image')
@api_view(('GET', 'DELETE'))
@request_data_defaultdc(permissions=(IsSuperAdmin,))
def node_image_list(request, hostname, zpool, data=None):
    """
    List (:http:get:`GET </node/(hostname)/storage/(zpool)/image>`) all images imported on a compute node storage or
    remove (:http:delete:`DELETE </node/(hostname)/storage/(zpool)/image>`) all unused images
    imported on a compute node storage.
    .. http:get:: /node/(hostname)/storage/(zpool)/image
        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-no|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :arg data.full: Return list of objects with all image details (default: false)
        :type data.full: boolean
        :arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name`` (default: ``name``)
        :type data.order_by: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Storage not found
    .. http:delete:: /node/(hostname)/storage/(zpool)/image
        .. note:: This API function will run \
:http:delete:`DELETE node_image </node/(hostname)/storage/(zpool)/image/(name)>` for every unused image.
        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-no|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Storage not found
        :status 423: Node is not operational
    """
    # Resolve the node storage; raises a 404-style API error when the
    # hostname/zpool pair does not exist.
    ns = get_object(request, NodeStorage, {'node__hostname': hostname, 'zpool': zpool},
                    exists_ok=True, noexists_fail=True, sr=('node',))
    # Images on this storage, ordered according to data.order_by.
    images = ns.images.select_related('owner', 'dc_bound').order_by(*NodeImageView.get_order_by(data))
    node_image_view = NodeImageView(request, ns, images, data)
    if request.method == 'DELETE':
        # DELETE removes every unused image from this storage.
        return node_image_view.cleanup()
    else:
        return node_image_view.get(many=True)
@api_view(('GET', 'POST', 'DELETE'))
@request_data_defaultdc(permissions=(IsSuperAdmin,))
def node_image(request, hostname, zpool, name, data=None):
    """
    Show (:http:get:`GET </node/(hostname)/storage/(zpool)/image/(name)>`),
    import (:http:post:`POST </node/(hostname)/storage/(zpool)/image/(name)>`) or
    delete (:http:delete:`DELETE </node/(hostname)/storage/(zpool)/image/(name)>`)
    an image (name) on a compute node (hostname) storage (zpool).
    .. http:get:: /node/(hostname)/storage/(zpool)/image/(name)
        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-no|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :arg name: **required** - Image name
        :type name: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Storage not found / Image not found
    .. http:post:: /node/(hostname)/storage/(zpool)/image/(name)
        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-yes|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :arg name: **required** - Image name
        :type name: string
        :status 200: SUCCESS
        :status 201: PENDING
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: Storage not found / Image not found
        :status 406: Image already exists
        :status 423: Node is not operational
        :status 428: Image requires newer node version
    .. http:delete:: /node/(hostname)/storage/(zpool)/image/(name)
        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-yes|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :arg name: **required** - Image name
        :type name: string
        :status 200: SUCCESS
        :status 201: PENDING
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: Storage not found / Image not found
        :status 423: Node is not operational
        :status 428: Image is used by some VMs
    """
    # Resolve the node storage; raises a 404-style API error when the
    # hostname/zpool pair does not exist.
    ns = get_object(request, NodeStorage, {'node__hostname': hostname, 'zpool': zpool},
                    exists_ok=True, noexists_fail=True, sr=('node', 'storage'))
    attrs = {'name': name}
    # For GET/DELETE the image must already be present on this storage;
    # POST imports it, so the nodestorage filter is omitted there.
    if request.method != 'POST':
        attrs['nodestorage'] = ns
    img = get_object(request, Image, attrs, sr=('owner', 'dc_bound'), exists_ok=True, noexists_fail=True)
    return NodeImageView(request, ns, img, data).response()
| {
"content_hash": "71416c9582e27f4372381ade6553d314",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 116,
"avg_line_length": 36.36842105263158,
"alnum_prop": 0.5965991316931982,
"repo_name": "erigones/esdc-ce",
"id": "9d8c5e21c199519f9dfd63a5257111e6de6d0947",
"size": "5528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/node/image/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
} |
from pg8000 import DBAPI
class dbManager():
    '''
    Postgresql driver for jxgpy
    dbManager for pg8000

    Wraps a single pg8000 DBAPI connection plus a registry of named cursors.
    '''
    driver = "pg8000Driver"

    def __init__(self, host, port, user, password, database):
        """Store the connection parameters; no connection is opened yet."""
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
        # Empty string (not None) kept for backward compatibility with
        # callers that may compare against ''.
        self.connection = ''
        self.cursorsDict = {}

    def connect(self):
        """Open the pg8000 connection using the stored parameters."""
        self.connection = DBAPI.connect(host=self.host, port=self.port, user=self.user,
                                        password=self.password, database=self.database)

    def closeConnection(self):
        """Close the underlying connection."""
        self.connection.close()

    def setCursor(self, cursorName):
        """Create a cursor, register it under cursorName and return the name."""
        self.cursorsDict[cursorName] = self.connection.cursor()
        return cursorName

    def unSetCursor(self, cursorName):
        """Close a registered cursor.

        BUG FIX: the previous implementation read the bound method
        ``close`` without calling it, so the cursor was never closed.
        """
        self.cursorsDict[cursorName].close()

    def select(self, cursorName, select):
        """Execute a SELECT and return a dict with:

        - 'fieldsDict': column name -> position within each result row
        - 'resultList': all fetched rows
        """
        cursor = self.cursorsDict[cursorName]
        cursor.execute(select)
        resultDict = {'fieldsDict': {}}
        for fieldIndex, fieldInfo in enumerate(cursor.description):
            resultDict['fieldsDict'][fieldInfo[0]] = fieldIndex
        resultDict['resultList'] = cursor.fetchall()
        return resultDict

    def getLastRowCount(self, cursorName):
        """Row count reported by the cursor after its last statement."""
        return self.cursorsDict[cursorName].rowcount

    def change(self, cursorName, change):
        """Execute a data-modifying statement and commit immediately."""
        self.cursorsDict[cursorName].execute(change)
        self.connection.commit()

    def getLastAffectedRowsNumber(self, cursorName):
        """Alias of getLastRowCount, kept for backward compatibility."""
        return self.cursorsDict[cursorName].rowcount

    def getTablesList(self, cursorName):
        """Return the names of user tables (pg_* / sql_* tables excluded)."""
        sql = "SELECT tablename FROM pg_tables WHERE tablename NOT LIKE 'pg%%' AND tablename NOT LIKE 'sql%%'"
        self.cursorsDict[cursorName].execute(sql)
        resultList = []
        for line in self.cursorsDict[cursorName].fetchall():
            resultList.append(line[0])
        return resultList
"""
Example
db1 = dbManager('192.168.0.200', 5432, 'tests', '12345', 'tests')
db1.connect()
###
cursor1 = db1.setCursor('cursor1')
resultDict = db1.select(cursor1, 'SELECT uno,dos FROM nombres')
db1.unSetCursor(cursor1)
for line in resultDict['resultList']:
    print('Campo ' + line[resultDict['fieldsDict']['dos']])
###
cursor2 = db1.setCursor('cursor2')
db1.change(cursor2, "update nombres set uno='u' where uno='uno'")
print('Actualizadas ' + str(db1.getLastAffectedRowsNumber(cursor2)))
db1.change(cursor2, "delete from nombres where uno='x'")
print('Eliminadas ' + str(db1.getLastAffectedRowsNumber(cursor2)))
db1.change(cursor2, "insert into nombres values('x','y','z')")
print('Insertadas ' + str(db1.getLastAffectedRowsNumber(cursor2)))
db1.unSetCursor(cursor2)
db1.closeConnection()
""" | {
"content_hash": "c4b35f5e1424a3693887466792b8af29",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 134,
"avg_line_length": 24.146341463414632,
"alnum_prop": 0.6377104377104377,
"repo_name": "julianariasquijano/jxgpy",
"id": "6b3f101715c3d714c8e5355d6a9e286c7df4c86b",
"size": "2970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbs/pg8000Driver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39000"
}
],
"symlink_target": ""
} |
import errno
import os
__author__ = 'justin@shapeways.com'
def create_directory(directory):
    """Creates a directory if it does not exist (in a thread-safe way)

    @param directory: The directory to create
    @return: The directory specified
    @raise OSError: if creation fails for any reason other than the
        directory already existing
    """
    try:
        os.makedirs(directory)
    except OSError as e:
        # Another thread/process may have created the directory between the
        # existence check implied by makedirs and now; only that race is
        # benign.  Previously *all* OSErrors (e.g. permission denied) were
        # silently swallowed — now they propagate.
        if not (e.errno == errno.EEXIST and os.path.isdir(directory)):
            raise
    return directory
"content_hash": "1cf01e716b68c5fea7ced06f35c6d678",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 22.31578947368421,
"alnum_prop": 0.6533018867924528,
"repo_name": "Shapeways/coyote_framework",
"id": "3d257f123b539089ba912417ac16a292b6a5ab8e",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coyote_framework/mixins/filesystem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "737"
},
{
"name": "HTML",
"bytes": "286"
},
{
"name": "JavaScript",
"bytes": "7310"
},
{
"name": "Python",
"bytes": "187655"
}
],
"symlink_target": ""
} |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class MembersApphook(CMSApp):
    # django CMS apphook attaching the members app's URLs to a CMS page.
    name = _("Members Apphook")  # label shown in the CMS admin
    app_name = 'members'  # URL namespace for reversing
    def get_urls(self, page=None, language=None, **kwargs):
        """Return the dotted paths of the urlconf modules served by this apphook."""
        return ['allink_apps.members.urls']
# Register with the apphook pool so the CMS can offer it on pages.
apphook_pool.register(MembersApphook)
| {
"content_hash": "3aa887b00b23e412bb135f654e08423d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 26,
"alnum_prop": 0.7252747252747253,
"repo_name": "allink/allink-apps",
"id": "42b850c10a70cfc827daaf62fdbe03c6fa133824",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "members/cms_apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "994"
},
{
"name": "HTML",
"bytes": "47533"
},
{
"name": "Python",
"bytes": "183917"
}
],
"symlink_target": ""
} |
import os
from distutils.core import setup
# Figure out the version; this could be done by importing the
# module, though that requires Django to be already installed,
# which may not be the case when processing a pip requirements
# file, for example.
import re
import ast

# Extract the version tuple straight from the package source so Django does
# not need to be importable when this setup.py runs (e.g. from a pip
# requirements file).
here = os.path.dirname(os.path.abspath(__file__))
version_re = re.compile(r'__version__ = (\(.*?\))')
version = None
# 'with' guarantees the file is closed even on the error path (the old code
# leaked the handle when the version line was missing).
with open(os.path.join(here, 'django_tables', '__init__.py')) as fp:
    for line in fp:
        match = version_re.search(line)
        if match:
            # literal_eval only accepts Python literals, unlike eval which
            # would execute arbitrary code found in the file.
            version = ast.literal_eval(match.group(1))
            break
    else:
        raise Exception("Cannot find version in __init__.py")
def find_packages(root):
    # so we don't depend on setuptools; from the Storm ORM setup.py
    """Return dotted package names for every directory under *root* that
    contains an ``__init__.py``."""
    return [
        directory.replace(os.sep, '.')
        for directory, _subdirs, filenames in os.walk(root)
        if '__init__.py' in filenames
    ]
setup(
    name='django-tables',
    # __init__.py stores the version as a tuple of ints; join into "x.y.z".
    version=".".join(map(str, version)),
    description='Render QuerySets as tabular data in Django.',
    author='Michael Elsdoerfer',
    author_email='michael@elsdoerfer.info',
    license='BSD',
    url='http://launchpad.net/django-tables',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries',
    ],
    # Local helper defined above; avoids a setuptools dependency.
    packages=find_packages('django_tables'),
)
| {
"content_hash": "549c1e7a81cadd1304550dac491f3bc8",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 67,
"avg_line_length": 30.925925925925927,
"alnum_prop": 0.6407185628742516,
"repo_name": "PolicyStat/django-tables",
"id": "d70f0ecbe5d6b3a3caa130485b1e9878b5b87426",
"size": "1692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "87193"
}
],
"symlink_target": ""
} |
import random
import numpy as np
from sklearn.base import BaseEstimator, clone
from tqdm import tqdm
class BaggedModel(BaseEstimator):
    """Bagging wrapper: fits ``n_models`` clones of ``estimator`` and
    averages their predicted probabilities.

    NOTE(review): every clone is fit on the same (X, y); diversity can only
    come from the estimator's own internal randomness after the global seeds
    are set — confirm this is intended.
    """

    def __init__(self, estimator, n_models, random_state, verbose=True):
        self.estimator = estimator
        self.n_models = n_models
        self.random_state = random_state
        self.verbose = verbose
        self.models = []

    def fit(self, X, y, *args, **kwargs):
        """Fit n_models clones of the base estimator on (X, y).

        Returns self (sklearn convention).
        """
        # BUG FIX: reset the list so repeated fit() calls do not accumulate
        # models — predict_proba only reads the first n_models entries, so
        # stale models from a previous fit would silently be used.
        self.models = []
        random.seed(self.random_state)
        np.random.seed(self.random_state)
        if self.verbose:
            iterator = tqdm(range(self.n_models))
        else:
            iterator = range(self.n_models)
        for _ in iterator:
            model = clone(self.estimator)
            self.models.append(model.fit(X, y, *args, **kwargs))
        return self

    def predict_proba(self, X, *args, **kwargs):
        """Return the element-wise mean of the per-model predict_proba outputs."""
        predictions = self.models[0].predict_proba(X, *args, **kwargs)
        for i in range(1, self.n_models):
            predictions += self.models[i].predict_proba(X, *args, **kwargs)
        predictions = predictions / self.n_models
        return predictions
| {
"content_hash": "e91350a66dce9624e4169b645c2cc382",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 34.4375,
"alnum_prop": 0.6116152450090744,
"repo_name": "rladeira/mltils",
"id": "e1c4d38d622a0233f826759f9bbc6c2317b02ca2",
"size": "1104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mltils/sklearn/bagging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62562"
}
],
"symlink_target": ""
} |
__revision__ = "test/MSVS/vs-8.0-clean.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify the -c option's ability to clean generated Visual Studio 8.0
project (.vcproj) and solution (.sln) files.
"""
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
host_arch = test.get_vs_host_arch()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['8.0']
expected_slnfile = TestSConsMSVS.expected_slnfile_8_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_8_0
test.write('SConstruct', """\
env=Environment(platform='win32', tools=['msvs'], MSVS_VERSION='8.0',
CPPDEFINES=['DEF1', 'DEF2',('DEF3','1234')],
CPPPATH=['inc1', 'inc2'],
HOST_ARCH='%(HOST_ARCH)s')
testsrc = ['test1.cpp', 'test2.cpp']
testincs = ['sdk.h']
testlocalincs = ['test.h']
testresources = ['test.rc']
testmisc = ['readme.txt']
p = env.MSVSProject(target = 'Test.vcproj',
srcs = testsrc,
incs = testincs,
localincs = testlocalincs,
resources = testresources,
misc = testmisc,
buildtarget = 'Test.exe',
variant = 'Release',
auto_build_solution = 0)
env.MSVSSolution(target = 'Test.sln',
slnguid = '{SLNGUID}',
projects = [p],
variant = 'Release')
"""%{'HOST_ARCH': host_arch})
test.run(arguments=".")
test.must_exist(test.workpath('Test.vcproj'))
vcproj = test.read('Test.vcproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '8.0', None, 'SConstruct')
# don't compare the pickled data
assert vcproj[:len(expect)] == expect, test.diff_substr(expect, vcproj)
test.must_exist(test.workpath('Test.sln'))
sln = test.read('Test.sln', 'r')
expect = test.msvs_substitute(expected_slnfile, '8.0', None, 'SConstruct')
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)
test.run(arguments='-c .')
test.must_not_exist(test.workpath('Test.vcproj'))
test.must_not_exist(test.workpath('Test.sln'))
test.run(arguments='.')
test.must_exist(test.workpath('Test.vcproj'))
test.must_exist(test.workpath('Test.sln'))
test.run(arguments='-c Test.sln')
test.must_exist(test.workpath('Test.vcproj'))
test.must_not_exist(test.workpath('Test.sln'))
test.run(arguments='-c Test.vcproj')
test.must_not_exist(test.workpath('Test.vcproj'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "cf25558d576d67547c3d239f4e61af0a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 99,
"avg_line_length": 28.247311827956988,
"alnum_prop": 0.6372287780738485,
"repo_name": "EmanueleCannizzaro/scons",
"id": "259d0b3c87763d72da83d0d831089f0096462835",
"size": "3762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/MSVS/vs-8.0-clean.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``ticktext`` data-array property of
    ``choroplethmapbox.colorbar`` (plotly generated-code style)."""

    def __init__(
        self, plotly_name="ticktext", parent_name="choroplethmapbox.colorbar", **kwargs
    ):
        super(TicktextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # pop() lets callers override these defaults via kwargs.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
| {
"content_hash": "9acbe738892e08fd49e9b17112c6d468",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 87,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.6115702479338843,
"repo_name": "plotly/python-api",
"id": "cb9744392d03a406fd7184b516f72959f79de9ea",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choroplethmapbox/colorbar/_ticktext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
rentabot.controllers
~~~~~~~~~~~~~~~~~~~~
This module contains rent-a-bot functions related to database manipulation.
"""
from rentabot.models import Resource, db
from rentabot.exceptions import ResourceException, ResourceNotFound
from rentabot.exceptions import ResourceAlreadyLocked, ResourceAlreadyUnlocked, InvalidLockToken
from rentabot.exceptions import ResourceDescriptorIsEmpty
from rentabot.logger import get_logger
from uuid import uuid4
import yaml
import threading
thread_safe_lock = threading.Lock()
logger = get_logger(__name__)
def get_all_ressources():
    """Returns a list of resources."""
    # NOTE: the misspelt name ("ressources") is kept as-is; callers in other
    # modules may rely on it.
    return Resource.query.all()
def get_resource_from_id(resource_id):
    """Look up a resource by database id.

    Args:
        resource_id: primary key of the resource.

    Returns:
        The matching Resource object.

    Raises:
        ResourceNotFound: when no resource has this id.
    """
    resource = Resource.query.filter_by(id=resource_id).first()
    if resource is not None:
        return resource
    logger.warning("Resource not found. Id : {}".format(resource_id))
    raise ResourceNotFound(message="Resource not found",
                           payload={'resource_id': resource_id})
def get_resource_from_name(resource_name):
    """Look up a resource by name.

    Args:
        resource_name: name of the resource.

    Returns:
        The matching Resource object.

    Raises:
        ResourceNotFound: when no resource has this name.
    """
    resource = Resource.query.filter_by(name=resource_name).first()
    if resource is not None:
        return resource
    logger.warning("Resource not found. Name : {}".format(resource_name))
    raise ResourceNotFound(message="Resource not found",
                           payload={'resource_name': resource_name})
def get_resources_from_tags(resource_tags):
    """Return the resources whose tags include every tag in *resource_tags*.

    Args:
        resource_tags: the tags of the resource we are looking for.

    Returns:
        A list of Resource objects.

    Raises:
        ResourceNotFound: when no resource carries all the requested tags.
    """
    # Build the wanted set once (was rebuilt on every loop iteration) and
    # use issubset instead of comparing an intersection.
    wanted = set(resource_tags)
    # Filter the ones matching the tags, TODO: Use database more efficiently
    resources = [
        resource
        for resource in get_all_ressources()
        if resource.tags and wanted.issubset(resource.tags.split())
    ]
    if not resources:
        logger.warning("Resources not found. Tag(s) : {}".format(resource_tags))
        raise ResourceNotFound(message="No resource found matching the tag(s)",
                               payload={'tags': resource_tags})
    return resources
def get_an_available_resource(rid=None, name=None, tags=None):
    """Find an unlocked resource matching the given selector.

    Exactly one of ``rid``, ``name`` or ``tags`` should be supplied.

    Args:
        rid (int): The id
        name (str):
        tags (list):

    Returns:
        (Resource) A resource object

    Raises:
        ResourceException: when no selector is given.
        ResourceAlreadyLocked: when the selected resource(s) are all locked.
    """
    if rid:
        resource = get_resource_from_id(rid)
    elif name:
        resource = get_resource_from_name(name)
    elif tags:
        candidates = get_resources_from_tags(tags)
        # First unlocked candidate; default to the last candidate, which
        # then fails the lock check below (same outcome as the original
        # for/break fall-through).
        resource = next((candidate for candidate in candidates
                         if candidate.lock_token is None), candidates[-1])
    else:
        raise ResourceException(message="Bad Request")
    if resource.lock_token is not None:
        logger.warning("Resource already locked. Id : {}".format(resource.id))
        raise ResourceAlreadyLocked(message="Cannot lock the requested resource, resource(s) already locked",
                                    payload={'id': rid,
                                             'name': name,
                                             'tags': tags
                                             })
    return resource
def lock_resource(rid=None, name=None, tags=None):
    """Lock resource. Raise an exception if the resource is already locked.

    Args:
        rid (int): The id of the resource to lock.
        name (str): The name of the resource to lock.
        tags (list): The tags of the resource to lock.

    Returns:
        (tuple) The lock token value and the locked Resource object.
    """
    # Prevent concurrent database access in a multi threaded execution context
    with thread_safe_lock:
        # Raises ResourceAlreadyLocked / ResourceNotFound on failure, so the
        # lines below only run for a resource we may lock.
        resource = get_an_available_resource(rid=rid, name=name, tags=tags)
        resource.lock_token = str(uuid4())
        resource.lock_details = u'Resource locked'
        db.session.commit()
        logger.info("Resource locked. Id : {}".format(resource.id))
        return resource.lock_token, resource
def unlock_resource(resource_id, lock_token):
    """Unlock resource. Raise an exception if the token is invalid or if the resource is already unlocked.

    Args:
        resource_id (int): The id of the resource to unlock.
        lock_token (str): The lock token to authorize the unlock.

    Returns:
        None

    Raises:
        ResourceNotFound: when resource_id does not exist.
        ResourceAlreadyUnlocked: when the resource holds no lock.
        InvalidLockToken: when the supplied token does not match.
    """
    resource = get_resource_from_id(resource_id)
    if resource.lock_token is None:
        logger.warning("Resource already unlocked. Id : {}".format(resource_id))
        raise ResourceAlreadyUnlocked(message="Resource is already unlocked",
                                      payload={'resource_id': resource_id})
    # Only the token handed out by lock_resource may release the lock.
    if lock_token != resource.lock_token:
        msg = "Incorrect lock token. Id : {}, lock-token : {}, resource lock-token : {}".format(resource_id,
                                                                                               lock_token,
                                                                                               resource.lock_token)
        logger.warning(msg)
        raise InvalidLockToken(message="Cannot unlock resource, the lock token is not valid.",
                               payload={'resource': resource.dict,
                                        'invalid-lock-token': lock_token})
    resource.lock_token = None
    resource.lock_details = u'Resource available'
    db.session.commit()
    logger.info("Resource unlocked. Id : {}".format(resource_id))
def populate_database_from_file(resource_descriptor):
    """ Populate the database using the resources described in a yaml file.

    Drops and recreates all tables, then adds one Resource row per entry
    in the descriptor.

    Args:
        resource_descriptor (str): path to the yaml resource descriptor.

    Returns:
        (list) resources name added

    Raises:
        ResourceDescriptorIsEmpty: if the descriptor contains no resources.
    """
    logger.info("Populating the database. Descriptor : {}".format(resource_descriptor))
    with open(resource_descriptor, "r") as f:
        # safe_load: the descriptor is plain data; yaml.load without an
        # explicit Loader can construct arbitrary objects and is deprecated.
        resources = yaml.safe_load(f)
    if resources is None:
        raise ResourceDescriptorIsEmpty(resource_descriptor)
    db.drop_all()
    db.create_all()
    for resource_name in list(resources):
        logger.debug("Add resource : {}".format(resource_name))
        entry = resources[resource_name]
        # All descriptor fields are optional; .get() returns None when absent
        # (replaces the previous per-field try/except KeyError blocks).
        db.session.add(Resource(resource_name,
                                description=entry.get('description'),
                                endpoint=entry.get('endpoint'),
                                tags=entry.get('tags')))
    db.session.commit()
    return list(resources)
| {
"content_hash": "e2bafc94c62d667276e882df8adcbd6c",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 116,
"avg_line_length": 31.721238938053098,
"alnum_prop": 0.6005021620867624,
"repo_name": "cpoisson/rent-a-bot",
"id": "72a8affefaecb371581993cba7c94eb42a991f6d",
"size": "7193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rentabot/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1987"
},
{
"name": "Python",
"bytes": "42447"
}
],
"symlink_target": ""
} |
import requests
import yaml

# Fetch the published whitelist document.
response = requests.get(
    'https://raw.githubusercontent.com/Lens10/datatrue-whitelist/master/'
    'whitelist.yml')
# Fail loudly on an HTTP error instead of parsing an error page as yaml.
response.raise_for_status()
# safe_load: the document is plain data; yaml.load without an explicit
# Loader can construct arbitrary objects and is deprecated.
regions = yaml.safe_load(response.text)

# Flatten the per-region address lists into one list and print it sorted.
all_addresses = [item for sublist in regions.values() for item in sublist]
for address in sorted(all_addresses):
    print(address)
| {
"content_hash": "4050a77dbc1a6979a9b09aaf63d89215",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 74,
"avg_line_length": 28.818181818181817,
"alnum_prop": 0.750788643533123,
"repo_name": "Lens10/datatrue-whitelist",
"id": "68fe36b26ddac62435899ef53945636837f8fc66",
"size": "317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/python.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
# Gunicorn server configuration for the grader (see gunicorn settings docs).
bind = '127.0.0.1:1710'  # listen address:port -- localhost only
threads = 3  # threads per worker process
workers = 6  # number of worker processes
loglevel = "debug"  # debug, info, warning, error and critical
errorlog = '/tmp/grader-edx.log'  # error log destination
| {
"content_hash": "db565a3a7d728e99fb567e0dcae0364f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 62,
"avg_line_length": 28.8,
"alnum_prop": 0.6805555555555556,
"repo_name": "brnomendes/grader-edx",
"id": "c555fe184a3ad90aabbac8ccef5e9729084a4237",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gunicorn_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "972"
},
{
"name": "Python",
"bytes": "15921"
},
{
"name": "Shell",
"bytes": "55"
}
],
"symlink_target": ""
} |
import datetime
from keystoneclient.openstack.common import timeutils
# gap, in seconds, to determine whether the given token is about to expire
STALE_TOKEN_DURATION = 30


class AccessInfo(dict):
    """A dict subclass wrapping a raw keystone authentication token, with
    helper properties for extracting useful values from that token."""

    def will_expire_soon(self, stale_duration=None):
        """ Determines if expiration is about to occur.

        :return: boolean : true if expiration is within the given duration
        """
        if stale_duration is None:
            stale_duration = STALE_TOKEN_DURATION
        # (gyee) should we move auth_token.will_expire_soon() to timeutils
        # instead of duplicating code here?
        cutoff = timeutils.utcnow() + datetime.timedelta(seconds=stale_duration)
        return timeutils.normalize_time(self.expires) < cutoff

    @property
    def expires(self):
        """Token expiration, as a datetime object."""
        return timeutils.parse_isotime(self['token']['expires'])

    @property
    def auth_token(self):
        """Token id for authenticating OpenStack API request headers."""
        return self['token'].get('id', None)

    @property
    def username(self):
        """Username associated with the authentication request.

        Per the V2 API, 'name' is preferred; 'username' is the fallback.
        """
        return self['user'].get('name', None) or self['user'].get('username', None)

    @property
    def user_id(self):
        """User id associated with the authentication request."""
        return self['user'].get('id', None)

    @property
    def tenant_name(self):
        """Tenant (project) name, or None when the token is unscoped."""
        tenant = self['token'].get('tenant', None)
        return tenant.get('name', None) if tenant else None

    @property
    def project_name(self):
        """ Synonym for tenant_name """
        return self.tenant_name

    @property
    def scoped(self):
        """True when the token is tenant-scoped and carries a populated
        service catalog."""
        return bool(self.get('serviceCatalog')) and 'tenant' in self['token']

    @property
    def tenant_id(self):
        """Tenant (project) id, or None when the token is unscoped."""
        tenant = self['token'].get('tenant', None)
        return tenant.get('id', None) if tenant else None

    @property
    def project_id(self):
        """ Synonym for tenant_id """
        return self.tenant_id

    def _get_identity_endpoint(self, endpoint_type):
        # No catalog at all (unscoped token) -> None
        if not self.get('serviceCatalog'):
            return
        return tuple(endpoint[endpoint_type]
                     for svc in self['serviceCatalog']
                     if svc['type'] == 'identity'
                     for endpoint in svc['endpoints']
                     if endpoint_type in endpoint)

    @property
    def auth_url(self):
        """Tuple of publicURLs for the 'identity' service from the catalog,
        or None if the authentication request wasn't scoped to a tenant.

        :returns: tuple of urls
        """
        return self._get_identity_endpoint('publicURL')

    @property
    def management_url(self):
        """Tuple of adminURLs for the 'identity' service from the catalog,
        or None if the authentication request wasn't scoped to a tenant.

        :returns: tuple of urls
        """
        return self._get_identity_endpoint('adminURL')
| {
"content_hash": "5cf1cf084f3b8cf0cf919bde25ed58fb",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 31.58783783783784,
"alnum_prop": 0.6055614973262032,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "1dc86705af962e439ef87b1c82cd76f6b6ed2c38",
"size": "5274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/python-keystoneclient/keystoneclient/access.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
} |
class Logger(object):
    """Minimal labelled logger: writes one formatted line per call to the
    given stream, or does nothing when the stream is falsy."""

    def __init__(self, header, stream):
        self._header = header
        self._stream = stream

    def __call__(self, message):
        # A falsy stream (e.g. None) silently disables output.
        if not self._stream:
            return
        self._stream.write(' %-8s | %s\n' % (self._header, message))
"content_hash": "05ad7bc48882268288253f8d8e79d51c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 28.444444444444443,
"alnum_prop": 0.5390625,
"repo_name": "Tallisado/pyrofactory",
"id": "24dd0dcf916fcb54676c0ab5efc33dacb6340d68",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyro_factory/logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36079"
}
],
"symlink_target": ""
} |
import os

print(os.getcwd())

# Context managers guarantee the handles are closed even on error
# (the original leaked the read handle entirely).
with open("kant.txt") as fd:
    content = fd.read()
print(content)

# "w" truncates/creates the file for writing.
with open("file1.txt", "w") as fout:
    content = fout.write("Alles neu mächt der Mai!")

# "a" appends at the end of the file.
with open("file2.txt", "a") as fapp:
    content = fapp.write("Alles neu mächt der Mai!\n")

# write() returns the number of characters written, so this prints a count.
print(content)
| {
"content_hash": "1d18ffd78722fa8cb28389b37dffeab1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 18.1875,
"alnum_prop": 0.6666666666666666,
"repo_name": "kantel/python-schulung",
"id": "486bd83d6161c9473466c1c790f875705dea1413",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/textfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66215"
}
],
"symlink_target": ""
} |
# Package entry point: lets `python -m chownref` run the command-line tool.
from .chownref import main
main()
| {
"content_hash": "8e1acfdce2343e8454eed4207b2479ec",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 26,
"avg_line_length": 17,
"alnum_prop": 0.7647058823529411,
"repo_name": "amr/chownref",
"id": "b04b3278d79c81cabae8a57ecca3128217f77291",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chownref/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5832"
}
],
"symlink_target": ""
} |
from pyraat import PraatAnalysisFunction as PyraatFunction
from .functions import BaseAnalysisFunction
class PraatAnalysisFunction(BaseAnalysisFunction):
    """Analysis function that delegates measurement to an external Praat script."""

    def __init__(self, praat_script_path, praat_path=None, arguments=None):
        """Wrap a Praat script as an analysis function.

        praat_script_path: path to the Praat script to run.
        praat_path: path to the Praat executable (optional).
        arguments: extra arguments forwarded to the script (optional).
        """
        super(PraatAnalysisFunction, self).__init__()
        # Praat operates on files on disk, never on in-memory signals.
        self.requires_file = True
        self._function = PyraatFunction(praat_script_path, praat_path, arguments)
        # "Long" scripts work segment-wise rather than on whole files.
        self.uses_segments = self._function.uses_long
| {
"content_hash": "a90a0ecbdcb268332f7b53988d8db005",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 80,
"avg_line_length": 45.5,
"alnum_prop": 0.7450549450549451,
"repo_name": "mmcauliffe/python-acoustic-similarity",
"id": "eece6814362606dfb647471ecca404cf2f8df068",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conch/analysis/praat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2586"
},
{
"name": "Python",
"bytes": "128013"
},
{
"name": "Shell",
"bytes": "1419"
}
],
"symlink_target": ""
} |
from MyPy.pdu.base import Packet
class EofPacket(Packet):
    # Presumably a MySQL-protocol EOF packet (module path suggests) -- TODO confirm.
    def from_data(self, data):
        """Store the raw wire payload on this packet and return self (fluent)."""
        self.data = data
        return self
"content_hash": "965990a05b9bfa0bbb98ac86dfcabe18",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 32,
"avg_line_length": 19.857142857142858,
"alnum_prop": 0.6330935251798561,
"repo_name": "nasi/MyPy",
"id": "2eac1d6aa4122493d0a3b61c6ac6574b3e6f6d5f",
"size": "139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MyPy/pdu/response/eofpacket.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45322"
}
],
"symlink_target": ""
} |
"""
CartoDB Spatial Analysis Python Library
See:
https://github.com/CartoDB/crankshaft
"""
from setuptools import setup, find_packages
setup(
name='crankshaft',
version='0.0.0',
description='CartoDB Spatial Analysis Python Library',
url='https://github.com/CartoDB/crankshaft',
author='Data Services Team - CartoDB',
author_email='dataservices@cartodb.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Mapping comunity',
'Topic :: Maps :: Mapping Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
],
keywords='maps mapping tools spatial analysis geostatistics',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
extras_require={
'dev': ['unittest'],
'test': ['unittest', 'nose', 'mock'],
},
# The choice of component versions is dictated by what's
# provisioned in the production servers.
# IMPORTANT NOTE: please don't change this line. Instead issue a ticket to systems for evaluation.
# NOTE2: For Bionic, .travis.yml is editing this line to match dependencies
install_requires=['joblib==0.9.4', 'numpy==1.11.0', 'scipy==0.17.0', 'pysal==1.14.3', 'scikit-learn==0.17.0'],
requires=['pysal', 'numpy', 'sklearn'],
test_suite='test'
)
| {
"content_hash": "e3497f0de95913c3c32cae6650a183d1",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 114,
"avg_line_length": 28.020408163265305,
"alnum_prop": 0.6438455935906774,
"repo_name": "CartoDB/crankshaft",
"id": "d1a2175b3ede717adffec852722c392d4673920b",
"size": "1374",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/py/crankshaft/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "104176"
},
{
"name": "Makefile",
"bytes": "5965"
},
{
"name": "PLpgSQL",
"bytes": "2108153"
},
{
"name": "Python",
"bytes": "4215676"
},
{
"name": "Shell",
"bytes": "5663"
}
],
"symlink_target": ""
} |
from __future__ import division
from future import standard_library
standard_library.install_aliases()
import logging
import re
import fnmatch
import configparser
import math
import os
from urllib.parse import urlparse
import warnings
import boto
from boto.s3.connection import S3Connection, NoHostProvided
from boto.sts import STSConnection
boto.set_stream_logger('boto')
logging.getLogger("boto").setLevel(logging.INFO)
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
def _parse_s3_config(config_file_name, config_format='boto', profile=None):
"""
Parses a config file for s3 credentials. Can currently
parse boto, s3cmd.conf and AWS SDK config formats
:param config_file_name: path to the config file
:type config_file_name: str
:param config_format: config type. One of "boto", "s3cmd" or "aws".
Defaults to "boto"
:type config_format: str
:param profile: profile name in AWS type config file
:type profile: str
"""
Config = configparser.ConfigParser()
if Config.read(config_file_name): # pragma: no cover
sections = Config.sections()
else:
raise AirflowException("Couldn't read {0}".format(config_file_name))
# Setting option names depending on file format
if config_format is None:
config_format = 'boto'
conf_format = config_format.lower()
if conf_format == 'boto': # pragma: no cover
if profile is not None and 'profile ' + profile in sections:
cred_section = 'profile ' + profile
else:
cred_section = 'Credentials'
elif conf_format == 'aws' and profile is not None:
cred_section = profile
else:
cred_section = 'default'
# Option names
if conf_format in ('boto', 'aws'): # pragma: no cover
key_id_option = 'aws_access_key_id'
secret_key_option = 'aws_secret_access_key'
# security_token_option = 'aws_security_token'
else:
key_id_option = 'access_key'
secret_key_option = 'secret_key'
# Actual Parsing
if cred_section not in sections:
raise AirflowException("This config file format is not recognized")
else:
try:
access_key = Config.get(cred_section, key_id_option)
secret_key = Config.get(cred_section, secret_key_option)
calling_format = None
if Config.has_option(cred_section, 'calling_format'):
calling_format = Config.get(cred_section, 'calling_format')
except:
logging.warning("Option Error in parsing s3 config file")
raise
return (access_key, secret_key, calling_format)
class S3Hook(BaseHook):
    """
    Interact with S3. This class is a wrapper around the boto library.

    Credentials are resolved from the Airflow connection's extra JSON:
    inline keys ('aws_access_key_id'/'aws_secret_access_key'), an
    's3_config_file' path, or boto's own profile lookup as a fallback.
    When 'aws_account_id'/'role_arn' appear in the extras, an STS
    assume-role connection is built for cross-account access.
    """
    def __init__(
            self,
            s3_conn_id='s3_default'):
        self.s3_conn_id = s3_conn_id
        self.s3_conn = self.get_connection(s3_conn_id)
        self.extra_params = self.s3_conn.extra_dejson
        self.profile = self.extra_params.get('profile')
        self.calling_format = None
        self.s3_host = None
        # Decide where credentials come from: inline in the connection,
        # a separate s3 config file, or boto's default resolution.
        self._creds_in_conn = 'aws_secret_access_key' in self.extra_params
        self._creds_in_config_file = 's3_config_file' in self.extra_params
        self._default_to_boto = False
        if 'host' in self.extra_params:
            self.s3_host = self.extra_params['host']
        if self._creds_in_conn:
            self._a_key = self.extra_params['aws_access_key_id']
            self._s_key = self.extra_params['aws_secret_access_key']
            if 'calling_format' in self.extra_params:
                self.calling_format = self.extra_params['calling_format']
        elif self._creds_in_config_file:
            self.s3_config_file = self.extra_params['s3_config_file']
            # The format can be None and will default to boto in the parser
            self.s3_config_format = self.extra_params.get('s3_config_format')
        else:
            # No explicit credentials: let boto resolve them itself.
            self._default_to_boto = True
        # STS support for cross account resource access
        self._sts_conn_required = ('aws_account_id' in self.extra_params or
                                   'role_arn' in self.extra_params)
        if self._sts_conn_required:
            # Explicit role_arn wins; otherwise build the ARN from the
            # account id and IAM role name.
            self.role_arn = (self.extra_params.get('role_arn') or
                             "arn:aws:iam::" +
                             self.extra_params['aws_account_id'] +
                             ":role/" +
                             self.extra_params['aws_iam_role'])
        self.connection = self.get_conn()

    def __getstate__(self):
        # The boto connection object is not picklable; drop it here and
        # rebuild it in __setstate__.
        pickled_dict = dict(self.__dict__)
        del pickled_dict['connection']
        return pickled_dict

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.__dict__['connection'] = self.get_conn()

    def _parse_s3_url(self, s3url):
        # Deprecated shim kept for backwards compatibility with old callers.
        warnings.warn(
            'Please note: S3Hook._parse_s3_url() is now '
            'S3Hook.parse_s3_url() (no leading underscore).',
            DeprecationWarning)
        return self.parse_s3_url(s3url)

    @staticmethod
    def parse_s3_url(s3url):
        """Split an s3://bucket/key url into a (bucket_name, key) tuple."""
        parsed_url = urlparse(s3url)
        if not parsed_url.netloc:
            raise AirflowException('Please provide a bucket_name')
        else:
            bucket_name = parsed_url.netloc
            key = parsed_url.path.strip('/')
            return (bucket_name, key)

    def get_conn(self):
        """
        Returns the boto S3Connection object.
        """
        if self._default_to_boto:
            return S3Connection(profile_name=self.profile)
        a_key = s_key = None
        if self._creds_in_config_file:
            a_key, s_key, calling_format = _parse_s3_config(self.s3_config_file,
                                                            self.s3_config_format,
                                                            self.profile)
        elif self._creds_in_conn:
            a_key = self._a_key
            s_key = self._s_key
            calling_format = self.calling_format
        s3_host = self.s3_host
        if calling_format is None:
            calling_format = 'boto.s3.connection.SubdomainCallingFormat'
        if s3_host is None:
            s3_host = NoHostProvided
        if self._sts_conn_required:
            # Assume the configured role and build the S3 connection from
            # the temporary STS credentials.
            sts_connection = STSConnection(aws_access_key_id=a_key,
                                           aws_secret_access_key=s_key,
                                           profile_name=self.profile)
            assumed_role_object = sts_connection.assume_role(
                role_arn=self.role_arn,
                role_session_name="Airflow_" + self.s3_conn_id
            )
            creds = assumed_role_object.credentials
            connection = S3Connection(
                aws_access_key_id=creds.access_key,
                aws_secret_access_key=creds.secret_key,
                calling_format=calling_format,
                security_token=creds.session_token
            )
        else:
            connection = S3Connection(aws_access_key_id=a_key,
                                      aws_secret_access_key=s_key,
                                      calling_format=calling_format,
                                      host=s3_host,
                                      profile_name=self.profile)
        return connection

    def get_credentials(self):
        # Returns (access_key, secret_key).
        # NOTE(review): if neither creds source is configured (boto default
        # case), a_key/s_key are unbound and this raises UnboundLocalError
        # -- confirm callers only use it with explicit credentials.
        if self._creds_in_config_file:
            a_key, s_key, calling_format = _parse_s3_config(self.s3_config_file,
                                                            self.s3_config_format,
                                                            self.profile)
        elif self._creds_in_conn:
            a_key = self._a_key
            s_key = self._s_key
        return a_key, s_key

    def check_for_bucket(self, bucket_name):
        """
        Check if bucket_name exists.

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        return self.connection.lookup(bucket_name) is not None

    def get_bucket(self, bucket_name):
        """
        Returns a boto.s3.bucket.Bucket object

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        return self.connection.get_bucket(bucket_name)

    def list_keys(self, bucket_name, prefix='', delimiter=''):
        """
        Lists keys in a bucket under prefix and not containing delimiter

        Returns None (not an empty list) when nothing matches.

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        :param prefix: a key prefix
        :type prefix: str
        :param delimiter: the delimiter marks key hierarchy.
        :type delimiter: str
        """
        b = self.get_bucket(bucket_name)
        keylist = list(b.list(prefix=prefix, delimiter=delimiter))
        return [k.name for k in keylist] if keylist != [] else None

    def list_prefixes(self, bucket_name, prefix='', delimiter=''):
        """
        Lists prefixes in a bucket under prefix

        Returns None (not an empty list) when nothing matches.

        :param bucket_name: the name of the bucket
        :type bucket_name: str
        :param prefix: a key prefix
        :type prefix: str
        :param delimiter: the delimiter marks key hierarchy.
        :type delimiter: str
        """
        b = self.get_bucket(bucket_name)
        plist = b.list(prefix=prefix, delimiter=delimiter)
        # Keep only Prefix entries (directory-like results), not keys.
        prefix_names = [p.name for p in plist
                        if isinstance(p, boto.s3.prefix.Prefix)]
        return prefix_names if prefix_names != [] else None

    def check_for_key(self, key, bucket_name=None):
        """
        Checks that a key exists in a bucket

        When bucket_name is omitted, key is parsed as a full s3:// url.
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        return bucket.get_key(key) is not None

    def get_key(self, key, bucket_name=None):
        """
        Returns a boto.s3.key.Key object

        :param key: the path to the key
        :type key: str
        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        return bucket.get_key(key)

    def check_for_wildcard_key(self,
                               wildcard_key, bucket_name=None, delimiter=''):
        """
        Checks that a key matching a wildcard expression exists in a bucket
        """
        return self.get_wildcard_key(wildcard_key=wildcard_key,
                                     bucket_name=bucket_name,
                                     delimiter=delimiter) is not None

    def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
        """
        Returns a boto.s3.key.Key object matching the regular expression

        Returns the first match, or None when nothing matches.

        :param regex_key: the path to the key
        :type regex_key: str
        :param bucket_name: the name of the bucket
        :type bucket_name: str
        """
        if not bucket_name:
            (bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
        bucket = self.get_bucket(bucket_name)
        # List with the literal portion before the first '*' as the prefix,
        # then filter client-side with fnmatch.
        prefix = re.split(r'[*]', wildcard_key, 1)[0]
        klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
        if not klist:
            return None
        key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
        return bucket.get_key(key_matches[0]) if key_matches else None

    def check_for_prefix(self, bucket_name, prefix, delimiter):
        """
        Checks that a prefix exists in a bucket
        """
        # Normalize to a trailing delimiter, then look the prefix up under
        # its parent level.
        prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
        prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
        previous_level = prefix_split[0]
        plist = self.list_prefixes(bucket_name, previous_level, delimiter)
        return False if plist is None else prefix in plist

    def load_file(
            self,
            filename,
            key,
            bucket_name=None,
            replace=False,
            multipart_bytes=5 * (1024 ** 3),
            encrypt=False):
        """
        Loads a local file to S3

        :param filename: name of the file to load.
        :type filename: str
        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which to store the file
        :type bucket_name: str
        :param replace: A flag to decide whether or not to overwrite the key
            if it already exists. If replace is False and the key exists, an
            error will be raised.
        :type replace: bool
        :param multipart_bytes: If provided, the file is uploaded in parts of
            this size (minimum 5242880). The default value is 5GB, since S3
            cannot accept non-multipart uploads for files larger than 5GB. If
            the file is smaller than the specified limit, the option will be
            ignored.
        :type multipart_bytes: int
        :param encrypt: If True, the file will be encrypted on the server-side
            by S3 and will be stored in an encrypted form while at rest in S3.
        :type encrypt: bool
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        key_obj = bucket.get_key(key)
        if not replace and key_obj:
            raise ValueError("The key {key} already exists.".format(
                **locals()))
        key_size = os.path.getsize(filename)
        if multipart_bytes and key_size >= multipart_bytes:
            # multipart upload
            from filechunkio import FileChunkIO
            mp = bucket.initiate_multipart_upload(key_name=key,
                                                  encrypt_key=encrypt)
            total_chunks = int(math.ceil(key_size / multipart_bytes))
            sent_bytes = 0
            try:
                for chunk in range(total_chunks):
                    offset = chunk * multipart_bytes
                    bytes = min(multipart_bytes, key_size - offset)
                    with FileChunkIO(
                            filename, 'r', offset=offset, bytes=bytes) as fp:
                        logging.info('Sending chunk {c} of {tc}...'.format(
                            c=chunk + 1, tc=total_chunks))
                        mp.upload_part_from_file(fp, part_num=chunk + 1)
            except:
                # Abort the multipart upload so S3 does not keep charging
                # for the orphaned parts, then re-raise.
                mp.cancel_upload()
                raise
            mp.complete_upload()
        else:
            # regular upload
            if not key_obj:
                key_obj = bucket.new_key(key_name=key)
            key_size = key_obj.set_contents_from_filename(filename,
                                                          replace=replace,
                                                          encrypt_key=encrypt)
        logging.info("The key {key} now contains"
                     " {key_size} bytes".format(**locals()))

    def load_string(self, string_data,
                    key, bucket_name=None,
                    replace=False,
                    encrypt=False):
        """
        Loads a local file to S3

        This is provided as a convenience to drop a file in S3. It uses the
        boto infrastructure to ship a file to s3. It is currently using only
        a single part download, and should not be used to move large files.

        :param string_data: string to set as content for the key.
        :type string_data: str
        :param key: S3 key that will point to the file
        :type key: str
        :param bucket_name: Name of the bucket in which to store the file
        :type bucket_name: str
        :param replace: A flag to decide whether or not to overwrite the key
            if it already exists
        :type replace: bool
        :param encrypt: If True, the file will be encrypted on the server-side
            by S3 and will be stored in an encrypted form while at rest in S3.
        :type encrypt: bool
        """
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        key_obj = bucket.get_key(key)
        if not replace and key_obj:
            raise ValueError("The key {key} already exists.".format(
                **locals()))
        if not key_obj:
            key_obj = bucket.new_key(key_name=key)
        key_size = key_obj.set_contents_from_string(string_data,
                                                    replace=replace,
                                                    encrypt_key=encrypt)
        logging.info("The key {key} now contains"
                     " {key_size} bytes".format(**locals()))

    def download_file(self,
                      bucket_name,
                      key,
                      download_location):
        # Downloads the given key to a local path. When bucket_name is
        # falsy, key is parsed as a full s3:// url.
        if not bucket_name:
            (bucket_name, key) = self.parse_s3_url(key)
        bucket = self.get_bucket(bucket_name)
        key_obj = bucket.get_key(key)
        key_obj.get_contents_to_filename(download_location)
| {
"content_hash": "5362ee14b62a9a5871b3b58701d65a40",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 82,
"avg_line_length": 39.682870370370374,
"alnum_prop": 0.559820334830543,
"repo_name": "brandsoulmates/incubator-airflow",
"id": "661f8d58f9feecfc6ff70bf96d506af5f096c3b8",
"size": "17710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/hooks/S3_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57001"
},
{
"name": "HTML",
"bytes": "145755"
},
{
"name": "JavaScript",
"bytes": "1364212"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2063745"
},
{
"name": "Shell",
"bytes": "20906"
}
],
"symlink_target": ""
} |
'''
If something inherits from Talker, then we can print
text to the terminal in a relatively standard way.
'''
import textwrap, numpy as np, pprint
import sys
# Python 2 compatibility: make input() behave like Python 3's
# (raw_input returns the entered string without evaluating it).
if sys.version_info[0] < 3:
    input = raw_input
# Optional substring -> abbreviation map used by Talker.report();
# presumably assigned a dict by another module at runtime -- TODO confirm.
shortcuts = None
class Talker(object):
    '''
    Mixin providing standardized terminal output.

    Instances carry "_mute" and "_pithy" flags: report('uh-oh!') prints when
    unmuted, speak('yo!') prints only when unmuted and unpithy, warning()
    adds a prelude, and input("what's up?") reads a response from the prompt.
    '''
    _mute = False
    _pithy = False
    line = np.inf

    @property
    def nametag(self):
        '''Lower-cased class name, used to label every printed line.'''
        return type(self).__name__.lower()

    def speak(self, string='', level=0, progress=False):
        '''Print the message unless this talker is pithy (or muted).'''
        if not self._pithy:
            self.report(string=string, level=level, progress=progress)

    def warning(self, string='', level=0):
        '''Print the message with a warning-face prelude (when unmuted).'''
        self.report(string, level, prelude=':-| ')

    def input(self, string='', level=0, prompt='(please respond) '):
        '''Print the message, then read and return a response from the prompt.'''
        self.report(string, level)
        return input("{0}".format(self._prefix + prompt))

    def report(self, string='', level=0, prelude='', progress=False, abbreviate=True):
        '''Print the message with a right-aligned [nametag] prefix (when unmuted).'''
        if self._mute:
            return
        self._prefix = prelude + '{spacing}[{name}] '.format(name=self.nametag, spacing=' ' * level)
        self._prefix = "{0:>16}".format(self._prefix)
        pad = ' ' * len(self._prefix)
        text = string + ''
        # Optionally shorten known long substrings via the module-level map.
        if abbreviate and shortcuts is not None:
            for longform in shortcuts.keys():
                text = text.replace(longform, shortcuts[longform])
        body = self._prefix + text.replace('\n', '\n' + pad)
        if progress:
            print('\r' + body)
        else:
            print(body)

    def summarize(self):
        '''Print a pretty-printed dump of this object's attributes.'''
        self.speak('Here is a brief summary of {}.'.format(self.nametag))
        s = '\n' + pprint.pformat(self.__dict__)
        print(s.replace('\n', '\n' + ' ' * (len(self._prefix) + 1)) + '\n')
| {
"content_hash": "0d167e96baca2981f2f3546cf6e22985",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 142,
"avg_line_length": 40.83076923076923,
"alnum_prop": 0.591183119819141,
"repo_name": "zkbt/exopop",
"id": "74d7f9858865f12a61359ed16e11f2101f2cda81",
"size": "2654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exoatlas/talker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91104"
}
],
"symlink_target": ""
} |
import collections
import errno
import json
import time
import importlib
# from utils.utilfuncs import safeprint
from ..hubs import HubInitError
import websocket
from typing import Callable, Union
import config
import debug
from . import haremote as ha
import historybuffer
import logsupport
from utils import threadmanager, hw, utilfuncs
from controlevents import CEvent, PostEvent, ConsoleEvent, PostIfInterested
from logsupport import ConsoleWarning, ConsoleError, ConsoleDetail, ConsoleInfo
from stores import valuestore, haattraccess
from utils.utilities import CheckPayload
from utils.utilfuncs import safeprint
# Hook installed by the ignore module (set later to avoid an import loop).
AddIgnoredDomain: Union[Callable, None] = None  # type Union[Callable, None]
IgnoredDomains = []
# above gets filled in by ignore to avoid import loop
# Home Assistant websocket event types the console has no use for;
# presumably events of these types are skipped by the consumer -- TODO confirm.
ignoredeventtypes = [
	'system_log_event', 'service_executed', 'logbook_entry', 'timer_out_of_sync', 'result',
	'persistent_notifications_updated', 'automation_triggered', 'script_started', 'service_removed', 'hacs/status',
	'hacs/repository', 'hacs/config', 'entity_registry_updated', 'component_loaded', 'device_registry_updated',
	'entity_registry_updated', 'lovelace_updated', 'isy994_control', 'core_config_updated', 'homeassistant_start',
	'config_entry_discovered', 'automation_reloaded', 'hacs/stage', 'hacs/reload', 'zwave_js_value_notification',
	'event_template_reloaded', 'panels_updated', 'data_entry_flow_progressed', 'scene_reloaded',
	'area_registry_updated', 'keymaster_lock_state_changed', 'zwave_js_notification', 'repairs_issue_registry_updated',
	'amcrest']
def stringtonumeric(v):
	"""Convert a string to a number when possible; pass other values through.

	Non-strings are returned unchanged. Strings are tried as float first,
	then int, so integral strings come back as floats (e.g. "2" -> 2.0) --
	preserved for backward compatibility. Unconvertible strings are
	returned unchanged.
	"""
	if not isinstance(v, str):
		return v
	# float() on a str raises only ValueError, so the previous bare
	# excepts (which also swallowed KeyboardInterrupt etc.) are narrowed.
	try:
		return float(v)
	except ValueError:
		pass
	try:
		return int(v)
	except ValueError:
		pass
	return v
from ast import literal_eval
class HAnode(object):
	def __init__(self, HAitem, **entries):
		"""Base representation of a single Home Assistant entity.

		HAitem is the owning hub object; **entries are raw fields from the
		HA state report and are merged directly into the instance dict.
		"""
		self.entity_id = ''
		self.object_id = ''
		self.name = ''
		self.attributes = {}
		self.state = 0
		self.internalstate = self._NormalizeState(self.state)
		# Overlay whatever fields the hub reported (may replace defaults above).
		self.__dict__.update(entries)
		if 'friendly_name' in self.attributes: self.FriendlyName = self.attributes['friendly_name']
		self.address = self.entity_id
		self.Hub = HAitem
		self.domname = 'unset'  # presumably overwritten per-domain later -- TODO confirm
	def DisplayStuff(self, prefix, withattr=False):
		"""Debug helper: print this node's fields, optionally without attributes."""
		d = dict(vars(self))
		if not withattr: del d['attributes']
		safeprint(prefix, d)
def LogNewEntity(self, newstate):
logsupport.Logs.Log(
"New entity since startup seen from {}: {} (Domain: {}) New: {}".format(
self.Hub.name, self.entity_id, self.domname, repr(newstate)))
# def Update(self, **ns):
# # just updates last triggered etc.
# self.__dict__.update(ns)
def Update(self, **ns):
if self.entity_id in self.Hub.MonitoredAttributes:
val = ns['attributes']
try:
for attr in self.Hub.MonitoredAttributes[self.entity_id]:
val = val[attr]
except KeyError:
val = None
self.Hub.attrstore.SetVal([self.entity_id] + self.Hub.MonitoredAttributes[self.entity_id], val)
self.__dict__.update(ns)
oldstate = self.internalstate
self.internalstate = self._NormalizeState(self.state)
if self.internalstate == -1:
logsupport.Logs.Log(
"{} ({}) set unavailable (was {})".format(self.name, self.entity_id, str(oldstate))
) # , severity=ConsoleDetail)
if oldstate == -1 and self.internalstate != -1:
logsupport.Logs.Log(
"{} ({}) set available ({})".format(self.name, self.entity_id, str(self.internalstate))
) #, severity=ConsoleDetail)
PostIfInterested(self.Hub, self.entity_id, self.internalstate)
def _NormalizeState(self, state, brightness=None): # may be overridden for domains with special state settings
if isinstance(state, str):
if state == 'on':
if brightness is not None:
return brightness
else:
return 255
elif state == 'off':
return 0
elif state == 'scening': # scenes really have no state but using -1 would cause X display
return 0
elif state in ['unavailable', 'unknown']:
return -1
else:
try:
val = literal_eval(state)
except ValueError:
logsupport.Logs.Log('{} reports unknown state: {}'.format(self.Hub.name, state),
severity=ConsoleError, tb=False)
return -1
else:
val = state
if isinstance(val, float):
if val.is_integer():
return int(val)
return val
def SendSpecialCmd(self, cmd, target, params):
# This should get the target domain, check that the cmd applies, validate the params, and send the command to the hub
spccmds = self.Hub.SpecialCmds
targdom, targent = target.split('.')
if cmd in spccmds[targdom] and spccmds[targdom][cmd]['target'] == targdom:
thiscmd = spccmds[targdom][cmd]
# Normal command targeting an entity in the domain
for p, val in params.items():
if p not in thiscmd:
logsupport.Logs.Log('Invalid paramter {} for command {}'.format(p, cmd), severity=ConsoleWarning)
raise KeyError(p)
# send the command
serviceparams = dict(params)
serviceparams['entity_id'] = target
ha.call_service_async(self.Hub.api, targdom, cmd, service_data=serviceparams)
else:
logsupport.Logs.Log('Invalid special command {}({}} set at {}'.format(cmd, params, target),
severity=ConsoleWarning)
raise ValueError
def SendOnOffCommand(self, settoon):
pass
def SendOnOffFastCommand(self, settoon):
pass
def __str__(self):
return str(self.name) + '::' + str(self.state)
class Indirector(object):
    """Placeholder node for a name the console config expects but HA has not
    (yet) reported.

    GetNode creates one of these for a missing name; when the entity later
    appears on the event stream, SetRealNode links the real node in and
    Undefined flips to False.  Attribute access is proxied to the real node;
    until one exists, name/address/FriendlyName answer with the implied name
    and anything else logs once and yields None.
    """

    def __init__(self, Hub, name):
        self.Undefined = True
        self.realnode = None
        self.Hub = Hub
        self.impliedname = name
        self.reportederror = False  # ensures the uncompleted-access warning logs only once
        Hub.Indirectors[name] = self
        logsupport.Logs.Log('Creating indirector for missing {} node {}'.format(Hub.name, name), severity=ConsoleWarning)

    def SetRealNode(self, node):
        """Link the late-arriving real node; subsequent attribute access proxies to it."""
        self.realnode = node
        self.Undefined = False
        logsupport.Logs.Log('Real node appeared for hub {} node {}'.format(self.Hub.name, self.impliedname))

    def __getattr__(self, name):
        try:
            return getattr(self.realnode, name)
        except Exception:
            # BUGFIX: was a bare 'except:' which also swallowed BaseExceptions
            # like KeyboardInterrupt; the best-effort fallback is preserved.
            if name == 'name': return self.impliedname
            if name == 'address': return self.impliedname
            if name == 'FriendlyName': return self.impliedname
            if not self.reportederror:
                logsupport.Logs.Log(
                    'Attempt to access uncompleted indirector for hub {} node {} (call {})'.format(self.Hub.name,
                                                                                                   self.impliedname,
                                                                                                   name),
                    severity=ConsoleWarning)
                self.reportederror = True
            # falls through: unknown attributes resolve to None by design
# Registries populated by each domain module calling RegisterDomain():
hadomains = {}  # domain name -> class implementing that domain's entities
domainspecificevents = {}  # domain name -> handler for '<domain>.<event>' websocket events
specialcommands = {}  # domain name -> domain-specific command table (may be None)
def DomainSpecificEvent(e, message):
    """Default handler for '<domain>.<event>' websocket events: just log them.

    Domain modules that care pass their own handler to RegisterDomain.
    """
    # dead trailing 'pass' removed
    logsupport.Logs.Log('Default event handler {} {}'.format(e, message))
def RegisterDomain(domainname, domainmodule, eventhdlr=DomainSpecificEvent, speccmd=None):
    """Register one HA domain: its implementing class, its domain-specific
    event handler, and its special command table (if any).

    A repeat registration is logged but still overwrites the previous entry.
    """
    if domainname in hadomains:
        logsupport.Logs.Log("Redundant registration of HA domain {}".format(domainname))
    for registry, value in ((hadomains, domainmodule),
                            (domainspecificevents, eventhdlr),
                            (specialcommands, speccmd)):
        registry[domainname] = value
class HA(object):
    """Hub interface to one Home Assistant instance: REST API access for
    state/service queries plus a websocket event-stream thread."""

    class HAClose(Exception):
        # Raised internally to force the websocket event thread to exit.
        pass
    def GetNode(self, name, proxy=''):
        """Return a (control, monitor) entity pair for *name*.

        *proxy*, when given, names the entity whose state is monitored while
        *name* is the one commanded; the 'hub:entity' form is accepted but the
        proxy must be on this same hub.  Unknown names yield a single shared
        Indirector placeholder (returned in both slots) so the entity can be
        linked in later when HA reports it.
        """
        if proxy == '':
            pn = name
        elif ':' in proxy:
            t = proxy.split(':')
            if t[0] == self.name:
                pn = t[1]
            else:
                logsupport.Logs.Log("{}: Proxy must be in same hub as button {}".format(self.name, proxy))
                pn = name
        else:
            pn = proxy
        try:
            return self.Entities[name], self.Entities[pn]
        except KeyError:
            if pn not in self.Entities:
                logsupport.Logs.Log("{}: Attempting to use unknown Proxy {}".format(self.name, pn),
                                    severity=ConsoleWarning)
            if name not in self.Entities:
                logsupport.Logs.Log("{}: Attempting to access unknown object: {}".format(self.name, name),
                                    severity=ConsoleWarning)
            # one Indirector serves both slots until the real node appears
            I = Indirector(self, name)
            return I, I
        except Exception as E:
            logsupport.Logs.Log("{}: Exception in GetNode: {}".format(self.name, E), severity=ConsoleWarning)
            return None, None
def GetProgram(self, name):
try:
return self.DomainEntityReg['automation']['automation.' + name]
except KeyError:
pass
try:
return self.DomainEntityReg['script']['script.' + name]
except KeyError:
logsupport.Logs.Log("Attempt to access unknown program: " + name + " in HA Hub " + self.name,
severity=ConsoleWarning)
return None
def GetCurrentStatus(self, MonitorNode):
# noinspection PyBroadException
try:
return MonitorNode.internalstate
except:
# ** part of handling late discovered nodes
logsupport.Logs.Log("Error accessing current state in HA Hub: " + self.name + ' ' + repr(MonitorNode),
severity=ConsoleWarning)
return None
def _StatusChecker(self):
worktodo = True
while worktodo:
templist = dict(self.UnknownList)
for node in templist:
# noinspection PyUnusedLocal
e = self.GetActualState(node.name)
# ** should post e as a node state change
time.sleep(10)
worktodo = bool(self.UnknownList)
    def StartStatusChecker(self):
        """Stub: intended to launch the _StatusChecker thread on demand (WIP)."""
        # logic here would be to start a thread that runs while List is non-empty - need to be careful regarding it changing length
        # while in the loop. Also needs to be conservative about stopping and the starter needs to double-check the is alive in some way
        # so as not to get caught with an entry but not running.
        pass
    def AddToUnknowns(self, node):  # ** flesh out
        """Track *node* whose state came back unknown and (re)start the
        status-checker thread that will poll it (WIP)."""
        # need to start a thread that checks periodically the status of the node. When it changes to known value that thread should exit (perhaps post?)
        # "delete" would get triggered the next time the paint is called (or would it? - can the change to real value happen under the covers?) Maybe don't need to do the delete
        # since the thread will be not alive - can just start the thread if not alive and let it die peacefully after doing its job?
        self.UnknownList[node.name] = node
        # need a single slot for the node status checker thread per hub instance check is_alive on each entry. Worst case on the next key repaint this will get
        # called again and the status checking will start.
        logsupport.Logs.Log('{}: Adding {} to unknowns list {}'.format(self.name, node.name, self.UnknownList),
                            severity=ConsoleWarning)
        if self.UnknownList:
            # start the checker if it was never started or has since died
            if self.StatusCheckerThread is None:
                self.StartStatusChecker()
            elif not self.StatusCheckerThread.is_alive():
                self.StartStatusChecker()
    # noinspection DuplicatedCode
    def DeleteFromUnknowns(self, node):
        """Remove *node* from the unknown-status watch list, logging the outcome
        either way (a miss is reported, not raised)."""
        try:
            del self.UnknownList[node.name]
            logsupport.Logs.Log('{}: Deleted {} from unknowns list {}'.format(self.name, node.name, self.UnknownList),
                                severity=ConsoleWarning)
        except Exception as E:
            logsupport.Logs.Log(
                '{}: Failed attempt to delete {} from unknowns list {} ({})'.format(self.name, node.name,
                                                                                    self.UnknownList, E),
                severity=ConsoleWarning)
def AddDevice(self, device, entitylist):
if device is None:
print('Add none device?')
return
self.DeviceToEnt[device] = entitylist
self.DevGoneCounts[device] = 0
def NoteDeviceGone(self, device):
if device is None: return
self.DevGoneCounts[device] += 1
def DevGoneCount(self, device):
if device is None: return
return self.DevGoneCounts[device]
    def GetActualState(self, ent):
        """Synchronously ask the hub for the live state of entity *ent*;
        returns -1 (and logs) when the query fails."""
        try:
            e = ha.get_state(self.api, ent)
        except Exception as E:
            logsupport.Logs.Log('{}: State check did not complete for {} exc: {}'.format(self.name, ent, E),
                                severity=ConsoleWarning)
            e = -1
        return e
    # end of WIP for checking actual status with hub
    def CheckStates(self):
        """Audit pass: compare each cached sensor value against the hub's live
        value, log any mismatch, and resync the cache to the hub."""
        # noinspection PyBroadException
        try:
            for n, s in self.DomainEntityReg['sensor'].items():
                cacheval = self.attrstore.GetVal(s.entity_id)
                e = ha.get_state(self.api, s.entity_id)
                if e is None:
                    actualval = '*unknown*'
                else:
                    actualval = e.state
                # compare in the cached value's type so e.g. '3' matches 3;
                # the type() conversion can itself raise -- caught below
                if cacheval != type(cacheval)(actualval):
                    logsupport.Logs.Log(
                        'Sensor value anomoly(' + self.name + '): Cached: ' + str(cacheval) + ' Actual: ' + str(
                            actualval), severity=ConsoleWarning, hb=True)
                    logsupport.DevPrint(
                        'Check anomoly for {}: cache: {} actual: {}'.format(self.name, cacheval, actualval))
                    self.attrstore.SetVal(s.entity_id, actualval)
        except Exception as E:
            logsupport.Logs.Log('Sensor value check did not complete: {}'.format(repr(E)), severity=ConsoleWarning)
def SetAlertWatch(self, node, alert):
if node.address in self.AlertNodes:
self.AlertNodes[node.address].append(alert)
else:
self.AlertNodes[node.address] = [alert]
def StatesDump(self):
with open('/home/pi/Console/{}Dump.txt'.format(self.name), mode='w') as f:
for n, nd in self.Entities.items():
f.write('Node({}) {}: -> {} {} {}\n'.format(type(nd), n, nd.internalstate, nd.state, type(nd.state)))
def HACheckThread(self):
if self.haconnectstate != "Running":
logsupport.Logs.Log(
"{}({}) failed thread check; state: {}".format(self.name, self.HAnum, self.haconnectstate),
severity=ConsoleWarning)
return False
return True
    def PreRestartHAEvents(self):
        """Block until the HA REST API answers get_config (i.e. the hub is up)
        before (re)starting the event thread; nags the log every ~60 tries."""
        self.haconnectstate = "Prestart"
        trycnt = 60
        while True:
            self.config = ha.get_config(self.api)
            if self.config != {}:
                break  # HA is up
            trycnt -= 1
            if trycnt < 0:
                logsupport.Logs.Log("{}: Waiting for HA to come up - retrying: ".format(self.name),
                                    severity=ConsoleWarning)
                trycnt = 60
            time.sleep(1)  # don't flood network
        self.watchstarttime = time.time()
        # each (re)start gets a new stream number for log correlation
        self.HAnum += 1
    def PostStartHAEvents(self):
        """After the event thread starts: wait for the websocket to reach
        'Running', then write a connect marker to the HA logbook (3 tries)."""
        # todo need to get all the current state since unlike ISY, HA doesn't just push it
        while self.haconnectstate == "Delaying":
            time.sleep(1)
        i = 0
        while self.haconnectstate != "Running":
            i += 1
            if i > 60:
                logsupport.Logs.Log("{} not running after thread start ({})".format(self.name, self.haconnectstate),
                                    severity=ConsoleError)
                time.sleep(1)
                i = 0
            # NOTE(review): whether the sleep/reset above belong inside this
            # 'if' was ambiguous in the original layout; as written the loop
            # spins until 60 polls, then logs, sleeps once, and resets --
            # confirm intended pacing
        i = 3
        while i > 0:
            try:
                ha.call_service(self.api, 'logbook', 'log',
                                {'name': 'Softconsole', 'message': hw.hostname + ' connected'})
                return
            except ha.HomeAssistantError:
                i -= 1
                if i == 0:
                    logsupport.Logs.Log(self.name + " not responding to service call after restart",
                                        severity=ConsoleWarning)
                    return
                else:
                    time.sleep(1)
def RegisterEntity(self, domain, entity, item):
if domain in self.DomainEntityReg:
if entity in self.DomainEntityReg[domain]:
logsupport.Logs.Log('Duplicate entity reported in {} hub {}: {}'.format(self.name, domain, entity))
else:
self.DomainEntityReg[domain][entity] = item
else:
self.DomainEntityReg[domain] = {entity: item}
    def GetAllCurrentState(self):
        """Pull a full state snapshot from the hub and push it into the known
        entities (unlike ISY, HA does not push all state on connect)."""
        entities = ha.get_states(self.api)
        # with open('/home/pi/Console/msglog{}'.format(self.name), 'a') as f:
        # 	f.write('----------REFRESH\n')
        for e in entities:
            try:
                p2 = dict(e.as_dict(), **{'domain': e.domain, 'name': e.name, 'object_id': e.object_id})
                if e.entity_id in self.Entities:
                    self.Entities[e.entity_id].Update(**p2)
                elif e.domain in IgnoredDomains:
                    pass  # continue to ignore these
                else:
                    # entity appeared since startup; reported but not modeled here
                    logsupport.Logs.Log("{} restart found new entity {} state: {}".format(self.name, e, p2),
                                        severity=ConsoleWarning)
                    # it's new
            except Exception as E:
                logsupport.Logs.Log(
                    "{}: Exception in getting current states for {} Exception: {}".format(self.name, e.entity_id, E),
                    severity=ConsoleWarning)
    def HAevents(self):
        """Event-thread entry point: open the HA websocket, authenticate,
        subscribe to all events, and dispatch each message until the stream
        closes or fails.  All of the callbacks below close over self."""

        def findDiff(d1, d2):
            # Recursive dict diff of d2 vs d1 -> (changed, deleted, added);
            # None inputs are treated as empty dicts.
            chg = {}
            dels = {}
            adds = {}
            old = {} if d1 is None else d1
            new = {} if d2 is None else d2
            for k in new.keys():
                if not k in old:
                    adds[k] = new[k]
            for k in old.keys():
                if k in new:
                    if isinstance(old[k], dict):
                        c, d, a = findDiff(old[k], new[k])
                        if c != {}: chg[k] = c
                        if d != {}: dels[k] = d
                        if a != {}: adds[k] = a
                    # chg[k], dels[k], adds[k] = findDiff(d1[k], d2[k])
                    else:
                        if old[k] != new[k]:
                            chg[k] = new[k]
                else:
                    dels[k] = old[k]
            return chg, dels, adds

        # noinspection PyUnusedLocal
        def on_message(qws, message):
            # prog tracks how far processing got, for the exception report below
            prog = 0
            loopstart = time.time()
            self.HB.Entry(repr(message))
            # logsupport.Logs.Log("-->{}".format(repr(message)))
            # with open('/home/pi/Console/msglog{}'.format(self.name),'a') as f:
            # 	f.write('{}\n'.format(repr(message)))
            adds = []
            chgs = []
            dels = []
            new = []
            old = []
            try:
                self.msgcount += 1
                # if self.msgcount <4: logsupport.Logs.Log(self.name + " Message "+str(self.msgcount)+':'+ repr(message))
                # noinspection PyBroadException
                try:
                    mdecode = json.loads(CheckPayload(message, 'none', 'hasshubmsg'))
                except:
                    logsupport.Logs.Log("HA event with bad message: ", message, severity=ConsoleError)
                    return
                # --- websocket handshake messages ---
                if mdecode['type'] == 'auth_ok':
                    debug.debugPrint('HASSgeneral', 'WS Authorization OK, subscribing')
                    self.ws.send(
                        json.dumps(
                            {'id': self.HAnum, 'type': 'subscribe_events'}))  # , 'event_type': 'state_changed'}))
                    return
                if mdecode['type'] == 'auth_required':
                    debug.debugPrint('HASSgeneral', 'WS Authorization requested, sending')
                    self.ws.send(json.dumps({"type": "auth", "access_token": self.password}))
                    return
                if mdecode['type'] == 'auth_invalid':
                    logsupport.Logs.Log("Invalid password for hub: " + self.name + '(' + str(self.msgcount) + ')',
                                        repr(message),
                                        severity=ConsoleError,
                                        tb=False)  # since already validate with API shouldn't get here
                    return
                if mdecode['type'] == 'platform_discovered':
                    logsupport.Logs.Log('{} discovered platform: {}'.format(self.name, message))
                if mdecode['type'] != 'event':
                    debug.debugPrint('HASSgeneral', 'Non event seen on WS stream: ', str(mdecode))
                    return
                # --- event messages ---
                m = mdecode['event']
                del mdecode['event']
                d = m['data']
                if m['event_type'] == 'state_changed':
                    prog = 1
                    del m['event_type']
                    ent = d['entity_id']
                    dom, nm = ent.split('.')
                    new = d['new_state']
                    old = d['old_state']
                    del d['new_state']
                    del d['old_state']
                    del d['entity_id']
                    # NOTE(review): hard-coded debug trace for one entity -- leftover?
                    if ent == 'light.bar_lights': safeprint(
                        '{} {} -> {}'.format(time.strftime('%m-%d-%y %H:%M:%S', time.localtime()), old, new))
                    prog = 1.5
                    chgs, dels, adds = findDiff(old, new)
                    prog = 2
                    if not ent in self.Entities:
                        # not an entitity type that is currently known
                        debug.debugPrint('HASSgeneral', self.name,
                                         ' WS Stream item for unhandled entity: ' + ent + ' Added: ' + str(
                                             adds) + ' Deleted: ' + str(dels) + ' Changed: ' + str(chgs))
                        if dom in self.addibledomains:
                            p2 = dict(new, **{'domain': dom, 'name': nm, 'object_id': ent})
                            N = hadomains[dom](self, p2)
                            self.Entities[ent] = N
                            N.AddPlayer()  # todo specific to media player?
                        if ent in self.Indirectors:  # expected node finally showed up
                            p2 = dict(new, **{'domain': dom,
                                              'name': new['attributes']['friendly_name'] if 'friendly_name' in new[
                                                  'attributes'] else nm.replace('_', ' '), 'object_id': ent})
                            if dom in hadomains:
                                N = hadomains[dom](self, p2)
                                self.Indirectors[ent].SetRealNode(N)
                                del self.Indirectors[ent]
                                self.Entities[ent] = N
                                logsupport.Logs.Log('Indirector from {} for {} resolved'.format(self.name, ent))
                            else:
                                del self.Indirectors[ent]
                                logsupport.Logs.Log('Indirector in {} for {} not for a supported domain {}'.format(self.name, ent, dom))
                        else:
                            # brand-new entity (not an expected Indirector)
                            if old is not None:
                                logsupport.Logs.Log(
                                    "New entity seen with 'old' state from {}: {} (Domain: {} New: {})".format(
                                        self.name, ent, dom, repr(old), repr(new)) if False else
                                    "New entity seen with 'old' state from {}: {} (Domain: {}) (Old: {} New: {})".format(
                                        self.name, ent, dom, repr(old), repr(new)))
                            p2 = dict(new, **{'domain': dom, 'name': nm, 'object_id': ent})
                            if dom not in hadomains:
                                AddIgnoredDomain(dom)
                                logsupport.Logs.Log('New domain seen from {}: {}'.format(self.name, dom))
                                if config.sysStore.versionname in ('development', 'homerelease'):
                                    with open('{}/Console/{}-entities'.format(config.sysStore.HomeDir, self.name),
                                              'a') as f:
                                        safeprint('New ignored entity in {}: {} {}'.format(self.name, dom, ent), file=f)
                            N = hadomains[dom](self, p2)
                            N.LogNewEntity(repr(new))
                            self.Entities[ent] = N  # only report once
                            return
                    elif new is not None:
                        prog = 3
                        self.Entities[ent].Update(**new)
                    self.HB.Entry(
                        'Change to {} Added: {} Deleted: {} Changed: {}'.format(ent, str(adds), str(dels), str(chgs)))
                    if m['origin'] == 'LOCAL': del m['origin']
                    if m['data'] == {}: del m['data']
                    timefired = m['time_fired']
                    del m['time_fired']
                    if m != {}: self.HB.Entry('Extras @ {}: {}'.format(timefired, repr(m)))
                    if ent in self.AlertNodes:
                        # alert node changed
                        self.HB.Entry('Report change to: {}'.format(ent))
                        for a in self.AlertNodes[ent]:
                            logsupport.Logs.Log("Node alert fired: " + str(a), severity=ConsoleDetail)
                            # noinspection PyArgumentList
                            PostEvent(ConsoleEvent(CEvent.ISYAlert, node=ent, hub=self.name,
                                                   value=self.Entities[ent].internalstate, alert=a))
                elif m['event_type'] == 'call_service':
                    d = m['data']
                    if d['domain'] == 'homeassistant' and d['service'] == 'restart':
                        # only pay attention to restarts
                        logsupport.Logs.Log('{}: Restarting, suppress errors until restarted'.format(self.name))
                        self.restarting = True
                        self.restartingtime = time.time()
                    # else:
                    # 	logsupport.Logs.Log('Saw {}'.format(d))
                elif m['event_type'] == 'system_log_event':
                    logsupport.Logs.Log('Hub: ' + self.name + ' logged at level: ' + d['level'] + ' Msg: ' + d[
                        'message'])
                elif m['event_type'] == 'service_registered':  # fix plus add service removed
                    d = m['data']
                    if d['domain'] not in self.knownservices:
                        self.knownservices[d['domain']] = {}
                    if d['service'] not in self.knownservices[d['domain']]:
                        self.knownservices[d['domain']][d['service']] = d['service']
                        logsupport.Logs.Log(
                            "{} has new service: {}".format(self.name, message), severity=ConsoleDetail)
                elif m['event_type'] in ignoredeventtypes:
                    pass
                elif '.' in m['event_type']:
                    # domain specific event
                    d, ev = m['event_type'].split('.')
                    if d in domainspecificevents:
                        domainspecificevents[d](ev, message)
                elif m['event_type'] == 'homeassistant_started':
                    # HA just finished initializing everything, so we may have been quicker - refresh all state
                    # with open('/home/pi/Console/msglog{}'.format(self.name), 'a') as f:
                    # 	f.write('DO REFRESH FOR STARTED')
                    self.GetAllCurrentState()
                else:
                    logsupport.Logs.Log('{} Unknown event: {}'.format(self.name, message), severity=ConsoleWarning)
                    ignoredeventtypes.append(m['event_type'])  # only log once
                    debug.debugPrint('HASSgeneral', "Unknown event: " + str(m))
            except Exception as E:
                logsupport.Logs.Log("Exception handling HA message: ({}) {} {}".format(prog, repr(E), repr(message)),
                                    severity=ConsoleWarning,
                                    tb=True, hb=True)
                if prog == 1.5:
                    logsupport.Logs.Log("Diff error {}:::{}".format(old, new))
                elif prog == 2:
                    logsupport.Logs.Log("Post diff: {}:::{}:::{}".format(adds, dels, chgs))
            loopend = time.time()
            self.HB.Entry('Processing time: {} Done: {}'.format(loopend - loopstart, repr(message)))
            time.sleep(.1)  # force thread to give up processor to allow response to time events
            # self.HB.Entry('Gave up control for: {}'.format(time.time() - loopend))

        def on_error(qws, error):
            # Classify websocket failures; several branches work around known
            # websocket-client library quirks.
            self.HB.Entry('ERROR: ' + repr(error))
            self.lasterror = error
            # noinspection PyBroadException
            try:
                if error.args[0] == "'NoneType' object has no attribute 'connected'":
                    # library bug workaround - get this error after close happens just ignore
                    logsupport.Logs.Log("WS lib workaround hit (1)", severity=ConsoleWarning)  # tempdel
                    return
            except:
                pass
            # NOTE(review): this fires for every non-workaround error -- debug leftover?
            logsupport.Logs.Log("WS lib workaround hit (2)", severity=ConsoleWarning)  # tempdel
            if isinstance(error, websocket.WebSocketConnectionClosedException):
                logsupport.Logs.Log(self.name + " closed WS stream " + str(self.HAnum) + "; attempt to reopen",
                                    severity=ConsoleWarning if not self.restarting else ConsoleInfo)
            elif isinstance(error, ConnectionRefusedError):
                logsupport.Logs.Log(self.name + " WS socket refused connection", severity=ConsoleWarning)
            elif isinstance(error, TimeoutError):
                logsupport.Logs.Log(self.name + " WS socket timed out", severity=ConsoleWarning)
            elif isinstance(error, OSError):
                if error.errno == errno.ENETUNREACH:
                    logsupport.Logs.Log(self.name + " WS network down", severity=ConsoleWarning)
                else:
                    logsupport.Logs.Log(self.name + ' WS OS error', repr(error), severity=ConsoleError, tb=False)
            else:
                logsupport.Logs.Log(self.name + ": Unknown Error in WS stream " + str(self.HAnum) + ':' + repr(error),
                                    severity=ConsoleWarning)
            # noinspection PyBroadException
            try:
                if isinstance(error, AttributeError):
                    # error = (errno.ETIMEDOUT,"Websock bug catch")
                    logsupport.Logs.Log("WS lib workaround hit (3)", severity=ConsoleWarning)  # tempdel
            except:
                pass
            self.haconnectstate = "Failed"
            qws.close()

        # noinspection PyUnusedLocal
        def on_close(qws, code, reason):
            """
            Record the stream close and mark the connection state.

            :param reason: str
            :param code: int
            :type qws: websocket.WebSocketApp
            """
            self.HB.Entry('Close')
            logsupport.Logs.Log(
                self.name + " WS stream " + str(self.HAnum) + " closed: " + str(code) + ' : ' + str(reason),
                severity=ConsoleWarning if not self.restarting else ConsoleInfo, tb=False, hb=True)
            if self.haconnectstate != "Failed": self.haconnectstate = "Closed"

        # noinspection PyUnusedLocal
        def on_open(qws):
            # todo if ws never opens then an error doesn't cause a thread restart - not sure why but should track down
            # possible logic - record successful open then if error while not yet open cause console to restart by setting some
            # global flag? Flag would be checked in main gui loop and cause a restart. It is a one way comm from the threads so
            # should not be subject to a race
            # with open('/home/pi/Console/msglog{}'.format(self.name),'a') as f:
            # 	f.write('----------OPEN\n')
            self.HB.Entry('Open')
            if self.restarting:
                logsupport.Logs.Log('{}: WS Stream {} opened (HA restart took: {} secs.)'.format(self.name, self.HAnum,
                                                                                                 time.time() - self.restartingtime))
            else:
                logsupport.Logs.Log("{}: WS stream {} opened".format(self.name, self.HAnum))
            # refresh state after the web socket stream is open
            self.GetAllCurrentState()
            self.haconnectstate = "Running"
            self.restarting = False

        self.haconnectstate = "Starting"
        websocket.setdefaulttimeout(30)
        # import logging
        # logging.basicConfig(filename='/home/pi/WSlog',level=logging.INFO)
        # WStrace = open('/home/pi/WStrace','a')
        # safeprint('Open {}'.format(self.wsurl),file=WStrace)
        # websocket.enableTrace(True,handler=logging.StreamHandler(stream=WStrace))
        try:
            # websocket.enableTrace(True)
            # noinspection PyProtectedMember
            self.ws = websocket.WebSocketApp(self.wsurl, on_message=on_message,
                                             on_error=on_error,
                                             on_close=on_close, on_open=on_open, header=self.api._headers)
            self.msgcount = 0
        except AttributeError as e:
            logsupport.Logs.Log(self.name + ": Problem starting WS handler - retrying: ", repr(e),
                                severity=ConsoleWarning)
        try:
            self.haconnectstate = "Running"
            self.ws.run_forever(ping_timeout=999)
        except self.HAClose:
            logsupport.Logs.Log(self.name + " Event thread got close")
        sev = ConsoleWarning if self.ReportThisError() else logsupport.ConsoleInfo
        logsupport.Logs.Log(self.name + " Event Thread " + str(self.HAnum) + " exiting", severity=sev,
                            tb=False)
        if self.haconnectstate not in ("Failed", "Closed"): self.haconnectstate = "Exited"
def ReportThisError(self):
return config.sysStore.ErrLogReconnects and not self.restarting
    def ParseDomainCommands(self, dom, services):
        """Build the SpecialCmds entry for domain *dom* from the hub's service
        descriptions; nonstandard (non-entity-targeted) commands are dumped to
        a per-hub text file instead of being registered."""
        title = '{} ParseSpecial:'.format(dom)
        entry = {}
        normal = True
        for c, info in services.items():
            try:
                t = info['target']
                if 'entity' in t and 'domain' in t['entity'] and t['entity']['domain'] == dom:
                    targ = ''
                    entry[c] = {'target': dom}
                elif 'entity' in t and t['entity'] == {}:
                    targ = ''
                    entry[c] = {'target': '*'}
                else:
                    normal = False
                    entry[c] = {'target': 'NONSTD'}
                    targ = t
            except Exception:
                # service description had no usable 'target'
                entry[c] = {'target': 'NONE'}
                targ = " No Target"
            flds = []
            keys = []
            s = {}
            try:
                for fn, f in info['fields'].items():
                    s = f['selector'] if 'selector' in f else {}
                    keys = list(s.keys())
                    if len(keys) == 0:
                        entry[c][fn] = 'No selector'
                    elif len(keys) > 1:
                        flds.append(" Field: {} Selector: {}".format(fn, keys))
                    else:
                        entry[c][fn] = keys[0]
            except Exception as E:
                safeprint("Pars excp: {} {} {} {}".format(dom, E, c, info))
                safeprint('Info: {} {}'.format(s, keys))
            if not normal:
                # NOTE(review): mode 'w' truncates on every command iteration,
                # so only the last nonstandard command survives in the file;
                # the title-reset logic suggests 'a' may have been intended
                with open('{}-nonentitycmds.txt'.format(self.name), 'w') as f:
                    if title != '':
                        safeprint("{} {}".format(self.name, title), file=f)
                        title = ''
                    safeprint(" Command: {}".format(c), file=f)
                    if targ != '': safeprint(" Target: {}".format(targ), file=f)
                    for l in flds: safeprint(l, file=f)
            else:
                self.SpecialCmds[dom] = entry
    # noinspection PyUnusedLocal
    def __init__(self, hubname, addr, user, password, version):
        """Create the hub interface: validate API access, enumerate entities,
        group them into devices heuristically, parse the hub's services, and
        register the websocket event thread with threadmanager.

        Raises ValueError for an unsupported version or a bad access key, and
        HubInitError when the hub stays unreachable after repeated tries.
        """
        self.SpecialCmds = {}
        self.restarting = False
        self.restartingtime = 0
        self.UnknownList = {}
        self.StatusCheckerThread = None
        self.DomainEntityReg = {}
        self.knownservices = []  # NOTE(review): rebound to a dict further below before real use
        self.DeviceToEnt = {}  # heuristically created lists - may not be absolutely accurate since HA won't provide this mapping
        self.EntToDev = {}
        self.DevGoneCounts = {}
        self.MonitoredAttributes = {}  # holds tuples with the name of attribute that is used in an alert
        self.HB = historybuffer.HistoryBuffer(40, hubname)
        if version not in (0, 1):
            logsupport.Logs.Log("Fatal error - no HA hub version {}".format(version), severity=ConsoleError)
            raise ValueError
        logsupport.Logs.Log(
            "{}: Creating structure for Home Assistant hub version {} at {}".format(hubname, version, addr))
        # import supported domains
        self.dyndomains = utilfuncs.importmodules('hubs/ha/domains')
        if version == 0:  # todo delete at some point
            logsupport.Logs.Log('Using old version of HA climate support - are you sure?',
                                severity=ConsoleWarning)
            self.dyndomains['thermostat'] = importlib.import_module('hubs.ha.domains.__oldthermostat')
        for dom in hadomains:
            self.DomainEntityReg[dom] = {}
        self.addibledomains = {}  # {'media_player': MediaPlayer} todo resolve how to add things
        self.name = hubname
        # with open('/home/pi/Console/msglog{}'.format(self.name), 'w') as f:
        # 	f.write('----------START Log\n')
        # Split the configured address into scheme, host, port; default http:8123
        if addr.startswith('https'):
            prefix = 'https://'
            wsprefix = 'wss://'
        elif addr.startswith('http'):
            prefix = 'http://'
            wsprefix = 'ws://'
        else:
            prefix = 'http://'
            wsprefix = 'ws://'
        trimmedaddr = addr.replace(prefix, '', 1)
        if ':' in trimmedaddr:
            self.addr = trimmedaddr.split(':')[0]
            self.port = trimmedaddr.split(':')[1]
        else:
            self.addr = trimmedaddr
            self.port = '8123'
        self.url = prefix + self.addr + ':' + self.port
        self.wsurl = '{}{}:{}/api/websocket'.format(wsprefix, self.addr, self.port)
        self.config = None
        self.password = password
        self.HAnum = 0
        self.ws = None  # websocket client instance
        self.msgcount = 0
        self.watchstarttime = time.time()
        self.Entities = {}
        self.Domains = {}
        self.Indirectors = {}  # these hold nodes that the console config thinks exist but HA doesn't have yet - happens at startup of joint HA/Console node
        self.alertspeclist = {}  # if ever want auto alerts like ISY command vars they get put here
        self.AlertNodes = {}
        self.lasterror = None
        if password != '':
            self.api = ha.API(self.addr, prefix, password, port=int(self.port))
        else:
            self.api = ha.API(self.addr, prefix, port=int(self.port))
        # Validate access with escalating backoff; dev builds try only once
        for i in range(9 if config.sysStore.versionname not in ('none', 'development') else 1):
            hassok = False
            apistat = ha.validate_api(self.api)
            if apistat == ha.APIStatus.OK:
                if i > 2:  # this was probably a power fail restart so need to really wait while HA stabilizes
                    logsupport.Logs.Log(
                        '{}: Probable power fail restart so delay to allow HA stabilization'.format(self.name))
                    time.sleep(120)
                hassok = True
                break
            elif apistat == ha.APIStatus.CANNOT_CONNECT:
                logsupport.Logs.Log('{}: Not yet responding (starting up?)({})'.format(self.name, i))
                time.sleep(10 * (i + 1))
            elif apistat == ha.APIStatus.INVALID_PASSWORD:
                logsupport.Logs.Log('{}: Bad access key'.format(self.name), severity=ConsoleError)
                raise ValueError
            else:
                logsupport.Logs.Log(
                    '{}: Failed access validation for unknown reasons ({})'.format(self.name, repr(apistat)),
                    severity=ConsoleWarning)
                time.sleep(5)
        # noinspection PyUnboundLocalVariable
        if hassok:
            logsupport.Logs.Log('{}: Access accepted'.format(self.name))
        else:
            logsupport.Logs.Log('HA access failed multiple trys for: ' + self.name, severity=ConsoleError, tb=False)
            raise HubInitError
        self.attrstore = valuestore.NewValueStore(
            haattraccess.HAattributes(hubname, self))  # don't create until access is ok
        entities = ha.get_states(self.api)
        byobjid = collections.OrderedDict()
        # domains whose entities should not take part in the device-grouping heuristic
        DontClassify = list(IgnoredDomains) + ['scene', 'input_number', 'input_boolean', 'input_select', 'automation',
                                               'script', 'cover']
        for e in entities:
            if e.domain not in self.Domains:
                self.Domains[e.domain] = {}
            p2 = dict(e.as_dict(), **{'domain': e.domain, 'name': e.name, 'object_id': e.object_id})
            if e.domain not in DontClassify: byobjid[e.object_id + '_' + e.domain] = p2
            if e.domain in hadomains:
                N = hadomains[e.domain](self, p2)
                self.Entities[e.entity_id] = N
            else:
                # AddIgnoredDomain presumably registers the domain so the
                # hadomains lookup below succeeds -- confirm against the
                # 'ignore' domain module
                AddIgnoredDomain(e.domain)
                N = hadomains[e.domain](self, p2)
                logsupport.Logs.Log(self.name + ': Uncatagorized HA domain type: ', e.domain, ' for entity: ',
                                    e.entity_id)
                debug.debugPrint('HASSgeneral', "Unhandled node type: ", e.object_id)
            self.Domains[e.domain][e.object_id] = N
        # Heuristic device grouping: sort object ids and group runs that share
        # a leading '_'-separated prefix; 'ZZZZZZZ' is a sentinel terminator.
        sortedents = sorted(byobjid.items()) + [('ZZZZZZZ', None)]
        i = 0
        gpstart = 0
        devlist = {}  # NOTE(review): appears unused
        notdone = True
        longmatch = 999
        startstring = ''
        while notdone:
            # print(sortedents[i][0])
            if gpstart == i:
                startstring = sortedents[i][0].split('_')
                longmatch = 999
                i += 1
            else:
                frontmatch = 0
                nextitem = sortedents[i][0].split('_')
                # NOTE(review): can IndexError if one id is a pure prefix of the
                # next (loop has no length bound) -- confirm inputs preclude this
                while startstring[frontmatch] == nextitem[frontmatch]: frontmatch += 1
                if frontmatch != 0:
                    longmatch = min(longmatch, frontmatch)
                    i += 1
                else:
                    # print('Device {} {} {} to {}'.format('_'.join(startstring[0:longmatch]), longmatch, gpstart, i-1))
                    entsfordev = []
                    devname = '_'.join(startstring[0:longmatch])
                    for j in sortedents[gpstart:i]:
                        # print(' {}'.format(j[1]))
                        entsfordev.append(j[1]['entity_id'])
                    self.AddDevice(devname, entsfordev)
                    if sortedents[i][0] != 'ZZZZZZZ':
                        gpstart = i
                    else:
                        notdone = False
        for dev, ents in self.DeviceToEnt.items():
            for i in ents: self.EntToDev[i] = dev
            # print('{} -> {}'.format(dev,ents))
        for n, T in self.DomainEntityReg['climate'].items():
            # This is special cased for Thermostats to connect the sensor entity with the thermostat to check for changes
            # If any other domain ever needs the same mechanism this should just be generalized to a "finish-up" call for
            # every entity
            try:
                try:
                    tname = n.split('.')[1]
                    tsensor = self.DomainEntityReg['sensor']['sensor.' + tname + '_thermostat_hvac_state']
                    # noinspection PyProtectedMember
                    T._connectsensors(tsensor)
                except Exception as E:
                    logsupport.Logs.Log(
                        'Exception from {} connecting sensor {} ({}) probably ISY Tstat'.format(self.name, n, E),
                        severity=ConsoleDetail)
            except Exception as E:
                logsupport.Logs.Log('Exception looking at climate devices: {} ({})'.format(n, E),
                                    severity=ConsoleWarning)
        self.haconnectstate = "Init"
        # Fetch the hub's service table (retry briefly) and build knownservices
        services = {}
        for i in range(3):
            services = ha.get_services(self.api)
            if services != {}: break
            logsupport.Logs.Log('Retry getting services from {}'.format(self.name))
            time.sleep(1)
        if services == {}:
            logsupport.Logs.Log('{} reports no services'.format(self.name), severity=ConsoleWarning)
        self.knownservices = {}
        for d in services:
            if not d['domain'] in self.knownservices:
                self.knownservices[d['domain']] = {}
            try:
                self.ParseDomainCommands(d['domain'], d['services'])
            except Exception as E:
                safeprint('Parse Except: {}'.format(E))
            for s, c in d['services'].items():
                if s in self.knownservices[d['domain']]:
                    # NOTE(review): d['domain'][s] indexes the domain *string*
                    # by a string -- looks like it should be
                    # self.knownservices[d['domain']][s]; would raise TypeError
                    # if this dev-print path ever fires
                    logsupport.DevPrint(
                        'Duplicate service noted for domain {}: service: {} existing: {} new: {}'.format(d['domain'], s,
                                                                                                         self.knownservices[
                                                                                                             d[
                                                                                                                 'domain'][
                                                                                                                 s]],
                                                                                                         c))
                self.knownservices[d['domain']][s] = c
        # print(self.SpecialCmds)
        # for d, cmds in self.SpecialCmds.items():
        # 	print("Domain {}".format(d))
        # 	for c,param in cmds.items():
        # 		print(" {}({}): {}".format(c,param['target'],{x: param[x] for x in param if x != 'target'}))
        # Development builds dump the service and entity inventories to files
        if config.sysStore.versionname in ('development', 'homerelease'):
            with open('{}/Console/{}-services'.format(config.sysStore.HomeDir, self.name), 'w') as f:
                for d, svc in self.knownservices.items():
                    print(d, file=f)
                    for s, c in svc.items():
                        print(' {}'.format(s), file=f)
                        print(' {}'.format(c), file=f)
                print('==================', file=f)
            with open('{}/Console/{}-entities'.format(config.sysStore.HomeDir, self.name), 'w') as f:
                print('===== Ignored =====', file=f)
                for d, de in self.DomainEntityReg.items():
                    for e, t in de.items():
                        if isinstance(t, self.dyndomains['ignore'].IgnoredDomain):
                            print('Ignored entity in {}: {} {}'.format(self.name, d, e), file=f)
                print('===== Active =====', file=f)
                for d, de in self.DomainEntityReg.items():
                    for e, t in de.items():
                        if not isinstance(t, self.dyndomains['ignore'].IgnoredDomain):
                            print('Watched entity in {}: {} {}'.format(self.name, d, e), file=f)
                print('===== New =====', file=f)
        # listeners = ha.get_event_listeners(self.api)
        logsupport.Logs.Log(self.name + ": Processed " + str(len(self.Entities)) + " total entities")
        for d, e in self.DomainEntityReg.items():
            if e != {}:
                if isinstance(list(e.values())[0], self.dyndomains['ignore'].IgnoredDomain):
                    logsupport.Logs.Log(" {}: {} (Ignored)".format(d, len(e)))
                else:
                    logsupport.Logs.Log(" {}: {}".format(d, len(e)))
                if d == 'unset':
                    for i in e:
                        logsupport.Logs.Log(' :{}'.format(i))
        self.initialstartup = True
        threadmanager.SetUpHelperThread(self.name, self.HAevents, prerestart=self.PreRestartHAEvents,
                                        poststart=self.PostStartHAEvents, postrestart=self.PostStartHAEvents,
                                        prestart=self.PreRestartHAEvents, checkok=self.HACheckThread,
                                        rpterr=self.ReportThisError)
        logsupport.Logs.Log("{}: Finished creating structure for hub".format(self.name))
| {
"content_hash": "361c08abf981ba53c1b7b0c9d0ea6854",
"timestamp": "",
"source": "github",
"line_count": 1060,
"max_line_length": 172,
"avg_line_length": 37.64905660377359,
"alnum_prop": 0.6520998296080986,
"repo_name": "kevinkahn/softconsole",
"id": "19a0dcf0740bcfa8d5ab05a1f32ccb5d3a28dda3",
"size": "39908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hubs/ha/hasshub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Euphoria",
"bytes": "267"
},
{
"name": "Python",
"bytes": "839903"
},
{
"name": "Shell",
"bytes": "101927"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from webassets.filter import Filter
__all__ = ('Jinja2',)
class Jinja2(Filter):
    """Process a file through the Jinja2 templating engine.

    Requires the ``jinja2`` package (https://github.com/mitsuhiko/jinja2).
    """

    name = 'jinja2'
    options = {
        'context': 'JINJA2_CONTEXT',
    }

    def setup(self):
        """Import jinja2 once at setup time, failing loudly if it is absent."""
        try:
            import jinja2
        except ImportError:
            raise EnvironmentError('The "jinja2" package is not installed.')
        self.jinja2 = jinja2
        super(Jinja2, self).setup()

    def output(self, _in, out, **kw):
        """Render the input stream as a Jinja2 template into the output stream."""
        template = self.jinja2.Template(_in.read())
        out.write(template.render(self.context or {}))
| {
"content_hash": "37c23edb967510e6bc01cf1ce2893787",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 24.620689655172413,
"alnum_prop": 0.5980392156862745,
"repo_name": "torchbox/webassets",
"id": "b80d259fd16052014449490ad6e7bcad4a32badd",
"size": "714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/webassets/filter/jinja2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import print_function, absolute_import
import bacula_tools
import logging
import optparse
class Job(bacula_tools.DbDict):

    '''This actually covers both Jobs and JobDefs, EXCEPT during parsing, when
    the JobDef subclass is used because it has different default values.
    '''
    # Keys emitted as simple "Key = value" phrases in the config output.
    SETUP_KEYS = [bacula_tools.TYPE, bacula_tools.LEVEL,
                  bacula_tools.NOTES, bacula_tools.ADDPREFIX, bacula_tools.ADDSUFFIX,
                  bacula_tools.BASE, bacula_tools.BOOTSTRAP,
                  bacula_tools.MAXIMUMBANDWIDTH, bacula_tools.MAXSTARTDELAY,
                  bacula_tools.REGEXWHERE, bacula_tools.RUN, bacula_tools.SPOOLSIZE,
                  bacula_tools.STRIPPREFIX, bacula_tools.VERIFYJOB,
                  bacula_tools.WHERE, bacula_tools.WRITEBOOTSTRAP,
                  (bacula_tools.REPLACE, 'always'),
                  bacula_tools.DIFFERENTIALMAXWAITTIME, bacula_tools.IDMAXWAITTIME,
                  bacula_tools.INCREMENTALMAXRUNTIME, bacula_tools.MAXRUNSCHEDTIME,
                  bacula_tools.MAXRUNTIME, bacula_tools.MAXWAITTIME,
                  bacula_tools.MAXFULLINTERVAL, bacula_tools.RESCHEDULEINTERVAL, ]
    # Integer-valued keys.
    INT_KEYS = [bacula_tools.MAXIMUMCONCURRENTJOBS,
                bacula_tools.RESCHEDULETIMES, bacula_tools.PRIORITY]
    # Yes/no keys.
    BOOL_KEYS = [bacula_tools.ENABLED, bacula_tools.PREFERMOUNTEDVOLUMES,
                 bacula_tools.ACCURATE, bacula_tools.ALLOWDUPLICATEJOBS,
                 bacula_tools.ALLOWMIXEDPRIORITY,
                 bacula_tools.CANCELLOWERLEVELDUPLICATES,
                 bacula_tools.CANCELQUEUEDDUPLICATES,
                 bacula_tools.CANCELRUNNINGDUPLICATES, bacula_tools.JOBDEF,
                 bacula_tools.PREFIXLINKS, bacula_tools.PRUNEFILES,
                 bacula_tools.PRUNEJOBS, bacula_tools.PRUNEVOLUMES,
                 bacula_tools.RERUNFAILEDLEVELS,
                 bacula_tools.RESCHEDULEONERROR,
                 bacula_tools.SPOOLATTRIBUTES, bacula_tools.SPOOLDATA,
                 bacula_tools.WRITEPARTAFTERJOB]
    # Foreign-key columns pointing at other configuration objects.
    REFERENCE_KEYS = [bacula_tools.DIFFERENTIALPOOL_ID,
                      bacula_tools.FILESET_ID, bacula_tools.FULLPOOL_ID,
                      bacula_tools.CLIENT_ID, bacula_tools.INCREMENTALPOOL_ID,
                      bacula_tools.MESSAGES_ID, bacula_tools.POOL_ID,
                      bacula_tools.SCHEDULE_ID, bacula_tools.STORAGE_ID, ]
    # These won't be handled en-masse
    SPECIAL_KEYS = [bacula_tools.JOB_ID, ]
    table = bacula_tools.JOBS
    retlabel = 'Job'

    def __init__(self, row=None, string=None):
        '''Need to have a nice, clean scripts member.

        Bug fix: ``row={}`` was a shared mutable default argument; use a
        None sentinel instead so each instance gets its own dict.
        '''
        if row is None:
            row = {}
        bacula_tools.DbDict.__init__(self, row, string)
        self.scripts = []
        return

    def __str__(self):
        '''String representation of a Job, suitable for inclusion in a Director config'''
        self.output = ['%s {' % self.retlabel, '}']
        self._simple_phrase(bacula_tools.NAME)
        for x in self.SETUP_KEYS:
            self._simple_phrase(x)
        for x in self.INT_KEYS:
            self._simple_phrase(x)
        # Foreign keys are printed as the referenced object's Name.
        for x in self.REFERENCE_KEYS:
            if self[x] is None:
                continue
            self.output.insert(-1, '  %s = "%s"' %
                               (x.replace('_id', '').capitalize(), self._fk_reference(x)[bacula_tools.NAME]))
        if self[bacula_tools.JOB_ID]:
            self.output.insert(-1, '  JobDefs = "%s"' %
                               self._fk_reference(bacula_tools.JOB_ID)[bacula_tools.NAME])
        for x in self.BOOL_KEYS:
            # JOBDEF is internal bookkeeping, not a Director config phrase.
            if x == bacula_tools.JOBDEF:
                continue
            self._yesno_phrase(x)
        for x in self.scripts:
            self.output.insert(-1, str(x))
        return '\n'.join(self.output)

    def _fk_reference(self, fk, string=None):
        '''This overrides the normal _fk_reference function because we actually have
        four different keys that all point to Pools.

        With ``string`` given, look up (or create) the referenced object by
        name and re-point this job's foreign key at it; otherwise just load
        the currently referenced object.
        '''
        key = fk.replace('_id', '')
        if 'pool' in key:
            key = 'pool'
        obj = bacula_tools._DISPATCHER[key]()
        if string:
            obj.search(string.strip())
            if not obj[bacula_tools.ID]:
                obj.set_name(string.strip())
            if not self[fk] == obj[bacula_tools.ID]:
                self.set(fk, obj[bacula_tools.ID])
        else:
            obj.search(self[fk])
        return obj

    def _load_scripts(self):
        '''Job scripts are stored separately as Script objects. This loads them in. '''
        if self[bacula_tools.ID]:
            for row in self.bc.do_sql('SELECT * FROM job_scripts WHERE job_id = %s', (self[bacula_tools.ID],), asdict=True):
                s = bacula_tools.Script(
                    {bacula_tools.ID: row[bacula_tools.SCRIPT_ID]})
                s.search()
                # De-duplicate: drop any previously loaded copy of this script.
                self.scripts = [
                    x for x in self.scripts if not x[bacula_tools.ID] == s[bacula_tools.ID]]
                self.scripts.append(s)
        return

    def _parse_script(self, **kwargs):
        '''Helper function for parsing configuration strings.

        Returns a pyparsing-style action callback that builds a Script from
        the parsed command text and attaches it to this job.
        '''
        def doit(a, b, c):
            s = bacula_tools.Script(kwargs)
            s[bacula_tools.COMMAND] = c[2]
            s.search()
            return self._add_script(s)
        return doit

    def _add_script(self, s):
        '''Add a script to the Job, replacing any existing copy, and persist
        the job<->script linkage if it is not already in the database.'''
        self.scripts = [x for x in self.scripts if not x[bacula_tools.ID]
                        == s[bacula_tools.ID]]
        self.scripts.append(s)
        row = self.bc.do_sql(
            'SELECT * FROM job_scripts WHERE job_id = %s AND script_id = %s', (self[bacula_tools.ID], s[bacula_tools.ID]))
        if not row:
            self.bc.do_sql(
                'INSERT INTO job_scripts(job_id, script_id) VALUES (%s, %s)', (self[bacula_tools.ID], s[bacula_tools.ID]))
        return s

    def _delete_script(self, s):
        '''Remove a Script from the Job.  This does not actually delete the Script,
        just the linkage to this job.'''
        # Bug fix: the SQL parameters must be a tuple -- ``(x)`` is just ``x``.
        # NOTE(review): the WHERE clause matches on ``id`` with the script's
        # id; confirm against the job_scripts schema that this is intended
        # (other queries here filter on script_id/job_id).
        self.bc.do_sql(
            'DELETE FROM job_scripts WHERE id = %s', (s[bacula_tools.ID],))
        self.scripts = [
            x for x in self.scripts if not x[bacula_tools.ID] == s[bacula_tools.ID]]
        return

    def _parse_script_full(self, *tokens):
        '''Another helper for script parsing: consume (key, sep, value)
        triples from the parse result and attach the resulting Script.'''
        s = bacula_tools.Script()
        values = tokens[2][1]
        while values:
            k, n, v = values[:3]
            del values[:3]
            s[k.lower()] = v
        self._add_script(s.search())
        return

    def _cli_special_setup(self):
        '''Support for adding all of the foreign-key references to the CLI.'''
        group = optparse.OptionGroup(self.parser,
                                     "Object Setters",
                                     "Various objects associated with a Job")
        group.add_option(
            '--pool', help='Use this pool for all backups unless overridden by a more specific pool')
        group.add_option(
            '--differential-pool', help='Use this pool for differential backups instead of the standard pool')
        group.add_option(
            '--full-pool', help='Use this pool for full backups instead of the standard pool')
        group.add_option(
            '--incremental-pool', help='Use this pool for incremental backups instead of the standard pool')
        group.add_option('--fileset')
        group.add_option('--client')
        group.add_option('--message-set')
        group.add_option('--schedule')
        group.add_option('--storage')
        group.add_option(
            '--default-job', help='The job which will supply default values for those otherwise unset on this one')
        group.add_option(
            '--add-script', help='Script name or ID to be added')
        group.add_option(
            '--remove-script', help='Script name or ID to be removed')
        self.parser.add_option_group(group)
        return

    def _cli_special_do_parse(self, args):
        '''CLI Foreign Key reference actions.'''
        self._cli_deref_helper(
            bacula_tools.POOL_ID, args.pool, bacula_tools.Pool)
        self._cli_deref_helper(
            bacula_tools.DIFFERENTIALPOOL_ID, args.differential_pool, bacula_tools.Pool)
        self._cli_deref_helper(
            bacula_tools.FULLPOOL_ID, args.full_pool, bacula_tools.Pool)
        self._cli_deref_helper(
            bacula_tools.INCREMENTALPOOL_ID, args.incremental_pool, bacula_tools.Pool)
        self._cli_deref_helper(
            bacula_tools.FILESET_ID, args.fileset, bacula_tools.Fileset)
        self._cli_deref_helper(
            bacula_tools.CLIENT_ID, args.client, bacula_tools.Client)
        self._cli_deref_helper(
            bacula_tools.MESSAGES_ID, args.message_set, bacula_tools.Messages)
        self._cli_deref_helper(
            bacula_tools.SCHEDULE_ID, args.schedule, bacula_tools.Schedule)
        self._cli_deref_helper(
            bacula_tools.STORAGE_ID, args.storage, bacula_tools.Storage)
        self._cli_deref_helper(
            bacula_tools.JOB_ID, args.default_job, bacula_tools.JobDef)
        if args.add_script:
            self._cli_script_helper(args.add_script)
        if args.remove_script:
            self._cli_script_helper(args.remove_script, True)
        return

    def _cli_deref_helper(self, key, value, obj):
        '''Shortcut function to make _cli_special_do_parse() a lot cleaner.

        An empty-string value clears the reference; None leaves it alone.
        '''
        if value is None:
            return
        if value == '':
            return self.set(key, None)
        target = obj().search(value)
        if target[bacula_tools.ID]:
            self.set(key, target[bacula_tools.ID])
        else:
            print('Unable to find a match for %s, continuing' % value)

    def _cli_script_helper(self, script, remove=False):
        '''Script management: add or remove the named script, then save.'''
        script_obj = bacula_tools.Script().search(script)
        if not script_obj[bacula_tools.ID]:
            print('Unable to find script "%s", skipping' % script)
            return
        if remove:
            self._delete_script(script_obj)
        else:
            self._add_script(script_obj)
        self._save()
        return

    def _cli_special_print(self):
        '''All of the foreign key objects get printed out here for the CLI.'''
        fmt = '%' + str(self._maxlen) + 's: %s'
        for x in self.REFERENCE_KEYS + self.SPECIAL_KEYS:
            if self[x] is None:
                continue
            print(
                fmt % (x.replace('_id', '').capitalize(), self._fk_reference(x)[bacula_tools.NAME]))
        if self.scripts:
            print('\nScripts')
            for x in self.scripts:
                print(x)
        return

    def _cli_special_clone(self, oid):
        '''When cloning, add in script links.

        The SELECT emits (new_job_id, script_id) rows for every script linked
        to the source job ``oid``, ready for direct re-insertion.
        '''
        select = 'SELECT %s,script_id FROM job_scripts WHERE job_id = %%s' % self[
            bacula_tools.ID]
        insert = 'INSERT INTO job_scripts (job_id,script_id) VALUES (%s,%s)'
        for row in self.bc.do_sql(select, oid):
            self.bc.do_sql(insert, row)
        self._load_scripts()

    def delete(self):
        '''Job-specific deletion bits: cascade to Storage (and its Device)
        when this was the last job referencing them.'''
        storage_things = []
        for storage in bacula_tools.Storage().Find(id=self[bacula_tools.STORAGE_ID]):
            storage_things.append(storage)
        bacula_tools.DbDict.delete(self)
        for storage in storage_things:
            # Have to be careful, it may be that multiple jobs reference the
            # same storage
            if len(bacula_tools.Job().Find(storage_id=self[bacula_tools.STORAGE_ID])) > 1:
                continue
            device_name = storage[bacula_tools.DEVICE]
            storage.delete()
            if len(bacula_tools.Storage().Find(device=device_name)):
                continue
            for device in bacula_tools.Device().Find(name=device_name):
                device.delete()
class JobDef(Job):
    '''This is really just a Job with a different label (for printing) and a value of 1 for the JOBDEF key.'''
    retlabel = 'JobDefs'

    def _save(self):
        '''JobDefs force the JOBDEF key to 1 upon saving.'''
        self[bacula_tools.JOBDEF] = 1
        return super(JobDef, self)._save()
def main():
    """Entry point: run the Job command-line interface."""
    Job().cli()
# Allow running this module directly as the Job CLI.
if __name__ == "__main__":
    main()
| {
"content_hash": "5bcb9be0626ee7f32c7b67ceee8ae054",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 124,
"avg_line_length": 42.37328767123287,
"alnum_prop": 0.5761739270993291,
"repo_name": "BrianGallew/bacula_configuration",
"id": "39f45e720ffbd95b902dedc274ad55292c5a68ca",
"size": "12420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bacula_tools/job.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "239234"
}
],
"symlink_target": ""
} |
import csv
import operator
import datetime
import itertools
import json
# Input CSV files (paths relative to this script) and the JSON output file.
EVENTS_FILE = "../data/events.csv"
PROJECTS_FILE = "../data/projects.csv"
EVENTS_JSON_FILE = "events.json"
projects = list()  # rows from PROJECTS_FILE, filled in below
# Helper functions
def index(a, x):
    '''Locate the leftmost value exactly equal to x in the sorted list a.

    Raises ValueError if x is not present.
    '''
    # Bug fix: `bisect` was never imported at module level, so this helper
    # raised NameError when called; import it locally to stay self-contained.
    import bisect
    i = bisect.bisect_left(a, x)
    if i != len(a) and a[i] == x:
        return i
    raise ValueError('%r is not in list' % (x,))
# http://stackoverflow.com/a/15823348
def json_datetime_default(obj):
    """JSON serializer for datetimes: returns epoch milliseconds (UTC).

    Bug fix: the original fell off the end (returning None, which json
    would emit as null) for any non-datetime; the `default` contract is
    to raise TypeError for unsupported types.
    """
    import calendar
    if isinstance(obj, datetime.datetime):
        # Normalize aware datetimes to UTC; naive ones are treated as UTC.
        if obj.utcoffset() is not None:
            obj = obj - obj.utcoffset()
        millis = int(
            calendar.timegm(obj.timetuple()) * 1000 +
            obj.microsecond / 1000
        )
        return millis
    raise TypeError('Object of type %s is not JSON serializable'
                    % type(obj).__name__)
# Load the project rows and sort them by the first column (project id).
with open(PROJECTS_FILE, 'r') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        projects += [row]
projects = sorted(projects, key=operator.itemgetter(0))
# 2013-10-28 18:00:00+01
# format: %Y-%m-%d %H:00:00+%z
events = list()
# Parse events; the trailing ':00:00+01' (9 chars) of each timestamp is
# stripped so only '%Y-%m-%d %H' remains for strptime.
with open(EVENTS_FILE, 'r') as csvfile:
    reader = csv.reader(csvfile)
    for iid, title, visitors, startdate, program_id in reader:
        events += [[iid, title, visitors, datetime.datetime.strptime(startdate[:-9], "%Y-%m-%d %H"), program_id]]
# Sort chronologically; index 3 is the parsed start datetime.
events = sorted(events, key=operator.itemgetter(3))
start_begin = (events[0][3], events[-1][3])  # (earliest, latest) start times
data_bined = list()
def grouper(item):
    """Bin key for groupby: the (year, month) of the event's start datetime."""
    start = item[3]
    return (start.year, start.month)
# Group the chronologically-sorted events by (year, month); groupby relies on
# that pre-sorted order. Within each bin, sort by program_id (index 4).
for ((year, month), items) in itertools.groupby(events, grouper):
    data_bined += [[year, month, sorted(list(items), key=operator.itemgetter(4))]]
# NOTE(review): the output file handle is never closed explicitly.
json.dump(data_bined, open(EVENTS_JSON_FILE, "w"), default=json_datetime_default)
| {
"content_hash": "fb858a9611a747c67d620471bb5421cd",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 107,
"avg_line_length": 26.476190476190474,
"alnum_prop": 0.6660671462829736,
"repo_name": "kiberpipa/pipa-visualizations",
"id": "3ad2b78cf01a7c8b5ef38e8f2a038433b1d3be28",
"size": "1714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events-in-time/events-to-json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2161"
},
{
"name": "JavaScript",
"bytes": "336448"
},
{
"name": "Python",
"bytes": "4977"
}
],
"symlink_target": ""
} |
from stoq.data_classes import StoqResponse
from stoq.plugins import DecoratorPlugin
class DummyDecorator(DecoratorPlugin):
    """No-op decorator plugin; presumably serves as test fixture data."""

    async def decorate(self, response: StoqResponse) -> None:
        """Intentionally does nothing with the scan response."""
        pass
| {
"content_hash": "898434e85a185e604c1b9b987f1f9585",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 61,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.775,
"repo_name": "PUNCH-Cyber/stoq",
"id": "321b3dd2af3e0874dd430b0df38947c97dd1633e",
"size": "841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stoq/tests/data/plugins/decorator/dummy_decorator/dummy_decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "202358"
}
],
"symlink_target": ""
} |
"""Finds CrOS browsers that can be controlled by telemetry."""
import logging
import sys
from telemetry.core import browser
from telemetry.core import possible_browser
from telemetry.core.chrome import cros_platform_backend
from telemetry.core.chrome import cros_browser_backend
from telemetry.core.chrome import cros_interface
# Comma-separated list of every browser_type this finder can produce.
ALL_BROWSER_TYPES = ','.join([
    'cros-chrome',
    'system-cros',
    ])
class PossibleCrOSBrowser(possible_browser.PossibleBrowser):
  """A launchable chromeos browser instance."""

  def __init__(self, browser_type, options, cri):
    super(PossibleCrOSBrowser, self).__init__(browser_type, options)
    self._cri = cri

  def __repr__(self):
    return 'PossibleCrOSBrowser(browser_type=%s)' % self.browser_type

  def Create(self):
    """Builds a Browser wired to a CrOS backend/platform pair."""
    backend = cros_browser_backend.CrOSBrowserBackend(
        self.browser_type, self._options, self._cri)
    platform = cros_platform_backend.CrosPlatformBackend(self._cri)
    new_browser = browser.Browser(backend, platform)
    backend.SetBrowser(new_browser)
    return new_browser

  def SupportsOptions(self, options):
    # Every option set is accepted for CrOS browsers.
    return True
def FindAllAvailableBrowsers(options):
  """Finds all available chromeos browsers, locally and remotely.

  Returns a list of PossibleCrOSBrowser instances (empty when no device
  is reachable and we are not running on a CrOS device).
  """
  # Check if we are on a chromeos device.
  if sys.platform.startswith('linux'):
    with open('/etc/lsb-release', 'r') as f:
      res = f.read()
      if res.count('CHROMEOS_RELEASE_NAME'):
        return [PossibleCrOSBrowser('system-cros', options,
                                    cros_interface.CrOSInterface())]

  if options.cros_remote is None:
    logging.debug('No --remote specified, will not probe for CrOS.')
    return []

  if not cros_interface.HasSSH():
    logging.debug('ssh not found. Cannot talk to CrOS devices.')
    return []
  cri = cros_interface.CrOSInterface(options.cros_remote,
                                     options.cros_ssh_identity)

  # Check ssh
  try:
    cri.TryLogin()
  except cros_interface.LoginException as ex:
    # Fix: the old `except X, ex` spelling is Python-2-only syntax.
    if isinstance(ex, cros_interface.KeylessLoginRequiredException):
      logging.warn('Could not ssh into %s. Your device must be configured',
                   options.cros_remote)
      logging.warn('to allow passwordless login as root.')
      logging.warn('For a test-build device, pass this to your script:')
      logging.warn('   --identity $(CHROMITE)/ssh_keys/testing_rsa')
      logging.warn('')
      logging.warn('For a developer-mode device, the steps are:')
      logging.warn(' - Ensure you have an id_rsa.pub (etc) on this computer')
      logging.warn(' - On the chromebook:')
      logging.warn('   -  Control-Alt-T; shell; sudo -s')
      logging.warn('   -  openssh-server start')
      logging.warn('   -  scp <this machine>:.ssh/id_rsa.pub /tmp/')
      logging.warn('   -  mkdir /root/.ssh')
      logging.warn('   -  chown go-rx /root/.ssh')
      logging.warn('   -  cat /tmp/id_rsa.pub >> /root/.ssh/authorized_keys')
      logging.warn('   -  chown 0600 /root/.ssh/authorized_keys')
      logging.warn('There, that was easy!')
      logging.warn('')
      logging.warn('P.S. Please, tell your manager how INANE this is.')
    else:
      logging.warn(str(ex))
    return []

  if not cri.FileExistsOnDevice('/opt/google/chrome/chrome'):
    # Bug fix: `'Could not find a chrome on ' % cri.hostname` raised
    # TypeError (no conversion specifier); use logging's lazy %-args.
    logging.warn('Could not find a chrome on %s', cri.hostname)

  return [PossibleCrOSBrowser('cros-chrome', options, cri)]
| {
"content_hash": "11368f5d201e5503797b4d74d9d388d3",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 77,
"avg_line_length": 38.02272727272727,
"alnum_prop": 0.6586969515839809,
"repo_name": "timopulkkinen/BubbleFish",
"id": "908df4403e8efd05d8485a885f0a01bda2e670be",
"size": "3512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/chrome/cros_browser_finder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1174304"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "75801820"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "161884021"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3531849"
},
{
"name": "JavaScript",
"bytes": "18556005"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "7254742"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "933011"
},
{
"name": "Python",
"bytes": "8808682"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1537764"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
} |
import oslo_messaging as messaging
from murano.common import config
from murano.common import uuidutils
from murano.dsl import murano_class
from murano.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@murano_class.classname('io.murano.system.StatusReporter')
class StatusReporter(object):
    """Publishes per-instance status reports for an environment via oslo.messaging."""

    # Process-wide transport, created lazily on first initialize() and
    # shared by every reporter instance afterwards.
    transport = None

    def initialize(self, environment):
        """Bind this reporter to an environment and build its notifier."""
        if StatusReporter.transport is None:
            StatusReporter.transport = messaging.get_transport(config.CONF)
        self._notifier = messaging.Notifier(
            StatusReporter.transport,
            publisher_id=uuidutils.generate_uuid(),
            topic='murano')
        self._environment_id = environment.object_id

    def _report(self, instance, msg, details=None, level='info'):
        # NOTE(review): `level` travels only inside the payload; the
        # notification itself is always emitted at info priority.
        payload = {
            'id': instance.object_id,
            'text': msg,
            'details': details,
            'level': level,
            'environment_id': self._environment_id,
        }
        self._notifier.info({}, 'murano.report_notification', payload)

    def report(self, instance, msg):
        """Send an informational status report for `instance`."""
        self._report(instance, msg)

    def report_error(self, instance, msg):
        """Send an error-level status report for `instance`."""
        self._report(instance, msg, None, 'error')
| {
"content_hash": "431541c24c38897b8751ac40841a39d2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 67,
"avg_line_length": 31,
"alnum_prop": 0.6370967741935484,
"repo_name": "chenyujie/hybrid-murano",
"id": "fcf5cb8550cf07023388f772042f08786c99795d",
"size": "1823",
"binary": false,
"copies": "1",
"ref": "refs/heads/hybrid-master",
"path": "murano/engine/system/status_reporter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1013"
},
{
"name": "PowerShell",
"bytes": "8634"
},
{
"name": "Python",
"bytes": "1004440"
},
{
"name": "Shell",
"bytes": "6751"
}
],
"symlink_target": ""
} |
import sys
from random import choice as randchoice
from itertools import cycle
from pygame import *
from pygame import gfxdraw
from board import PygameBoard, Loc, black, white, gray, center_square
from utils import *
"""
which version to use?
hl/move should be in Board
when capturing, piece moves a 2nd time
"""
# Board is game_size x game_size tiles; each tile is tilesize pixels wide.
game_size = 12
tilesize = 45
def same_side(p1, p2):
    """True when both arguments are non-falsy and belong to the same player.

    Each argument may be a piece (with a `.player` attribute) or a player
    itself; pieces are resolved to their owning player before comparing.
    """
    owner1 = getattr(p1, "player", p1)
    owner2 = getattr(p2, "player", p2)
    return p1 and p2 and owner1 == owner2
class Player(object):
    """A game side, identified by a short id string, owning a list of pieces."""

    def __init__(self, id, pieces=None):
        self.id = id
        self.pieces = pieces if pieces else []

    def __repr__(self):
        return self.id
class BaseTile(object):
    """A single board tile that may hold a piece or be marked unusable."""

    highlight = False
    piece = None

    def __init__(self, board=None, loc=None, none=False):
        self.board = board
        self.loc = loc
        # `none` marks an unmovable-to tile that should not be drawn.
        self.none = none

    def set_none(self):
        """Mark this tile unusable and blank it on the attached board, if any."""
        self.none = True
        if self.board:
            self.board.make_blank(self.loc)

    @property
    def blank(self):
        """True when the tile holds no piece and is not marked unusable."""
        return not (self.piece or self.none)
class BasePiece(object):
    """A piece owned by a player, optionally placed on a board location."""

    def __init__(self, player, id, board=None, loc=None):
        self.player = player
        self.id = id
        self.board = board
        self.loc = loc
        # NOTE(review): a falsy loc (e.g. a Loc equal to (0,0) if Loc defines
        # falsiness) would skip placement here -- confirm Loc truthiness.
        if self.board and self.loc:
            self.place()
        player.pieces.append(self)

    def __repr__(self):
        # Bug fix: the original returned the Player object itself, but
        # __repr__ must return a str (repr(piece) raised TypeError).
        return repr(self.player)

    def draw(self):
        """Draw piece by dispatching to the kind-specific draw_<id> method."""
        getattr(self, "draw_" + self.id)(self.loc)
        display.update()

    def move(self, loc):
        """Ask the board to move this piece to `loc`."""
        self.board.move(self.loc, loc)

    def place(self):
        """Register this piece on its board tile and draw it."""
        self.board[self.loc].piece = self
        self.draw()
class Piece(BasePiece):
    """Concrete piece with pygame drawing for the two piece kinds ('r', 'o')."""

    def draw_r(self, loc):
        """Draw the square ('r') piece at `loc`."""
        brd = self.board
        rect = center_square(brd.resolve_loc(loc), iround(brd.tilesize * 0.5))
        draw.rect(brd.sfc, (50,50,50), rect, 1)
        draw.rect(brd.sfc, gray, rect.inflate(-4,-4), 0)
        brd.scr.blit(brd.sfc, (0,0))

    def draw_o(self, loc):
        """Draw the circle ('o') piece at `loc`."""
        brd = self.board
        center = brd.resolve_loc(loc)
        radius = iround((brd.tilesize/2) * 0.6)
        gfxdraw.filled_circle(brd.sfc, center[0], center[1], radius, (120,120,120))
        gfxdraw.aacircle(brd.sfc, center[0], center[1], radius + 2, black)
        brd.scr.blit(brd.sfc, (0,0))
class GameBoard(PygameBoard):
    """PygameBoard that also removes captured pieces from the side piece lists."""

    def move(self, loc1, loc2):
        mover = self[loc1].piece
        target = self[loc2].piece
        # NOTE(review): this compares the two piece objects directly; with no
        # __eq__ defined that is identity, labelled "can't capture own piece".
        if mover == target:
            return
        if target in ai_pieces:
            ai_pieces.remove(target)
        elif target in player_pieces:
            player_pieces.remove(target)
        self.clear(loc2)
        super(GameBoard, self).move(loc1, loc2)
class Game1(object):
    """Game controller: players alternate turns until one side has no pieces."""
    winmsg  = "%s is the winner!"
    drawmsg = "It's a draw!"

    def game_won(self, winner):
        """Show the end-of-game message and wait for the user to quit."""
        board.message(self.winmsg % winner if winner else self.drawmsg)
        board.wait_exit()

    def run(self):
        """Main loop."""
        for player in cycle(players):
            self.make_move(player)
            if not ai_pieces:
                self.game_won(p1.id)
            if not player_pieces:
                self.game_won(ai.id)

    def make_move(self, player):
        """Dispatch one turn to the AI or the human mover."""
        if player == ai:
            self.ai_move(player)
        else:
            self.human_move(player)

    def ai_move(self, player):
        """Capture player piece if possible, otherwise move to a blank if possible, or try another piece."""
        shuffle(player.pieces)
        for p in player.pieces:
            nbrs   = board.neighbour_locs(p)
            pl     = [loc for loc in nbrs if same_side(board[loc].piece, p1)]
            blanks = [loc for loc in nbrs if board[loc].blank]
            # Bug fix: the original `first(pl) or randchoice(blanks) if blanks
            # else None` parsed as `(...) if blanks else None`, so the AI never
            # captured when it had no blank neighbours. Parenthesize so a
            # capture always takes priority.
            loc = first(pl) or (randchoice(blanks) if blanks else None)
            if loc:
                p.move(loc)
                break

    def human_move(self, player):
        """ Select a piece and then move a highlighted piece.

            select logic:
             - only player's piece can be selected
             - click on a piece to select, click again to deselect
             - move if a piece is selected AND clicked on a valid location
             - if a piece is already selected and clicked on a new player's piece, the old one is
                deselected
        """
        hl_loc = None
        while True:
            loc = board.get_click_index()
            if same_side(board[loc].piece, player):
                board.toggle_highlight(loc)
                if hl_loc and hl_loc != loc:
                    board.toggle_highlight(hl_loc)
                if hl_loc==loc : hl_loc = None
                else           : hl_loc = loc
            elif hl_loc and board.dist(loc, hl_loc) < 2:
                if not board[loc].none:
                    # capture piece or move to a blank tile
                    board.toggle_highlight(hl_loc)
                    board.move(hl_loc, loc)
                    break
if __name__ == "__main__":
    # Optional CLI argument overrides the default board size.
    arg = sys.argv[1:]
    if arg: game_size = int(arg[0])
    board = GameBoard((game_size, game_size), tilesize, circle=1, tile_cls=BaseTile)
    imax = game_size - 1
    # The four corner tiles are unusable.
    for loc in [(0,0), (0,imax), (imax,0), (imax,imax)]:
        board[loc].set_none()
    p1, ai = Player('r'), Player('o')
    # Two pieces per side, dropped onto random blank tiles.
    ai_pieces     = [Piece(ai, 'o', board, board.random_blank()) for _ in range(2)]
    player_pieces = [Piece(p1, 'r', board, board.random_blank()) for _ in range(2)]
    players = p1, ai
    Game1().run()
| {
"content_hash": "d35def8c1faa41d4f4f58e034c2b5c98",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 108,
"avg_line_length": 29.683060109289617,
"alnum_prop": 0.5506259204712813,
"repo_name": "akulakov/pygame-games",
"id": "538bf769d2707389f3a5f27d79d70bebf6a7fab6",
"size": "5478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game1.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "52375"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add the nullable 'backup_path' column to logical_database."""
        # Adding field 'Database.backup_path'
        db.add_column(u'logical_database', 'backup_path',
                      self.gf('django.db.models.fields.CharField')(max_length=300, null=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'Database.backup_path'
db.delete_column(u'logical_database', 'backup_path')
models = {
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'logical.credential': {
'Meta': {'ordering': "(u'database', u'user')", 'unique_together': "((u'user', u'database'),)", 'object_name': 'Credential'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'credentials'", 'to': u"orm['logical.Database']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'logical.database': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'name', u'environment'),)", 'object_name': 'Database'},
'backup_path': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DatabaseInfra']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_quarantine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['logical.Project']"}),
'quarantine_dt': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'logical.project': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.enginetype': {
'Meta': {'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'equivalent_environment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Environment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['physical.Environment']", 'symmetrical': 'False'}),
'equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Plan']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['logical'] | {
"content_hash": "35d609c1be03e97f46460dbd8fa8d1d5",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 205,
"avg_line_length": 84.07272727272728,
"alnum_prop": 0.5568771626297578,
"repo_name": "globocom/database-as-a-service",
"id": "8e86e894747109779447b9fd8a8087c7369546dc",
"size": "13896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/logical/migrations/0018_auto__add_field_database_backup_path.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
} |
from test_framework import generic_test
def snake_string(s: str) -> str:
    """Return *s* written in a three-row "snakestring" zigzag, read row by row.

    The zigzag places s[1], s[5], s[9], ... on the top row, the even-indexed
    characters on the middle row, and s[3], s[7], s[11], ... on the bottom row.
    For example, "Hello World!" -> "e lHloWrdlo!".
    """
    # Top row: indices 1 mod 4; middle row: even indices; bottom row: 3 mod 4.
    return s[1::4] + s[::2] + s[3::4]
if __name__ == '__main__':
    # Run the EPI generic test harness against the paired snake_string.tsv data;
    # its return value becomes the process exit status.
    exit(
        generic_test.generic_test_main('snake_string.py', 'snake_string.tsv',
                                       snake_string))
| {
"content_hash": "8b28f665648db4e364a8635a1050a812",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 77,
"avg_line_length": 24.25,
"alnum_prop": 0.5360824742268041,
"repo_name": "shobhitmishra/CodingProblems",
"id": "e367f2aed0d029d67b0a2e52b069eec8fdb21ee2",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epi_judge_python/snake_string.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "31844"
},
{
"name": "Python",
"bytes": "437556"
}
],
"symlink_target": ""
} |
"""
A client for AWS batch services
.. seealso::
- http://boto3.readthedocs.io/en/latest/guide/configuration.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
- https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
"""
from random import uniform
from time import sleep
from typing import Dict, List, Optional, Union
import botocore.client
import botocore.exceptions
import botocore.waiter
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.typing_compat import Protocol, runtime_checkable
# Add exceptions to pylint for the boto3 protocol only; ideally the boto3 library
# could provide
# protocols for all their dynamically generated classes (try to migrate this to a PR on botocore).
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
# pylint: disable=invalid-name, unused-argument
@runtime_checkable
class AwsBatchProtocol(Protocol):
    """
    A structured Protocol for ``boto3.client('batch') -> botocore.client.Batch``.

    This is used for type hints on :py:meth:`.AwsBatchClient.client`; it covers
    only the subset of client methods required.

    .. seealso::
        - https://mypy.readthedocs.io/en/latest/protocols.html
        - http://boto3.readthedocs.io/en/latest/reference/services/batch.html
    """

    def describe_jobs(self, jobs: List[str]) -> Dict:
        """
        Get job descriptions from AWS batch

        :param jobs: a list of JobId to describe
        :type jobs: List[str]

        :return: an API response to describe jobs
        :rtype: Dict
        """
        # Protocol stub only: the real implementation is the dynamically
        # generated boto3 client method.
        ...

    def get_waiter(self, waiterName: str) -> botocore.waiter.Waiter:
        """
        Get an AWS Batch service waiter

        :param waiterName: The name of the waiter.  The name should match
            the name (including the casing) of the key name in the waiter
            model file (typically this is CamelCasing).
        :type waiterName: str

        :return: a waiter object for the named AWS batch service
        :rtype: botocore.waiter.Waiter

        .. note::
            AWS batch might not have any waiters (until botocore PR-1307 is released).

            .. code-block:: python

                import boto3
                boto3.client('batch').waiter_names == []

        .. seealso::
            - https://boto3.amazonaws.com/v1/documentation/api/latest/guide/clients.html#waiters
            - https://github.com/boto/botocore/pull/1307
        """
        ...

    def submit_job(
        self,
        jobName: str,
        jobQueue: str,
        jobDefinition: str,
        arrayProperties: Dict,
        parameters: Dict,
        containerOverrides: Dict,
    ) -> Dict:
        """
        Submit a batch job

        :param jobName: the name for the AWS batch job
        :type jobName: str

        :param jobQueue: the queue name on AWS Batch
        :type jobQueue: str

        :param jobDefinition: the job definition name on AWS Batch
        :type jobDefinition: str

        :param arrayProperties: the same parameter that boto3 will receive
        :type arrayProperties: Dict

        :param parameters: the same parameter that boto3 will receive
        :type parameters: Dict

        :param containerOverrides: the same parameter that boto3 will receive
        :type containerOverrides: Dict

        :return: an API response
        :rtype: Dict
        """
        ...

    def terminate_job(self, jobId: str, reason: str) -> Dict:
        """
        Terminate a batch job

        :param jobId: a job ID to terminate
        :type jobId: str

        :param reason: a reason to terminate job ID
        :type reason: str

        :return: an API response
        :rtype: Dict
        """
        ...
# Note that the use of invalid-name parameters should be restricted to the boto3 mappings only;
# all the Airflow wrappers of boto3 clients should not adopt invalid-names to match boto3.
# pylint: enable=invalid-name, unused-argument
class AwsBatchClientHook(AwsBaseHook):
    """
    A client for AWS batch services.

    :param max_retries: exponential back-off retries, 4200 = 48 hours;
        polling is only used when waiters is None
    :type max_retries: Optional[int]

    :param status_retries: number of HTTP retries to get job status, 10;
        polling is only used when waiters is None
    :type status_retries: Optional[int]

    .. note::
        Several methods use a default random delay to check or poll for job status, i.e.
        ``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``
        Using a random interval helps to avoid AWS API throttle limits
        when many concurrent tasks request job-descriptions.

        To modify the global defaults for the range of jitter allowed when a
        random delay is used to check batch job status, modify these defaults, e.g.:

        .. code-block::

            AwsBatchClient.DEFAULT_DELAY_MIN = 0
            AwsBatchClient.DEFAULT_DELAY_MAX = 5

        When explict delay values are used, a 1 second random jitter is applied to the
        delay (e.g. a delay of 0 sec will be a ``random.uniform(0, 1)`` delay.  It is
        generally recommended that random jitter is added to API requests.  A
        convenience method is provided for this, e.g. to get a random delay of
        10 sec +/- 5 sec: ``delay = AwsBatchClient.add_jitter(10, width=5, minima=0)``

    .. seealso::
        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
        - https://docs.aws.amazon.com/general/latest/gr/api-retries.html
        - https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
    """

    MAX_RETRIES = 4200
    STATUS_RETRIES = 10

    # delays are in seconds
    DEFAULT_DELAY_MIN = 1
    DEFAULT_DELAY_MAX = 10

    def __init__(
        self, *args, max_retries: Optional[int] = None, status_retries: Optional[int] = None, **kwargs
    ) -> None:
        # https://github.com/python/mypy/issues/6799 hence type: ignore
        super().__init__(client_type='batch', *args, **kwargs)  # type: ignore
        self.max_retries = max_retries or self.MAX_RETRIES
        self.status_retries = status_retries or self.STATUS_RETRIES

    @property
    def client(self) -> Union[AwsBatchProtocol, botocore.client.BaseClient]:  # noqa: D402
        """
        An AWS API client for batch services, like ``boto3.client('batch')``

        :return: a boto3 'batch' client for the ``.region_name``
        :rtype: Union[AwsBatchProtocol, botocore.client.BaseClient]
        """
        return self.conn

    def terminate_job(self, job_id: str, reason: str) -> Dict:
        """
        Terminate a batch job

        :param job_id: a job ID to terminate
        :type job_id: str

        :param reason: a reason to terminate job ID
        :type reason: str

        :return: an API response
        :rtype: Dict
        """
        response = self.get_conn().terminate_job(jobId=job_id, reason=reason)
        self.log.info(response)
        return response

    def check_job_success(self, job_id: str) -> bool:
        """
        Check the final status of the batch job; return True if the job
        'SUCCEEDED', else raise an AirflowException

        :param job_id: a batch job ID
        :type job_id: str

        :rtype: bool

        :raises: AirflowException
        """
        job = self.get_job_description(job_id)
        job_status = job.get("status")
        if job_status == "SUCCEEDED":
            self.log.info("AWS batch job (%s) succeeded: %s", job_id, job)
            return True
        if job_status == "FAILED":
            raise AirflowException(f"AWS Batch job ({job_id}) failed: {job}")
        if job_status in ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING"]:
            raise AirflowException(f"AWS Batch job ({job_id}) is not complete: {job}")
        raise AirflowException(f"AWS Batch job ({job_id}) has unknown status: {job}")

    def wait_for_job(self, job_id: str, delay: Union[int, float, None] = None) -> None:
        """
        Wait for batch job to complete

        :param job_id: a batch job ID
        :type job_id: str

        :param delay: a delay before polling for job status
        :type delay: Optional[Union[int, float]]

        :raises: AirflowException
        """
        self.delay(delay)
        self.poll_for_job_running(job_id, delay)
        self.poll_for_job_complete(job_id, delay)
        self.log.info("AWS Batch job (%s) has completed", job_id)

    def poll_for_job_running(self, job_id: str, delay: Union[int, float, None] = None) -> None:
        """
        Poll for job running. The status that indicates a job is running or
        already complete are: 'RUNNING'|'SUCCEEDED'|'FAILED'.

        So the status options that this will wait for are the transitions from:
        'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'|'SUCCEEDED'|'FAILED'

        The completed status options are included for cases where the status
        changes too quickly for polling to detect a RUNNING status that moves
        quickly from STARTING to RUNNING to completed (often a failure).

        :param job_id: a batch job ID
        :type job_id: str

        :param delay: a delay before polling for job status
        :type delay: Optional[Union[int, float]]

        :raises: AirflowException
        """
        self.delay(delay)
        running_status = ["RUNNING", "SUCCEEDED", "FAILED"]
        self.poll_job_status(job_id, running_status)

    def poll_for_job_complete(self, job_id: str, delay: Union[int, float, None] = None) -> None:
        """
        Poll for job completion. The status that indicates job completion
        are: 'SUCCEEDED'|'FAILED'.

        So the status options that this will wait for are the transitions from:
        'SUBMITTED'>'PENDING'>'RUNNABLE'>'STARTING'>'RUNNING'>'SUCCEEDED'|'FAILED'

        :param job_id: a batch job ID
        :type job_id: str

        :param delay: a delay before polling for job status
        :type delay: Optional[Union[int, float]]

        :raises: AirflowException
        """
        self.delay(delay)
        complete_status = ["SUCCEEDED", "FAILED"]
        self.poll_job_status(job_id, complete_status)

    def poll_job_status(self, job_id: str, match_status: List[str]) -> bool:
        """
        Poll for job status using an exponential back-off strategy (with max_retries).

        :param job_id: a batch job ID
        :type job_id: str

        :param match_status: a list of job status to match; the batch job status are:
            'SUBMITTED'|'PENDING'|'RUNNABLE'|'STARTING'|'RUNNING'|'SUCCEEDED'|'FAILED'
        :type match_status: List[str]

        :rtype: bool

        :raises: AirflowException
        """
        retries = 0
        while True:
            job = self.get_job_description(job_id)
            job_status = job.get("status")
            self.log.info(
                "AWS Batch job (%s) check status (%s) in %s",
                job_id,
                job_status,
                match_status,
            )
            if job_status in match_status:
                return True
            if retries >= self.max_retries:
                raise AirflowException(f"AWS Batch job ({job_id}) status checks exceed max_retries")
            retries += 1
            pause = self.exponential_delay(retries)
            self.log.info(
                "AWS Batch job (%s) status check (%d of %d) in the next %.2f seconds",
                job_id,
                retries,
                self.max_retries,
                pause,
            )
            self.delay(pause)

    def get_job_description(self, job_id: str) -> Dict:
        """
        Get job description (using status_retries).

        :param job_id: a batch job ID
        :type job_id: str

        :return: an API response for describe jobs
        :rtype: Dict

        :raises: AirflowException
        """
        retries = 0
        while True:
            try:
                response = self.get_conn().describe_jobs(jobs=[job_id])
                return self.parse_job_description(job_id, response)
            except botocore.exceptions.ClientError as err:
                error = err.response.get("Error", {})
                if error.get("Code") == "TooManyRequestsException":
                    pass  # allow it to retry, if possible
                else:
                    raise AirflowException(f"AWS Batch job ({job_id}) description error: {err}")
            retries += 1
            if retries >= self.status_retries:
                # f-string for consistency with the other messages in this class
                raise AirflowException(
                    f"AWS Batch job ({job_id}) description error: exceeded "
                    f"status_retries ({self.status_retries})"
                )
            pause = self.exponential_delay(retries)
            self.log.info(
                "AWS Batch job (%s) description retry (%d of %d) in the next %.2f seconds",
                job_id,
                retries,
                self.status_retries,
                pause,
            )
            self.delay(pause)

    @staticmethod
    def parse_job_description(job_id: str, response: Dict) -> Dict:
        """
        Parse job description to extract description for job_id

        :param job_id: a batch job ID
        :type job_id: str

        :param response: an API response for describe jobs
        :type response: Dict

        :return: an API response to describe job_id
        :rtype: Dict

        :raises: AirflowException
        """
        jobs = response.get("jobs", [])
        matching_jobs = [job for job in jobs if job.get("jobId") == job_id]
        if len(matching_jobs) != 1:
            raise AirflowException(f"AWS Batch job ({job_id}) description error: response: {response}")
        return matching_jobs[0]

    @staticmethod
    def add_jitter(
        delay: Union[int, float], width: Union[int, float] = 1, minima: Union[int, float] = 0
    ) -> float:
        """
        Use delay +/- width for random jitter

        Adding jitter to status polling can help to avoid
        AWS batch API limits for monitoring batch jobs with
        a high concurrency in Airflow tasks.

        :param delay: number of seconds to pause;
            delay is assumed to be a positive number
        :type delay: Union[int, float]

        :param width: delay +/- width for random jitter;
            width is assumed to be a positive number
        :type width: Union[int, float]

        :param minima: minimum delay allowed;
            minima is assumed to be a non-negative number
        :type minima: Union[int, float]

        :return: uniform(delay - width, delay + width) jitter
            and it is a non-negative number
        :rtype: float
        """
        delay = abs(delay)
        width = abs(width)
        minima = abs(minima)
        lower = max(minima, delay - width)
        upper = delay + width
        return uniform(lower, upper)

    @staticmethod
    def delay(delay: Union[int, float, None] = None) -> None:
        """
        Pause execution for ``delay`` seconds.

        :param delay: a delay to pause execution using ``time.sleep(delay)``;
            a small 1 second jitter is applied to the delay.
        :type delay: Optional[Union[int, float]]

        .. note::
            This method uses a default random delay, i.e.
            ``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``;
            using a random interval helps to avoid AWS API throttle limits
            when many concurrent tasks request job-descriptions.
        """
        if delay is None:
            delay = uniform(AwsBatchClientHook.DEFAULT_DELAY_MIN, AwsBatchClientHook.DEFAULT_DELAY_MAX)
        else:
            delay = AwsBatchClientHook.add_jitter(delay)
        sleep(delay)

    @staticmethod
    def exponential_delay(tries: int) -> float:
        """
        An exponential back-off delay, with random jitter.  There is a maximum
        interval of 10 minutes (with random jitter between 3 and 10 minutes).
        This is used in the :py:meth:`.poll_for_job_status` method.

        :param tries: Number of tries
        :type tries: int

        :rtype: float

        Examples of behavior:

        .. code-block:: python

            def exp(tries):
                max_interval = 600.0  # 10 minutes in seconds
                delay = 1 + pow(tries * 0.6, 2)
                delay = min(max_interval, delay)
                print(delay / 3, delay)

            for tries in range(10):
                exp(tries)

            #  0.33  1.0
            #  0.45  1.35
            #  0.81  2.44
            #  1.41  4.23
            #  2.25  6.76
            #  3.33 10.00
            #  4.65 13.95
            #  6.21 18.64
            #  8.01 24.04
            # 10.05 30.15

        .. seealso::
            - https://docs.aws.amazon.com/general/latest/gr/api-retries.html
            - https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
        """
        max_interval = 600.0  # results in 3 to 10 minute delay
        delay = 1 + pow(tries * 0.6, 2)
        delay = min(max_interval, delay)
        return uniform(delay / 3, delay)
| {
"content_hash": "f34952de8950613b7f4ac19755378db0",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 103,
"avg_line_length": 33.9124513618677,
"alnum_prop": 0.5967529114795479,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "84ff4c4644ae9af9f568dafb583ca879d428f949",
"size": "18219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/amazon/aws/hooks/batch_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
} |
"""
A simple scribble program in Python which can draw on the canvas along mouse drags.
Author: Semih Onay
"""
import sys
from tkinter import *
# Last known mouse position; None means no drag is currently in progress.
x,y=None,None
def quit(event):
    """Exit the program; bound to the Quit button's left-click below."""
    sys.exit()
def drag(event):
    """Extend the scribble: draw a segment from the last recorded mouse
    position to the current one, then remember the current position.

    Bound to <B1-Motion>; the very first motion event of a drag only records
    the anchor point and draws nothing.
    """
    global x, y
    cur_x, cur_y = event.x, event.y
    if x is None:
        # New drag starting: remember where it began, nothing to draw yet.
        x, y = cur_x, cur_y
        return
    print("dragging", event)
    c.create_line(((x, y), (cur_x, cur_y)))
    x, y = cur_x, cur_y
def drag_end(event):
    """Forget the last mouse position when the button is released,
    so the next drag starts a fresh line."""
    global x, y
    x = y = None
root = Tk()
# The canvas receives the scribbles; mouse handlers are bound to it directly.
c=Canvas(root)
c.bind("<B1-Motion>",drag)
c.bind("<ButtonRelease-1>",drag_end)
c.pack()
# A Quit button wired to the quit() handler via left-click.
b=Button(root,text="Quit")
b.pack()
b.bind("<Button-1>",quit)
root.mainloop()
| {
"content_hash": "2214649d41fc9a3ccd52abfeffe56bb3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 83,
"avg_line_length": 19,
"alnum_prop": 0.6411483253588517,
"repo_name": "Semyonic/Study",
"id": "683e59c05aea89847faf90767741925609c96e39",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Sketch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10792"
},
{
"name": "C++",
"bytes": "5886"
},
{
"name": "Java",
"bytes": "3229"
},
{
"name": "Perl",
"bytes": "1336"
},
{
"name": "Python",
"bytes": "4230"
}
],
"symlink_target": ""
} |
"""Print every line of mbox-short.txt that contains 'From:' anywhere in it."""
import re

# Context manager closes the file deterministically (the original leaked the
# handle); print(line) with a single argument behaves the same on Python 2 and 3.
with open('mbox-short.txt') as hand:
    for line in hand:
        line = line.rstrip()
        if re.search('From:', line):
            print(line)
| {
"content_hash": "e904dc6deda976c094667c5b3cd67c6f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 34,
"avg_line_length": 19.714285714285715,
"alnum_prop": 0.5942028985507246,
"repo_name": "lastralab/Statistics",
"id": "965ecd2dc1feac19fb384cb442130f752b0dc217",
"size": "198",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Specialization/Dr. Chuck-s Code/re01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "89"
},
{
"name": "Python",
"bytes": "249488"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
"""check for new / old style related problems
"""
from logilab import astng
from pylint.interfaces import IASTNGChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
MSGS = {
'E1001': ('Use of __slots__ on an old style class',
'slots-on-old-class',
'Used when an old style class uses the __slots__ attribute.'),
'E1002': ('Use of super on an old style class',
'super-on-old-class',
'Used when an old style class uses the super builtin.'),
'E1003': ('Bad first argument %r given to super class',
'bad-super-call',
'Used when another argument than the current class is given as \
first argument of the super builtin.'),
'W1001': ('Use of "property" on an old style class',
'property-on-old-class',
'Used when PyLint detect the use of the builtin "property" \
on an old style class while this is relying on new style \
classes features'),
}
class NewStyleConflictChecker(BaseChecker):
    """checks for usage of new style capabilities on old style classes and
    other new/old styles conflicts problems
    * use of property, __slots__, super
    * "super" usage
    """

    __implements__ = (IASTNGChecker,)

    # configuration section name
    name = 'newstyle'
    # messages
    msgs = MSGS
    priority = -2
    # configuration options
    options = ()

    @check_messages('E1001')
    def visit_class(self, node):
        """check __slots__ usage
        """
        # __slots__ only has an effect on new-style classes (E1001).
        if '__slots__' in node and not node.newstyle:
            self.add_message('E1001', node=node)

    @check_messages('W1001')
    def visit_function(self, node):
        """check property usage"""
        # NOTE: "property" relies on new-style class machinery (W1001);
        # only flag plain name calls, not attribute accesses.
        parent = node.parent.frame()
        if (isinstance(parent, astng.Class) and
            not parent.newstyle and
            isinstance(node.func, astng.Name)):
            name = node.func.name
            if name == 'property':
                self.add_message('W1001', node=node)

    @check_messages('E1002', 'E1003')
    def visit_function(self, node):
        """check use of super"""
        # ignore actual functions or method within a new style class
        if not node.is_method():
            return
        klass = node.parent.frame()
        # Scan every call expression in the method body for super(...) usage.
        for stmt in node.nodes_of_class(astng.CallFunc):
            expr = stmt.func
            if not isinstance(expr, astng.Getattr):
                continue
            call = expr.expr
            # skip the test if using super
            if isinstance(call, astng.CallFunc) and \
               isinstance(call.func, astng.Name) and \
               call.func.name == 'super':
                if not klass.newstyle:
                    # super should not be used on an old style class
                    self.add_message('E1002', node=node)
                else:
                    # super first arg should be the class
                    try:
                        # .next() on the inference generator -- Python 2 idiom.
                        supcls = (call.args and call.args[0].infer().next()
                                  or None)
                    except astng.InferenceError:
                        continue
                    if klass is not supcls:
                        # Prefer the class name in the message when available.
                        supcls = getattr(supcls, 'name', supcls)
                        self.add_message('E1003', node=node, args=supcls)
def register(linter):
    """Required entry point: auto-register this checker with pylint.

    :param linter: the pylint linter instance the checker is attached to
    """
    linter.register_checker(NewStyleConflictChecker(linter))
| {
"content_hash": "44006933d8aadeeb626a4c1a3c5e23fc",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 37.91752577319588,
"alnum_prop": 0.5402392604676455,
"repo_name": "yongshengwang/hue",
"id": "edadad88d3f76ac644e378ece2e371755fb7df34",
"size": "4489",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build/env/lib/python2.7/site-packages/pylint-0.28.0-py2.7.egg/pylint/checkers/newstyle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2479183"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "1133541"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "28547"
},
{
"name": "HTML",
"bytes": "26230478"
},
{
"name": "Java",
"bytes": "133906"
},
{
"name": "JavaScript",
"bytes": "9757355"
},
{
"name": "Makefile",
"bytes": "94066"
},
{
"name": "Mako",
"bytes": "2185828"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "88056623"
},
{
"name": "Scala",
"bytes": "191428"
},
{
"name": "Shell",
"bytes": "59514"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "101931"
},
{
"name": "VimL",
"bytes": "1530"
},
{
"name": "XSLT",
"bytes": "357625"
}
],
"symlink_target": ""
} |
from .sub_resource import SubResource
class ApplicationGatewayRedirectConfiguration(SubResource):
    """Redirect configuration of an application gateway.

    Describes how the gateway redirects matching requests, either to another
    listener or to an external URL, and which routing rules, URL path maps
    and path rules reference this configuration.

    :param id: Resource ID.
    :type id: str
    :param redirect_type: Supported http redirection types - Permanent,
     Temporary, Found, SeeOther. Possible values include: 'Permanent',
     'Found', 'SeeOther', 'Temporary'
    :type redirect_type: str or
     ~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayRedirectType
    :param target_listener: Reference to a listener to redirect the request
     to.
    :type target_listener: ~azure.mgmt.network.v2017_11_01.models.SubResource
    :param target_url: Url to redirect the request to.
    :type target_url: str
    :param include_path: Include path in the redirected url.
    :type include_path: bool
    :param include_query_string: Include query string in the redirected url.
    :type include_query_string: bool
    :param request_routing_rules: Request routing specifying redirect
     configuration.
    :type request_routing_rules:
     list[~azure.mgmt.network.v2017_11_01.models.SubResource]
    :param url_path_maps: Url path maps specifying default redirect
     configuration.
    :type url_path_maps:
     list[~azure.mgmt.network.v2017_11_01.models.SubResource]
    :param path_rules: Path rules specifying redirect configuration.
    :type path_rules: list[~azure.mgmt.network.v2017_11_01.models.SubResource]
    :param name: Name of the resource that is unique within a resource group.
     This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    """
    # Maps constructor keyword arguments onto the REST payload keys used by
    # the msrest (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'redirect_type': {'key': 'properties.redirectType', 'type': 'str'},
        'target_listener': {'key': 'properties.targetListener', 'type': 'SubResource'},
        'target_url': {'key': 'properties.targetUrl', 'type': 'str'},
        'include_path': {'key': 'properties.includePath', 'type': 'bool'},
        'include_query_string': {'key': 'properties.includeQueryString', 'type': 'bool'},
        'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[SubResource]'},
        'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[SubResource]'},
        'path_rules': {'key': 'properties.pathRules', 'type': '[SubResource]'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(self, id=None, redirect_type=None, target_listener=None, target_url=None, include_path=None, include_query_string=None, request_routing_rules=None, url_path_maps=None, path_rules=None, name=None, etag=None, type=None):
        # ``id`` is owned by the SubResource base class.
        super(ApplicationGatewayRedirectConfiguration, self).__init__(id=id)
        # What to redirect to.
        self.redirect_type = redirect_type
        self.target_listener = target_listener
        self.target_url = target_url
        # How much of the original request is carried over.
        self.include_path = include_path
        self.include_query_string = include_query_string
        # Entities that reference this redirect configuration.
        self.request_routing_rules = request_routing_rules
        self.url_path_maps = url_path_maps
        self.path_rules = path_rules
        # Common resource metadata.
        self.name = name
        self.etag = etag
        self.type = type
| {
"content_hash": "cb17fa7519cc339d8a8a3470a3da0cba",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 235,
"avg_line_length": 48.785714285714285,
"alnum_prop": 0.6667642752562225,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "42ac21c6e15df6c71017064606b3f7199cba10f2",
"size": "3889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_redirect_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from copy import deepcopy
import numpy as np
class History(object):
    """Fixed-size ring buffer of transitions (s, a, r, s', d).

    ``append`` stores deep copies so later mutation of the caller's objects
    cannot corrupt the history; ``sample_output`` draws a random window of
    ``time_window`` consecutive entries.
    """
    def __init__(self, length_buffer, time_window=2):
        """
        :param length_buffer: total capacity of the ring buffer
        :param time_window: number of consecutive entries returned per sample
        """
        self.length_buffer = length_buffer
        self.time_window = time_window
        self.buffer = [None] * self.length_buffer
        self.input = 0          # next write position
        self.output = 0         # kept for interface compatibility (unused here)
        self.full = False       # True once the buffer has wrapped around
    def append(self, item):
        """Store a deep copy of *item*, overwriting the oldest entry when full."""
        self.buffer[self.input] = deepcopy(item)
        self.input += 1
        if self.input == self.length_buffer:
            # give brief note that buffer is full
            if not self.full:
                print("\t\t!!!Buffer is full, starting to overwrite entries!!!")
            self.full = True
            # wrap the write index back to the start; the index can never
            # exceed length_buffer, so the old ``> length_buffer`` guard
            # (dead code) has been removed
            self.input = 0
    def sample_output(self):
        """Return ``time_window`` consecutive buffered entries ending just
        before a uniformly sampled index.

        Raises ValueError (from numpy) if fewer than ``time_window + 1``
        entries have been buffered so far.
        """
        if self.full:
            # every slot holds valid data; any window is fair game
            low, high = 0, self.length_buffer
        else:
            # only slots [0, first None) are filled.  Requiring
            # max_index >= time_window keeps the slice away from the
            # unfilled tail; the original code allowed
            # max_index == time_window - 1, which wrapped around and
            # returned None entries (the old TODO).
            high = self.buffer.index(None)
            low = self.time_window
        max_index = np.random.randint(low=low, high=high)
        # don't mix entries where we add new entries and delete old ones
        while self.input <= max_index < (self.input + self.time_window):
            max_index = np.random.randint(low=low, high=high)
        window = []
        if max_index < self.time_window:
            # only reachable when the buffer is full: wrap around the end
            window += self.buffer[(max_index - self.time_window):]
            window += self.buffer[:max_index]
        else:
            window += self.buffer[(max_index - self.time_window):max_index]
        return window
| {
"content_hash": "ba24fe7ec5c5873c72bdb6cf31495959",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 91,
"avg_line_length": 38.53846153846154,
"alnum_prop": 0.5718562874251497,
"repo_name": "BerlinUnited/NaoTH",
"id": "ee5f0b6bf7889fa46f2f1c6cfe26e344899694db",
"size": "2004",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Utils/py/RL_ActionSelection/ring_buffer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "319"
},
{
"name": "C",
"bytes": "16295"
},
{
"name": "C++",
"bytes": "3831321"
},
{
"name": "CSS",
"bytes": "8839"
},
{
"name": "HTML",
"bytes": "21148"
},
{
"name": "Java",
"bytes": "1816793"
},
{
"name": "Jupyter Notebook",
"bytes": "8092"
},
{
"name": "Lua",
"bytes": "73794"
},
{
"name": "MATLAB",
"bytes": "141780"
},
{
"name": "Python",
"bytes": "1337382"
},
{
"name": "Shell",
"bytes": "60599"
}
],
"symlink_target": ""
} |
import uuid
import django.db.models.deletion
import jsonfield.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the HttpExchange table, which records
    # request/response pairs exchanged with an integration endpoint and
    # attaches each row to an arbitrary model via a generic foreign key
    # (content_type + object_id).
    # NOTE(review): applied migrations must stay stable - only comments are
    # added here.
    initial = True
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        migrations.CreateModel(
            name='HttpExchange',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('object_id', models.PositiveIntegerField()),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='Date')),
                ('request_headers', jsonfield.fields.JSONField(verbose_name='Request headers')),
                ('request_body', models.TextField(verbose_name='Request body')),
                ('response_headers', jsonfield.fields.JSONField(verbose_name='Request headers')),
                ('response_body', models.TextField(verbose_name='Response body')),
                ('status_code', models.IntegerField(default=200, verbose_name='Status code')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
            ],
            options={
                'ordering': ['-date'],
            },
        ),
    ]
| {
"content_hash": "25aae3b37359d663f1c0a06beb52dbc7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 128,
"avg_line_length": 38.794117647058826,
"alnum_prop": 0.599696739954511,
"repo_name": "rtfd/readthedocs.org",
"id": "c1ee6c0b7140b658d8ec88a606a1ccbab9e59967",
"size": "1392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/integrations/migrations/0001_add_http_exchange.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
"""
A collection of higher-level Twisted Web resources, suitable for use with any
existing ``IResource`` implementations.
`SpinneretResource` adapts an `ISpinneretResource` to `IResource`.
`ContentTypeNegotiator` will negotiate a resource based on the ``Accept``
header.
"""
from twisted.internet.defer import Deferred, maybeDeferred, succeed
from twisted.python.compat import nativeString
from twisted.python.urlpath import URLPath
from twisted.web import http
from twisted.web.error import UnsupportedMethod
from twisted.web.iweb import IRenderable
from twisted.web.resource import (
IResource, NoResource, Resource, _computeAllowedMethods)
from twisted.web.server import NOT_DONE_YET
from twisted.web.template import renderElement
from twisted.web.util import DeferredResource, Redirect
from txspinneret.interfaces import ISpinneretResource
from txspinneret.util import _parseAccept
def _renderResource(resource, request):
    """
    Dispatch *request* to the ``render_<METHOD>`` handler of *resource*.

    Mirrors `IResource.render <twisted:twisted.web.resource.IResource.render>`:
    when no handler exists for the request's method, ``UnsupportedMethod`` is
    raised carrying the methods the resource does support.
    """
    handlerName = 'render_' + nativeString(request.method)
    handler = getattr(resource, handlerName, None)
    if handler is not None:
        return handler(request)
    try:
        allowed = resource.allowedMethods
    except AttributeError:
        allowed = _computeAllowedMethods(resource)
    raise UnsupportedMethod(allowed)
class NotAcceptable(Resource):
    """
    Leaf resource that renders an empty body for ``406 Not Acceptable``.
    """
    isLeaf = True
    def render(self, request):
        # Only the status code matters; the body is intentionally empty.
        request.setResponseCode(http.NOT_ACCEPTABLE)
        return b''
class NotFound(NoResource):
    """
    Leaf resource that renders a page for ``404 Not Found``.
    """
    def __init__(self):
        # NoResource renders the standard Twisted error page; only the
        # message text is customized here.
        NoResource.__init__(self, b'Resource not found')
class _RenderableResource(Resource):
    """
    Wrap an `IRenderable` so it can be served as a leaf `IResource`.

    Rendering always answers ``200 OK`` and delegates body generation to
    ``twisted.web.template.renderElement``.
    """
    isLeaf = True
    def __init__(self, renderable, doctype=b'<!DOCTYPE html>'):
        Resource.__init__(self)
        self._element = renderable
        self._docType = doctype
    def render(self, request):
        request.setResponseCode(http.OK)
        return renderElement(request, self._element, self._docType)
class SpinneretResource(Resource):
    """
    Adapter from `ISpinneretResource` to `IResource`.
    """
    def __init__(self, wrappedResource):
        """
        :type wrappedResource: `ISpinneretResource`
        :param wrappedResource: Spinneret resource to wrap in an `IResource`.
        """
        self._wrappedResource = wrappedResource
        Resource.__init__(self)
    def _adaptToResource(self, result):
        """
        Adapt a result to `IResource`.
        Several adaptions are tried they are, in order: ``None``,
        `IRenderable <twisted:twisted.web.iweb.IRenderable>`, `IResource
        <twisted:twisted.web.resource.IResource>`, and `URLPath
        <twisted:twisted.python.urlpath.URLPath>`. Anything else is returned as
        is.
        A `URLPath <twisted:twisted.python.urlpath.URLPath>` is treated as
        a redirect.
        """
        if result is None:
            return NotFound()
        # Each adaptation call returns None on failure, so the chain falls
        # through to the next candidate.
        spinneretResource = ISpinneretResource(result, None)
        if spinneretResource is not None:
            return SpinneretResource(spinneretResource)
        renderable = IRenderable(result, None)
        if renderable is not None:
            return _RenderableResource(renderable)
        resource = IResource(result, None)
        if resource is not None:
            return resource
        if isinstance(result, URLPath):
            return Redirect(str(result))
        return result
    def getChildWithDefault(self, path, request):
        # Delegate child lookup to the wrapped resource's ``locateChild``
        # (when present), which may return a Deferred; its eventual result is
        # adapted to an IResource.
        def _setSegments(result):
            # ``locateChild`` returns (result, remaining segments); push the
            # unconsumed segments back onto the request before adaptation.
            result, segments = result
            request.postpath[:] = segments
            return result
        def _locateChild(request, segments):
            def _defaultLocateChild(request, segments):
                # No ``locateChild`` on the wrapped resource: 404 with no
                # remaining segments.
                return NotFound(), []
            locateChild = getattr(
                self._wrappedResource, 'locateChild', _defaultLocateChild)
            return locateChild(request, segments)
        d = maybeDeferred(
            _locateChild, request, request.prepath[-1:] + request.postpath)
        d.addCallback(_setSegments)
        d.addCallback(self._adaptToResource)
        return DeferredResource(d)
    def _handleRenderResult(self, request, result):
        """
        Handle the result from `IResource.render`.
        If the result is a `Deferred` then return `NOT_DONE_YET` and add
        a callback to write the result to the request when it arrives.
        """
        def _requestFinished(result, cancel):
            # Cancel the pending render Deferred if the client disconnects
            # before it fires.
            cancel()
            return result
        if not isinstance(result, Deferred):
            result = succeed(result)
        def _whenDone(result):
            # Adapted results that are themselves renderable get rendered;
            # anything else is written out as the body directly.
            render = getattr(result, 'render', lambda request: result)
            renderResult = render(request)
            if renderResult != NOT_DONE_YET:
                request.write(renderResult)
                request.finish()
            return result
        request.notifyFinish().addBoth(_requestFinished, result.cancel)
        result.addCallback(self._adaptToResource)
        result.addCallback(_whenDone)
        result.addErrback(request.processingFailed)
        return NOT_DONE_YET
    def render(self, request):
        # This is kind of terrible but we need `_RouterResource.render` to be
        # called to handle the null route. Finding a better way to achieve this
        # would be great.
        if hasattr(self._wrappedResource, 'render'):
            result = self._wrappedResource.render(request)
        else:
            result = _renderResource(self._wrappedResource, request)
        return self._handleRenderResult(request, result)
class ContentTypeNegotiator(Resource):
    """
    Negotiate an appropriate representation based on the ``Accept`` header.

    Rendering this resource picks the handler matching the client's accepted
    content types and renders it, optionally falling back to the first
    handler when nothing matches.
    """
    def __init__(self, handlers, fallback=False):
        """
        :type handlers: ``iterable`` of `INegotiableResource` and either
            `IResource` or `ISpinneretResource`.
        :param handlers: Iterable of negotiable resources, either
            `ISpinneretResource` or `IResource`, to use as handlers for
            negotiation.
        :type fallback: `bool`
        :param fallback: Fall back to the first handler in the case where
            negotiation fails?
        """
        Resource.__init__(self)
        self._handlers = list(handlers)
        self._fallback = fallback
        # Index handlers by each content type they accept; duplicates are a
        # configuration error.
        self._acceptHandlers = {}
        for candidate in self._handlers:
            for accepted in candidate.acceptTypes:
                if accepted in self._acceptHandlers:
                    raise ValueError(
                        'Duplicate handler for %r' % (accepted,))
                self._acceptHandlers[accepted] = candidate
    def _negotiateHandler(self, request):
        """
        Negotiate a handler based on the content types acceptable to the
        client.

        :rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`
        :return: Pair of a resource and the content type.
        """
        accept = _parseAccept(request.requestHeaders.getRawHeaders('Accept'))
        for contentType in accept.keys():
            match = self._acceptHandlers.get(contentType.lower())
            if match is not None:
                return match, match.contentType
        if self._fallback:
            chosen = self._handlers[0]
            return chosen, chosen.contentType
        return NotAcceptable(), None
    def render(self, request):
        resource, contentType = self._negotiateHandler(request)
        if contentType is not None:
            request.setHeader(b'Content-Type', contentType)
        # Spinneret resources need wrapping before Twisted can render them.
        wrapped = ISpinneretResource(resource, None)
        if wrapped is not None:
            resource = SpinneretResource(wrapped)
        return resource.render(request)
# Public API of this module.
__all__ = [
    'SpinneretResource', 'ContentTypeNegotiator', 'NotAcceptable', 'NotFound']
| {
"content_hash": "c6f73cb3ca6c7e25ab193863b43bdeea",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 79,
"avg_line_length": 32.36078431372549,
"alnum_prop": 0.6439650993698497,
"repo_name": "jonathanj/txspinneret",
"id": "273fd923cbae7b4092090434cca451db93066347",
"size": "8252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txspinneret/resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81409"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class AccountsConfig(AppConfig):
    """Django application configuration for the project's ``accounts`` app."""
    # The dotted path is filled in by cookiecutter when the project template
    # is rendered; do not edit the placeholder.
    name = "{{ cookiecutter.project_slug }}.accounts"
| {
"content_hash": "12e2b0384a8e05fb7a1715defe43c12a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 53,
"avg_line_length": 24.6,
"alnum_prop": 0.7479674796747967,
"repo_name": "r0x73/django-template",
"id": "d724c0ce1ecbe8505d1c653bd3a5df20eac2a32a",
"size": "123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/accounts/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "464"
},
{
"name": "Makefile",
"bytes": "414"
},
{
"name": "Python",
"bytes": "26603"
},
{
"name": "Shell",
"bytes": "516"
}
],
"symlink_target": ""
} |
from yambopy import *
import os
#
# by Henrique Miranda
#
def pack_files_in_folder(folder,save_folder=None,mask='',verbose=True):
    """
    Pack the output files in a folder to json files

    folder      -> root folder to walk looking for yambo output (o-*) files
    save_folder -> folder where the .json files are written (default: folder)
    mask        -> only directories whose path contains this string are packed
    verbose     -> NOTE(review): currently unused; confirm before removing
    """
    if not save_folder: save_folder = folder
    #pack the files in .json files
    for dirpath,dirnames,filenames in os.walk(folder):
        #check if the folder fits the mask
        if mask in dirpath:
            #check if there are some output files in the folder
            if ([ f for f in filenames if 'o-' in f ]):
                print dirpath
                y = YamboOut(dirpath,save_folder=save_folder)
                y.pack()
#
# by Alejandro Molina-Sanchez
#
def breaking_symmetries(efield1,efield2=[0,0,0],folder='.',RmTimeRev=True):
    """
    Breaks the symmetries for a given field.
    Second field used in circular polarized pump configuration
    RmTimeRev : Remove time symmetry is set True by default

    efield1 -> polarization vector of the first field (e.g. [1,0,0])
    efield2 -> polarization vector of the second field ([0,0,0] = none)
    folder  -> working directory; expects a ``database/SAVE`` folder to exist

    NOTE(review): ``efield2=[0,0,0]`` is a mutable default argument; it is
    never mutated here so it is currently harmless, but keep it in mind.
    """
    # Prepare a fresh working copy of the SAVE database and initialize yambo.
    os.system('mkdir -p %s'%folder)
    os.system('cp -r database/SAVE %s'%folder)
    os.system('cd %s; yambo'%folder)
    # Build the ypp input that breaks the symmetries for the given field(s).
    ypp = YamboIn('ypp_ph -y -V all',folder=folder,filename='ypp.in')
    ypp['Efield1'] = efield1 # First field polarization
    ypp['Efield2'] = efield2 # Second field polarization (circular pump)
    if RmTimeRev:
        ypp.arguments.append('RmTimeRev') # Remove Time Symmetry
    ypp.write('%s/ypp.in'%folder)
    # Run ypp to generate the symmetry-broken database in FixSymm,
    # re-initialize yambo there and move the result back into place.
    os.system('cd %s ; ypp_ph -F ypp.in'%folder )
    os.system('cd %s ; cd FixSymm; yambo '%folder )
    os.system('rm -r %s/SAVE'%folder)
    os.system('mv %s/FixSymm/SAVE %s/'%(folder,folder))
    os.system('rm -r %s/FixSymm'%folder)
#
# by Alexandre Morlet & Henrique Miranda
#
def analyse_gw(folder,var,bandc,kpointc,bandv,kpointv,pack,text,draw):
"""
Study the convergence of GW calculations by looking at the change in band-gap value.
The script reads from <folder> all results from <variable> calculations and display them.
Use the band and k-point options (or change default values) according to the size of your k-grid and
the location of the band extrema.
"""
from pylab import *
print 'Valence band: ',bandv,'conduction band: ',bandc
print 'K-point VB: ',kpointv, ' k-point CB: ',kpointc
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if pack:
print 'Packing ...'
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
# importing data from .json files in <folder>
print 'Importing data...'
data = YamboAnalyser(folder)
# extract data according to relevant variable
outvars = data.get_data(tags=(var,'reference'))
invars = data.get_inputfiles_tag(var)
tags = data.get_tags(tags=(var,'reference'))
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(invars.items(), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
if len(keys) == 0: raise ValueError('No files with this variable were found')
print 'Files detected:'
for key in keys:
print key
print 'Computing values...'
### Output
# Unit of the variable :
unit = invars[keys[0]]['variables'][var][1]
# The following variables are used to make the script compatible with both short and extended output
kpindex = tags[keys[0]].tolist().index('K-point')
bdindex = tags[keys[0]].tolist().index('Band')
e0index = tags[keys[0]].tolist().index('Eo')
gwindex = tags[keys[0]].tolist().index('E-Eo')
array = np.zeros((len(keys),2))
for i,key in enumerate(keys):
# input value
# GbndRnge and BndsRnX_ are special cases
if var.startswith('GbndRng') or var.startswith('BndsRnX'):
# format : [1, nband, ...]
array[i][0] = invars[key]['variables'][var][0][1]
else:
array[i][0] = invars[key]['variables'][var][0]
# Output value (gap energy)
# First the relevant lines are identified
valence=[]
conduction=[]
for j in range(len(outvars[key]+1)):
if outvars[key][j][kpindex]==kpointc and outvars[key][j][bdindex]==bandc:
conduction=outvars[key][j]
elif outvars[key][j][kpindex]==kpointv and outvars[key][j][bdindex]==bandv:
valence = outvars[key][j]
# Then the gap can be calculated
array[i][1] = conduction[e0index]+conduction[gwindex]-(valence[e0index]+valence[gwindex])
if text:
os.system('mkdir -p analyse_%s'%folder)
outname = './analyse_%s/%s_%s.dat'%(folder,folder,var)
header = var+' ('+str(unit)+'), gap'
np.savetxt(outname,array,delimiter='\t',header=header)
print 'Data saved to ',outname
if draw:
plt.plot(array[:,0],array[:,1],'o-')
plt.xlabel(var+' ('+unit+')')
plt.ylabel('E_gw = E_lda + \Delta E')
plt.savefig('%s.png'%var)
if 'DISPLAY' in os.environ:
plt.show()
print 'Done.'
#
# by Alexandre Morlet
#
def analyse_bse(folder,var,numbexc,intexc,degenexc,maxexc,pack,text,draw):
    """
    Using ypp, you can study the convergence of BSE calculations in 2 ways:
    Create a .png of all absorption spectra relevant to the variable you study
    Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity)
    The script reads from <folder> all results from <variable> calculations for processing.
    The resulting pictures and data files are saved in the ./analyse_<folder>/ folder.
    Arguments:
        folder -> Folder containing SAVE and convergence runs.
        var -> Variable tested (e.g. FFTGvecs)
        numbexc -> Number of excitons to read beyond threshold (default=2)
        intexc -> Minimum intensity for excitons to be considered bright (default=0.05)
        degenexc -> Energy threshold under which different peaks are merged (eV) (default=0.01)
        maxexc -> Energy threshold after which excitons are not read anymore (eV) (default=8.0)
        pack -> Skips packing o- files into .json files (default: True)
        text -> Skips writing the .dat file (default: True)
        draw -> Skips drawing (plotting) the abs spectra (default: True)
    Returns:
        excitons -> energies of the first few excitons as function of some variable
        spectras -> absorption spectra for each variable
    """
    # Packing results (o-* files) from the calculations into yambopy-friendly .json files
    if pack: # True by default, False if -np used
        print 'Packing ...'
        pack_files_in_folder(folder,mask=var)
        pack_files_in_folder(folder,mask='reference')
    # importing data from .json files in <folder>
    print 'Importing data...'
    data = YamboAnalyser(folder)
    # extract data according to relevant var
    invars = data.get_inputfiles_tag(var)
    # Get only files related to the convergence study of the variable,
    # ordered to have a smooth plot
    keys=[]
    sorted_invars = sorted(invars.items(), key=operator.itemgetter(1))
    for i in range(0,len(sorted_invars)):
        key=sorted_invars[i][0]
        if key.startswith(var) or key=='reference.json':
            keys.append(key)
    if len(keys) == 0: raise ValueError('No files with this variable were found')
    print 'Files detected:'
    for key in keys:
        print key
    # unit of the input value
    unit = invars[keys[0]]['variables'][var][1]
    ######################
    # Output-file filename
    ######################
    os.system('mkdir -p analyse_%s'%folder)
    outname = './analyse_%s/%s_%s'%(folder,folder,var)
    # Array that will contain the output
    excitons = []
    spectras = []
    # Loop over all calculations
    for key in keys:
        jobname=key.replace('.json','')
        print jobname
        # input value
        # GbndRnge-style variables come as a list; take the upper bound
        v = invars[key]['variables'][var][0]
        if type(v) == list:
            inp = v[1]
        else:
            inp = v
        print 'Preparing JSON file. Calling ypp if necessary.'
        ### Creating the 'absorptionspectra.json' file
        # It will contain the exciton energies
        # NOTE(review): ``y`` is not used afterwards - presumably the
        # YamboOut constructor has needed side effects; confirm before
        # removing.
        y = YamboOut(folder=folder,save_folder=folder)
        # Args : name of job, SAVE folder path, folder where job was run path
        a = YamboBSEAbsorptionSpectra(jobname,path=folder)
        # Get excitons values (runs ypp once)
        a.get_excitons(min_intensity=intexc,max_energy=maxexc,Degen_Step=degenexc)
        # Write .json file with spectra and eigenenergies
        a.write_json(filename=outname)
        ### Loading data from .json file
        f = open(outname+'.json')
        data = json.load(f)
        f.close()
        ### Plotting the absorption spectra
        spectras.append({'x': data['E/ev[1]'],
                         'y': data['EPS-Im[2]'],
                         'label': jobname})
        ### BSE spectra
        ### Axes : lines for exciton energies (disabled, would make a mess)
        #for n,exciton in enumerate(data['excitons']):
        #    plt.axvline(exciton['energy'])
        ### Creating array with exciton values (according to settings)
        l = [inp]
        for n,exciton in enumerate(data['excitons']):
            if n <= numbexc-1:
                l.append(exciton['energy'])
        excitons.append(l)
    if text:
        header = 'Columns : '+var+' (in '+unit+') and "bright" excitons eigenenergies in order.'
        ## Excitons energies
        #output on the screen
        print header
        for exc in excitons:
            x = exc[0]
            e = exc[1:]
            print "%8.4lf "%x+("%8.4lf"*len(e))%tuple(e)
        #save file
        filename = outname+'_excitons.dat'
        np.savetxt(filename,excitons,header=header)
        print filename
        ## Spectra
        filename = outname+'_spectra.dat'
        f = open(filename,'w')
        for spectra in spectras:
            label = spectra['label']
            f.write('#%s\n'%label)
            for x,y in zip(spectra['x'],spectra['y']):
                f.write("%12.8e %12.8e\n"%(x,y))
            f.write('\n\n')
        f.close()
        print filename
    else:
        print '-nt flag : no text produced.'
    if draw:
        ## Exciton energy plots
        filename = outname+'_excitons.png'
        excitons = np.array(excitons)
        labels = [spectra['label'] for spectra in spectras]
        fig = plt.figure(figsize=(6,5))
        matplotlib.rcParams.update({'font.size': 15})
        plt.ylabel('1st exciton energy (eV)')
        plt.xticks(excitons[:,0],labels)
        plt.plot(excitons[:,0],excitons[:,1])
        plt.tight_layout()
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        if 'DISPLAY' in os.environ:
            plt.show()
        print filename
        ## Spectra plots
        filename = outname+'_spectra.png'
        fig = plt.figure(figsize=(6,5))
        matplotlib.rcParams.update({'font.size': 15})
        for spectra in spectras:
            plt.plot(spectra['x'],spectra['y'],label=spectra['label'])
        plt.xlabel('$\omega$ (eV)')
        plt.ylabel('Im($\epsilon_M$)')
        plt.legend(frameon=False)
        plt.tight_layout()
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        if 'DISPLAY' in os.environ:
            plt.show()
        print filename
    else:
        print '-nd flag : no plot produced.'
    print 'Done.'
    return excitons, spectras
#
# by Fulvio Paleari & Henrique Miranda
#
def merge_qp(output,files,verbose=False):
    """
    Merge several quasiparticle netCDF databases into a single one.

    output  -> filename of the merged netCDF database to create
    files   -> list of open file objects pointing to the databases to merge
    verbose -> if True, print every description string of each input file
    """
    #read all the files and display main info in each of them
    print "=========input========="
    filenames = [ f.name for f in files]
    datasets = [ Dataset(filename) for filename in filenames]
    QP_table, QP_kpts, QP_E_E0_Z = [], [], []
    for d,filename in zip(datasets,filenames):
        _, nkpoints, nqps, _, nstrings = map(int,d['PARS'][:])
        print "filename: ", filename
        if verbose:
            print "description:"
            for i in xrange(1,nstrings+1):
                print ''.join(d['DESC_strings_%05d'%i][0])
        else:
            print "description:", ''.join(d['DESC_strings_%05d'%(nstrings)][0])
        print
        QP_table.append( d['QP_table'][:].T )
        QP_kpts.append( d['QP_kpts'][:].T )
        QP_E_E0_Z.append( d['QP_E_Eo_Z'][:] )
    # create the QP_table
    QP_table_save = np.vstack(QP_table)
    # create the kpoints table
    #create a list with the bigger size of QP_table
    nkpoints = int(max(QP_table_save[:,2]))
    QP_kpts_save = np.zeros([nkpoints,3])
    #iterate over the QP's and store the corresponding kpoint
    for qp_file,kpts in zip(QP_table,QP_kpts):
        #iterate over the kpoints and save the coordinates on the list
        for qp in qp_file:
            n1,n2,nk = map(int,qp)
            QP_kpts_save[nk-1] = kpts[nk-1]
    # create the QPs energies table
    QP_E_E0_Z_save = np.concatenate(QP_E_E0_Z,axis=1)
    #create reference file from one of the files
    netcdf_format = datasets[0].data_model
    fin = datasets[0]
    fout = Dataset(output,'w',format=netcdf_format)
    variables_update = ['QP_table', 'QP_kpts', 'QP_E_Eo_Z']
    variables_save = [QP_table_save.T, QP_kpts_save.T, QP_E_E0_Z_save]
    variables_dict = dict(zip(variables_update,variables_save))
    PARS_save = fin['PARS'][:]
    PARS_save[1:3] = nkpoints,len(QP_table_save)
    #create the description string
    kmin,kmax = np.amin(QP_table_save[:,2]),np.amax(QP_table_save[:,2])
    bmin,bmax = np.amin(QP_table_save[:,1]),np.amax(QP_table_save[:,1])
    description = "QP @ K %03d - %03d : b %03d - %03d"%(kmin,kmax,bmin,bmax)
    description_save = np.array([i for i in " %s"%description])
    #output data
    print "========output========="
    print "filename: ", output
    print "description: ", description
    #copy dimensions
    for dname, the_dim in fin.dimensions.iteritems():
        fout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None)
    #get dimensions
    def dimensions(array):
        return tuple([ 'D_%010d'%d for d in array.shape ])
    #create missing dimensions
    for v in variables_save:
        for dname,d in zip( dimensions(v),v.shape ):
            if dname not in fout.dimensions.keys():
                fout.createDimension(dname, d)
    #copy variables
    # NOTE(review): ``nstrings`` below is the value read from the *last*
    # input file in the loop above - presumably all inputs share the same
    # description layout; confirm.
    for v_name, varin in fin.variables.iteritems():
        if v_name in variables_update:
            #get the variable
            merged = variables_dict[v_name]
            # create the variable
            outVar = fout.createVariable(v_name, varin.datatype, dimensions(merged))
            # Copy variable attributes
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            #save outvar
            outVar[:] = merged
        else:
            # create the variable
            outVar = fout.createVariable(v_name, varin.datatype, varin.dimensions)
            # Copy variable attributes
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            if v_name=='PARS':
                outVar[:] = PARS_save[:]
            elif v_name=='DESC_strings_%05d'%(nstrings):
                outVar[:] = varin[:]
                outVar[:,:len(description_save)] = description_save.T
            else:
                outVar[:] = varin[:]
    fout.close()
#
# by Henrique Miranda
#
def plot_excitons(filename,cut=0.2,size=20):
    """
    Plot the absorption spectrum and one weight map per exciton from a .json
    file produced by YamboBSEAbsorptionSpectra.

    filename -> .json file with the spectra and the exciton weights
    cut      -> half-width of each exciton subplot (reciprocal-space units)
    size     -> marker size used in the exciton scatter plots
    """
    from math import ceil, sqrt
    def get_var(dictionary,variables):
        """
        To have compatibility with different version of yambo
        We provide a list of different possible tags
        """
        for var in variables:
            if var in dictionary:
                return dictionary[var]
        raise ValueError( 'Could not find the variables %s in the output file'%str(variables) )
    #
    # read file
    #
    f = open(filename)
    data = json.load(f)
    f.close()
    #
    # plot the absorption spectra
    #
    nexcitons = len(data['excitons'])
    print "nexitons", nexcitons
    plt.plot(get_var(data,['E/ev','E/ev[1]']), get_var(data,['EPS-Im[2]' ]),label='BSE',lw=2)
    plt.plot(get_var(data,['E/ev','E/ev[1]']), get_var(data,['EPSo-Im[4]']),label='IP',lw=2)
    # mark each exciton energy on the spectrum
    for n,exciton in enumerate(data['excitons']):
        plt.axvline(exciton['energy'])
    plt.xlabel('$\\omega$ (eV)')
    plt.ylabel('Intensity arb. units')
    plt.legend(frameon=False)
    plt.draw()
    #
    # plot excitons
    #
    #dimensions of the subplot grid (nx columns x ny rows)
    nx = int(ceil(sqrt(nexcitons)))
    ny = int(ceil(nexcitons*1.0/nx))
    print "cols:",nx
    print "rows:",ny
    cmap = plt.get_cmap("gist_heat_r")
    fig = plt.figure(figsize=(nx*3,ny*3))
    # one subplot per exciton, ordered by increasing energy
    sorted_excitons = sorted(data['excitons'],key=lambda x: x['energy'])
    for n,exciton in enumerate(sorted_excitons):
        #get data
        w = np.array(exciton['weights'])
        qpt = np.array(exciton['qpts'])
        #plot
        ax = plt.subplot(ny,nx,n+1)
        ax.scatter(qpt[:,0], qpt[:,1], s=size, c=w, marker='H', cmap=cmap, lw=0, label="%5.2lf (eV)"%exciton['energy'])
        ax.text(-cut*.9,-cut*.9,"%5.2lf (eV)"%exciton['energy'])
        # axis
        plt.xlim([-cut,cut])
        plt.ylim([-cut,cut])
        ax.yaxis.set_major_locator(plt.NullLocator())
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.set_aspect('equal')
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.01, hspace=0.01)
    #remove extension from file
    figure_filename = os.path.splitext(filename)[0]
    plt.savefig('%s.png'%figure_filename)
    if 'DISPLAY' in os.environ:
        plt.show()
| {
"content_hash": "171e7df76e83f5a801378bbffaaac8ed",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 119,
"avg_line_length": 34.67704280155642,
"alnum_prop": 0.5966674147217235,
"repo_name": "alexmoratalla/yambo-py",
"id": "56e0e2b861245c729004cc30bc37c435b52e546a",
"size": "17998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yambopy/recipes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "169"
},
{
"name": "Python",
"bytes": "131970"
},
{
"name": "Shell",
"bytes": "661"
}
],
"symlink_target": ""
} |
def decorate(func):
    """Wrap *func* so every call is announced on stdout before delegating.

    Fixes two defects in the original:
    - the wrapper accepted positional arguments only (``*args``); it now also
      forwards keyword arguments, so any callable can be decorated;
    - ``functools.wraps`` preserves the wrapped function's name/docstring.

    Returns the wrapper callable; the return value of *func* is passed through.
    """
    import functools  # local import keeps the module's top-level imports unchanged

    print("in decorate function, decorating", func.__name__)

    @functools.wraps(func)
    def wrapper_func(*args, **kwargs):
        print("Executing", func.__name__)
        return func(*args, **kwargs)
    return wrapper_func
def myfunction(parameter):
    """Print *parameter*; stand-in target used to demonstrate manual decoration."""
    print(parameter)
if __name__ == "__main__":
    # Manual decoration: equivalent to putting @decorate above the def.
    myfunction = decorate(myfunction)
    # Shows the wrapper object now bound to the name `myfunction`.
    print(myfunction)
    myfunction("hello")
    # NOTE(review): `exit` is the site-module convenience; sys.exit(0) is the
    # conventional form in scripts.
    exit(0)
| {
"content_hash": "e90fd7e310d5724da29010fe48a2ff18",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 60,
"avg_line_length": 21.72222222222222,
"alnum_prop": 0.6035805626598465,
"repo_name": "mishka28/NYU-Python",
"id": "98d1c83051d36ce8778953b7e611a5705d51b0a1",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "programming_with_pythong_class2/session_2/intro_decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "206"
},
{
"name": "HTML",
"bytes": "43267"
},
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "100728"
},
{
"name": "Shell",
"bytes": "7729"
},
{
"name": "Vim script",
"bytes": "719"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Attachment.size`` to a blank-able FloatField defaulting to 0."""

    dependencies = [
        ('discussion', '0009_auto_20160623_1800'),
    ]

    operations = [
        # Allow the size field to be omitted in forms; stored as float, default 0.
        migrations.AlterField(
            model_name='attachment',
            name='size',
            field=models.FloatField(default=0, blank=True),
        ),
    ]
| {
"content_hash": "0c83c28d1d7d3ea196e1599419b53d0e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 59,
"avg_line_length": 21.77777777777778,
"alnum_prop": 0.5994897959183674,
"repo_name": "BuildmLearn/University-Campus-Portal-UCP",
"id": "93c24982bb79c17f7f8acf3ee0d167aca85172a6",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UCP/discussion/migrations/0010_auto_20160623_1841.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91894"
},
{
"name": "HTML",
"bytes": "99158"
},
{
"name": "JavaScript",
"bytes": "1069551"
},
{
"name": "Python",
"bytes": "137868"
}
],
"symlink_target": ""
} |
"""Test some model methods"""
import pytest
from tracked_model.defs import REQUEST_CACHE_FIELD
from tracked_model.models import RequestInfo, History
from tests.models import BasicModel
pytestmark = pytest.mark.django_db
def test_history_str_and_get_object_current_instance():
    """Both history rows resolve to the live object; str() names the model."""
    instance = BasicModel.objects.create(some_num=42, some_txt='Cheese')
    created_entry = instance.tracked_model_history().latest()
    instance.some_txt = 'Spam'
    instance.save()
    changed_entry = instance.tracked_model_history().latest()
    assert created_entry != changed_entry
    # Two distinct history rows still point at the same current DB object.
    assert created_entry.get_current_object() == changed_entry.get_current_object()
    assert 'BasicModel' in str(created_entry)
    instance.delete()
    with pytest.raises(BasicModel.DoesNotExist):
        created_entry.get_current_object()
def test_request_info_cached(rf):
    """``RequestInfo`` is created once per request and cached on the request."""
    request = rf.get('/')
    cached = RequestInfo.create_or_get_from_request(request)
    assert RequestInfo.create_or_get_from_request(request) == cached
    # Removing the cache attribute forces a fresh record on the next call.
    delattr(request, REQUEST_CACHE_FIELD)
    fresh = RequestInfo.create_or_get_from_request(request)
    assert fresh != cached
    assert RequestInfo.create_or_get_from_request(request) == fresh
def test_materialize():
    """``History.materialize`` restores every saved revision of an object."""
    first, second, third = 'spam', 'ham', 'egg'
    obj = BasicModel.objects.create(some_num=42, some_txt=first)
    saved_pk = obj.pk
    revision_one = obj.tracked_model_history().latest()
    obj.some_txt = second
    obj.save()
    revision_two = obj.tracked_model_history().latest()
    obj.some_txt = third
    obj.save()
    revision_three = obj.tracked_model_history().latest()
    assert revision_three.materialize().some_txt == third
    assert revision_two.materialize().some_txt == second
    assert revision_one.materialize().some_txt == first
    obj.delete()
    # History outlives the row and is still addressable by model name + pk.
    final_revision = History.objects.filter(
        model_name='BasicModel', table_id=saved_pk).latest()
    assert final_revision.materialize().some_txt == third
    with pytest.raises(BasicModel.DoesNotExist):
        final_revision.get_current_object()
    assert BasicModel.objects.count() == 0
    # A materialized snapshot can be saved back to resurrect the row.
    revision_one.materialize().save()
    assert BasicModel.objects.count() == 1
    assert BasicModel.objects.first().some_txt == first
| {
"content_hash": "b4b9c06075373df8cbd6d102f5155efb",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 69,
"avg_line_length": 29.71951219512195,
"alnum_prop": 0.6836274107509233,
"repo_name": "ojake/django-tracked-model",
"id": "ce42613f4d4b9224fd52b9831e73a64ddce86f78",
"size": "2437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "24775"
}
],
"symlink_target": ""
} |
"""
Create plugin manifest file will proper WordPress headings.
"""
import os, datetime
def create_manifest( config ):
    """
    Create the plugin manifest file with proper WordPress headers.

    Writes ``<folder_name>/<folder_name>.php`` containing the WordPress
    plugin header comment, then delegates to write_disclaimer() and
    add_plugin_manifest_template() for the rest of the boilerplate.

    config -- dict with 'configuration' (folder/prefix settings) and
              'plugin' (name, uri, version, authors, ...) sub-dicts.

    Fixes: the handle is now managed with ``with`` so it is closed even if a
    write raises; this also removes the stray ``f.close();`` — the helper
    add_plugin_manifest_template() already closes the file.
    """
    cf = config['configuration']
    pl = config['plugin']
    with open(cf['folder_name'] + "/" + cf['folder_name'] + ".php", 'w') as f:
        f.write("<?php\n/*\n")
        f.write("Plugin Name: " + pl['name'] + "\n")
        f.write("Plugin URI: " + pl['uri'] + "\n")
        f.write("Description: " + pl['short_description'] + "\n")
        f.write("Version: " + str(pl['version']) + "\n")
        f.write("Author: " + pl['authors'] + "\n")
        f.write("Author URI: " + pl['author_uri'] + "\n")
        f.write("License: " + pl['license'] + "\n")
        f.write("*/\n?>\n")
        write_disclaimer(f, config)
        # Closes f internally; exiting the with-block is then a no-op close.
        add_plugin_manifest_template(f, config)
def write_disclaimer(f, config):
    """Write a PHP comment block with the copyright and no-warranty notice to *f*."""
    # cf is unused here but mirrors the lookup pattern of the sibling writers.
    cf = config['configuration']
    pl = config['plugin']
    now = datetime.datetime.now()
    # Copyright year is the current year at generation time.
    y = str(now.year)
    f.write("<?php\n")
    f.write("/*\n")
    f.write("	Copyright " + y + " " + pl['authors'] + " (" + pl['author_emails'] + ")\n")
    f.write("	This program is distributed in the hope that it will be useful,\n")
    f.write("	but WITHOUT ANY WARRANTY; without even the implied warranty of\n")
    f.write("	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n")
    f.write("*/\n")
    f.write("?>\n")
def add_plugin_manifest_template(f, config):
    """
    Write the plugin template with standard functions used by most WordPress plugins.

    Emits the PHP bootstrap: path/URL/route constants, the require_once list,
    optional widget includes, and the CSS/JS enqueue functions.  Closes *f*.
    """
    cf = config['configuration']
    pl = config['plugin']
    f.write("<?php\n")
    f.write("defined('WP_PLUGIN_URL') or die('Restricted access');\n\n")
    f.write("global $wpdb;\n")
    # Constants are namespaced with the plugin's unique prefix, e.g. FOO_PATH.
    ucp = cf['unique_constant_prefix']
    f.write("define('" + ucp + "PATH', ABSPATH.PLUGINDIR.'/"+ cf['folder_name']+"/');\n")
    f.write("define('" + ucp + "URL', WP_PLUGIN_URL.'/"+ cf['folder_name']+"/');\n")
    f.write("define('" + ucp + "ROUTE', get_bloginfo('url').'/?"+ cf['unique_function_prefix']+"routing=');\n")
    f.write("require_once(ABSPATH.'wp-admin/includes/upgrade.php');\n")
    f.write('require_once("lib/db_setup.php");\n')
    f.write('require_once("lib/functions.php");\n')
    f.write('require_once("admin/functions.php");\n')
    if "widgets" in config:
        # One require_once trio per declared widget.
        include_widgets(f, config['widgets'])
    f.write("\n\n\n\n")
    f.write( css_include(config) )
    f.write( js_include(config) )
    f.write("?>")
    # NOTE(review): closes the handle the caller passed in; create_manifest
    # closes it again afterwards (a no-op on an already-closed file).
    f.close()
def include_widgets(f, widgets):
    """Emit the require_once lines (class, view, admin form) for each widget."""
    for widget in widgets:
        cls = widget['unique_class_name']
        f.write('require_once("lib/class.%s.php");\n' % cls)
        f.write('require_once("views/view.%s.php");\n' % cls)
        f.write('require_once("admin/%s/form.%s.php");\n' % (cls, cls))
def css_include(config):
    """Return PHP that registers/enqueues the plugin's frontend stylesheet."""
    cf = config['configuration']
    # pl is unused here but mirrors the lookup pattern of the sibling builders.
    pl = config['plugin']
    fn = cf["folder_name"]
    ufp = cf['unique_function_prefix']
    ucp = cf['unique_constant_prefix']
    s = "/**\n * Register and enqueue frontend CSS\n */\n"
    s += "function " + ufp + "stylesheets() {\n"
    # Frontend-only: skip enqueueing in the admin dashboard.
    s += "	if(!is_admin()){\n"
    s += "		wp_enqueue_style('" + fn + "-style', " + ucp + "URL.'assets/css/" + fn + ".css');\n"
    s += "	}\n"
    s += "}add_action('wp_print_styles', '" + ufp + "stylesheets');\n\n\n"
    return s
def js_include(config):
    """Return PHP that registers/enqueues frontend and admin JavaScript."""
    cf = config['configuration']
    # pl is unused here but mirrors the lookup pattern of the sibling builders.
    pl = config['plugin']
    fn = cf["folder_name"]
    ufp = cf['unique_function_prefix']
    ucp = cf['unique_constant_prefix']
    # Frontend scripts: jQuery plus the plugin's own JS (one extra per widget).
    s = "/**\n * Register and enqueue frontend JavaScript\n */\n"
    s += "function " + ufp + "js() {\n"
    s += "	if(!is_admin()){\n"
    s += "		wp_enqueue_script('jquery');\n"
    s += "		wp_enqueue_script('" + fn + "-js', " + ucp + "URL.'assets/js/" + fn + ".js');\n"
    if "widgets" in config:
        for w in config['widgets']:
            s += "		wp_enqueue_script('" + w['unique_class_name'] + "-js', " + ucp + "URL.'assets/js/" + w['unique_class_name'] + ".js');\n"
    s += "	}\n"
    s += "}add_action('wp_enqueue_scripts', '" + ufp + "js');\n\n\n"
    # Admin scripts: media uploader + thickbox + the plugin's admin JS.
    s += "/**\n * Register and enqueue admin JavaScript\n */\n"
    s += "function " + ufp + "admin_js() {\n"
    s += "	wp_enqueue_script('jquery');\n"
    s += "	wp_enqueue_media();\n"
    s += "	wp_enqueue_style('thickbox');\n"
    s += "	wp_enqueue_script('media-upload');\n"
    s += "	wp_enqueue_script('thickbox');\n"
    s += "	wp_enqueue_script('" + fn + "-admin-js', " + ucp + "URL.'assets/js/" + fn + "-admin.js');\n"
    if "widgets" in config:
        for w in config['widgets']:
            s += "	wp_enqueue_script('" + w['unique_class_name'] + "-admin-js', " + ucp + "URL.'assets/js/" + w['unique_class_name'] + "-admin.js');\n"
    s += "}add_action('admin_enqueue_scripts', '" + ufp + "admin_js');\n\n\n"
    return s
| {
"content_hash": "9c5a9bc985dce283d60482d7351f35d3",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 146,
"avg_line_length": 34.992537313432834,
"alnum_prop": 0.5738963531669866,
"repo_name": "richardroyal/wp-plugin-generator",
"id": "353bd41c161940fbff599dfd3937fd0f4b40ed48",
"size": "4689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wp_plugin_generator/write_wp_manifest.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "18859"
}
],
"symlink_target": ""
} |
"""Utilities for identifying local IP addresses."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import re
import socket
from subprocess import Popen, PIPE
from warnings import warn
# Module-level caches, populated lazily by _load_ips() and read through the
# @_requires_ips accessor functions below.
LOCAL_IPS = []
PUBLIC_IPS = []
# First loopback address discovered; stays '' until discovery has run.
LOCALHOST = ''
def _uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
maintaining the order in which they first appear.
From ipython_genutils.data
"""
seen = set()
return [x for x in elems if x not in seen and not seen.add(x)]
def _get_output(cmd):
"""Get output of a command, raising IOError if it fails"""
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode:
raise IOError("Failed to run %s: %s" % (cmd, stderr.decode('utf8', 'replace')))
return stdout.decode('utf8', 'replace')
def _only_once(f):
"""decorator to only run a function once"""
f.called = False
def wrapped(**kwargs):
if f.called:
return
ret = f(**kwargs)
f.called = True
return ret
return wrapped
def _requires_ips(f):
    """decorator to ensure load_ips has been run before f"""
    def ips_loaded(*args, **kwargs):
        # No-op after the first call thanks to @_only_once on _load_ips.
        _load_ips()
        return f(*args, **kwargs)
    return ips_loaded
# subprocess-parsing ip finders
class NoIPAddresses(Exception):
    """Raised when a subprocess-based IP finder yields no addresses at all."""
def _populate_from_list(addrs):
    """populate local and public IPs from flat list of all IPs

    Splits *addrs* into loopback vs. public addresses, sets the module
    globals LOCALHOST, LOCAL_IPS and PUBLIC_IPS, and appends the
    all-interface aliases '0.0.0.0' and ''.  Raises NoIPAddresses when
    *addrs* is empty so callers can fall back to another discovery method.
    """
    if not addrs:
        raise NoIPAddresses

    global LOCALHOST
    public_ips = []
    local_ips = []

    for ip in addrs:
        local_ips.append(ip)
        if not ip.startswith('127.'):
            public_ips.append(ip)
        elif not LOCALHOST:
            # first loopback address seen becomes LOCALHOST
            LOCALHOST = ip

    if not LOCALHOST:
        # no loopback address in the list: assume the conventional default
        LOCALHOST = '127.0.0.1'
        local_ips.insert(0, LOCALHOST)

    local_ips.extend(['0.0.0.0', ''])

    # Slice-assign so existing references to these module-level lists stay valid.
    LOCAL_IPS[:] = _uniq_stable(local_ips)
    PUBLIC_IPS[:] = _uniq_stable(public_ips)
_ifconfig_ipv4_pat = re.compile(r'inet\b.*?(\d+\.\d+\.\d+\.\d+)', re.IGNORECASE)

def _load_ips_ifconfig():
    """load ip addresses from `ifconfig` output (posix)"""
    try:
        raw = _get_output('ifconfig')
    except (IOError, OSError):
        # ifconfig usually lives in /sbin, which is not on everyone's PATH
        raw = _get_output('/sbin/ifconfig')

    matches = (_ifconfig_ipv4_pat.match(entry.strip()) for entry in raw.splitlines())
    _populate_from_list([m.group(1) for m in matches if m])
def _load_ips_ip():
    """load ip addresses from `ip addr` output (Linux)"""
    raw = _get_output(['ip', '-f', 'inet', 'addr'])
    found = []
    for entry in raw.splitlines():
        tokens = entry.lower().split()
        if len(tokens) >= 2 and tokens[0] == 'inet':
            # token looks like "10.0.0.5/24": keep only the address part
            found.append(tokens[1].split('/')[0])
    _populate_from_list(found)
_ipconfig_ipv4_pat = re.compile(r'ipv4.*?(\d+\.\d+\.\d+\.\d+)$', re.IGNORECASE)

def _load_ips_ipconfig():
    """load ip addresses from `ipconfig` output (Windows)"""
    raw = _get_output('ipconfig')
    matches = (_ipconfig_ipv4_pat.match(entry.strip()) for entry in raw.splitlines())
    _populate_from_list([m.group(1) for m in matches if m])
def _load_ips_netifaces():
    """load ip addresses with netifaces

    Preferred discovery path: walks every interface via the third-party
    ``netifaces`` package and fills LOCALHOST / LOCAL_IPS / PUBLIC_IPS.
    Raises ImportError when netifaces is unavailable (handled by _load_ips).
    """
    import netifaces
    global LOCALHOST
    local_ips = []
    public_ips = []

    # list of iface names, 'lo0', 'eth0', etc.
    for iface in netifaces.interfaces():
        # list of ipv4 addrinfo dicts
        ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
        for entry in ipv4s:
            addr = entry.get('addr')
            if not addr:
                continue
            if not (iface.startswith('lo') or addr.startswith('127.')):
                public_ips.append(addr)
            elif not LOCALHOST:
                # first loopback address found becomes LOCALHOST
                LOCALHOST = addr
            local_ips.append(addr)

    if not LOCALHOST:
        # we never found a loopback interface (can this ever happen?), assume common default
        LOCALHOST = '127.0.0.1'
        local_ips.insert(0, LOCALHOST)

    local_ips.extend(['0.0.0.0', ''])

    # Slice-assign so existing references to the module lists stay valid.
    LOCAL_IPS[:] = _uniq_stable(local_ips)
    PUBLIC_IPS[:] = _uniq_stable(public_ips)
def _load_ips_gethostbyname():
    """load ip addresses with socket.gethostbyname_ex

    This can be slow.
    """
    global LOCALHOST
    try:
        LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2]
    except socket.error:
        # assume common default
        LOCAL_IPS[:] = ['127.0.0.1']

    try:
        hostname = socket.gethostname()
        PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2]
        # try hostname.local, in case hostname has been short-circuited to loopback
        if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS):
            PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2]
    except socket.error:
        # resolution failed: leave whatever PUBLIC_IPS we already have
        pass
    finally:
        PUBLIC_IPS[:] = _uniq_stable(PUBLIC_IPS)
        LOCAL_IPS.extend(PUBLIC_IPS)

    # include all-interface aliases: 0.0.0.0 and ''
    LOCAL_IPS.extend(['0.0.0.0', ''])

    LOCAL_IPS[:] = _uniq_stable(LOCAL_IPS)

    # First local address doubles as LOCALHOST in this fallback path.
    LOCALHOST = LOCAL_IPS[0]
def _load_ips_dumb():
    """Last-resort fallback: hard-code loopback-only defaults."""
    global LOCALHOST
    LOCALHOST = '127.0.0.1'
    PUBLIC_IPS[:] = []
    LOCAL_IPS[:] = [LOCALHOST, '0.0.0.0', '']
@_only_once
def _load_ips(suppress_exceptions=True):
    """load the IPs that point to this machine

    This function will only ever be called once.

    It will use netifaces to do it quickly if available.
    Then it will fallback on parsing the output of ifconfig / ip addr / ipconfig, as appropriate.
    Finally, it will fallback on socket.gethostbyname_ex, which can be slow.

    With suppress_exceptions (the default) any unexpected failure degrades
    to loopback-only defaults instead of crashing the caller.
    """
    try:
        # first priority, use netifaces
        try:
            return _load_ips_netifaces()
        except ImportError:
            pass

        # second priority, parse subprocess output (how reliable is this?)
        if os.name == 'nt':
            try:
                return _load_ips_ipconfig()
            except (IOError, NoIPAddresses):
                pass
        else:
            try:
                return _load_ips_ip()
            except (IOError, OSError, NoIPAddresses):
                pass
            try:
                return _load_ips_ifconfig()
            except (IOError, OSError, NoIPAddresses):
                pass

        # lowest priority, use gethostbyname
        return _load_ips_gethostbyname()
    except Exception as e:
        if not suppress_exceptions:
            raise
        # unexpected error shouldn't crash, load dumb default values instead.
        warn("Unexpected error discovering local network interfaces: %s" % e)
        _load_ips_dumb()
# Public accessors: each triggers the lazy, one-time IP discovery first.
@_requires_ips
def local_ips():
    """return the IP addresses that point to this machine"""
    return LOCAL_IPS

@_requires_ips
def public_ips():
    """return the IP addresses for this machine that are visible to other machines"""
    return PUBLIC_IPS

@_requires_ips
def localhost():
    """return ip for localhost (almost always 127.0.0.1)"""
    return LOCALHOST

@_requires_ips
def is_local_ip(ip):
    """does `ip` point to this machine?"""
    return ip in LOCAL_IPS

@_requires_ips
def is_public_ip(ip):
    """is `ip` a publicly visible address?"""
    return ip in PUBLIC_IPS
| {
"content_hash": "614d8bc0474931d639450ddcf8dbd70c",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 97,
"avg_line_length": 28.520446096654275,
"alnum_prop": 0.5901981230448383,
"repo_name": "bdh1011/wau",
"id": "488650b5347d87bcb9a844503aabf2c07846c1c8",
"size": "7672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/jupyter_client/localinterfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
} |
from ConfigParser import SafeConfigParser
import errno
import logging
import os
import urllib2
class Config(object):
    """dist_test configuration, merged from an INI file and the environment.

    Each ``*_CONFIG`` class constant is a ``(section, option, env_var)``
    triple; ``_get_with_env_override`` resolves one by preferring the
    environment variable, then the config file, then None.
    """

    # S3 settings
    AWS_ACCESS_KEY_CONFIG = ('aws', 'access_key', 'AWS_ACCESS_KEY')
    AWS_SECRET_KEY_CONFIG = ('aws', 'secret_key', 'AWS_SECRET_KEY')
    AWS_TEST_RESULT_BUCKET_CONFIG = ('aws', 'test_result_bucket', 'TEST_RESULT_BUCKET')

    # MySQL settings
    MYSQL_HOST_CONFIG = ('mysql', 'host', 'MYSQL_HOST')
    MYSQL_PORT_CONFIG = ('mysql', 'port', 'MYSQL_PORT')
    MYSQL_USER_CONFIG = ('mysql', 'user', 'MYSQL_USER')
    MYSQL_PWD_CONFIG = ('mysql', 'password', 'MYSQL_PWD')
    MYSQL_DB_CONFIG = ('mysql', 'database', 'MYSQL_DB')

    # Isolate settings
    ISOLATE_HOME_CONFIG = ('isolate', 'home', "ISOLATE_HOME")
    ISOLATE_SERVER_CONFIG = ('isolate', 'server', "ISOLATE_SERVER")
    ISOLATE_CACHE_DIR_CONFIG = ('isolate', 'cache_dir', "ISOLATE_CACHE_DIR")

    # Beanstalk settings
    BEANSTALK_HOST_CONFIG = ('beanstalk', 'host', 'BEANSTALK_HOST')

    # Dist test settings
    DIST_TEST_MASTER_CONFIG = ('dist_test', 'master', "DIST_TEST_MASTER")
    DIST_TEST_JOB_PATH_CONFIG = ('dist_test', 'job_path', 'DIST_TEST_JOB_PATH')
    DIST_TEST_USER_CONFIG = ('dist_test', 'user', 'DIST_TEST_USER')
    DIST_TEST_PASSWORD_CONFIG = ('dist_test', 'password', 'DIST_TEST_PASSWORD')
    DIST_TEST_URL_TIMEOUT_CONFIG = ('dist_test', 'url_timeout', 'DIST_TEST_URL_TIMEOUT')

    def __init__(self, path=None):
        """Load configuration.

        path -- config file location; defaults to $DIST_TEST_CNF, then
                ~/.dist_test.cnf.  A missing file is tolerated (parser
                defaults and environment variables still apply).
        """
        if path is None:
            path = os.getenv("DIST_TEST_CNF")
        if path is None:
            path = os.path.join(os.getenv("HOME"), ".dist_test.cnf")
        logging.info("Reading configuration from %s", path)
        # Populate parser with default values
        defaults = {
            "log_dir": os.path.join(os.path.dirname(os.path.realpath(__file__)), "logs"),
            "submit_gce_metrics": "True",
            "allowed_ip_ranges": "0.0.0.0/0",
            "accounts": "{}",
        }
        self.config = SafeConfigParser(defaults)
        self.config.read(path)

        # Isolate settings
        self.ISOLATE_HOME = self._get_with_env_override(*self.ISOLATE_HOME_CONFIG)
        self.ISOLATE_SERVER = self._get_with_env_override(*self.ISOLATE_SERVER_CONFIG)
        self.ISOLATE_CACHE_DIR = self._get_with_env_override(*self.ISOLATE_CACHE_DIR_CONFIG)

        # S3 settings
        self.AWS_ACCESS_KEY = self._get_with_env_override(*self.AWS_ACCESS_KEY_CONFIG)
        self.AWS_SECRET_KEY = self._get_with_env_override(*self.AWS_SECRET_KEY_CONFIG)
        self.AWS_TEST_RESULT_BUCKET = self._get_with_env_override(*self.AWS_TEST_RESULT_BUCKET_CONFIG)

        # MySQL settings
        self.MYSQL_HOST = self._get_with_env_override(*self.MYSQL_HOST_CONFIG)
        try:
            self.MYSQL_PORT = int(self._get_with_env_override(*self.MYSQL_PORT_CONFIG))
        except (TypeError, ValueError):
            # Unset (None -> TypeError) or non-numeric (ValueError) port falls
            # back to the standard MySQL port.  The original bare ``except:``
            # also swallowed SystemExit/KeyboardInterrupt.
            self.MYSQL_PORT = 3306
        self.MYSQL_USER = self._get_with_env_override(*self.MYSQL_USER_CONFIG)
        self.MYSQL_PWD = self._get_with_env_override(*self.MYSQL_PWD_CONFIG)
        self.MYSQL_DB = self._get_with_env_override(*self.MYSQL_DB_CONFIG)

        # Beanstalk settings
        self.BEANSTALK_HOST = self._get_with_env_override(*self.BEANSTALK_HOST_CONFIG)

        # dist_test settings
        if not self.config.has_section('dist_test'):
            self.config.add_section('dist_test')
        self.DIST_TEST_MASTER = self._get_with_env_override(*self.DIST_TEST_MASTER_CONFIG)
        self.DIST_TEST_JOB_PATH = self._get_with_env_override(*self.DIST_TEST_JOB_PATH_CONFIG)
        if self.DIST_TEST_JOB_PATH is None:
            self.DIST_TEST_JOB_PATH = os.path.expanduser("~/.dist-test-last-job")
        self.DIST_TEST_USER = self._get_with_env_override(*self.DIST_TEST_USER_CONFIG)
        self.DIST_TEST_PASSWORD = self._get_with_env_override(*self.DIST_TEST_PASSWORD_CONFIG)
        self.DIST_TEST_URL_TIMEOUT = self._get_with_env_override(*self.DIST_TEST_URL_TIMEOUT_CONFIG)
        if self.DIST_TEST_URL_TIMEOUT is not None:
            self.DIST_TEST_URL_TIMEOUT = float(self.DIST_TEST_URL_TIMEOUT)

        # dist_test master configs (in the 'dist_test' section)
        self.DIST_TEST_ALLOWED_IP_RANGES = self.config.get('dist_test', 'allowed_ip_ranges')
        self.ACCOUNTS = self.config.get('dist_test', 'accounts')

        self.log_dir = self.config.get('dist_test', 'log_dir')
        # Make the log directory if it doesn't exist
        Config.mkdir_p(self.log_dir)
        self.SERVER_ACCESS_LOG = os.path.join(self.log_dir, "server-access.log")
        self.SERVER_ERROR_LOG = os.path.join(self.log_dir, "server-error.log")
        self.SERVER_LOG = os.path.join(self.log_dir, "server.log")
        self.SLAVE_LOG = os.path.join(self.log_dir, "slave.log")

    @staticmethod
    def mkdir_p(path):
        """Similar to mkdir -p, make a directory ignoring EEXIST"""
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                pass
            else:
                raise

    def _get_with_env_override(self, section, option, env_key):
        """Resolve one setting: env var first, then config file, else None."""
        env_value = os.environ.get(env_key)
        if env_value is not None:
            return env_value
        file_value = None
        if self.config.has_option(section, option):
            file_value = self.config.get(section, option)
        return file_value

    def ensure_aws_configured(self):
        """Raise if any S3 setting is unresolved."""
        self._ensure_configs([self.AWS_ACCESS_KEY_CONFIG,
                              self.AWS_SECRET_KEY_CONFIG,
                              self.AWS_TEST_RESULT_BUCKET_CONFIG])

    def ensure_isolate_configured(self):
        """Raise if any Isolate setting is unresolved."""
        self._ensure_configs([self.ISOLATE_HOME_CONFIG,
                              self.ISOLATE_SERVER_CONFIG,
                              self.ISOLATE_CACHE_DIR_CONFIG])

    def ensure_mysql_configured(self):
        """Raise if any MySQL setting (except port) is unresolved."""
        self._ensure_configs([self.MYSQL_HOST_CONFIG,
                              self.MYSQL_USER_CONFIG,
                              self.MYSQL_PWD_CONFIG,
                              self.MYSQL_DB_CONFIG])

    def ensure_beanstalk_configured(self):
        """Raise if the beanstalk host is unresolved."""
        self._ensure_configs([self.BEANSTALK_HOST_CONFIG])

    def ensure_dist_test_configured(self):
        """Raise if the dist_test master is unresolved."""
        self._ensure_configs([self.DIST_TEST_MASTER_CONFIG])

    def _ensure_configs(self, configs):
        """Raise Exception naming the first config triple that resolves to None."""
        for config in configs:
            if self._get_with_env_override(*config) is None:
                raise Exception(("Missing configuration %s.%s. Please set in the config file or " +
                                 "set the environment variable %s.") % config)

    def configure_auth(self):
        """
        Configure urllib2 to pass authentication information if provided
        in the configuration.
        """
        if not self.DIST_TEST_USER:
            return
        password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        password_mgr.add_password(None, self.DIST_TEST_MASTER,
                                  self.DIST_TEST_USER, self.DIST_TEST_PASSWORD)
        handler = urllib2.HTTPDigestAuthHandler(password_mgr)
        opener = urllib2.build_opener(handler)
        urllib2.install_opener(opener)
| {
"content_hash": "cedb9f1bace01594b7b795d0460f93e5",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 98,
"avg_line_length": 41.55625,
"alnum_prop": 0.6632576327267258,
"repo_name": "cloudera/dist_test",
"id": "f7111f05c84ed4cfc811daaea97b7ff27f4879ce",
"size": "6649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infra/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7520"
},
{
"name": "HTML",
"bytes": "2353607"
},
{
"name": "Java",
"bytes": "4574"
},
{
"name": "Python",
"bytes": "165921"
},
{
"name": "Shell",
"bytes": "14718"
}
],
"symlink_target": ""
} |
from django.conf import settings
from watchman.constants import DEFAULT_CHECKS, PAID_CHECKS
# TODO: these should not be module level (https://github.com/mwarkentin/django-watchman/issues/13)
# Every setting falls back to a library default when absent from Django settings.
# TODO: these should not be module level (https://github.com/mwarkentin/django-watchman/issues/13)
WATCHMAN_ENABLE_PAID_CHECKS = getattr(settings, "WATCHMAN_ENABLE_PAID_CHECKS", False)
# Dotted path to the decorator that guards the watchman views.
WATCHMAN_AUTH_DECORATOR = getattr(
    settings, "WATCHMAN_AUTH_DECORATOR", "watchman.decorators.token_required"
)
# TODO: Remove for django-watchman 1.0
WATCHMAN_TOKEN = getattr(settings, "WATCHMAN_TOKEN", None)
WATCHMAN_TOKENS = getattr(settings, "WATCHMAN_TOKENS", None)
WATCHMAN_TOKEN_NAME = getattr(settings, "WATCHMAN_TOKEN_NAME", "watchman-token")
WATCHMAN_ERROR_CODE = getattr(settings, "WATCHMAN_ERROR_CODE", 500)
# Email check settings
WATCHMAN_EMAIL_SENDER = getattr(
    settings, "WATCHMAN_EMAIL_SENDER", "watchman@example.com"
)
WATCHMAN_EMAIL_RECIPIENTS = getattr(
    settings, "WATCHMAN_EMAIL_RECIPIENTS", ["to@example.com"]
)
WATCHMAN_EMAIL_HEADERS = getattr(settings, "WATCHMAN_EMAIL_HEADERS", {})
# Default to checking every configured cache/database unless overridden.
WATCHMAN_CACHES = getattr(settings, "WATCHMAN_CACHES", settings.CACHES)
WATCHMAN_DATABASES = getattr(settings, "WATCHMAN_DATABASES", settings.DATABASES)
WATCHMAN_DISABLE_APM = getattr(settings, "WATCHMAN_DISABLE_APM", False)
WATCHMAN_STORAGE_PATH = getattr(settings, "WATCHMAN_STORAGE_PATH", settings.MEDIA_ROOT)
# Paid checks are appended to the defaults before WATCHMAN_CHECKS is resolved.
if WATCHMAN_ENABLE_PAID_CHECKS:
    DEFAULT_CHECKS = DEFAULT_CHECKS + PAID_CHECKS
WATCHMAN_CHECKS = getattr(settings, "WATCHMAN_CHECKS", DEFAULT_CHECKS)
EXPOSE_WATCHMAN_VERSION = getattr(settings, "EXPOSE_WATCHMAN_VERSION", False)
| {
"content_hash": "dc1c5f836afef375459e7ff713dc78b5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 98,
"avg_line_length": 43.51428571428571,
"alnum_prop": 0.7708470124753776,
"repo_name": "mwarkentin/django-watchman",
"id": "35938d2b59dd9b1757db6a0d6cf1de8441e8f128",
"size": "1523",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "watchman/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "190"
},
{
"name": "HTML",
"bytes": "5808"
},
{
"name": "Makefile",
"bytes": "1316"
},
{
"name": "Python",
"bytes": "52607"
}
],
"symlink_target": ""
} |
"""
This script computes smatch score between two AMRs.
For detailed description of smatch, see http://www.isi.edu/natural-language/amr/smatch-13.pdf
"""
import amr
import os
import random
import sys
import time
# total number of iteration in smatch computation
iteration_num = 5
# verbose output switch.
# Default false (no verbose output)
verbose = False
# single score output switch.
# Default true (compute a single score for all AMRs in two files)
single_score = True
# precision and recall output switch.
# Default false (do not output precision and recall, just output F score)
pr_flag = False
# Error log location
ERROR_LOG = sys.stderr
# Debug log location
DEBUG_LOG = sys.stderr
# dictionary to save pre-computed node mapping and its resulting triple match count
# key: tuples of node mapping
# value: the matching triple count
match_triple_dict = {}
def get_amr_line(input_f):
    """
    Return the next AMR from *input_f*, collapsed to a single line.

    AMRs are separated by blank lines; lines starting with "#" are treated
    as comments and skipped.  Returns "" when no AMR remains.
    Note: this function does not verify if the AMR is valid.
    """
    parts = []
    started = False
    for raw in input_f:
        line = raw.strip()
        if not line:
            if started:
                # blank line after content: end of the current AMR
                break
            # blank lines before the AMR: keep scanning
            continue
        if line.startswith("#"):
            # comment line inside the AMR file
            continue
        started = True
        parts.append(line)
    return "".join(parts)
def build_arg_parser():
    """
    Build an argument parser using argparse. Use it when python version is 2.7 or later.
    """
    files_help = 'Two files containing AMR pairs. AMRs in each file are separated by a single blank line'
    # Note: the two ms_help fragments concatenate without a space, exactly as
    # in the original source.
    ms_help = ('Output multiple scores (one AMR pair a score)'
               'instead of a single document-level smatch score (Default: false)')
    pr_help = "Output precision and recall as well as the f-score. Default: false"
    parser = argparse.ArgumentParser(description="Smatch calculator -- arguments")
    parser.add_argument('-f', nargs=2, required=True, type=argparse.FileType('r'), help=files_help)
    parser.add_argument('-r', type=int, default=4, help='Restart number (Default:4)')
    parser.add_argument('-v', action='store_true', help='Verbose output (Default:false)')
    parser.add_argument('--ms', action='store_true', default=False, help=ms_help)
    parser.add_argument('--pr', action='store_true', default=False, help=pr_help)
    return parser
def build_arg_parser2():
    """
    Build an argument parser using optparse. Use it when python version is 2.5 or 2.6.
    """
    files_help = ('Two files containing AMR pairs. AMRs in each file are '
                  'separated by a single blank line. This option is required.')
    ms_help = ('Output multiple scores (one AMR pair a score) instead of '
               'a single document-level smatch score (Default: False)')
    pr_help = "Output precision and recall as well as the f-score. Default: false"
    parser = optparse.OptionParser(usage="Smatch calculator -- arguments")
    parser.add_option("-f", "--files", nargs=2, dest="f", type="string", help=files_help)
    parser.add_option("-r", "--restart", dest="r", type="int", help='Restart number (Default: 4)')
    parser.add_option("-v", "--verbose", action='store_true', dest="v", help='Verbose output (Default:False)')
    parser.add_option("--ms", "--multiple_score", action='store_true', dest="ms", help=ms_help)
    parser.add_option('--pr', "--precision_recall", action='store_true', dest="pr", help=pr_help)
    parser.set_defaults(r=4, v=False, ms=False, pr=False)
    return parser
def get_best_match(instance1, attribute1, relation1,
                   instance2, attribute2, relation2,
                   prefix1, prefix2):
    """
    Get the highest triple match number between two sets of triples via hill-climbing.
    Arguments:
        instance1: instance triples of AMR 1 ("instance", node name, node value)
        attribute1: attribute triples of AMR 1 (attribute name, node name, attribute value)
        relation1: relation triples of AMR 1 (relation name, node 1 name, node 2 name)
        instance2: instance triples of AMR 2 ("instance", node name, node value)
        attribute2: attribute triples of AMR 2 (attribute name, node name, attribute value)
        relation2: relation triples of AMR 2 (relation name, node 1 name, node 2 name)
        prefix1: prefix label for AMR 1
        prefix2: prefix label for AMR 2
    Returns:
        best_match: the node mapping that results in the highest triple matching number
        best_match_num: the highest triple matching number

    NOTE(review): relies on module globals set elsewhere in this file:
    verbose, DEBUG_LOG and iteration_num (restart number + 1, set in main()).
    """
    # Compute candidate pool - all possible node match candidates.
    # In the hill-climbing, we only consider candidate in this pool to save computing time.
    # weight_dict is a dictionary that maps a pair of node
    (candidate_mappings, weight_dict) = compute_pool(instance1, attribute1, relation1,
                                                     instance2, attribute2, relation2,
                                                     prefix1, prefix2)
    if verbose:
        print >> DEBUG_LOG, "Candidate mappings:"
        print >> DEBUG_LOG, candidate_mappings
        print >> DEBUG_LOG, "Weight dictionary"
        print >> DEBUG_LOG, weight_dict
    best_match_num = 0
    # initialize best match mapping
    # the ith entry is the node index in AMR 2 which maps to the ith node in AMR 1
    best_mapping = [-1] * len(instance1)
    # random-restart hill-climbing: iteration 0 starts from the concept-based
    # smart initialization, later iterations from a random mapping
    for i in range(0, iteration_num):
        if verbose:
            print >> DEBUG_LOG, "Iteration", i
        if i == 0:
            # smart initialization used for the first round
            cur_mapping = smart_init_mapping(candidate_mappings, instance1, instance2)
        else:
            # random initialization for the other round
            cur_mapping = random_init_mapping(candidate_mappings)
        # compute current triple match number
        match_num = compute_match(cur_mapping, weight_dict)
        if verbose:
            print >> DEBUG_LOG, "Node mapping at start", cur_mapping
            print >> DEBUG_LOG, "Triple match number at start:", match_num
        while True:
            # get best gain
            (gain, new_mapping) = get_best_gain(cur_mapping, candidate_mappings, weight_dict,
                                                len(instance2), match_num)
            if verbose:
                print >> DEBUG_LOG, "Gain after the hill-climbing", gain
            # hill-climbing until there will be no gain for new node mapping
            if gain <= 0:
                break
            # otherwise update match_num and mapping
            match_num += gain
            cur_mapping = new_mapping[:]
            if verbose:
                print >> DEBUG_LOG, "Update triple match number to:", match_num
                print >> DEBUG_LOG, "Current mapping:", cur_mapping
        # keep the best mapping found across all restarts
        if match_num > best_match_num:
            best_mapping = cur_mapping[:]
            best_match_num = match_num
    return best_mapping, best_match_num
def compute_pool(instance1, attribute1, relation1,
                 instance2, attribute2, relation2,
                 prefix1, prefix2):
    """
    Compute all possible node mapping candidates and their weights (the triple
    matching number gained by mapping one node in AMR 1 to a node in AMR 2).
    Arguments:
        instance1/instance2: instance triples of AMR 1 / AMR 2
        attribute1/attribute2: attribute triples (attribute name, node name, attribute value)
        relation1/relation2: relation triples (relation name, node 1 name, node 2 name)
        prefix1/prefix2: node-name prefix labels of AMR 1 / AMR 2
    Returns:
        candidate_mapping: a list of sets; the ith set holds the AMR-2 node
            indices the ith AMR-1 node can map to with a non-zero triple match.
        weight_dict: maps a node pair to an inner dict; inner key -1 counts the
            instance/attribute matches of the pair alone, and every other inner
            key is a second node pair that yields a relation match together
            with the first pair.
    """
    candidate_mapping = [set() for _ in instance1]
    weight_dict = {}
    plen1 = len(prefix1)
    plen2 = len(prefix2)

    def _add_unary_match(pair):
        # credit one instance/attribute triple match to this node pair
        entry = weight_dict.setdefault(pair, {-1: 0})
        entry[-1] += 1

    # instance triples: candidates share the relation name ("instance") and the concept value
    for triple1 in instance1:
        for triple2 in instance2:
            if triple1[0].lower() == triple2[0].lower() \
                    and triple1[2].lower() == triple2[2].lower():
                # node index is the node name with its prefix stripped
                idx1 = int(triple1[1][plen1:])
                idx2 = int(triple2[1][plen2:])
                candidate_mapping[idx1].add(idx2)
                _add_unary_match((idx1, idx2))
    # attribute triples: same attribute name and same attribute value
    for triple1 in attribute1:
        for triple2 in attribute2:
            if triple1[0].lower() == triple2[0].lower() \
                    and triple1[2].lower() == triple2[2].lower():
                idx1 = int(triple1[1][plen1:])
                idx2 = int(triple2[1][plen2:])
                candidate_mapping[idx1].add(idx2)
                _add_unary_match((idx1, idx2))
    # relation triples: the two endpoint pairs jointly produce a relation match
    for rel1 in relation1:
        for rel2 in relation2:
            if rel1[0].lower() != rel2[0].lower():
                continue
            head1 = int(rel1[1][plen1:])
            head2 = int(rel2[1][plen2:])
            tail1 = int(rel1[2][plen1:])
            tail2 = int(rel2[2][plen2:])
            # both endpoint mappings become candidates
            candidate_mapping[head1].add(head2)
            candidate_mapping[tail1].add(tail2)
            pair_a = (head1, head2)
            pair_b = (tail1, tail2)
            if pair_a == pair_b:
                # both endpoints map identically (should generally not happen);
                # count it once as a unary match
                _add_unary_match(pair_a)
                continue
            # canonical order: the pair with the smaller AMR-1 index first
            if head1 > tail1:
                pair_a, pair_b = pair_b, pair_a
            # record the joint weight under both orders for future lookups
            for first, second in ((pair_a, pair_b), (pair_b, pair_a)):
                entry = weight_dict.setdefault(first, {-1: 0})
                entry[second] = entry.get(second, 0) + 1
    return candidate_mapping, weight_dict
def smart_init_mapping(candidate_mapping, instance1, instance2):
    """
    Build an initial node mapping seeded by exact concept matches ("smart
    initialization"); nodes without a concept match get a random unused
    candidate, or -1 when none is available.
    Arguments:
        candidate_mapping: candidate node match list (one set per AMR-1 node)
        instance1: instance triples of AMR 1
        instance2: instance triples of AMR 2
    Returns:
        initialized node mapping between two AMRs
    """
    random.seed()
    taken = {}          # AMR-2 node indices already assigned
    result = []
    leftovers = []      # AMR-1 node indices with no concept match
    for idx, candidates in enumerate(candidate_mapping):
        if not candidates:
            # no possible mapping at all
            result.append(-1)
            continue
        concept = instance1[idx][2]
        assigned = False
        # take the first free candidate whose concept value is identical
        for cand in candidates:
            if instance2[cand][2] == concept and cand not in taken:
                result.append(cand)
                taken[cand] = 1
                assigned = True
                break
        if not assigned:
            leftovers.append(idx)
            result.append(-1)
    # no concept match: draw a random unused candidate, if any remain
    for idx in leftovers:
        pool = list(candidate_mapping[idx])
        while pool:
            rid = random.randint(0, len(pool) - 1)
            pick = pool[rid]
            if pick in taken:
                pool.pop(rid)
            else:
                taken[pick] = 1
                result[idx] = pick
                break
    return result
def random_init_mapping(candidate_mapping):
    """
    Generate a random node mapping: each AMR-1 node draws an unused candidate
    at random, or -1 when it has no (free) candidate.
    Args:
        candidate_mapping: candidate node match list (one set per AMR-1 node)
    Returns:
        randomly-generated node mapping between two AMRs
    """
    # if needed, a fixed seed could be passed here to reproduce runs
    random.seed()
    taken = {}
    mapping = []
    for candidate_set in candidate_mapping:
        pool = list(candidate_set)
        if not pool:
            # -1 indicates no possible mapping
            mapping.append(-1)
            continue
        chosen = -1
        while pool:
            # draw a random index in [0, len(pool))
            rid = random.randint(0, len(pool) - 1)
            pick = pool[rid]
            if pick in taken:
                # already assigned to an earlier node; discard and redraw
                pool.pop(rid)
            else:
                taken[pick] = 1
                chosen = pick
                break
        mapping.append(chosen)
    return mapping
def compute_match(mapping, weight_dict):
    """
    Given a node mapping, compute match number based on weight_dict.
    Args:
        mapping: a list of node indices in AMR 2. The ith element (value j) means node i in AMR 1 maps to node j in AMR 2.
        weight_dict: the match-weight dictionary built by compute_pool.
    Returns:
        matching triple number
    Complexity: O(m*n) , m is the node number of AMR 1, n is the node number of AMR 2

    NOTE(review): results are memoized in the module-global match_triple_dict,
    which main() clears between AMR pairs.
    """
    # If this mapping has been investigated before, retrieve the value instead of re-computing.
    if verbose:
        print >> DEBUG_LOG, "Computing match for mapping"
        print >> DEBUG_LOG, mapping
    if tuple(mapping) in match_triple_dict:
        if verbose:
            print >> DEBUG_LOG, "saved value", match_triple_dict[tuple(mapping)]
        return match_triple_dict[tuple(mapping)]
    match_num = 0
    # i is node index in AMR 1, m is node index in AMR 2
    for i, m in enumerate(mapping):
        if m == -1:
            # no node maps to this node
            continue
        # node i in AMR 1 maps to node m in AMR 2
        current_node_pair = (i, m)
        if current_node_pair not in weight_dict:
            continue
        if verbose:
            print >> DEBUG_LOG, "node_pair", current_node_pair
        for key in weight_dict[current_node_pair]:
            if key == -1:
                # matching triple resulting from instance/attribute triples
                match_num += weight_dict[current_node_pair][key]
                if verbose:
                    print >> DEBUG_LOG, "instance/attribute match", weight_dict[current_node_pair][key]
            # only consider node index larger than i to avoid duplicates
            # as we store both weight_dict[node_pair1][node_pair2] and
            # weight_dict[node_pair2][node_pair1] for a relation
            elif key[0] < i:
                continue
            elif mapping[key[0]] == key[1]:
                # the partner pair is also in this mapping: relation match
                match_num += weight_dict[current_node_pair][key]
                if verbose:
                    print >> DEBUG_LOG, "relation match with", key, weight_dict[current_node_pair][key]
    if verbose:
        print >> DEBUG_LOG, "match computing complete, result:", match_num
    # update match_triple_dict
    match_triple_dict[tuple(mapping)] = match_num
    return match_num
def move_gain(mapping, node_id, old_id, new_id, weight_dict, match_num):
    """
    Compute the triple-match gain of moving one AMR-1 node to a new AMR-2 node.
    Arguments:
        mapping: current node mapping
        node_id: the AMR-1 node being remapped
        old_id: AMR-2 node node_id currently maps to
        new_id: AMR-2 node node_id will map to
        weight_dict: weight dictionary from compute_pool
        match_num: triple match number of the current mapping
    Returns:
        the triple match gain (may be negative)
    """
    target_pair = (node_id, new_id)   # pair created by the move
    source_pair = (node_id, old_id)   # pair destroyed by the move
    moved = mapping[:]
    moved[node_id] = new_id
    cache_key = tuple(moved)
    # reuse a previously computed score for this exact mapping
    if cache_key in match_triple_dict:
        return match_triple_dict[cache_key] - match_num
    delta = 0
    # add every match the new pair contributes under the moved mapping
    for key, weight in weight_dict.get(target_pair, {}).items():
        if key == -1:
            # instance/attribute match of the pair itself
            delta += weight
        elif moved[key[0]] == key[1]:
            # relation match jointly with another pair of the moved mapping
            delta += weight
    # subtract every match the old pair contributed under the old mapping
    for key, weight in weight_dict.get(source_pair, {}).items():
        if key == -1:
            delta -= weight
        elif mapping[key[0]] == key[1]:
            delta -= weight
    # memoize the score of the moved mapping
    match_triple_dict[cache_key] = match_num + delta
    return delta
def swap_gain(mapping, node_id1, mapping_id1, node_id2, mapping_id2, weight_dict, match_num):
    """
    Compute the triple match number gain from the swapping
    Arguments:
    mapping: current node mapping list
    node_id1: node 1 index in AMR 1
    mapping_id1: the node index in AMR 2 node 1 maps to (in the current mapping)
    node_id2: node 2 index in AMR 1
    mapping_id2: the node index in AMR 2 node 2 maps to (in the current mapping)
    weight_dict: weight dictionary
    match_num: the original matching triple number
    Returns:
    the gain number (might be negative)

    NOTE(review): get_best_gain always calls this with node_id1 < node_id2
    (its j loop starts at i+1), so the reordering branch below appears to be
    defensive; the key[0] == node_id1 duplicate guards assume that call
    pattern — confirm before invoking with node_id1 > node_id2.
    """
    new_mapping_list = mapping[:]
    # Before swapping, node_id1 maps to mapping_id1, and node_id2 maps to mapping_id2
    # After swapping, node_id1 maps to mapping_id2 and node_id2 maps to mapping_id1
    new_mapping_list[node_id1] = mapping_id2
    new_mapping_list[node_id2] = mapping_id1
    # reuse a previously computed score for this exact mapping (memo cache)
    if tuple(new_mapping_list) in match_triple_dict:
        return match_triple_dict[tuple(new_mapping_list)] - match_num
    gain = 0
    new_mapping1 = (node_id1, mapping_id2)
    new_mapping2 = (node_id2, mapping_id1)
    old_mapping1 = (node_id1, mapping_id1)
    old_mapping2 = (node_id2, mapping_id2)
    # put the pair with the smaller AMR-1 index in *_mapping1
    if node_id1 > node_id2:
        new_mapping2 = (node_id1, mapping_id2)
        new_mapping1 = (node_id2, mapping_id1)
        old_mapping1 = (node_id2, mapping_id2)
        old_mapping2 = (node_id1, mapping_id1)
    # add the matches the two new pairs contribute under the swapped mapping
    if new_mapping1 in weight_dict:
        for key in weight_dict[new_mapping1]:
            if key == -1:
                # instance/attribute match of the pair itself
                gain += weight_dict[new_mapping1][-1]
            elif new_mapping_list[key[0]] == key[1]:
                gain += weight_dict[new_mapping1][key]
    if new_mapping2 in weight_dict:
        for key in weight_dict[new_mapping2]:
            if key == -1:
                gain += weight_dict[new_mapping2][-1]
            # to avoid duplicate (the cross weight was counted via new_mapping1)
            elif key[0] == node_id1:
                continue
            elif new_mapping_list[key[0]] == key[1]:
                gain += weight_dict[new_mapping2][key]
    # subtract the matches the two old pairs contributed under the old mapping
    if old_mapping1 in weight_dict:
        for key in weight_dict[old_mapping1]:
            if key == -1:
                gain -= weight_dict[old_mapping1][-1]
            elif mapping[key[0]] == key[1]:
                gain -= weight_dict[old_mapping1][key]
    if old_mapping2 in weight_dict:
        for key in weight_dict[old_mapping2]:
            if key == -1:
                gain -= weight_dict[old_mapping2][-1]
            # to avoid duplicate
            elif key[0] == node_id1:
                continue
            elif mapping[key[0]] == key[1]:
                gain -= weight_dict[old_mapping2][key]
    # memoize the score of the swapped mapping
    match_triple_dict[tuple(new_mapping_list)] = match_num + gain
    return gain
def get_best_gain(mapping, candidate_mappings, weight_dict, instance_len, cur_match_num):
    """
    Hill-climbing step: find the single move or swap with the best gain.
    Arguments:
    mapping: current node mapping
    candidate_mappings: the candidates mapping list
    weight_dict: the weight dictionary
    instance_len: the number of the nodes in AMR 2
    cur_match_num: current triple match number
    Returns:
    (largest_gain, new mapping list after applying the best move/swap;
    the input mapping is copied unchanged when no positive gain exists)
    """
    largest_gain = 0
    # True: using swap; False: using move
    use_swap = True
    # the node to be moved/swapped
    node1 = None
    # store the other node affected. In swap, this other node is the node swapping with node1. In move, this other
    # node is the node node1 will move to.
    node2 = None
    # unmatched nodes in AMR 2
    unmatched = set(range(0, instance_len))
    # exclude nodes in current mapping
    # get unmatched nodes
    for nid in mapping:
        if nid in unmatched:
            unmatched.remove(nid)
    # evaluate every move of an AMR-1 node to a free AMR-2 candidate
    for i, nid in enumerate(mapping):
        # current node i in AMR 1 maps to node nid in AMR 2
        for nm in unmatched:
            if nm in candidate_mappings[i]:
                # remap i to another unmatched node (move)
                # (i, m) -> (i, nm)
                if verbose:
                    print >> DEBUG_LOG, "Remap node", i, "from ", nid, "to", nm
                mv_gain = move_gain(mapping, i, nid, nm, weight_dict, cur_match_num)
                if verbose:
                    print >> DEBUG_LOG, "Move gain:", mv_gain
                # sanity check: recompute the score from scratch and compare
                new_mapping = mapping[:]
                new_mapping[i] = nm
                new_match_num = compute_match(new_mapping, weight_dict)
                if new_match_num != cur_match_num + mv_gain:
                    print >> ERROR_LOG, mapping, new_mapping
                    print >> ERROR_LOG, "Inconsistency in computing: move gain", cur_match_num, mv_gain, \
                        new_match_num
                if mv_gain > largest_gain:
                    largest_gain = mv_gain
                    node1 = i
                    node2 = nm
                    use_swap = False
    # compute swap gain
    for i, m in enumerate(mapping):
        for j in range(i+1, len(mapping)):
            m2 = mapping[j]
            # swap operation (i, m) (j, m2) -> (i, m2) (j, m)
            # j starts from i+1, to avoid duplicate swap
            if verbose:
                print >> DEBUG_LOG, "Swap node", i, "and", j
                print >> DEBUG_LOG, "Before swapping:", i, "-", m, ",", j, "-", m2
                print >> DEBUG_LOG, mapping
                print >> DEBUG_LOG, "After swapping:", i, "-", m2, ",", j, "-", m
            sw_gain = swap_gain(mapping, i, m, j, m2, weight_dict, cur_match_num)
            if verbose:
                print >> DEBUG_LOG, "Swap gain:", sw_gain
            # sanity check, as for moves above
            new_mapping = mapping[:]
            new_mapping[i] = m2
            new_mapping[j] = m
            # BUG FIX: this debug print ran unconditionally; guard with verbose
            if verbose:
                print >> DEBUG_LOG, new_mapping
            new_match_num = compute_match(new_mapping, weight_dict)
            if new_match_num != cur_match_num + sw_gain:
                # BUG FIX: the old code printed the undefined names
                # "match"/"new_match" (NameError whenever this check fired)
                print >> ERROR_LOG, mapping, new_mapping
                print >> ERROR_LOG, "Inconsistency in computing: swap gain", cur_match_num, sw_gain, new_match_num
            if sw_gain > largest_gain:
                largest_gain = sw_gain
                node1 = i
                node2 = j
                use_swap = True
    # generate a new mapping based on swap/move
    cur_mapping = mapping[:]
    if node1 is not None:
        if use_swap:
            if verbose:
                print >> DEBUG_LOG, "Use swap gain"
            temp = cur_mapping[node1]
            cur_mapping[node1] = cur_mapping[node2]
            cur_mapping[node2] = temp
        else:
            if verbose:
                print >> DEBUG_LOG, "Use move gain"
            cur_mapping[node1] = node2
    else:
        if verbose:
            print >> DEBUG_LOG, "no move/swap gain found"
    if verbose:
        print >> DEBUG_LOG, "Original mapping", mapping
        print >> DEBUG_LOG, "Current mapping", cur_mapping
    return largest_gain, cur_mapping
def print_alignment(mapping, instance1, instance2):
    """
    Render a node mapping as a readable alignment string.
    Args:
        mapping: current node mapping list (-1 means unmapped)
        instance1: instance triples of AMR 1
        instance2: instance triples of AMR 2
    Returns:
        space-separated "name(value)-name(value)" pairs; unmapped AMR-1
        nodes are shown as "name(value)-Null"
    """
    pieces = []
    for idx, mapped in enumerate(mapping):
        left = "%s(%s)" % (instance1[idx][1], instance1[idx][2])
        if mapped == -1:
            pieces.append(left + "-Null")
        else:
            right = "%s(%s)" % (instance2[mapped][1], instance2[mapped][2])
            pieces.append(left + "-" + right)
    return " ".join(pieces)
def compute_f(match_num, test_num, gold_num):
    """
    Compute the f-score based on the matching triple number,
    triple number of AMR set 1,
    triple number of AMR set 2
    Args:
        match_num: matching triple number
        test_num: triple number of AMR 1 (test file)
        gold_num: triple number of AMR 2 (gold file)
    Returns:
        precision: match_num/test_num
        recall: match_num/gold_num
        f_score: 2*precision*recall/(precision+recall)

    NOTE(review): reads the module globals verbose and DEBUG_LOG.
    """
    # an empty AMR on either side yields an all-zero score (avoids div-by-zero)
    if test_num == 0 or gold_num == 0:
        return 0.00, 0.00, 0.00
    precision = float(match_num) / float(test_num)
    recall = float(match_num) / float(gold_num)
    # guard the harmonic mean against precision == recall == 0
    if (precision + recall) != 0:
        f_score = 2 * precision * recall / (precision + recall)
        if verbose:
            print >> DEBUG_LOG, "F-score:", f_score
        return precision, recall, f_score
    else:
        if verbose:
            print >> DEBUG_LOG, "F-score:", "0.0"
        return precision, recall, 0.00
def main(arguments):
"""
Main function of smatch score calculation
"""
global verbose
global iteration_num
global single_score
global pr_flag
global match_triple_dict
# set the iteration number
# total iteration number = restart number + 1
iteration_num = arguments.r + 1
if arguments.ms:
single_score = False
if arguments.v:
verbose = True
if arguments.pr:
pr_flag = True
# matching triple number
total_match_num = 0
# triple number in test file
total_test_num = 0
# triple number in gold file
total_gold_num = 0
# sentence number
sent_num = 1
# Read amr pairs from two files
while True:
cur_amr1 = get_amr_line(args.f[0])
cur_amr2 = get_amr_line(args.f[1])
if cur_amr1 == "" and cur_amr2 == "":
break
if cur_amr1 == "":
print >> ERROR_LOG, "Error: File 1 has less AMRs than file 2"
print >> ERROR_LOG, "Ignoring remaining AMRs"
break
if cur_amr2 == "":
print >> ERROR_LOG, "Error: File 2 has less AMRs than file 1"
print >> ERROR_LOG, "Ignoring remaining AMRs"
break
try:
amr1 = amr.AMR.parse_AMR_line(cur_amr1)
amr2 = amr.AMR.parse_AMR_line(cur_amr2)
except:
print cur_amr1
sys.stdout.flush()
sys.exit(0)
prefix1 = "a"
prefix2 = "b"
try:
# Rename node to "a1", "a2", .etc
amr1.rename_node(prefix1)
# Renaming node to "b1", "b2", .etc
amr2.rename_node(prefix2)
except:
print cur_amr1
sys.stdout.flush()
sys.exit(0)
(instance1, attributes1, relation1) = amr1.get_triples()
(instance2, attributes2, relation2) = amr2.get_triples()
if verbose:
# print parse results of two AMRs
print >> DEBUG_LOG, "AMR pair", sent_num
print >> DEBUG_LOG, "============================================"
print >> DEBUG_LOG, "AMR 1 (one-line):", cur_amr1
print >> DEBUG_LOG, "AMR 2 (one-line):", cur_amr2
print >> DEBUG_LOG, "Instance triples of AMR 1:", len(instance1)
print >> DEBUG_LOG, instance1
print >> DEBUG_LOG, "Attribute triples of AMR 1:", len(attributes1)
print >> DEBUG_LOG, attributes1
print >> DEBUG_LOG, "Relation triples of AMR 1:", len(relation1)
print >> DEBUG_LOG, relation1
print >> DEBUG_LOG, "Instance triples of AMR 2:", len(instance2)
print >> DEBUG_LOG, instance2
print >> DEBUG_LOG, "Attribute triples of AMR 2:", len(attributes2)
print >> DEBUG_LOG, attributes2
print >> DEBUG_LOG, "Relation triples of AMR 2:", len(relation2)
print >> DEBUG_LOG, relation2
(best_mapping, best_match_num) = get_best_match(instance1, attributes1, relation1,
instance2, attributes2, relation2,
prefix1, prefix2)
if verbose:
print >> DEBUG_LOG, "best match number", best_match_num
print >> DEBUG_LOG, "best node mapping", best_mapping
print >> DEBUG_LOG, "Best node mapping alignment:", print_alignment(best_mapping, instance1, instance2)
test_triple_num = len(instance1) + len(attributes1) + len(relation1)
gold_triple_num = len(instance2) + len(attributes2) + len(relation2)
if not single_score:
# if each AMR pair should have a score, compute and output it here
(precision, recall, best_f_score) = compute_f(best_match_num,
test_triple_num,
gold_triple_num)
#print "Sentence", sent_num
if pr_flag:
print "Precision: %.2f" % precision
print "Recall: %.2f" % recall
# print "Smatch score: %.2f" % best_f_score
print "%.4f" % best_f_score
total_match_num += best_match_num
total_test_num += test_triple_num
total_gold_num += gold_triple_num
# clear the matching triple dictionary for the next AMR pair
match_triple_dict.clear()
sent_num += 1
if verbose:
print >> DEBUG_LOG, "Total match number, total triple number in AMR 1, and total triple number in AMR 2:"
print >> DEBUG_LOG, total_match_num, total_test_num, total_gold_num
print >> DEBUG_LOG, "---------------------------------------------------------------------------------"
# output document-level smatch score (a single f-score for all AMR pairs in two files)
if single_score:
(precision, recall, best_f_score) = compute_f(total_match_num, total_test_num, total_gold_num)
if pr_flag:
print "Precision: %.2f" % precision
print "Recall: %.2f" % recall
print "Document F-score: %.2f" % best_f_score
args.f[0].close()
args.f[1].close()
if __name__ == "__main__":
    parser = None
    args = None
    # only support python version 2.5 or later (this file uses the python 2
    # "print >>" statement, so 3.x is rejected up front)
    if sys.version_info[0] != 2 or sys.version_info[1] < 5:
        print >> ERROR_LOG, "This script only supports python 2.5 or later. \
            It does not support python 3.x."
        exit(1)
    # use optparse if python version is 2.5 or 2.6
    if sys.version_info[1] < 7:
        import optparse
        if len(sys.argv) == 1:
            print >> ERROR_LOG, "No argument given. Please run smatch.py -h \
            to see the argument description."
            exit(1)
        parser = build_arg_parser2()
        (args, opts) = parser.parse_args()
        file_handle = []
        if args.f is None:
            print >> ERROR_LOG, "smatch.py requires -f option to indicate two files \
                                 containing AMR as input. Please run smatch.py -h to \
                                 see the argument description."
            exit(1)
        # assert there are 2 file names following -f.
        assert(len(args.f) == 2)
        for file_path in args.f:
            if not os.path.exists(file_path):
                # BUG FIX: report the file that is actually missing,
                # not unconditionally args.f[0]
                print >> ERROR_LOG, "Given file", file_path, "does not exist"
                exit(1)
            file_handle.append(open(file_path))
        # use opened files (argparse's FileType does this automatically)
        args.f = tuple(file_handle)
    # use argparse if python version is 2.7 or later
    else:
        import argparse
        parser = build_arg_parser()
        args = parser.parse_args()
    main(args)
| {
"content_hash": "50f17f9680adafa797ef66dbec4120f3",
"timestamp": "",
"source": "github",
"line_count": 843,
"max_line_length": 118,
"avg_line_length": 42.852906287069985,
"alnum_prop": 0.5673910034602077,
"repo_name": "masterkeywikz/seq2graph",
"id": "1d22f9df5dfed5b0f3f0ed64531b232f0b294e05",
"size": "36172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smatch_2.0.2/smatch.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "10200"
},
{
"name": "Makefile",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "1082495"
},
{
"name": "Shell",
"bytes": "3567"
}
],
"symlink_target": ""
} |
import logging
from tornado import web
from tornado import gen
from urllib2 import URLError
from UtilityLayer import *
from concurrent.futures import ThreadPoolExecutor
class RequestHandler(web.RequestHandler):
INPUT = INPUT()
OUTPUT = OUTPUT()
RUNTIME = RUNTIME()
BFLY_CONFIG = BFLY_CONFIG
def initialize(self, _core, _db):
self._ex = ThreadPoolExecutor(max_workers=10)
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header('Access-Control-Allow-Methods', 'GET')
self._core = _core
self._db = _db
# Each Handler must define
def parse(self, _request):
pass
@gen.coroutine
def get(self, *args):
try:
query = self.parse(*args)
yield self._ex.submit(self.handle, query)
except URLError, u_error:
# Get error information
details = u_error.args[0]
self.set_status(int(details.get('http',500)))
self.set_header('Content-Type', 'text/plain')
self.write(self.log(details))
def check(self, _query):
return _query
def handle(self, _query):
this_method = _query.INPUT.METHODS.VALUE
self.set_header('Content-Type',_query.mime_type)
if _query.is_data:
content = self._core.get_data(_query)
else:
content = self._core.get_info(_query)
# Return content
self.write(content)
return content
def log(self, detail):
errors = self.RUNTIME.ERROR
# Get some global strings
k_check = errors.CHECK.NAME
k_term = errors.TERM.NAME
k_out = errors.OUT.NAME
statuses = {
'bad_check': 'info'
}
actions = {
'bad_check': '''The {{{}}} {{{}}} is not {{{}}}
'''.format(k_term, k_out, k_check)
}
# Get error info and type
keys = detail.get('keys',{})
action = detail.get('error','')
# Get the log status and template
status = statuses.get(action,'error')
template = actions.get(action,'New error')
# Try to format the sting
try:
message = template.format(**keys)
except KeyError:
message = template
# Log the message with a status
getattr(logging, status)(message)
return message
| {
"content_hash": "895d235f00bf6260fd7b36a04fe0d797",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 62,
"avg_line_length": 30.487179487179485,
"alnum_prop": 0.5685449957947856,
"repo_name": "Rhoana/butterfly2",
"id": "d4a04298c70683db05b5a30fad9d42235672b836",
"size": "2378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "butterfly/AccessLayer/RequestHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64079"
}
],
"symlink_target": ""
} |
import inspect
import os
import runpy
import specs
# -------------------------------------------------------------------------------------------------
def fixpath(_path, _opts):
    """Join *_path* onto the configured path prefix and normalize the result."""
    prefixed = os.path.join(_opts.pathPrefix, _path)
    return os.path.normpath(prefixed)
# -------------------------------------------------------------------------------------------------
def include(_path):
    """Load a build spec, evaluating it from its own directory.

    Changes the working directory to the spec's directory, runs it, and
    restores the previous directory afterwards. When the bare path cannot
    be read, retries with the ".ab" extension appended.
    """
    cwd = os.getcwd()
    try:
        os.chdir(os.path.dirname(os.path.abspath(_path)))
        # NOTE(review): _path is resolved again after the chdir above, so a
        # path that contains directory components may no longer resolve
        # relative to the new cwd — confirm callers pass plain file names
        # or absolute paths.
        return _includeInternal(_path)
    except IOError:
        # retry with the conventional spec-file extension
        return _includeInternal(_path + ".ab")
    finally:
        os.chdir(cwd)
# -------------------------------------------------------------------------------------------------
def _includeInternal(_path):
    """Execute the spec file at *_path* and return its user-defined names."""
    init_globals = specs.getProjectGroupDict()
    init_globals['include'] = include
    # classes injected through the spec dict are framework plumbing,
    # not definitions authored by the spec file itself
    injected = [v for v in init_globals.itervalues() if inspect.isclass(v)]
    module_dict = runpy.run_path(_path, init_globals)
    return {k: v for k, v in module_dict.iteritems()
            if not inspect.isclass(v) or v not in injected}
| {
"content_hash": "4065790e238af3d853a6256bd3e65271",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 99,
"avg_line_length": 30.805555555555557,
"alnum_prop": 0.47520288548241657,
"repo_name": "daVinci1980/antebuild",
"id": "ec553188d7f5cd2ed76b9ae7ebd1d430c90d65a3",
"size": "1110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "internals/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "809"
},
{
"name": "Objective-C",
"bytes": "215"
},
{
"name": "Python",
"bytes": "36101"
}
],
"symlink_target": ""
} |
from enum import Enum
class RouteNextHopType(Enum):
    """Kind of next hop a route forwards traffic to."""
    virtual_network_gateway = "VirtualNetworkGateway"
    vnet_local = "VnetLocal"
    internet = "Internet"
    virtual_appliance = "VirtualAppliance"
    none = "None"
class SecurityRuleProtocol(Enum):
    """Network protocol a security rule applies to ("*" matches any)."""
    tcp = "Tcp"
    udp = "Udp"
    asterisk = "*"
class SecurityRuleAccess(Enum):
    """Whether a security rule allows or denies matching traffic."""
    allow = "Allow"
    deny = "Deny"
class SecurityRuleDirection(Enum):
    """Traffic direction a security rule applies to."""
    inbound = "Inbound"
    outbound = "Outbound"
class TransportProtocol(Enum):
    """Transport-layer protocol."""
    udp = "Udp"
    tcp = "Tcp"
class IPAllocationMethod(Enum):
    """How an IP address is assigned."""
    static = "Static"
    dynamic = "Dynamic"
class IPVersion(Enum):
    """IP address version."""
    ipv4 = "IPv4"
    ipv6 = "IPv6"
class ApplicationGatewayProtocol(Enum):
    """Protocol used by an application gateway listener/backend."""
    http = "Http"
    https = "Https"
class ApplicationGatewayCookieBasedAffinity(Enum):
    """Cookie-based session affinity toggle for an application gateway."""
    enabled = "Enabled"
    disabled = "Disabled"
class ApplicationGatewayBackendHealthServerHealth(Enum):
    """Health state reported for a backend server."""
    unknown = "Unknown"
    up = "Up"
    down = "Down"
    partial = "Partial"
class ApplicationGatewaySkuName(Enum):
    """SKU name of an application gateway."""
    standard_small = "Standard_Small"
    standard_medium = "Standard_Medium"
    standard_large = "Standard_Large"
    waf_medium = "WAF_Medium"
    waf_large = "WAF_Large"
class ApplicationGatewayTier(Enum):
    """SKU tier of an application gateway."""
    standard = "Standard"
    waf = "WAF"
class ApplicationGatewaySslProtocol(Enum):
    """TLS protocol versions for an application gateway SSL policy."""
    tl_sv1_0 = "TLSv1_0"
    tl_sv1_1 = "TLSv1_1"
    tl_sv1_2 = "TLSv1_2"
class ApplicationGatewayRequestRoutingRuleType(Enum):
    """Type of an application gateway request-routing rule."""
    basic = "Basic"
    path_based_routing = "PathBasedRouting"
class ApplicationGatewayOperationalState(Enum):
    """Operational state of an application gateway."""
    stopped = "Stopped"
    starting = "Starting"
    running = "Running"
    stopping = "Stopping"
class ApplicationGatewayFirewallMode(Enum):
    """Web application firewall mode."""
    detection = "Detection"
    prevention = "Prevention"
class AuthorizationUseStatus(Enum):
    """Whether an authorization is available or already in use."""
    available = "Available"
    in_use = "InUse"
class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(Enum):
    """Configuration state of advertised public prefixes on a peering."""
    not_configured = "NotConfigured"
    configuring = "Configuring"
    configured = "Configured"
    validation_needed = "ValidationNeeded"
class ExpressRouteCircuitPeeringType(Enum):
    """Peering type of an ExpressRoute circuit."""
    azure_public_peering = "AzurePublicPeering"
    azure_private_peering = "AzurePrivatePeering"
    microsoft_peering = "MicrosoftPeering"
class ExpressRouteCircuitPeeringState(Enum):
    """Enabled/disabled state of an ExpressRoute circuit peering."""
    disabled = "Disabled"
    enabled = "Enabled"
class Access(Enum):
    """Generic allow/deny access value."""
    allow = "Allow"
    deny = "Deny"
class ExpressRouteCircuitSkuTier(Enum):
    """SKU tier of an ExpressRoute circuit."""
    standard = "Standard"
    premium = "Premium"
class ExpressRouteCircuitSkuFamily(Enum):
    """SKU billing family of an ExpressRoute circuit."""
    unlimited_data = "UnlimitedData"
    metered_data = "MeteredData"
class ServiceProviderProvisioningState(Enum):
    """Provisioning state of a circuit at the service provider."""
    not_provisioned = "NotProvisioned"
    provisioning = "Provisioning"
    provisioned = "Provisioned"
    deprovisioning = "Deprovisioning"
class LoadDistribution(Enum):
    """Load distribution mode: Default, SourceIP or SourceIPProtocol."""
    default = "Default"
    source_ip = "SourceIP"
    source_ip_protocol = "SourceIPProtocol"
class ProbeProtocol(Enum):
    """Health probe protocol: Http or Tcp."""
    http = "Http"
    tcp = "Tcp"
class NetworkOperationStatus(Enum):
    """Status of a long-running network operation."""
    in_progress = "InProgress"
    succeeded = "Succeeded"
    failed = "Failed"
class EffectiveRouteSource(Enum):
    """Origin of an effective route."""
    unknown = "Unknown"
    user = "User"
    virtual_network_gateway = "VirtualNetworkGateway"
    default = "Default"
class EffectiveRouteState(Enum):
    """Effective route state: Active or Invalid."""
    active = "Active"
    invalid = "Invalid"
class ProvisioningState(Enum):
    """Resource provisioning state."""
    succeeded = "Succeeded"
    updating = "Updating"
    deleting = "Deleting"
    failed = "Failed"
class AssociationType(Enum):
    """Association type: Associated or Contains."""
    associated = "Associated"
    contains = "Contains"
class Direction(Enum):
    """Traffic direction: Inbound or Outbound."""
    inbound = "Inbound"
    outbound = "Outbound"
class Protocol(Enum):
    """Transport protocol: TCP or UDP."""
    tcp = "TCP"
    udp = "UDP"
class NextHopType(Enum):
    """Next hop type of a route."""
    internet = "Internet"
    virtual_appliance = "VirtualAppliance"
    virtual_network_gateway = "VirtualNetworkGateway"
    vnet_local = "VnetLocal"
    hyper_net_gateway = "HyperNetGateway"
    none = "None"
class PcProtocol(Enum):
    """Packet-capture filter protocol: TCP, UDP or Any."""
    tcp = "TCP"
    udp = "UDP"
    any = "Any"
class PcStatus(Enum):
    """Packet-capture session status."""
    not_started = "NotStarted"
    running = "Running"
    stopped = "Stopped"
    error = "Error"
    unknown = "Unknown"
class PcError(Enum):
    """Packet-capture failure reason."""
    internal_error = "InternalError"
    agent_stopped = "AgentStopped"
    capture_failed = "CaptureFailed"
    local_file_failed = "LocalFileFailed"
    storage_failed = "StorageFailed"
class VirtualNetworkPeeringState(Enum):
    """Virtual network peering state."""
    initiated = "Initiated"
    connected = "Connected"
    disconnected = "Disconnected"
class VirtualNetworkGatewayType(Enum):
    """Virtual network gateway type: Vpn or ExpressRoute."""
    vpn = "Vpn"
    express_route = "ExpressRoute"
class VpnType(Enum):
    """VPN routing type: PolicyBased or RouteBased."""
    policy_based = "PolicyBased"
    route_based = "RouteBased"
class VirtualNetworkGatewaySkuName(Enum):
    """Virtual network gateway SKU name."""
    basic = "Basic"
    high_performance = "HighPerformance"
    standard = "Standard"
    ultra_performance = "UltraPerformance"
class VirtualNetworkGatewaySkuTier(Enum):
    """Virtual network gateway SKU tier (same values as the SKU name)."""
    basic = "Basic"
    high_performance = "HighPerformance"
    standard = "Standard"
    ultra_performance = "UltraPerformance"
class BgpPeerState(Enum):
    """BGP peer session state."""
    unknown = "Unknown"
    stopped = "Stopped"
    idle = "Idle"
    connecting = "Connecting"
    connected = "Connected"
class ProcessorArchitecture(Enum):
    """Processor architecture: Amd64 or X86."""
    amd64 = "Amd64"
    x86 = "X86"
class VirtualNetworkGatewayConnectionStatus(Enum):
    """Virtual network gateway connection status."""
    unknown = "Unknown"
    connecting = "Connecting"
    connected = "Connected"
    not_connected = "NotConnected"
class VirtualNetworkGatewayConnectionType(Enum):
    """Virtual network gateway connection type."""
    ipsec = "IPsec"
    vnet2_vnet = "Vnet2Vnet"
    express_route = "ExpressRoute"
    vpn_client = "VPNClient"
| {
"content_hash": "c08ea686c3cfa0c356e997d202a3cba9",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 66,
"avg_line_length": 17.523219814241486,
"alnum_prop": 0.6786219081272085,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "4f5a411a8288cb184e7596c4334ff0cb8f973cd0",
"size": "6134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/network_management_client_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import dragon.updaters as updaters
import dragon.vm.theano as theano
import dragon.vm.theano.tensor as T
from dragon.vm.tensorflow.framework import ops
from dragon.vm.tensorflow.ops import variables
class Optimizer(object):
    """Minimal tf.train.Optimizer look-alike backed by Dragon updaters.

    Subclasses set ``self.updater`` to a ``dragon.updaters`` instance in
    their ``__init__``.  ``minimize``/``apply_gradients`` register
    (var, grad) pairs on that updater, and ``run`` lazily compiles two
    theano functions: one evaluating the objective targets, one applying
    the parameter updates.
    """
    def __init__(self, use_locking, name):
        # ``use_locking`` is stored for tf API compatibility only; nothing
        # in this class reads it.
        if not name:
            raise ValueError('Must specify the optimizer name.')
        self._use_locking = use_locking
        self._name = name
        self._slots = {}
        # filled in later by compute_gradients / the subclass __init__
        self.loss = self.updater = None
        self.train = self.update = None
    def get_name(self):
        """Return the optimizer name passed at construction."""
        return self._name
    def minimize(self, loss, global_step=None, var_list=None, **kwargs):
        """Compute and apply gradients in one call (tf-style shortcut)."""
        grads_and_vars = self.compute_gradients(loss, var_list)
        return self.apply_gradients(grads_and_vars, global_step=global_step)
    def compute_gradients(self, loss, var_list=None, **kwargs):
        """Return [(grad, var), ...]; defaults to all trainable variables."""
        if var_list is None:
            var_list = variables.trainable_variables() + \
                ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)
        self.loss = loss
        grads = T.grad(loss, var_list)
        grads_and_vars = list(zip(grads, var_list))
        return grads_and_vars
    def apply_gradients(self, grads_and_vars, global_step=None, **kwargs):
        """Register (var, grad) pairs on the updater and return self.

        ``global_step`` is accepted for API compatibility but unused here.
        """
        objs = set()
        for grad_var in grads_and_vars:
            self.updater.append((grad_var[1], grad_var[0]))  # (var, grad)
            # collect the expressions that must be evaluated so that the
            # gradients exist before the update step runs
            for obj in grad_var[1].grad_objs: objs.add(obj)
        self.objs = list(objs)
        return self
    def run(self, feed_dict=None):
        """Evaluate the objective targets, then apply one update step.

        Both theano functions are compiled on the first call and cached.
        """
        # objective function
        if not hasattr(self, '_objective_func'):
            # find minimum solving targets
            targets = set()
            for t in self.objs: targets.add(t)
            if feed_dict is not None:
                # NOTE(review): the compiled function captures the key order
                # of the *first* feed_dict; later calls must feed the same
                # keys in the same iteration order.
                self._objective_func = theano.function(inputs=feed_dict.keys(),
                                                       outputs=list(targets))
            else:
                self._objective_func = theano.function(outputs=list(targets))
        if feed_dict is not None:
            self._objective_func(*feed_dict.values())
        else:
            self._objective_func()
        # update function
        if not hasattr(self, '_update_func'):
            self._update_func = theano.function(updater=self.updater)
        self._update_func()
class GradientDescentOptimizer(Optimizer):
    """Vanilla SGD: wraps ``updaters.SGDUpdater`` with momentum fixed at 0."""
    def __init__(self, learning_rate, use_locking=False, name='GradientDescent'):
        super(GradientDescentOptimizer, self).__init__(use_locking, name)
        self.updater = updaters.SGDUpdater(learning_rate, 0.0)
class MomentumOptimizer(Optimizer):
    """Momentum SGD; ``use_nesterov=True`` selects Nesterov momentum."""
    def __init__(self, learning_rate, momentum,
                 use_locking=False, name='Momentum', use_nesterov=False):
        super(MomentumOptimizer, self).__init__(use_locking, name)
        # Pick the updater class up front, then construct it once.
        updater_cls = (updaters.NesterovUpdater if use_nesterov
                       else updaters.SGDUpdater)
        self.updater = updater_cls(learning_rate, momentum)
class AdamOptimizer(Optimizer):
    """Adam optimizer; forwards all hyper-parameters to ``updaters.AdamUpdater``."""
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                 use_locking=False, name='Adam'):
        super(AdamOptimizer, self).__init__(use_locking, name)
        self.updater = updaters.AdamUpdater(learning_rate, beta1, beta2, epsilon)
class RMSPropOptimizer(Optimizer):
    """RMSProp optimizer.

    NOTE(review): ``momentum`` and ``centered`` are accepted for tf API
    compatibility but are NOT forwarded to ``updaters.RMSPropUpdater`` —
    confirm whether the Dragon updater supports them or whether this is a
    silent feature drop.
    """
    def __init__(self, learning_rate, decay, momentum, epsilon=1e-10,
                 use_locking=False, centered=False, name='RMSProp'):
        super(RMSPropOptimizer, self).__init__(use_locking, name)
        self.updater = updaters.RMSPropUpdater(learning_rate, decay, epsilon)
| {
"content_hash": "fb266154d97dc7acf749ae3fe0edf47d",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 81,
"avg_line_length": 39.13978494623656,
"alnum_prop": 0.6211538461538462,
"repo_name": "neopenx/Dragon",
"id": "7d232c80d6bfecd6f66996e8308474b2a0dfb21f",
"size": "3833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dragon/python/dragon/vm/tensorflow/training/optimizer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7082"
},
{
"name": "C++",
"bytes": "1024612"
},
{
"name": "CMake",
"bytes": "7849"
},
{
"name": "Cuda",
"bytes": "246400"
},
{
"name": "Makefile",
"bytes": "7409"
},
{
"name": "Python",
"bytes": "552459"
}
],
"symlink_target": ""
} |
from getpass import getpass
import time
import paramiko
def main():
    """Configure logging on a remote Cisco device and print its log.

    Connects over SSH (paramiko), disables paging, enters config mode,
    sets ``logging buffered 10000``, then prints the device's
    ``show logging`` output.  Prompts interactively for the password.
    """
    ip_addr = '50.76.53.27'
    username = 'pyclass'
    password = getpass()
    ssh_port = 8022

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(ip_addr, username=username, password=password,
                   look_for_keys=False, allow_agent=False, port=ssh_port)
    try:
        remote_conn = client.invoke_shell()

        def send_cmd(cmd, delay=2):
            # Write one command, give the device time to respond, then
            # drain the channel and return whatever came back.
            remote_conn.send(cmd + "\n")
            time.sleep(delay)
            return remote_conn.recv(65535)

        # banner/prompt emitted right after the shell opens
        print(remote_conn.recv(65535))
        send_cmd("terminal length 0")      # disable paging
        send_cmd("configure terminal")
        send_cmd("logging buffered 10000")
        send_cmd("exit")
        print(send_cmd("show logging"))
    finally:
        # BUGFIX: the original never closed the SSH connection.
        client.close()
if __name__ == "__main__":
main() | {
"content_hash": "ff9239f5e9043804b11fc8bd17e4ee15",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 129,
"avg_line_length": 26.21951219512195,
"alnum_prop": 0.6632558139534884,
"repo_name": "linkdebian/pynet_course",
"id": "9b58b1869c17cec9db18891a93470037a772ff83",
"size": "1238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class4/exercise2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20547"
}
],
"symlink_target": ""
} |
import os
import avro.ipc
import avro.protocol
import tornavro.transceiver
# Load the protocol definition that ships next to this script.
# BUGFIX: the original leaked the open file handle and left the socket
# open if the request raised.
proto_path = os.path.join(os.path.dirname(__file__), 'hello.avpr')
with open(proto_path) as fh:
    proto = avro.protocol.parse(fh.read())

# Blocking client: issue a single 'hello' request over the socket.
client = tornavro.transceiver.SocketTransceiver('localhost', 8888)
try:
    requestor = avro.ipc.Requestor(proto, client)
    print(requestor.request(u'hello', dict(name=u'rich')))
finally:
    client.close()
| {
"content_hash": "92e30aa337521fdfdf279d1a530a4a9b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 24.375,
"alnum_prop": 0.7589743589743589,
"repo_name": "richid/tornavro",
"id": "9ca2a35daf6aea4c6682550fb1c2e96cdc522c02",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/hello/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9259"
}
],
"symlink_target": ""
} |
"""General demo data for Lino Avanti.
- Course providers and courses
"""
# from django.conf import settings
# from lino.utils import mti
from lino.utils import Cycler # join_words
from lino.utils.mldbc import babel_named as named
from lino.api import rt, dd, _
from lino.modlib.users.choicelists import UserTypes
from lino_xl.lib.cal.choicelists import Recurrencies
from lino_xl.lib.courses.choicelists import EnrolmentStates
# Stage names shared by every course-type trend area registered below.
course_stages = [
    _("Dispens"),
    _("Eingeschrieben"),
    _("Abgeschlossen"),
    _("Abgebrochen"),
    _("Ausgeschlossen")]
# (area label, stage names) pairs turned into TrendArea/TrendStage rows by
# objects().  A stage name starting with "!" gets subject_column=True.
trends_config = []
trends_config.append((
    _("Info Integration"),
    ["!Erstgespräch",
     "Sprachtest",
     "Einschreibung in Sprachkurs",
     "Einschreibung in Integrationskurs",
     "!Bilanzgespräch"]))
trends_config.append((_("Alphabetisation"), course_stages))
trends_config.append((_("A1"), course_stages))
trends_config.append((_("A2"), course_stages))
trends_config.append((_("Citizen course"), course_stages))
trends_config.append((_("Professional integration"), [
    "Begleitet vom DSBE",
    "Begleitet vom ADG",
    "Erwerbstätigkeit",
]))
def objects():
    """Yield demo objects for Lino Avanti.

    Generates trend areas/stages, calendar configuration, course lines,
    demo users, three daily alphabetisation courses, enrolments, and
    plausible states for the auto-generated calendar entries and guests.
    The fixture loader saves objects in the order they are yielded.
    """
    Line = rt.models.courses.Line
    Teacher = dd.plugins.courses.teacher_model
    Course = rt.models.courses.Course
    Topic = rt.models.courses.Topic
    Enrolment = rt.models.courses.Enrolment
    CourseStates = rt.models.courses.CourseStates
    User = rt.models.users.User
    EventType = rt.models.cal.EventType
    Guest = rt.models.cal.Guest
    GuestRole = rt.models.cal.GuestRole
    GuestStates = rt.models.cal.GuestStates
    EntryStates = rt.models.cal.EntryStates
    Event = rt.models.cal.Event
    CommentType = rt.models.comments.CommentType
    TrendStage = rt.models.trends.TrendStage
    TrendArea = rt.models.trends.TrendArea

    # Trend areas and their stages.  A leading "!" in a stage name marks
    # the stage as a "subject column" stage.
    for area, stages in trends_config:
        ta = named(TrendArea, area)
        yield ta
        for stage in stages:
            kw = dict(trend_area=ta)
            if stage[0] == "!":
                stage = stage[1:]
                kw.update(subject_column=True)
            yield named(TrendStage, stage, **kw)

    yield EventType(**dd.str2kw('name', _("First contact")))
    kw = dd.str2kw('name', _("Lesson"))
    kw.update(dd.str2kw('event_label', _("Lesson")))
    event_type = EventType(**kw)
    yield event_type
    pupil = named(GuestRole, _("Pupil"))
    yield pupil
    yield named(GuestRole, _("Assistant"))
    topic_citizen = named(Topic, _("Citizen course"))
    yield topic_citizen
    topic_lang = named(Topic, _("Language courses"))
    yield topic_lang

    # Course lines.
    # BUGFIX: the original called kw.update(topic=topic_citizen) *before*
    # re-assigning kw, so the update was lost and the citizen course line
    # was created without a topic.
    kw = dict(event_type=event_type, guest_role=pupil)
    kw.update(topic=topic_citizen)
    yield named(Line, _("Citizen course"), **kw)
    kw.update(topic=topic_lang)
    alpha = named(Line, _("Alphabetisation"), **kw)
    yield alpha
    yield named(Line, _("German for beginners"), **kw)
    yield named(Line, _("German A1+"), **kw)
    yield named(Line, _("German A2"), **kw)
    yield named(Line, _("German A2 (women)"), **kw)

    yield named(CommentType, _("Phone call"))
    yield named(CommentType, _("Visit"))
    yield named(CommentType, _("Individual consultation"))
    yield named(CommentType, _("Internal meeting"))
    yield named(CommentType, _("Meeting with partners"))

    # Demo users.  The auditor and admin types are excluded from the
    # USERS cycler used below.
    laura = Teacher(first_name="Laura", last_name="Lieblig")
    yield laura
    yield User(username="laura", user_type=UserTypes.teacher,
               partner=laura)
    yield User(username="nathalie", user_type=UserTypes.user)
    yield User(username="nelly", user_type=UserTypes.user)
    yield User(username="audrey", user_type=UserTypes.auditor)
    yield User(username="martina", user_type=UserTypes.coordinator)
    yield User(username="sandra", user_type=UserTypes.secretary)
    USERS = Cycler(User.objects.exclude(
        user_type__in=(UserTypes.auditor, UserTypes.admin)))

    # Three daily courses (morning, afternoon, evening) on the alpha line.
    kw = dict(monday=True, tuesday=True, thursday=True, friday=True)
    kw.update(
        line=alpha,
        start_date=dd.demo_date(-30),
        start_time="9:00", end_time="12:00",
        max_date=dd.demo_date(10),
        state=CourseStates.active,
        every_unit=Recurrencies.daily,
        user=USERS.pop(),
        teacher=laura,
        max_places=5)
    yield Course(**kw)
    kw.update(start_time="14:00", end_time="17:00", user=USERS.pop(),
              max_places=15)
    yield Course(**kw)
    kw.update(start_time="18:00", end_time="20:00", user=USERS.pop(),
              max_places=15)
    yield Course(**kw)

    COURSES = Cycler(Course.objects.all())
    STATES = Cycler(EnrolmentStates.objects())

    def fits(course, pupil):
        # A course fits when it still has a free place and the pupil is
        # not yet enrolled in it.
        if course.max_places and course.get_free_places() == 0:
            return False
        if Enrolment.objects.filter(course=course, pupil=pupil).count():
            return False
        return True

    def enrol(pupil):
        # Returns None when the cycled course does not fit; the original
        # yielded those None values as well, so this is unchanged.
        # ``i`` is the enclosing loop variable (bound before first call).
        course = COURSES.pop()
        if fits(course, pupil):
            kw = dict(user=USERS.pop(), course=course, pupil=pupil)
            kw.update(request_date=dd.demo_date(-i))
            kw.update(state=STATES.pop())
            return Enrolment(**kw)

    for i, p in enumerate(
            dd.plugins.courses.pupil_model.objects.order_by('id')):
        yield enrol(p)
        if i % 2 == 0:
            yield enrol(p)
        if i % 3 == 0:
            yield enrol(p)

    # presumably 'robin' is the demo superuser created by another fixture
    # -- TODO confirm
    ar = rt.login('robin')
    for obj in Course.objects.all():
        obj.update_auto_events(ar)

    # Suggested calendar entries older than 7 days should be marked as
    # either took_place or cancelled.
    qs = Event.objects.filter(
        start_date__lte=dd.demo_date(-7),
        state=EntryStates.suggested)
    for i, obj in enumerate(qs):
        if i % 9:
            obj.state = EntryStates.took_place
        else:
            obj.state = EntryStates.cancelled
        obj.full_clean()
        obj.save()

    # participants of events which took place should be marked as
    # either absent or present or excused:
    qs = Guest.objects.filter(
        event__start_date__lte=dd.demo_date(-7),
        event__state=EntryStates.took_place).order_by('id')
    STATES = Cycler(GuestStates.get_list_items())
    for i, obj in enumerate(qs):
        obj.state = STATES.pop()
        obj.full_clean()
        obj.save()
| {
"content_hash": "03407375f71c7d417b2cb1fb9571ac4d",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 72,
"avg_line_length": 32.237864077669904,
"alnum_prop": 0.6291221201626261,
"repo_name": "lino-framework/book",
"id": "17cb8142b962cf2f3838147ce64dada88b7590a1",
"size": "6751",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino_book/projects/avanti1/fixtures/demo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
} |
from django import forms
from .models import Categoria
class PerguntaForm(forms.Form):
    """Form for creating a quiz question with its answers and category."""
    # NOTE(review): SlugField restricts the question text to letters,
    # digits, hyphens and underscores; a free-text question would need
    # CharField -- confirm this restriction is intended.
    pergunta = forms.SlugField(label="Pergunta")
    resposta = forms.CharField(label="Resposta", max_length=100)
    alternativa = forms.CharField(label="Resposta Alternativa", max_length=100)
    categoria = forms.ModelChoiceField(queryset=Categoria.objects.all())
class CategoriaForm(forms.Form):
    """Form for creating a quiz category."""
    nome = forms.CharField(label="Categoria", max_length=100)
| {
"content_hash": "3c9e6b1c0b28c01878c2df82c199eab4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 32.42857142857143,
"alnum_prop": 0.7555066079295154,
"repo_name": "vss-moraes/dcn_quizz",
"id": "397c7ef338f044473e73af9ba9f81e38adf3b867",
"size": "454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quizz/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "204"
},
{
"name": "HTML",
"bytes": "9092"
},
{
"name": "JavaScript",
"bytes": "727"
},
{
"name": "Python",
"bytes": "15802"
}
],
"symlink_target": ""
} |
from rest_framework import permissions
from bluebottle.tasks.models import Task, TaskMember
class IsTaskAuthorOrReadOnly(permissions.BasePermission):
    """
    Allows write access only to the author of the (related) task.

    Read-only (SAFE_METHODS) requests are always allowed.
    """

    def _get_task_from_request(self, request):
        """Return the Task referenced by the request body or query string,
        or None when absent or unknown."""
        if request.data:
            task_id = request.data.get('task', None)
        else:
            task_id = request.query_params.get('task', None)
        if not task_id:
            return None
        try:
            return Task.objects.get(pk=task_id)
        except Task.DoesNotExist:
            return None

    def has_permission(self, request, view):
        # Read permissions are allowed to any request, so we'll always
        # allow GET, HEAD or OPTIONS requests.
        if request.method in permissions.SAFE_METHODS:
            return True
        # For objects/lists related to a Task (e.g. TaskMember) the writer
        # must be the task's author.
        task = self._get_task_from_request(request)
        if task:
            return task.author == request.user
        return False

    def has_object_permission(self, request, view, obj):
        # Read permissions are allowed to any request, so we'll always
        # allow GET, HEAD or OPTIONS requests.
        if request.method in permissions.SAFE_METHODS:
            return True
        if isinstance(obj, Task):
            return obj.author == request.user
        if isinstance(obj, TaskMember):
            return obj.task.author == request.user
        # BUGFIX: the original fell off the end and implicitly returned
        # None for any other object type; deny explicitly instead.
        return False
class IsMemberOrReadOnly(permissions.BasePermission):
    """Write access only for the member of a TaskMember; reads are open."""

    def has_object_permission(self, request, view, obj):
        # Safe methods (GET/HEAD/OPTIONS) are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        # Writers must own the TaskMember record.
        if not isinstance(obj, TaskMember):
            return False
        return obj.member == request.user
class IsMemberOrAuthorOrReadOnly(permissions.BasePermission):
    """Write access for the task author or the member; reads are open."""

    def has_object_permission(self, request, view, obj):
        # Safe methods (GET/HEAD/OPTIONS) are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        if not isinstance(obj, TaskMember):
            return False
        # Either the task's author or the enrolled member may write.
        return request.user in (obj.task.author, obj.member)
| {
"content_hash": "0ea083331fad8e3440588d6160c50926",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 107,
"avg_line_length": 34.56,
"alnum_prop": 0.6331018518518519,
"repo_name": "jfterpstra/bluebottle",
"id": "43f547118e7c1af38329e7997cb08bcbf34020ee",
"size": "2592",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bluebottle/bb_tasks/permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16556"
},
{
"name": "HTML",
"bytes": "173443"
},
{
"name": "JavaScript",
"bytes": "434"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "1694079"
},
{
"name": "Shell",
"bytes": "2951"
},
{
"name": "Smarty",
"bytes": "4317"
}
],
"symlink_target": ""
} |
import rospy
import tensorflow as tf
import numpy as np
import scipy.spatial.distance
import errno
from os import listdir
from os.path import join
from os.path import basename
from shutil import copyfile
from scipy.misc import imread, imresize
from math import*
import heapq
import time
import cv2
import csv
class vgg16:
    """VGG16 network graph (TF1 style) used as a fixed feature extractor.

    Builds the 13 convolutional and 3 fully connected layers, exposing
    each activation as an attribute (``conv1_1`` ... ``pool5``, ``fc1``,
    ``fc2``, ``fc3l``) plus ``probs`` (softmax over the fc3 logits).

    ``self.parameters`` lists every kernel/bias Variable in creation
    order; ``load_weights`` assigns the arrays of the weight file to them
    in alphabetically-sorted key order, so the creation order here must
    stay conv1_1..conv5_3 followed by the three fc layers.
    """

    # (number of conv layers, output channels) for each of the 5 VGG blocks
    _CONV_BLOCKS = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]

    def __init__(self, imgs, weights=None, sess=None):
        self.imgs = imgs
        self.convlayers()
        self.fc_layers()
        self.probs = tf.nn.softmax(self.fc3l)
        if weights is not None and sess is not None:
            self.load_weights(weights, sess)

    def convlayers(self):
        """Build the convolutional part; activations stored as attributes."""
        self.parameters = []
        # zero-mean input (channel means from the reference VGG release)
        with tf.name_scope('preprocess'):
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                               shape=[1, 1, 1, 3], name='img_mean')
            net = self.imgs - mean
        in_channels = 3
        for block_idx, (n_convs, out_channels) in enumerate(self._CONV_BLOCKS, 1):
            for conv_idx in range(1, n_convs + 1):
                layer_name = 'conv%d_%d' % (block_idx, conv_idx)
                with tf.name_scope(layer_name) as scope:
                    kernel = tf.Variable(
                        tf.truncated_normal([3, 3, in_channels, out_channels],
                                            dtype=tf.float32, stddev=1e-1),
                        name='weights')
                    conv = tf.nn.conv2d(net, kernel, [1, 1, 1, 1],
                                        padding='SAME')
                    biases = tf.Variable(
                        tf.constant(0.0, shape=[out_channels],
                                    dtype=tf.float32),
                        trainable=True, name='biases')
                    net = tf.nn.relu(tf.nn.bias_add(conv, biases), name=scope)
                    setattr(self, layer_name, net)
                    self.parameters += [kernel, biases]
                in_channels = out_channels
            # 2x2 stride-2 max pooling after each conv block.
            # BUGFIX: the original named the pool5 op 'pool4' (copy/paste
            # slip); nothing looks ops up by pool name, so fixing it is safe.
            pool_name = 'pool%d' % block_idx
            net = tf.nn.max_pool(net,
                                 ksize=[1, 2, 2, 1],
                                 strides=[1, 2, 2, 1],
                                 padding='SAME',
                                 name=pool_name)
            setattr(self, pool_name, net)

    def fc_layers(self):
        """Build fc1 (relu), fc2 (relu) and the raw fc3 logits (``fc3l``).

        The relu ops are created inside the name scopes, so the tensors
        'fc1/Relu:0' and 'fc2/Relu:0' stay retrievable by name (the
        retrieval functions below fetch 'fc2/Relu:0').
        """
        shape = int(np.prod(self.pool5.get_shape()[1:]))
        net = tf.reshape(self.pool5, [-1, shape])
        for i, (n_in, n_out) in enumerate(
                [(shape, 4096), (4096, 4096), (4096, 1000)], 1):
            with tf.name_scope('fc%d' % i):
                w = tf.Variable(
                    tf.truncated_normal([n_in, n_out], dtype=tf.float32,
                                        stddev=1e-1), name='weights')
                b = tf.Variable(
                    tf.constant(1.0, shape=[n_out], dtype=tf.float32),
                    trainable=True, name='biases')
                net = tf.nn.bias_add(tf.matmul(net, w), b)
                self.parameters += [w, b]
                if i < 3:
                    net = tf.nn.relu(net)
                    setattr(self, 'fc%d' % i, net)
                else:
                    # raw logits; softmax is applied in __init__
                    self.fc3l = net

    def load_weights(self, weight_file, sess):
        """Assign the arrays of ``weight_file`` to ``self.parameters``.

        Keys are sorted alphabetically, which matches the creation order
        of ``self.parameters`` for the standard vgg16 weight-file naming
        (conv1_1_W, conv1_1_b, ...) -- 'W' sorts before 'b' in ASCII.
        """
        weights = np.load(weight_file)
        keys = sorted(weights.keys())
        print('Load weights...')
        for i, k in enumerate(keys):
            sess.run(self.parameters[i].assign(weights[k]))
        print('Load complete.')
def square_rooted(x):
    """Return the Euclidean (L2) norm of vector ``x``, rounded to 3 decimals."""
    total = 0.0
    for component in x:
        total += component * component
    return round(sqrt(total), 3)
def cosine_similarity(x, y):
    """Cosine similarity of vectors ``x`` and ``y``, rounded to 3 decimals.

    Each norm is itself rounded to 3 decimals before the division
    (quirk of the surrounding code, preserved here).
    """
    numerator = sum(a * b for a, b in zip(x, y))
    norm_x = round(sqrt(sum(a * a for a in x)), 3)
    norm_y = round(sqrt(sum(b * b for b in y)), 3)
    return round(numerator / float(norm_x * norm_y), 3)
def retrieve_nsmallest_dist(query_image, test_dirs, out_dir, n, dist_type, weights_path, log_dir):
    """Retrieve the n test images closest to query_image and copy them out.

    Compares the query against every image in every directory of
    ``test_dirs`` using the 4096-d fc2 features of a VGG16 network,
    writes a tab-separated ``retrieval_log.csv`` into ``log_dir`` and
    copies the n best matches into ``out_dir``.

    Args:
        query_image (ndarray): query image array (resized to 224x224).
        test_dirs (list[str]): directories containing candidate images.
        out_dir (str): directory the retrieved images are copied to.
        n (int): number of images to retrieve.
        dist_type (str): distance metric: 'euc', 'cos' or 'chev'.
        weights_path (str): path of the VGG16 weight file.
        log_dir (str): directory the CSV log is written to.

    Raises:
        ValueError: if dist_type is not one of 'euc', 'cos', 'chev'.
    """
    # Map metric name -> scipy distance function.  BUGFIX: an unknown
    # dist_type previously caused an UnboundLocalError on curr_dist deep
    # inside the loop; fail fast with a clear error instead.
    metrics = {
        'euc': scipy.spatial.distance.euclidean,
        'cos': scipy.spatial.distance.cosine,
        'chev': scipy.spatial.distance.chebyshev,
    }
    try:
        distance = metrics[dist_type]
    except KeyError:
        raise ValueError("Unknown dist_type: %r (expected euc, cos or chev)"
                         % (dist_type,))

    # Build the network.
    sess = tf.Session()
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = vgg16(imgs, weights_path, sess)

    # fc2 activation used as the image descriptor
    # (use 'fc1/Relu:0' here to extract from fc1 instead).
    # Hoisted out of the per-image loop -- the tensor never changes.
    feat_layer = sess.graph.get_tensor_by_name('fc2/Relu:0')

    # Feature vector of the query image.
    img_query = imresize(query_image, (224, 224))
    feature_query = np.array(
        sess.run(feat_layer, feed_dict={vgg.imgs: [img_query]}))

    # distance -> file path for every candidate image
    # (equal distances overwrite each other; original behaved the same).
    img_dict = {}
    for test_dir in test_dirs:
        print(test_dir)
        for path in (join(test_dir, f) for f in listdir(test_dir)):
            try:
                img_test = imread(path)
                img_test = cv2.cvtColor(img_test, cv2.COLOR_BGRA2BGR)
                img_test = imresize(img_test, (224, 224))
            except Exception:
                # unreadable / non-image file: skip it
                print("Error with file:\t" + path)
                continue
            feature_test = np.array(
                sess.run(feat_layer, feed_dict={vgg.imgs: [img_test]}))
            img_dict[distance(feature_query, feature_test)] = path

    # Log and copy the n nearest matches.  BUGFIX: the log file is now
    # closed even if copying fails (with-statement).
    keys_sorted = heapq.nsmallest(n, img_dict)
    with open(join(log_dir, "retrieval_log.csv"), 'w') as fp:
        writer = csv.writer(fp, delimiter='\t')
        writer.writerow(["File", "Distance"])
        for rank, dist in enumerate(keys_sorted, 1):
            fname = basename(img_dict[dist])
            print(str(rank) + ":\t" + "Distance: " + str(dist) +
                  ", FileName: " + fname)
            writer.writerow([fname, str(dist)])
            copyfile(img_dict[dist], out_dir + "/" + fname)
def retrieve_dist(query_image, test_path, dist_type, weights_path):
    """Compute the feature-space distance between a query image and a test image.

    Both images are pushed through a VGG16 network and their 'fc2/Relu'
    activations (4096-dimensional vectors) are compared.

    Args:
        query_image (np.ndarray): Query image as a pixel array.
        test_path (str): Path of the test image on disk.
        dist_type (str): Distance algorithm: "euc", "cos" or "chev".
        weights_path (str): Path to the pretrained VGG16 weights file.

    Returns:
        float: Distance between the two feature vectors.

    Raises:
        ValueError: If ``dist_type`` is not one of "euc", "cos", "chev".
        (Previously an unknown ``dist_type`` crashed with an unhelpful
        NameError because ``curr_dist`` was never assigned.)
    """
    # Build the graph and load the pretrained weights. Close the session
    # when done so TensorFlow resources are released (the original leaked it).
    sess = tf.Session()
    try:
        imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
        vgg = vgg16(imgs, weights_path, sess)
        # VGG16 expects 224x224 inputs.
        img_query = imresize(query_image, (224, 224))
        img_test = imread(test_path)
        img_test = imresize(img_test, (224, 224))
        # Extract the image descriptor at layer fc2/Relu.
        # (Change to 'fc1/Relu:0' to use the fc1 descriptor instead.)
        layer = sess.graph.get_tensor_by_name('fc2/Relu:0')
        feature_query = np.array(
            sess.run(layer, feed_dict={vgg.imgs: [img_query]}))
        feature_test = np.array(
            sess.run(layer, feed_dict={vgg.imgs: [img_test]}))
    finally:
        sess.close()
    if dist_type == "euc":
        return scipy.spatial.distance.euclidean(feature_query, feature_test)
    if dist_type == "cos":
        return scipy.spatial.distance.cosine(feature_query, feature_test)
    if dist_type == "chev":
        return scipy.spatial.distance.chebyshev(feature_query, feature_test)
    raise ValueError(
        "Unknown dist_type: %r (expected 'euc', 'cos' or 'chev')" % dist_type)
| {
"content_hash": "4f8c5d424e4ccc873b53a8a1dccdfc65",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 142,
"avg_line_length": 45.53932584269663,
"alnum_prop": 0.5321490254132741,
"repo_name": "HassanAmr/Visio-based-Object-Placement",
"id": "e018858d9594dcbfccd8a5ed5c0bbae2f46cb4ac",
"size": "21358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visiobased_object_placement/scripts/image_retrieval.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "206883"
},
{
"name": "CMake",
"bytes": "47866"
},
{
"name": "Python",
"bytes": "54993"
}
],
"symlink_target": ""
} |
import abc
import gtimer as gt
from rlkit.core.rl_algorithm import BaseRLAlgorithm
from rlkit.data_management.replay_buffer import ReplayBuffer
from rlkit.samplers.data_collector import PathCollector
class BatchRLAlgorithm(BaseRLAlgorithm, metaclass=abc.ABCMeta):
    """Batch-mode RL training loop.

    Each epoch first collects evaluation paths, then alternates
    ``num_train_loops_per_epoch`` rounds of exploration collection,
    replay-buffer storage and ``num_trains_per_train_loop`` gradient steps
    on batches sampled from the replay buffer.
    """
    def __init__(
            self,
            trainer,
            exploration_env,
            evaluation_env,
            exploration_data_collector: PathCollector,
            evaluation_data_collector: PathCollector,
            replay_buffer: ReplayBuffer,
            batch_size,
            max_path_length,
            num_epochs,
            num_eval_steps_per_epoch,
            num_expl_steps_per_train_loop,
            num_trains_per_train_loop,
            num_train_loops_per_epoch=1,
            min_num_steps_before_training=0,
    ):
        """Store the sampling/training schedule hyper-parameters and pass
        the environments, collectors and buffer to the base algorithm."""
        super().__init__(
            trainer,
            exploration_env,
            evaluation_env,
            exploration_data_collector,
            evaluation_data_collector,
            replay_buffer,
        )
        # Schedule hyper-parameters used by _train below.
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.num_epochs = num_epochs
        self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
        self.num_trains_per_train_loop = num_trains_per_train_loop
        self.num_train_loops_per_epoch = num_train_loops_per_epoch
        self.num_expl_steps_per_train_loop = num_expl_steps_per_train_loop
        self.min_num_steps_before_training = min_num_steps_before_training

    def _train(self):
        # Optionally pre-fill the replay buffer with exploration paths
        # before any gradient updates happen.
        if self.min_num_steps_before_training > 0:
            init_expl_paths = self.expl_data_collector.collect_new_paths(
                self.max_path_length,
                self.min_num_steps_before_training,
                discard_incomplete_paths=False,
            )
            self.replay_buffer.add_paths(init_expl_paths)
            self.expl_data_collector.end_epoch(-1)
        for epoch in gt.timed_for(
            range(self._start_epoch, self.num_epochs),
            save_itrs=True,
        ):
            # Evaluation paths are collected at the start of each epoch;
            # incomplete paths are discarded for evaluation statistics.
            self.eval_data_collector.collect_new_paths(
                self.max_path_length,
                self.num_eval_steps_per_epoch,
                discard_incomplete_paths=True,
            )
            gt.stamp('evaluation sampling')
            for _ in range(self.num_train_loops_per_epoch):
                new_expl_paths = self.expl_data_collector.collect_new_paths(
                    self.max_path_length,
                    self.num_expl_steps_per_train_loop,
                    discard_incomplete_paths=False,
                )
                gt.stamp('exploration sampling', unique=False)
                self.replay_buffer.add_paths(new_expl_paths)
                gt.stamp('data storing', unique=False)
                # Gradient updates on random batches from the buffer.
                self.training_mode(True)
                for _ in range(self.num_trains_per_train_loop):
                    train_data = self.replay_buffer.random_batch(
                        self.batch_size)
                    self.trainer.train(train_data)
                gt.stamp('training', unique=False)
                self.training_mode(False)
            self._end_epoch(epoch)
| {
"content_hash": "a5e235a6298f5c5d5b7ae9143839996e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 76,
"avg_line_length": 37.892857142857146,
"alnum_prop": 0.5673892554194157,
"repo_name": "vitchyr/rlkit",
"id": "848c5eb806ff23b19d9ef8611ea17b1ab5f9f876",
"size": "3183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rlkit/core/batch_rl_algorithm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3338"
},
{
"name": "Python",
"bytes": "355210"
}
],
"symlink_target": ""
} |
"""
C code printer
The CCodePrinter converts single sympy expressions into single C expressions,
using the functions defined in math.h where possible.
A complete code generator, which uses ccode extensively, can be found in
sympy.utilities.codegen. The codegen module can be used to generate complete
source code files that are compilable without further modifications.
"""
from __future__ import print_function, division
from sympy.core import S, C
from sympy.core.compatibility import string_types
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
# dictionary mapping sympy function to (argument_conditions, C_function).
# Used in CCodePrinter._print_Function(self)
known_functions = {
"ceiling": [(lambda x: True, "ceil")],
"Abs": [(lambda x: not x.is_integer, "fabs")],
}
class CCodePrinter(CodePrinter):
    """A printer to convert python expressions to strings of c code"""
    printmethod = "_ccode"
    _default_settings = {
        'order': None,
        'full_prec': 'auto',
        'precision': 15,
        'user_functions': {},
        'human': True,
    }

    def __init__(self, settings=None):
        """Register function mappings supplied by user.

        Bug fixes: the mutable default argument ``settings={}`` was replaced
        with ``None``, and user-supplied ``user_functions`` entries are no
        longer normalized in place (which silently mutated the caller's
        dictionary).
        """
        if settings is None:
            settings = {}
        CodePrinter.__init__(self, settings)
        self.known_functions = dict(known_functions)
        # Normalize each user entry to the [(condition, cname), ...] form
        # without touching the caller's dict.
        for name, value in settings.get('user_functions', {}).items():
            if isinstance(value, list):
                self.known_functions[name] = value
            else:
                self.known_functions[name] = [(lambda *x: True, value)]

    def _rate_index_position(self, p):
        """function to calculate score based on position among indices

        This method is used to sort loops in an optimized order, see
        CodePrinter._sort_optimized()
        """
        return p*5

    def _get_statement(self, codestring):
        # C statements are terminated by a semicolon.
        return "%s;" % codestring

    def doprint(self, expr, assign_to=None):
        """
        Actually format the expression as C code.
        """
        if isinstance(assign_to, string_types):
            assign_to = C.Symbol(assign_to)
        elif not isinstance(assign_to, (C.Basic, type(None))):
            raise TypeError("CCodePrinter cannot assign to object of type %s" %
                    type(assign_to))
        # keep a set of expressions that are not strictly translatable to C
        # and number constants that must be declared and initialized
        not_c = self._not_supported = set()
        self._number_symbols = set()
        # We treat top level Piecewise here to get if tests outside loops
        lines = []
        if isinstance(expr, C.Piecewise):
            for i, (e, c) in enumerate(expr.args):
                if i == 0:
                    lines.append("if (%s) {" % self._print(c))
                elif i == len(expr.args) - 1 and c == True:
                    # '== True' is deliberate: c may be a sympy boolean object
                    lines.append("else {")
                else:
                    lines.append("else if (%s) {" % self._print(c))
                code0 = self._doprint_a_piece(e, assign_to)
                lines.extend(code0)
                lines.append("}")
        else:
            code0 = self._doprint_a_piece(expr, assign_to)
            lines.extend(code0)
        # format the output
        if self._settings["human"]:
            frontlines = []
            if len(not_c) > 0:
                frontlines.append("// Not C:")
                for expr in sorted(not_c, key=str):
                    frontlines.append("// %s" % repr(expr))
            for name, value in sorted(self._number_symbols, key=str):
                frontlines.append("double const %s = %s;" % (name, value))
            lines = frontlines + lines
            lines = "\n".join(lines)
            result = self.indent_code(lines)
        else:
            lines = self.indent_code("\n".join(lines))
            result = self._number_symbols, not_c, lines
        del self._not_supported
        del self._number_symbols
        return result

    def _get_loop_opening_ending(self, indices):
        """Returns a tuple (open_lines, close_lines) containing lists of codelines
        """
        open_lines = []
        close_lines = []
        loopstart = "for (int %(var)s=%(start)s; %(var)s<%(end)s; %(var)s++){"
        for i in indices:
            # C arrays start at 0 and end at dimension-1
            open_lines.append(loopstart % {
                'var': self._print(i.label),
                'start': self._print(i.lower),
                'end': self._print(i.upper + 1)})
            close_lines.append("}")
        return open_lines, close_lines

    def _print_Pow(self, expr):
        """Print powers, special-casing reciprocal and square root."""
        PREC = precedence(expr)
        if expr.exp == -1:
            return '1.0/%s' % (self.parenthesize(expr.base, PREC))
        elif expr.exp == 0.5:
            return 'sqrt(%s)' % self._print(expr.base)
        else:
            return 'pow(%s, %s)' % (self._print(expr.base),
                                    self._print(expr.exp))

    def _print_Rational(self, expr):
        # Emit a long-double division so the value is not truncated to int.
        p, q = int(expr.p), int(expr.q)
        return '%d.0L/%d.0L' % (p, q)

    def _print_Indexed(self, expr):
        # calculate index for 1d array (row-major flattening of all indices)
        dims = expr.shape
        inds = [ i.label for i in expr.indices ]
        elem = S.Zero
        offset = S.One
        for i in reversed(range(expr.rank)):
            elem += offset*inds[i]
            offset *= dims[i]
        return "%s[%s]" % (self._print(expr.base.label), self._print(elem))

    def _print_Exp1(self, expr):
        return "M_E"

    def _print_Pi(self, expr):
        return 'M_PI'

    def _print_Infinity(self, expr):
        return 'HUGE_VAL'

    def _print_NegativeInfinity(self, expr):
        return '-HUGE_VAL'

    def _print_Piecewise(self, expr):
        # This method is called only for inline if constructs
        # Top level piecewise is handled in doprint()
        ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c), self._print(e))
                   for e, c in expr.args[:-1]]
        last_line = ""
        if expr.args[-1].cond == True:
            last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
        else:
            ecpairs.append("(%s) ? (\n%s\n" %
                           (self._print(expr.args[-1].cond),
                            self._print(expr.args[-1].expr)))
        code = "%s" + last_line
        return code % ": ".join(ecpairs) + " )"

    def _print_Function(self, expr):
        """Print a function call, honoring the known_functions mapping."""
        if expr.func.__name__ in self.known_functions:
            cond_cfunc = self.known_functions[expr.func.__name__]
            for cond, cfunc in cond_cfunc:
                if cond(*expr.args):
                    return "%s(%s)" % (cfunc, self.stringify(expr.args, ", "))
        if hasattr(expr, '_imp_') and isinstance(expr._imp_, C.Lambda):
            # inlined function
            return self._print(expr._imp_(*expr.args))
        return CodePrinter._print_Function(self, expr)

    def indent_code(self, code):
        """Accepts a string of code or a list of code lines"""
        if isinstance(code, string_types):
            code_lines = self.indent_code(code.splitlines(True))
            return ''.join(code_lines)
        tab = "   "
        inc_token = ('{', '(', '{\n', '(\n')
        dec_token = ('}', ')')
        # strip existing leading whitespace, then re-indent by brace depth
        code = [ line.lstrip(' \t') for line in code ]
        increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
        decrease = [ int(any(map(line.startswith, dec_token)))
                     for line in code ]
        pretty = []
        level = 0
        for n, line in enumerate(code):
            if line == '' or line == '\n':
                pretty.append(line)
                continue
            level -= decrease[n]
            pretty.append("%s%s" % (tab*level, line))
            level += increase[n]
        return pretty
def ccode(expr, assign_to=None, **settings):
    r"""Converts an expr to a string of c code

    Parameters
    ==========

    expr : sympy.core.Expr
        a sympy expression to be converted
    precision : optional
        the precision for numbers such as pi [default=15]
    user_functions : optional
        A dictionary where keys are FunctionClass instances and values
        are their string representations. Alternatively, the
        dictionary value can be a list of tuples i.e. [(argument_test,
        cfunction_string)]. See below for examples.
    human : optional
        If True, the result is a single string that may contain some
        constant declarations for the number symbols. If False, the
        same information is returned in a more programmer-friendly
        data structure.

    Examples
    ========

    >>> from sympy import ccode, symbols, Rational, sin, ceiling, Abs
    >>> x, tau = symbols(["x", "tau"])
    >>> ccode((2*tau)**Rational(7,2))
    '8*sqrt(2)*pow(tau, 7.0L/2.0L)'
    >>> ccode(sin(x), assign_to="s")
    's = sin(x);'
    >>> custom_functions = {
    ...   "ceiling": "CEIL",
    ...   "Abs": [(lambda x: not x.is_integer, "fabs"),
    ...           (lambda x: x.is_integer, "ABS")]
    ... }
    >>> ccode(Abs(x) + ceiling(x), user_functions=custom_functions)
    'fabs(x) + CEIL(x)'
    """
    printer = CCodePrinter(settings)
    return printer.doprint(expr, assign_to)
def print_ccode(expr, **settings):
    """Prints C representation of the given expression."""
    code = ccode(expr, **settings)
    print(code)
| {
"content_hash": "f8da52823d90561347192d9d8f8762a1",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 82,
"avg_line_length": 35.96969696969697,
"alnum_prop": 0.5474936815501263,
"repo_name": "hrashk/sympy",
"id": "2d62e844f5c9f4abfa5c2a678b36569fa2bfe930",
"size": "9496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/printing/ccode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13971941"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "1300"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
admin.autodiscover()
import logging
# set up logger, for debugging
logger = logging.getLogger('sierra.custom')
# Root URL permanently redirects to the browsable API root; media files are
# served through Django's static() helper (development-style serving).
urlpatterns = [
    url(r'^$', RedirectView.as_view(url=reverse_lazy('api-root'),
                                    permanent=True)),
    url(r'^api/', include('shelflist.urls')),
    url(r'^api/', include('api.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# The Django admin is only routed when ADMIN_ACCESS is enabled in settings.
if settings.ADMIN_ACCESS:
    urlpatterns.append(url(r'^admin/', include(admin.site.urls)))
# Debug-toolbar URLs are only mounted in DEBUG mode.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
| {
"content_hash": "4fe624ea4214c85d2fd70db4cac38a00",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 33.03846153846154,
"alnum_prop": 0.7112922002328289,
"repo_name": "unt-libraries/catalog-api",
"id": "dfd3d8355a1713f18b7c9eed8acf125fcf1dcd23",
"size": "859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/sierra/sierra/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7252"
},
{
"name": "CSS",
"bytes": "250"
},
{
"name": "Dockerfile",
"bytes": "1512"
},
{
"name": "HTML",
"bytes": "8099"
},
{
"name": "JavaScript",
"bytes": "598"
},
{
"name": "Makefile",
"bytes": "7425"
},
{
"name": "Python",
"bytes": "1186791"
},
{
"name": "Shell",
"bytes": "18463"
}
],
"symlink_target": ""
} |
import math
import copy
import string
class BBox():
    """Axis-aligned bounding box.

    Coordinates are normalized on construction so that
    ``minx <= maxx`` and ``miny <= maxy`` always hold.
    """

    def __init__(self, minx, miny, maxx, maxy):
        self.minx = min(minx, maxx)
        self.miny = min(miny, maxy)
        self.maxx = max(minx, maxx)
        self.maxy = max(miny, maxy)

    def __str__(self):
        return 'BBox[' + str(self.minx) + \
            ', ' + str(self.miny) + \
            ', ' + str(self.maxx) + \
            ', ' + str(self.maxy) + ']'

    def extendByBBox(self, bbox):
        """Grow this box in place so it also covers *bbox*."""
        self.minx = min(self.minx, bbox.minx)
        self.miny = min(self.miny, bbox.miny)
        self.maxx = max(self.maxx, bbox.maxx)
        self.maxy = max(self.maxy, bbox.maxy)

    def __add__(self, bbox):
        """Return a new box covering both operands."""
        result = copy.copy(self)
        result.extendByBBox(bbox)
        return result

    def __iadd__(self, bbox):
        """In-place union (``a += b``).

        Bug fix: ``__iadd__`` must return the updated object; the original
        returned None, which made ``a += b`` rebind *a* to None.
        """
        self.extendByBBox(bbox)
        return self

    def getArea(self):
        """Return width * height of the box."""
        return (self.maxx - self.minx) * (self.maxy - self.miny)

    def contains(self, bbox):
        """True when *bbox* lies entirely inside (or on) this box."""
        return self.minx <= bbox.minx and \
            self.miny <= bbox.miny and \
            self.maxx >= bbox.maxx and \
            self.maxy >= bbox.maxy
class Point():
    """A 2D point with float coordinates."""

    def __init__(self, x, y):
        self.x = float(x)
        self.y = float(y)

    def __str__(self):
        return 'Point[' + str(self.x) + ',' + str(self.y) + ']'

    def __eq__(self, p2):
        # Equal iff both coordinates match exactly.
        return self.x == p2.x and self.y == p2.y

    def distanceTo(self, p2):
        """Return the Euclidean distance to *p2*."""
        dx = p2.x - self.x
        dy = p2.y - self.y
        return math.sqrt((dx ** 2) + (dy ** 2))

    def setFromWkt(self, wkt):
        """Set x and y from a well-known-text string like 'POINT (1 2)'.

        Returns True on success, False otherwise; on failure the point is
        left unchanged.  (The original bare ``except: pass`` was narrowed
        to ValueError, the only error float() can raise here.)
        """
        result = False
        wkt = wkt.strip().upper()
        if wkt[:5] == 'POINT':
            wkt = wkt[5:].strip()
            wkt = wkt[1:-1]
            coords = wkt.split(' ')
            if len(coords) == 2:
                try:
                    x = float(coords[0])
                    y = float(coords[1])
                except ValueError:
                    # Non-numeric coordinates: keep previous values.
                    pass
                else:
                    self.x = x
                    self.y = y
                    result = True
        return result

    def getWkt(self):
        """Return 'POINT (x y)' using Util's compact number formatting."""
        strx = Util().formatNumber(self.x)
        stry = Util().formatNumber(self.y)
        return 'POINT (' + strx + ' ' + stry + ')'

    def getBBox(self):
        """Return a degenerate BBox covering just this point."""
        return BBox(self.x, self.y, self.x, self.y)
class Line():
    """A polyline: an ordered sequence of point objects with .x/.y."""

    def __init__(self, points = None):
        # A None default avoids the shared-mutable-default pitfall.
        if points:
            self.points = points
        else:
            self.points = []

    def __str__(self):
        return 'Line[' + str(len(self.points)) + ' points]'

    def addPoints(self, pointset):
        """Append every point of *pointset* (any iterable) to this line."""
        for p in pointset:
            self.points.append(p)

    def addPoint(self, point):
        """Append a single point."""
        self.points.append(point)

    def addXy(self, x, y):
        """Append a new Point built from raw coordinates."""
        self.points.append(Point(x, y))

    def getLength(self):
        """Return the summed length of all segments (0 for < 2 points)."""
        result = 0
        for i in range(len(self.points) - 1):
            result += self.points[i].distanceTo(self.points[i + 1])
        return result

    def getBBox(self):
        """Return the bounding BBox of all points, or None when empty."""
        if len(self.points) > 0:
            minx = self.points[0].x
            miny = self.points[0].y
            maxx = minx
            maxy = miny
            if len(self.points) > 1:
                for p in self.points:
                    minx = min(minx, p.x)
                    miny = min(miny, p.y)
                    maxx = max(maxx, p.x)
                    maxy = max(maxy, p.y)
            return BBox(minx, miny, maxx, maxy)
        else:
            return None

    def __len__(self):
        return len(self.points)

    def getWkt(self):
        """Return the WKT 'LINESTRING (...)' representation."""
        result = "LINESTRING ("
        result += ', '.join(self._getWktCoords())
        result += ')'
        return result

    def setFromWkt(self, wkt):
        """Replace the points from a well-known-text LINESTRING string.

        Returns True on success; raises Exception for non-LINESTRING input.
        Bug fix: the original always returned False, even after successfully
        parsing the points; this now matches Point.setFromWkt's contract.
        """
        self.points = []
        result = False
        wkt = wkt.strip().upper()
        if wkt[:10] == 'LINESTRING':
            self.points = Util().wktPartToPoints(wkt[10:].strip())
            result = True
        else:
            raise Exception('Invalid WKT for LINESTRING')
        return result

    def _getWktCoords(self, closed=False):
        """Return 'x y' coordinate strings; if *closed*, repeat the first."""
        result = []
        for p in self.points:
            strx = Util().formatNumber(p.x)
            stry = Util().formatNumber(p.y)
            result.append(strx + ' ' + stry)
        if not closed:
            return result
        else:
            result.append(result[0])
            return result

    def _getRingArea(self):
        """Signed trapezoid-rule area of the closed ring (>0 => clockwise)."""
        result = 0
        if len(self) > 2:
            for i in range(0, len(self) - 1):
                dx = self.points[i + 1].x - self.points[i].x
                ay = (self.points[i + 1].y + self.points[i].y) / 2.0
                result += (dx * ay)
            # Closing segment from the last point back to the first.
            dx = self.points[0].x - self.points[len(self) - 1].x
            ay = (self.points[0].y + self.points[len(self) - 1].y) / 2.0
            result += (dx * ay)
        return result

    def _isRingClockwise(self):
        """True when the ring winds clockwise (positive signed area)."""
        return self._getRingArea() > 0

    def reverse(self):
        """Reverse the point order in place."""
        self.points = self.points[::-1]
class Polygon():
    """A polygon with one outer ring and zero or more holes (inner rings)."""

    def __init__(self, outer = None, inner = None):
        if outer:
            self.outer = outer
        else:
            self.outer = Line()
        if inner:
            self.inner = inner
        else:
            self.inner = []

    def __str__(self):
        return 'Polygon[' + str(len(self.outer)) + ' boundary points, ' + str(len(self.inner)) + ' holes]'

    def getBBox(self):
        """Bounding box of the outer ring extended by all holes; None if empty."""
        if len(self.outer) > 0:
            result = self.outer.getBBox()
            for hole in self.inner:
                result.extendByBBox(hole.getBBox())
            return result
        else:
            return None

    def getWkt(self):
        """Return the WKT 'POLYGON ((...), (...))' representation.

        Bug fix: inner rings were computed but never appended to the output
        (``result + ...`` instead of ``result += ...``); they are now
        emitted, comma-separated, after the outer ring.
        """
        result = "POLYGON ("
        result += '(' + ', '.join(self.outer._getWktCoords(True)) + ')'
        for hole in self.inner:
            result += ', (' + ', '.join(hole._getWktCoords(True)) + ')'
        result += ')'
        return result

    def getArea(self):
        """Absolute area of the outer ring minus the area of all holes."""
        result = abs(self.outer._getRingArea())
        for hole in self.inner:
            result -= abs(hole._getRingArea())
        return result

    def addInner(self, hole):
        """Add *hole* (a ring) as an inner boundary."""
        self.inner.append(hole)

    def isValid(self):
        """Validate ring sizes, orientation and bbox containment."""
        # outer ring: at least 3 points, positive extent, clockwise winding
        if len(self.outer) < 3:
            return False
        outerBBox = self.outer.getBBox()
        if outerBBox.getArea() <= 0:
            return False
        if not self.outer._isRingClockwise():
            return False
        for hole in self.inner:
            # holes: at least 3 points, positive extent, counter-clockwise,
            # and contained in the outer ring's bounding box
            if len(hole) < 3:
                return False
            innerBBox = hole.getBBox()
            if innerBBox.getArea() <= 0:
                return False
            if hole._isRingClockwise():
                return False
            if not outerBBox.contains(innerBBox):
                return False
        return True

    def setFromWkt(self, wkt):
        """Replace boundaries from a well-known-text POLYGON string.

        Returns True on success; raises Exception for non-POLYGON input.
        Bug fix: the original always returned False, even on success.
        """
        self.outer = Line()
        self.inner = []
        result = False
        wkt = wkt.strip().upper()
        if wkt[:7] == 'POLYGON':
            # remove 'POLYGON' and the outermost bracket pair
            wkt = wkt[7:].strip()
            firstbracket = wkt.find('(')
            lastbracket = Util()._findBackward(wkt, ')')
            wkt = wkt[firstbracket + 1:lastbracket]
            # TODO: parses the outer ring only; should split every
            # ()-delimited section so holes are read as well.
            self.outer.addPoints(Util().wktPartToPoints(wkt))
            result = True
        else:
            raise Exception('Invalid WKT for POLYGON')
        return result
# functionality
class Util():
    """Shared helpers for WKT parsing and compact number formatting."""

    def wktPartToPoints(self, part):
        """Parse 'x y, x y, ...' (optionally parenthesised) into Points.

        Returns an empty list when the parentheses are unbalanced; tokens
        that do not split into exactly two pieces are skipped.
        """
        points = []
        if part.count('(') != part.count(')'):
            return points
        cleaned = part.replace('(', '').replace(')', '')
        for token in cleaned.split(','):
            pieces = token.strip().split(' ')
            if len(pieces) == 2:
                points.append(Point(float(pieces[0]), float(pieces[1])))
        return points

    def formatNumber(self, num):
        """Render *num* without trailing zeros or a dangling decimal point."""
        text = str(num)
        if '.' not in text:
            return text
        return text.rstrip('0').rstrip('.')

    def _findBackward(self, s, subs):
        """Return the index of the last occurrence of *subs* in *s*, or -1."""
        pos = s[::-1].find(subs)
        if pos == -1:
            return -1
        return len(s) - pos - 1
| {
"content_hash": "a836318249af6f216751c36990848c16",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 106,
"avg_line_length": 30.00943396226415,
"alnum_prop": 0.48077124593943205,
"repo_name": "opengeogroep/geobo3",
"id": "1fb6baa5bd1e6751a7c6aa836ff144b137bf4ba4",
"size": "9543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geobo3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14653"
}
],
"symlink_target": ""
} |
import sys
import json
# Glyph metric properties emitted for every glyph, in this fixed order.
props = ['depth', 'height', 'italic', 'skew']
# The optional --width flag adds the advance width as a fifth value.
if len(sys.argv) > 1:
    if sys.argv[1] == '--width':
        props.append('width')
# Read the {font: {glyph: {prop: value}}} metrics mapping from stdin and
# emit it as an ES module ("export default {...}") on stdout.
data = json.load(sys.stdin)
sep = "export default {\n  "
for font in sorted(data):
    sys.stdout.write(sep + json.dumps(font))
    sep = ": {\n    "
    # Glyph keys are numeric strings; sort numerically, not lexically.
    for glyph in sorted(data[font], key=int):
        sys.stdout.write(sep + json.dumps(glyph) + ": ")
        # Emit 0 instead of 0.0 to keep the generated file compact.
        values = [value if value != 0.0 else 0 for value in
                  [data[font][glyph][key] for key in props]]
        sys.stdout.write(json.dumps(values))
        sep = ",\n    "
    sep = ",\n  },\n  "
sys.stdout.write(",\n  },\n};\n")
| {
"content_hash": "c21ea3cb52f6c32d57f2ac557a7e1330",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.5310650887573964,
"repo_name": "kwangkim/KaTeX",
"id": "e659257f40981c35532456a8bce76b3c5755bd3b",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metrics/format_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14590"
},
{
"name": "HTML",
"bytes": "5337"
},
{
"name": "JavaScript",
"bytes": "728463"
},
{
"name": "Makefile",
"bytes": "2587"
},
{
"name": "Perl 6",
"bytes": "44246"
},
{
"name": "Python",
"bytes": "14682"
},
{
"name": "Shell",
"bytes": "7436"
},
{
"name": "TeX",
"bytes": "1062"
}
],
"symlink_target": ""
} |
from itsdangerous import Serializer, BadSignature
from itsdangerous import SignatureExpired
from sqlalchemy.orm import sessionmaker
import AppConfig
from dbmodels.Models import User, Post
class BaseService():
    """Base class owning a SQLAlchemy session bound to a database engine."""
    # Per-instance engine/session; the class-level None values are just
    # declarations (both are assigned in __init__).
    __dbEngine = None
    __session = None

    def __init__(self, dbEngine=None):
        """Bind a new session to *dbEngine*, defaulting to AppConfig.DBENGINE."""
        if dbEngine is None:
            self.__dbEngine = AppConfig.DBENGINE
        else:
            self.__dbEngine = dbEngine
        Session = sessionmaker(bind=self.__dbEngine)
        self.__session = Session()

    def get_session(self):
        """Return the SQLAlchemy session owned by this service."""
        return self.__session
class PostService(BaseService):
    """Read-only access to Post rows."""

    def __init__(self):
        super(PostService, self).__init__()

    def getAll(self):
        """Return a query over every Post."""
        session = self.get_session()
        return session.query(Post)

    def getById(self, id):
        """Return the Post with primary key *id*, or None."""
        query = self.get_session().query(Post)
        return query.filter_by(id=id).first()
class UserService(BaseService):
    """Read-only access to User rows."""

    def __init__(self):
        super(UserService, self).__init__()

    def getUserByUsername(self, username):
        """Return the first User with *username*, or None."""
        user = self.get_session().query(User).filter_by(username=username).first()
        return user

    def getById(self, id):
        """Return the User with primary key *id*, or None."""
        user = self.get_session().query(User).filter_by(id=id).first()
        return user

    def getAll(self):
        """Return a query over every User."""
        return self.get_session().query(User)
def validate(self, username, password):
user = self.getAll().filter_by(username=username, password=password).first()
return user is not None | {
"content_hash": "14090a15f7528c82afe6852e7155db0d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 84,
"avg_line_length": 27.673076923076923,
"alnum_prop": 0.645587213342599,
"repo_name": "mandrive/FlaskTest",
"id": "d3b60b2ced7c44866343e701fb4627fdd70e9e71",
"size": "1439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/Services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "9909"
}
],
"symlink_target": ""
} |
"""Rename table for TimeEntry model
Revision ID: 1b91f4c7c1b
Revises: 28b17a6c5cf
Create Date: 2015-03-25 00:59:41.396413
"""
# revision identifiers, used by Alembic.
revision = '1b91f4c7c1b'
down_revision = '28b17a6c5cf'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: rename the misspelled 'times_entries' table.
    op.rename_table('times_entries', 'time_entries')


def downgrade():
    # Reverse migration: restore the previous table name.
    op.rename_table('time_entries', 'times_entries')
| {
"content_hash": "069b646bff9ce3b1e5ecda5bc0251d86",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 52,
"avg_line_length": 18.863636363636363,
"alnum_prop": 0.7325301204819277,
"repo_name": "xuhcc/airy",
"id": "dc02db497551e22716562a0b04adb007c1d4d6dc",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/20150325_0059_rename_table_for_timeentry_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5531"
},
{
"name": "HTML",
"bytes": "15388"
},
{
"name": "JavaScript",
"bytes": "98979"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "97742"
},
{
"name": "SaltStack",
"bytes": "1572"
}
],
"symlink_target": ""
} |
"""Testcases for cssutils.css.cssproperties."""
import xml.dom
import basetest
import cssutils.css
import cssutils.profiles
class CSSPropertiesTestCase(basetest.BaseTestCase):
    """Unit tests for cssutils.css.cssproperties name-mapping helpers."""

    # def test_cssvalues(self):
    #     "cssproperties cssvalues"
    #     # does actually return match object, so a very simplified test...
    #     match = cssutils.css.cssproperties.cssvalues
    #
    #     self.assertEqual(True, bool(match['color']('red')))
    #     self.assertEqual(False, bool(match['top']('red')))
    #
    #     self.assertEqual(True, bool(match['left']('0')))
    #     self.assertEqual(True, bool(match['left']('1px')))
    #     self.assertEqual(True, bool(match['left']('.1px')))
    #     self.assertEqual(True, bool(match['left']('-1px')))
    #     self.assertEqual(True, bool(match['left']('-.1px')))
    #     self.assertEqual(True, bool(match['left']('-0.1px')))

    def test_toDOMname(self):
        "cssproperties _toDOMname(CSSname)"
        _toDOMname = cssutils.css.cssproperties._toDOMname
        # Hyphenated CSS names map to camelCase DOM names; a vendor prefix
        # like '-moz-' maps to a leading capital; unknown names pass through.
        self.assertEqual('color', _toDOMname('color'))
        self.assertEqual('fontStyle', _toDOMname('font-style'))
        self.assertEqual('MozOpacity', _toDOMname('-moz-opacity'))
        self.assertEqual('UNKNOWN', _toDOMname('UNKNOWN'))
        self.assertEqual('AnUNKNOWN', _toDOMname('-anUNKNOWN'))

    def test_toCSSname(self):
        "cssproperties _toCSSname(DOMname)"
        # Inverse of _toDOMname.
        _toCSSname = cssutils.css.cssproperties._toCSSname
        self.assertEqual('color', _toCSSname('color'))
        self.assertEqual('font-style', _toCSSname('fontStyle'))
        self.assertEqual('-moz-opacity', _toCSSname('MozOpacity'))
        self.assertEqual('UNKNOWN', _toCSSname('UNKNOWN'))
        self.assertEqual('-anUNKNOWN', _toCSSname('AnUNKNOWN'))

    def test_CSS2Properties(self):
        "CSS2Properties"
        CSS2Properties = cssutils.css.cssproperties.CSS2Properties
        # Every defined property is exposed as a Python property object,
        # and the property count matches the profiles definition.
        self.assertEqual(type(property()), type(CSS2Properties.color))
        self.assertEqual(sum([len(x) for x in cssutils.profiles.properties.values()]),
                         len(CSS2Properties._properties))
        c2 = CSS2Properties()
        # CSS2Properties has simplified implementation return always None
        self.assertEqual(None, c2.color)
        self.assertEqual(None, c2.__setattr__('color', 1))
        self.assertEqual(None, c2.__delattr__('color'))
        # only defined properties
        self.assertRaises(AttributeError, c2.__getattribute__, 'UNKNOWN')
if __name__ == '__main__':
import unittest
unittest.main()
| {
"content_hash": "8e45bb087211255f7a515b2222b310d7",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 86,
"avg_line_length": 40.23809523809524,
"alnum_prop": 0.6473372781065089,
"repo_name": "hackatbrown/2015.hackatbrown.org",
"id": "ce6ec4df0d6429d09826c888c0db21ff0b0d5c6c",
"size": "2535",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "hack-at-brown-2015/cssutils/tests/test_cssproperties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2826195"
},
{
"name": "HTML",
"bytes": "853190"
},
{
"name": "JavaScript",
"bytes": "3333401"
},
{
"name": "Python",
"bytes": "3830632"
}
],
"symlink_target": ""
} |
"""
A collection of tests covering user management in DC/OS.
Assume that access control is activated in Master Admin Router (could be
disabled with `oauth_enabled`) and therefore authenticate individual HTTP
requests.
One aspect of DC/OS user management is that once authenticated a user can add
other users. Unauthenticated HTTP requests are rejected by Master Admin Router
and user management fails (this is the coarse-grained authorization model of
(open) DC/OS). Here, test that unauthenticated HTTP requests cannot manage
users. However, do not test that newly added users can add other users: in this
test suite we are limited to having authentication state for just a single user
available. This is why we can test managing other users only from that first
user's point of view. That is, we can not test that a user (e.g. user2) which
was added by the first user (user1) can add another user (user3).
"""
import logging
import pytest
from test_helpers import expanded_config
__maintainer__ = 'jgehrcke'
__contact__ = 'security-team@mesosphere.io'
log = logging.getLogger(__name__)
# Skip entire module in downstream integration tests.
if 'security' in expanded_config:
pytest.skip(
'Skip upstream-specific user management tests',
allow_module_level=True
)
def get_users(apisession):
    """Return all IAM users as a dict keyed by uid."""
    resp = apisession.get('/acs/api/v1/users')
    resp.raise_for_status()
    body = resp.json()
    return {entry['uid']: entry for entry in body['array']}
def delete_user(apisession, uid):
    """Delete the user identified by *uid*; fail loudly on any error."""
    response = apisession.delete('/acs/api/v1/users/%s' % (uid, ))
    response.raise_for_status()
    assert response.status_code == 204
@pytest.fixture()
def remove_users_added_by_test(dcos_api_session):
    """Snapshot the user set before the test and, afterwards, delete any
    users the test added, restoring the original state."""
    pre_test_uids = set(get_users(dcos_api_session))
    log.info('remove_users_added_by_test pre test: users are %s', pre_test_uids)
    try:
        yield
    finally:
        post_test_uids = set(get_users(dcos_api_session))
        for uid in post_test_uids - pre_test_uids:
            log.info('remove_users_added_by_test post test: remove `%s`', uid)
            delete_user(dcos_api_session, uid)
def test_users_get(dcos_api_session):
    """Listing users succeeds and every record carries the required keys."""
    users = get_users(dcos_api_session)
    assert users
    for userdict in users.values():
        assert 'uid' in userdict
        assert 'description' in userdict
def test_user_put_no_email_uid(dcos_api_session):
r = dcos_api_session.put('/acs/api/v1/users/user1')
# This is the current behavior. It does not need to stay in future versions.
assert r.status_code == 500
assert 'invalid email' in r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_put_email_uid(dcos_api_session):
    """An email-shaped uid can be created with an empty JSON body."""
    # The current behavior is that the request body can be an empty JSON
    # document. It does not need to stay in future versions.
    resp = dcos_api_session.put('/acs/api/v1/users/user1@email.de', json={})
    resp.raise_for_status()
    assert resp.status_code == 201
    known_users = get_users(dcos_api_session)
    assert len(known_users) > 1
    assert 'user1@email.de' in known_users
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_put_optional_payload(dcos_api_session):
    """Various optional body fields are accepted on user creation."""
    # This is the current behavior. It does not need to stay in future versions.
    payloads = (
        ('user2@email.de', {'creator_uid': 'any@thing.bla', 'cluster_url': 'foobar'}),
        ('user3@email.de', {'creator_uid': 'any@thing.bla', 'description': 'barfoo'}),
        ('user4@email.de', {'is_remote': False}),
    )
    for uid, payload in payloads:
        resp = dcos_api_session.put('/acs/api/v1/users/%s' % uid, json=payload)
        assert resp.status_code == 201, resp.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_conflict(dcos_api_session):
    """Creating the same user twice yields 409 Conflict."""
    # This is the current behavior. It does not need to stay in future versions.
    url = '/acs/api/v1/users/user2@email.de'
    payload = {'creator_uid': 'any@thing.bla', 'cluster_url': 'foobar'}
    first = dcos_api_session.put(url, json=payload)
    assert first.status_code == 201, first.text
    second = dcos_api_session.put(url, json=payload)
    assert second.status_code == 409, second.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_delete(dcos_api_session):
    """A created user can be deleted and then no longer appears."""
    uid = 'user6@email.de'
    created = dcos_api_session.put('/acs/api/v1/users/' + uid, json={})
    created.raise_for_status()
    assert created.status_code == 201
    deleted = dcos_api_session.delete('/acs/api/v1/users/' + uid)
    deleted.raise_for_status()
    assert deleted.status_code == 204
    assert uid not in get_users(dcos_api_session)
def test_user_put_requires_authentication(noauth_api_session):
r = noauth_api_session.put('/acs/api/v1/users/user7@email.de', json={})
assert r.status_code == 401, r.text
def test_dynamic_ui_config(dcos_api_session):
r = dcos_api_session.get('/dcos-metadata/ui-config.json')
data = r.json()
assert not data['clusterConfiguration']['firstUser']
assert 'id' in data['clusterConfiguration']
assert 'uiConfiguration' in data
| {
"content_hash": "530c426705128b1dfa6291f9fd7633ed",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 80,
"avg_line_length": 32.54320987654321,
"alnum_prop": 0.6760242792109257,
"repo_name": "surdy/dcos",
"id": "8a8504de4aa3563345f1a9a066d9b7951825f104",
"size": "5272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/dcos-integration-test/extra/test_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2529"
},
{
"name": "Dockerfile",
"bytes": "10736"
},
{
"name": "Groovy",
"bytes": "711"
},
{
"name": "HTML",
"bytes": "88421"
},
{
"name": "Lua",
"bytes": "193851"
},
{
"name": "Makefile",
"bytes": "179"
},
{
"name": "PowerShell",
"bytes": "20007"
},
{
"name": "Python",
"bytes": "1354432"
},
{
"name": "Shell",
"bytes": "95258"
}
],
"symlink_target": ""
} |
"""Package entry point: run the weather CLI via ``python -m weather``."""
import weather

# Guard so that merely importing this module (e.g. by tooling or tests)
# does not trigger the CLI; ``python -m weather`` still executes it.
if __name__ == "__main__":
    weather.cli()
| {
"content_hash": "aab65d502e36fbec7ad061212af6030b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 14,
"avg_line_length": 14.5,
"alnum_prop": 0.7931034482758621,
"repo_name": "faustocarrera/weather-cli",
"id": "2acb0d40d6bb2ee531c0c63877c3c9ede1ab2056",
"size": "29",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weather/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14342"
}
],
"symlink_target": ""
} |
from nose.tools import assert_equal
from nose.tools import assert_true
from proboscis import before_class
from proboscis import test
from trove.openstack.common import timeutils
from trove.tests.util import create_dbaas_client
from troveclient.compat import exceptions
from datetime import datetime
from trove.tests.util.users import Users
# Test group name used by proboscis @test registration.
GROUP = "dbaas.api.limits"
# Expected API rate limit (requests per minute) configured for the test users.
DEFAULT_RATE = 200
# Expected absolute quota limits configured for the test deployment.
DEFAULT_MAX_VOLUMES = 100
DEFAULT_MAX_INSTANCES = 55
DEFAULT_MAX_BACKUPS = 5
@test(groups=[GROUP])
class Limits(object):
    """Verify the rate-limit and absolute-limit values exposed by the
    Trove limits API, including behavior when the rate limit is exceeded.

    Python 2 code base: ``xrange`` below is intentional.
    """
    @before_class
    def setUp(self):
        # Two isolated tenants: one for reading limits, one that will
        # deliberately exhaust its rate limit in test_limits_exception.
        users = [
            {
                "auth_user": "rate_limit",
                "auth_key": "password",
                "tenant": "4000",
                "requirements": {
                    "is_admin": False,
                    "services": ["trove"]
                }
            },
            {
                "auth_user": "rate_limit_exceeded",
                "auth_key": "password",
                "tenant": "4050",
                "requirements": {
                    "is_admin": False,
                    "services": ["trove"]
                }
            }]
        self._users = Users(users)
        rate_user = self._get_user('rate_limit')
        self.rd_client = create_dbaas_client(rate_user)
    def _get_user(self, name):
        # Look up one of the users declared in setUp by name.
        return self._users.find_user_by_name(name)
    def __is_available(self, next_available):
        # NOTE(review): appears unused within this class; it compares only
        # the time-of-day portion, which would misbehave across midnight.
        dt_next = timeutils.parse_isotime(next_available)
        dt_now = datetime.now()
        return dt_next.time() < dt_now.time()
    def _get_limits_as_dict(self, limits):
        # Index limit objects by their HTTP verb (plus the special
        # "ABSOLUTE" pseudo-verb).
        d = {}
        for l in limits:
            d[l.verb] = l
        return d
    @test
    def test_limits_index(self):
        """The limits index lists absolute quotas and per-verb rates."""
        limits = self.rd_client.limits.list()
        d = self._get_limits_as_dict(limits)
        # remove the abs_limits from the rate limits
        abs_limits = d.pop("ABSOLUTE", None)
        assert_equal(abs_limits.verb, "ABSOLUTE")
        assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES)
        assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS)
        assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)
        for k in d:
            assert_equal(d[k].verb, k)
            assert_equal(d[k].unit, "MINUTE")
            assert_true(int(d[k].remaining) <= DEFAULT_RATE)
            assert_true(d[k].nextAvailable is not None)
    @test
    def test_limits_get_remaining(self):
        """Repeated GETs decrement the remaining-request counter."""
        limits = ()
        for i in xrange(5):
            limits = self.rd_client.limits.list()
        d = self._get_limits_as_dict(limits)
        abs_limits = d["ABSOLUTE"]
        get = d["GET"]
        assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES)
        assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS)
        assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)
        assert_equal(get.verb, "GET")
        assert_equal(get.unit, "MINUTE")
        assert_true(int(get.remaining) <= DEFAULT_RATE - 5)
        assert_true(get.nextAvailable is not None)
    @test
    def test_limits_exception(self):
        """Exceeding the rate limit eventually raises OverLimit."""
        # use a different user to avoid throttling tests run out of order
        rate_user_exceeded = self._get_user('rate_limit_exceeded')
        rd_client = create_dbaas_client(rate_user_exceeded)
        get = None
        encountered = False
        for i in xrange(DEFAULT_RATE + 50):
            try:
                limits = rd_client.limits.list()
                d = self._get_limits_as_dict(limits)
                get = d["GET"]
                abs_limits = d["ABSOLUTE"]
                assert_equal(get.verb, "GET")
                assert_equal(get.unit, "MINUTE")
                assert_equal(int(abs_limits.max_instances),
                             DEFAULT_MAX_INSTANCES)
                assert_equal(int(abs_limits.max_backups),
                             DEFAULT_MAX_BACKUPS)
                assert_equal(int(abs_limits.max_volumes),
                             DEFAULT_MAX_VOLUMES)
            except exceptions.OverLimit:
                encountered = True
        assert_true(encountered)
        assert_true(int(get.remaining) <= 50)
| {
"content_hash": "01ef86f0449c85d1c8c8f0da130df7a2",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 74,
"avg_line_length": 31.762962962962963,
"alnum_prop": 0.5573694029850746,
"repo_name": "changsimon/trove",
"id": "3ff56a86e13155dbda404cc0ba86cb71c04c58ea",
"size": "5020",
"binary": false,
"copies": "1",
"ref": "refs/heads/bug/1347114-dev",
"path": "trove/tests/api/limits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60526"
},
{
"name": "Python",
"bytes": "2620989"
},
{
"name": "Shell",
"bytes": "4771"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import os
import re
import subprocess
import sys
import decimal
from operator import attrgetter
from itertools import chain
from collections import Iterator
from datetime import datetime, date
from distutils.spawn import find_executable
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import event
from sqlalchemy.schema import CreateSchema
from multipledispatch import MDNotImplementedError
import datashape
from datashape import DataShape, Record, Option, var, dshape, Map
from datashape.predicates import isdimension, isrecord, isscalar
from datashape import discover, datetime_, date_, float64, int64, int_, string
from datashape import float32
from datashape.dispatch import dispatch
from toolz import (partition_all, keyfilter, memoize, valfilter, identity,
concat, curry, merge)
from toolz.curried import pluck, map
from ..compatibility import unicode
from ..utils import keywords, ignoring, iter_except, filter_kwargs
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
from ..chunks import Chunks
from .csv import CSV
# Python scalar types that map directly onto SQL values.
base = int, float, datetime, date, bool, str, decimal.Decimal
# http://docs.sqlalchemy.org/en/latest/core/types.html
# datashape type name -> SQLAlchemy column type used when creating tables.
types = {
    'int64': sa.BigInteger,
    'int32': sa.Integer,
    'int': sa.Integer,
    'int16': sa.SmallInteger,
    'float32': sa.REAL,
    'float64': sa.FLOAT,
    'float': sa.FLOAT,
    'real': sa.FLOAT,
    'string': sa.Text,
    'date': sa.Date,
    'time': sa.Time,
    'datetime': sa.DateTime,
    'bool': sa.Boolean,
    # Timedelta units map onto INTERVALs with matching precision.
    "timedelta[unit='D']": sa.Interval(second_precision=0, day_precision=9),
    "timedelta[unit='h']": sa.Interval(second_precision=0, day_precision=0),
    "timedelta[unit='m']": sa.Interval(second_precision=0, day_precision=0),
    "timedelta[unit='s']": sa.Interval(second_precision=0, day_precision=0),
    "timedelta[unit='ms']": sa.Interval(second_precision=3, day_precision=0),
    "timedelta[unit='us']": sa.Interval(second_precision=6, day_precision=0),
    "timedelta[unit='ns']": sa.Interval(second_precision=9, day_precision=0),
    # ??: sa.types.LargeBinary,
}
# Reverse mapping: SQLAlchemy type -> datashape type, seeded from ``types``
# and extended with SQLAlchemy's uppercase/legacy aliases.
revtypes = dict(map(reversed, types.items()))
revtypes.update({
    sa.DATETIME: datetime_,
    sa.TIMESTAMP: datetime_,
    sa.FLOAT: float64,
    sa.DATE: date_,
    # BUG FIX: `sa.BIGINT: int64` previously appeared twice in this literal;
    # the duplicate key silently overwrote the first with the same value.
    sa.BIGINT: int64,
    sa.INTEGER: int_,
    sa.types.NullType: string,
    sa.REAL: float32,
    sa.Float: float64,
    sa.Float(precision=24): float32,
    sa.Float(precision=53): float64,
})
# interval types are special cased in discover_typeengine so remove them from
# revtypes
revtypes = valfilter(lambda x: not isinstance(x, sa.Interval), revtypes)
# INTERVAL second_precision (decimal digits of fractional seconds) ->
# datashape timedelta unit.
units_of_power = {
    0: 's',
    3: 'ms',
    6: 'us',
    9: 'ns'
}
# these aren't loaded by sqlalchemy by default
sa.dialects.registry.load('oracle')
sa.dialects.registry.load('postgresql')
def getbind(t, bind):
    """Resolve the engine to use for *t*.

    ``None`` falls back to ``t.bind``; an Engine instance passes through
    unchanged; any other value is treated as a connection URI.
    """
    if bind is None:
        return t.bind
    elif isinstance(bind, sa.engine.base.Engine):
        return bind
    else:
        return sa.create_engine(bind)
def batch(sel, chunksize=10000, bind=None):
    """Execute `sel`, streaming row at a time and fetching from the database in
    batches of size `chunksize`.
    Parameters
    ----------
    sel : sa.sql.Selectable
        Selectable to execute
    chunksize : int, optional, default 10000
        Number of rows to fetch from the database
    """
    def stream(statement, size=chunksize):
        # Yield the column names first, then batches of rows until exhausted.
        with getbind(statement, bind).connect() as conn:
            cursor = conn.execute(statement)
            yield cursor.keys()
            fetch = curry(cursor.fetchmany, size=size)
            for chunk in iter_except(fetch, sa.exc.ResourceClosedError):
                if not chunk:
                    return
                yield chunk
    rows = stream(sel)
    return next(rows), concat(rows)
@discover.register(sa.dialects.postgresql.base.INTERVAL)
def discover_postgresql_interval(t):
    # Postgres INTERVALs expose only a fractional-second precision; model
    # them as a generic Interval with no day component.
    return discover(sa.Interval(day_precision=0, second_precision=t.precision))
@discover.register(sa.dialects.oracle.base.INTERVAL)
def discover_oracle_interval(t):
    # Oracle INTERVAL types know how to adapt themselves to the generic type.
    return discover(t.adapt(sa.Interval))
@discover.register(sa.sql.type_api.TypeEngine)
def discover_typeengine(typ):
    """Translate a SQLAlchemy type object into a datashape type.

    Interval types are special-cased (they were removed from ``revtypes``);
    everything else is resolved by exact lookup, class lookup, and finally
    an isinstance scan over ``revtypes``.
    """
    if isinstance(typ, sa.Interval):
        if typ.second_precision is None and typ.day_precision is None:
            return datashape.TimeDelta(unit='us')
        elif typ.second_precision == 0 and typ.day_precision == 0:
            return datashape.TimeDelta(unit='s')
        if typ.second_precision in units_of_power and not typ.day_precision:
            units = units_of_power[typ.second_precision]
        elif typ.day_precision > 0:
            units = 'D'
        else:
            # BUG FIX: the two literals previously concatenated without a
            # separating space ("parameterssecond_precision=...").
            raise ValueError('Cannot infer INTERVAL type with parameters '
                             'second_precision=%d, day_precision=%d' %
                             (typ.second_precision, typ.day_precision))
        return datashape.TimeDelta(unit=units)
    if typ in revtypes:
        return dshape(revtypes[typ])[0]
    if type(typ) in revtypes:
        return revtypes[type(typ)]
    if isinstance(typ, (sa.NUMERIC, sa.DECIMAL)):
        return datashape.Decimal(precision=typ.precision, scale=typ.scale)
    if isinstance(typ, (sa.String, sa.Unicode)):
        return datashape.String(typ.length, typ.collation)
    else:
        # Fall back to an isinstance scan, also unwrapping TypeDecorator
        # implementations via their ``impl`` attribute.
        for k, v in revtypes.items():
            if isinstance(k, type) and (isinstance(typ, k) or
                                        hasattr(typ, 'impl') and
                                        isinstance(typ.impl, k)):
                return v
            if k == typ:
                return v
    raise NotImplementedError("No SQL-datashape match for type %s" % typ)
@discover.register(sa.ForeignKey, sa.sql.FromClause)
def discover_foreign_key_relationship(fk, parent, parent_measure=None):
    """Model a foreign-key column as a Map from the key's type to the
    referenced table's measure."""
    if fk.column.table is not parent:
        # Key points at another table: discover that table's measure.
        # (Self-referential keys reuse the parent_measure passed in.)
        parent_measure = discover(fk.column.table).measure
    return {fk.parent.name: Map(discover(fk.parent.type), parent_measure)}
@discover.register(sa.Column)
def discover_sqlalchemy_column(c):
    """Discover a single column as a one-field Record, wrapping nullable
    columns in Option."""
    if c.nullable:
        wrap = Option
    else:
        wrap = identity
    return Record([(c.name, wrap(discover(c.type)))])
@discover.register(sa.sql.FromClause)
def discover_sqlalchemy_selectable(t):
    """Discover a table/selectable as ``var * Record``, replacing
    foreign-key columns with Map types."""
    # Map each column name to its position so fkey replacements land in place.
    # (Simplified: the original wrapped t.columns.keys() in a redundant
    # generator expression.)
    ordering = dict((name, i) for i, name in enumerate(t.columns.keys()))
    records = list(sum([discover(c).parameters[0] for c in t.columns], ()))
    fkeys = [discover(fkey, t, parent_measure=Record(records))
             for fkey in t.foreign_keys]
    for name, column in merge(*fkeys).items():
        records[ordering[name]] = (name, column)
    return var * Record(records)
@dispatch(sa.engine.base.Engine, str)
def discover(engine, tablename):
    """Discover the datashape of a single table by name."""
    metadata = sa.MetaData(engine)
    if tablename not in metadata.tables:
        try:
            # Reflect views too when the dialect supports them.
            metadata.reflect(engine,
                             views=metadata.bind.dialect.supports_views)
        except NotImplementedError:
            metadata.reflect(engine)
    table = metadata.tables[tablename]
    return discover(table)
@dispatch(sa.engine.base.Engine)
def discover(engine):
    """Discover all tables reachable through *engine*."""
    return discover(sa.MetaData(engine))
@dispatch(sa.MetaData)
def discover(metadata):
    """Discover every table bound to *metadata* as a Record datashape.

    Tables whose types cannot be compiled or understood are reported and
    skipped rather than aborting discovery of the remaining tables.
    """
    try:
        metadata.reflect(views=metadata.bind.dialect.supports_views)
    except NotImplementedError:
        metadata.reflect()
    pairs = []
    for table in sorted(metadata.tables.values(), key=attrgetter('name')):
        name = table.name
        try:
            pairs.append([name, discover(table)])
        except sa.exc.CompileError as e:
            # BUG FIX: `e.message` does not exist on Python 3; str(e)
            # yields the same text on Python 2.
            print("Can not discover type of table %s.\n" % name +
                  "SQLAlchemy provided this error message:\n\t%s" % str(e) +
                  "\nSkipping.")
        except NotImplementedError as e:
            print("Blaze does not understand a SQLAlchemy type.\n"
                  "Blaze provided the following error:\n\t%s" % "\n\t".join(e.args) +
                  "\nSkipping.")
    return DataShape(Record(pairs))
@discover.register(sa.engine.RowProxy)
def discover_row_proxy(rp):
    """Discover a result row as a Record of its column names and values."""
    fields = [(name, discover(value))
              for name, value in zip(rp.keys(), rp.values())]
    return Record(fields)
def validate_foreign_keys(ds, foreign_keys):
    """Check that *foreign_keys* and the Map fields of *ds* agree.

    Raises TypeError when a requested key is missing from the datashape,
    is not a Map, or when a Map column was not declared as a foreign key.
    """
    # passed foreign_keys and column in dshape, but not a ForeignKey type
    names = ds.measure.names
    for field in foreign_keys:
        if field not in names:
            raise TypeError('Requested foreign key field %r is not a field in '
                            'datashape %s' % (field, ds))
    for field, typ in ds.measure.fields:
        declared = field in foreign_keys
        if declared and not isinstance(getattr(typ, 'ty', typ), Map):
            raise TypeError('Foreign key %s passed in but not a Map '
                            'datashape, got %s' % (field, typ))
        if isinstance(typ, Map) and not declared:
            raise TypeError('Map type %s found on column %s, but %r '
                            "wasn't found in %s" %
                            (typ, field, field, foreign_keys))
def dshape_to_table(name, ds, metadata=None, foreign_keys=None,
                    primary_key=None):
    """
    Create a SQLAlchemy table from a datashape and a name
    >>> dshape_to_table('bank', '{name: string, amount: int}') # doctest: +NORMALIZE_WHITESPACE
    Table('bank', MetaData(bind=None),
          Column('name', Text(), table=<bank>, nullable=False),
          Column('amount', Integer(), table=<bank>, nullable=False),
          schema=None)
    """
    if isinstance(ds, str):
        ds = dshape(ds)
    if not isrecord(ds.measure):
        raise TypeError('dshape measure must be a record type e.g., '
                        '"{a: int64, b: int64}". Input measure is %r' %
                        ds.measure)
    if metadata is None:
        metadata = sa.MetaData()
    if foreign_keys is None:
        foreign_keys = {}
    validate_foreign_keys(ds, foreign_keys)
    cols = dshape_to_alchemy(ds, primary_key=primary_key or frozenset())
    # Turn the {column: referent} mapping into ForeignKeyConstraint clauses.
    cols.extend(sa.ForeignKeyConstraint([column_name], [referent])
                for column_name, referent in foreign_keys.items())
    t = sa.Table(name, metadata, *cols, schema=metadata.schema)
    # Ensure the schema (if any) is created before the table itself.
    return attach_schema(t, t.schema)
@dispatch(object, str)
def create_from_datashape(o, ds, **kwargs):
    # Accept a datashape given as a string by parsing it first.
    return create_from_datashape(o, dshape(ds), **kwargs)
@dispatch(sa.engine.base.Engine, DataShape)
def create_from_datashape(engine, ds, schema=None, foreign_keys=None,
                          primary_key=None, **kwargs):
    """Create one table per field of the record datashape *ds*."""
    assert isrecord(ds), 'datashape must be Record type, got %s' % ds
    metadata = sa.MetaData(engine, schema=schema)
    for name, sub_ds in ds[0].dict.items():
        t = dshape_to_table(name, sub_ds, metadata=metadata,
                            foreign_keys=foreign_keys,
                            primary_key=primary_key)
        t.create()
    return engine
def dshape_to_alchemy(dshape, primary_key=frozenset()):
    """
    >>> dshape_to_alchemy('int')
    <class 'sqlalchemy.sql.sqltypes.Integer'>
    >>> dshape_to_alchemy('string')
    <class 'sqlalchemy.sql.sqltypes.Text'>
    >>> dshape_to_alchemy('{name: string, amount: int}')
    [Column('name', Text(), table=None, nullable=False), Column('amount', Integer(), table=None, nullable=False)]
    >>> dshape_to_alchemy('{name: ?string, amount: ?int}')
    [Column('name', Text(), table=None), Column('amount', Integer(), table=None)]
    """
    if isinstance(dshape, str):
        dshape = datashape.dshape(dshape)
    if isinstance(dshape, Map):
        # Foreign-key columns take the SQL type of the key they reference.
        return dshape_to_alchemy(dshape.key.measure, primary_key=primary_key)
    if isinstance(dshape, Option):
        return dshape_to_alchemy(dshape.ty, primary_key=primary_key)
    if str(dshape) in types:
        return types[str(dshape)]
    if isinstance(dshape, datashape.Record):
        # Option fields become nullable columns; names in primary_key
        # become part of the table's primary key.
        return [sa.Column(name,
                          dshape_to_alchemy(getattr(typ, 'ty', typ),
                                            primary_key=primary_key),
                          primary_key=name in primary_key,
                          nullable=isinstance(typ[0], Option))
                for name, typ in dshape.parameters[0]]
    if isinstance(dshape, datashape.DataShape):
        if isdimension(dshape[0]):
            return dshape_to_alchemy(dshape[1], primary_key=primary_key)
        else:
            return dshape_to_alchemy(dshape[0], primary_key=primary_key)
    if isinstance(dshape, datashape.String):
        fixlen = dshape[0].fixlen
        if fixlen is None:
            # Unbounded strings map to TEXT.
            return sa.TEXT
        string_types = dict(U=sa.Unicode, A=sa.String)
        assert dshape.encoding is not None
        return string_types[dshape.encoding[0]](length=fixlen)
    if isinstance(dshape, datashape.DateTime):
        return sa.DATETIME(timezone=dshape.tz is not None)
    if isinstance(dshape, datashape.Decimal):
        return sa.NUMERIC(dshape.precision, dshape.scale)
    raise NotImplementedError("No SQLAlchemy dtype match for datashape: %s"
                              % dshape)
@convert.register(Iterator, sa.Table, cost=300.0)
def sql_to_iterator(t, bind=None, **kwargs):
    """Stream all rows of table *t* as an iterator of tuples."""
    sel = sa.select([t])
    _names, rows = batch(sel, bind=bind)
    return map(tuple, rows)
@convert.register(Iterator, sa.sql.Select, cost=300.0)
def select_to_iterator(sel, dshape=None, bind=None, **kwargs):
    """Stream the rows of *sel*; scalar measures yield bare values
    instead of one-element tuples."""
    _names, rows = batch(sel, bind=bind)
    if dshape and isscalar(dshape.measure):
        return pluck(0, rows)
    return map(tuple, rows)
@convert.register(base, sa.sql.Select, cost=200.0)
def select_to_base(sel, dshape=None, bind=None, **kwargs):
    """Execute *sel* and return its single scalar result."""
    if dshape is not None and not isscalar(dshape):
        raise ValueError('dshape should be None or scalar, got %s' % dshape)
    engine = getbind(sel, bind)
    with engine.connect() as conn:
        return conn.execute(sel).scalar()
@append.register(sa.Table, Iterator)
def append_iterator_to_table(t, rows, dshape=None, bind=None, **kwargs):
    """Insert rows from an iterator into *t*, creating the table if needed.

    Tuple/list rows are zipped into dicts using the column names from
    *dshape* (validated against the table) or from the table itself.
    """
    assert not isinstance(t, type)
    engine = getbind(t, bind)
    if not t.exists(bind=engine):
        t.create(bind=engine)
    rows = iter(rows)
    # We see if the sequence is of tuples or dicts
    # If tuples then we coerce them to dicts
    try:
        row = next(rows)
    except StopIteration:
        # Nothing to insert; leave the (possibly just-created) table alone.
        return
    rows = chain([row], rows)
    if isinstance(row, (tuple, list)):
        dshape = dshape and datashape.dshape(dshape)
        if dshape and isinstance(dshape.measure, datashape.Record):
            names = dshape.measure.names
            if set(names) != set(discover(t).measure.names):
                raise ValueError("Column names of incoming data don't match "
                                 "column names of existing SQL table\n"
                                 "Names in SQL table: %s\n"
                                 "Names from incoming data: %s\n" %
                                 (discover(t).measure.names, names))
        else:
            names = discover(t).measure.names
        rows = (dict(zip(names, row)) for row in rows)
    with engine.connect() as conn:
        for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
            conn.execute(t.insert(), chunk)
    return t
@append.register(sa.Table, Chunks)
def append_anything_to_sql_Table(t, c, **kwargs):
    """Append each chunk of *c* to table *t* in turn."""
    for piece in c:
        append(t, piece, **kwargs)
    return t
@append.register(sa.Table, object)
def append_anything_to_sql_Table(t, o, **kwargs):
    """Fallback: convert *o* to an iterator of rows, then append those."""
    rows = convert(Iterator, o, **kwargs)
    return append(t, rows, **kwargs)
@append.register(sa.Table, sa.Table)
def append_table_to_sql_Table(t, o, **kwargs):
    """Append one SQL table into another by selecting all of *o*."""
    return append(t, sa.select([o]), **kwargs)
@append.register(sa.Table, sa.sql.Select)
def append_select_statement_to_sql_Table(t, o, bind=None, **kwargs):
    """Append the result of select *o* into table *t*.

    Uses a server-side INSERT ... FROM SELECT when both live on the same
    engine; otherwise falls back to streaming rows through Python.
    """
    t_bind = getbind(t, bind)
    o_bind = getbind(o, bind)
    if t_bind != o_bind:
        # Different databases: pull rows client-side and re-insert.
        return append(
            t,
            convert(Iterator, o, bind=bind, **kwargs),
            bind=bind,
            **kwargs
        )
    bind = t_bind
    assert bind.has_table(t.name, t.schema), \
        'tables must come from the same database'
    query = t.insert().from_select(o.columns.keys(), o)
    with bind.connect() as conn:
        conn.execute(query)
    return t
def should_create_schema(ddl, target, bind, tables=None, state=None,
                         checkfirst=None, **kwargs):
    # Create the schema only when it does not already exist in the target DB.
    # Signature matches SQLAlchemy's execute_if callable_ contract.
    return ddl.element not in inspect(target.bind).get_schema_names()
def attach_schema(obj, schema):
    """Arrange for *schema* to be created (postgresql only) right before
    *obj* itself is created; no-op when *schema* is None."""
    if schema is None:
        return obj
    ddl = CreateSchema(schema, quote=True)
    listener = ddl.execute_if(
        callable_=should_create_schema,
        dialect='postgresql'
    )
    event.listen(obj, 'before_create', listener)
    return obj
@resource.register(r'(.*sql.*|oracle|redshift)(\+\w+)?://.+')
def resource_sql(uri, *args, **kwargs):
    """Resolve a SQL uri to an Engine or, when a table name is given,
    to a Table.

    Creates the table (or all tables described by ``dshape``) when it
    does not yet exist and a dshape was provided.
    """
    engine = sa.create_engine(uri, connect_args=kwargs.pop('connect_args', {}),
                              **filter_kwargs(sa.create_engine, kwargs))
    ds = kwargs.pop('dshape', None)
    schema = kwargs.pop('schema', None)
    foreign_keys = kwargs.pop('foreign_keys', None)
    primary_key = kwargs.pop('primary_key', None)
    # we were also given a table name
    if args and isinstance(args[0], (str, unicode)):
        table_name, args = args[0], args[1:]
        metadata = sa.MetaData(engine, schema=schema)
        with ignoring(sa.exc.NoSuchTableError):
            return attach_schema(
                sa.Table(table_name, metadata, schema=schema,
                         autoload_with=engine),
                schema
            )
        if ds:
            t = dshape_to_table(table_name, ds, metadata=metadata,
                                foreign_keys=foreign_keys,
                                primary_key=primary_key)
            t.create()
            return t
        else:
            raise ValueError("Table does not exist and no dshape provided")
    # We were not given a table name
    if ds:
        # BUG FIX: primary_key was popped above but previously not
        # forwarded on this path, silently dropping the caller's request.
        create_from_datashape(engine, ds, schema=schema,
                              foreign_keys=foreign_keys,
                              primary_key=primary_key)
    return engine
@resource.register('impala://.+')
def resource_impala(uri, *args, **kwargs):
    """SQL resource for Impala URIs; requires the `impyla` dialect."""
    try:
        # Importing impala.sqlalchemy registers the dialect with SQLAlchemy.
        import impala.sqlalchemy
    except ImportError:
        raise ImportError("Please install or update `impyla` library")
    return resource_sql(uri, *args, **kwargs)
@resource.register('monetdb://.+')
def resource_monet(uri, *args, **kwargs):
    """SQL resource for MonetDB URIs."""
    try:
        # NOTE(review): imports `monetdb` but the error message names
        # `sqlalchemy_monetdb` -- presumably the latter provides the former.
        import monetdb
    except ImportError:
        raise ImportError("Please install the `sqlalchemy_monetdb` library")
    return resource_sql(uri, *args, **kwargs)
@resource.register('hive://.+')
def resource_hive(uri, *args, **kwargs):
    """SQL resource for Hive URIs, filling in default port, user and
    database; requires the `PyHive` dialect."""
    try:
        import pyhive
    except ImportError:
        raise ImportError("Please install the `PyHive` library.")
    # BUG FIX: use a raw string -- '\w' and '\d' are invalid escape
    # sequences in an ordinary string literal (DeprecationWarning today,
    # a SyntaxError in future Python versions).
    pattern = r'hive://((?P<user>[a-zA-Z_]\w*)@)?(?P<host>[\w.]+)(:(?P<port>\d*))?(/(?P<database>\w*))?'
    d = re.search(pattern, uri.split('::')[0]).groupdict()
    defaults = {'port': '10000',
                'user': 'hdfs',
                'database': 'default'}
    # Substitute defaults for any component missing from the URI.
    for k, v in d.items():
        if not v:
            d[k] = defaults[k]
    if d['user']:
        d['user'] += '@'
    uri2 = 'hive://%(user)s%(host)s:%(port)s/%(database)s' % d
    # Preserve any '::tablename' suffix from the original URI.
    if '::' in uri:
        uri2 += '::' + uri.split('::')[1]
    return resource_sql(uri2, *args, **kwargs)
# Register sa.Table as an out-of-core container type for odo's conversions.
ooc_types.add(sa.Table)
@dispatch(sa.Table)
def drop(table, bind=None):
    """Drop *table* if it exists, verifying that the drop took effect."""
    engine = getbind(table, bind)
    table.drop(bind=engine, checkfirst=True)
    if table.exists(bind=engine):
        raise ValueError('table %r dropped but still exists' % table.name)
@convert.register(pd.DataFrame, (sa.sql.Select, sa.sql.Selectable), cost=200.0)
def select_or_selectable_to_frame(el, bind=None, **kwargs):
    """Execute *el* and load the full result set into a DataFrame."""
    columns, rows = batch(el, bind=bind)
    first = next(rows, None)
    if first is None:
        # No rows at all: preserve the column labels on an empty frame.
        return pd.DataFrame(columns=columns)
    data = list(chain([tuple(first)], map(tuple, rows)))
    return pd.DataFrame(data, columns=columns)
class CopyToCSV(sa.sql.expression.Executable, sa.sql.ClauseElement):
    """Executable clause that exports a table or selectable to a CSV file.

    Compiled per-dialect below (postgresql, mysql, sqlite); unsupported
    dialects have no @compiles handler.
    """
    def __init__(
        self,
        element,
        path,
        delimiter=',',
        quotechar='"',
        lineterminator='\n',
        escapechar='\\',
        header=True,
        na_value='',
        encoding=None,
        bind=None,
    ):
        # element: sa.Table or selectable to export; path: output file name.
        self.element = element
        self.path = path
        self.delimiter = delimiter
        self.quotechar = quotechar
        self.lineterminator = lineterminator
        self._bind = bind = getbind(element, bind)
        # mysql cannot write headers
        self.header = header and bind.dialect.name != 'mysql'
        self.escapechar = escapechar
        self.na_value = na_value
        self.encoding = encoding
    @property
    def bind(self):
        # Engine resolved at construction; overrides ClauseElement.bind.
        return self._bind
@compiles(CopyToCSV, 'postgresql')
def compile_copy_to_csv_postgres(element, compiler, **kwargs):
    """Render CopyToCSV as a server-side COPY ... TO statement."""
    selectable = element.element
    return compiler.process(
        sa.text(
            """COPY {0} TO :path
            WITH (
                FORMAT CSV,
                HEADER :header,
                DELIMITER :delimiter,
                QUOTE :quotechar,
                NULL :na_value,
                ESCAPE :escapechar,
                ENCODING :encoding
            )
            """.format(
                # Tables are referenced by name; anything else is inlined
                # as a parenthesized subquery.
                compiler.preparer.format_table(selectable)
                if isinstance(selectable, sa.Table)
                else '({0})'.format(compiler.process(selectable))
            )
        ).bindparams(
            path=element.path,
            header=element.header,
            delimiter=element.delimiter,
            quotechar=element.quotechar,
            na_value=element.na_value,
            escapechar=element.escapechar,
            # Fall back to the server's client encoding when none was given.
            encoding=element.encoding or element.bind.execute(
                'show client_encoding'
            ).scalar()
        ),
        **kwargs
    )
@compiles(CopyToCSV, 'mysql')
def compile_copy_to_csv_mysql(element, compiler, **kwargs):
    """Render CopyToCSV as a SELECT ... INTO OUTFILE statement."""
    selectable = element.element
    return compiler.process(
        sa.text(
            """{0} INTO OUTFILE :path
            CHARACTER SET :encoding
            FIELDS TERMINATED BY :delimiter
            OPTIONALLY ENCLOSED BY :quotechar
            ESCAPED BY :escapechar
            LINES TERMINATED BY :lineterminator
            """.format(
                # Tables become a full SELECT; selectables compile as-is.
                compiler.process(
                    selectable.select()
                    if isinstance(selectable, sa.Table) else selectable,
                    **kwargs
                )
            )
        ).bindparams(
            path=element.path,
            # Fall back to the session's client character set.
            encoding=element.encoding or element.bind.execute(
                'select @@character_set_client'
            ).scalar(),
            delimiter=element.delimiter,
            quotechar=element.quotechar,
            escapechar=element.escapechar,
            lineterminator=element.lineterminator
        )
    )
@compiles(CopyToCSV, 'sqlite')
def compile_copy_to_csv_sqlite(element, compiler, **kwargs):
    """Export via the sqlite3 command-line tool.

    The file is written here, at compile time, and an empty statement is
    returned for SQLAlchemy to "execute".
    """
    if element.encoding is not None:
        raise ValueError(
            "'encoding' keyword argument not supported for "
            "SQLite to CSV conversion"
        )
    if not find_executable('sqlite3'):
        raise MDNotImplementedError("Could not find sqlite executable")
    # we are sending a SQL string directly to the SQLite process so we always
    # need to bind everything before sending it
    kwargs['literal_binds'] = True
    selectable = element.element
    sql = compiler.process(
        selectable.select() if isinstance(selectable, sa.Table) else selectable,
        **kwargs
    ) + ';'
    # Collapse the statement onto one line and encode it for the subprocess.
    sql = re.sub(r'\s{2,}', ' ', re.sub(r'\s*\n\s*', ' ', sql)).encode(
        sys.getfilesystemencoding()  # we send bytes to the process
    )
    cmd = ['sqlite3', '-csv',
           '-%sheader' % ('no' if not element.header else ''),
           '-separator', element.delimiter,
           selectable.bind.url.database]
    with open(element.path, mode='at') as f:
        subprocess.Popen(cmd, stdout=f, stdin=subprocess.PIPE).communicate(sql)
    # This will be a no-op since we're doing the write during the compile
    return ''
@append.register(CSV, sa.sql.Selectable)
def append_table_to_csv(csv, selectable, dshape=None, bind=None, **kwargs):
    """Dump a selectable into a CSV file using the dialect's native
    COPY/OUTFILE mechanism (see the CopyToCSV compilers above)."""
    # Only forward csv dialect options that CopyToCSV actually accepts.
    kwargs = keyfilter(keywords(CopyToCSV).__contains__,
                       merge(csv.dialect, kwargs))
    stmt = CopyToCSV(
        selectable,
        os.path.abspath(csv.path),
        bind=bind,
        **kwargs
    )
    with getbind(selectable, bind).begin() as conn:
        conn.execute(stmt)
    return csv
try:
    from .hdfs import HDFS
except ImportError:
    # HDFS support is optional; skip the registration when unavailable.
    pass
else:
    @append.register(HDFS(CSV), sa.sql.Selectable)
    def append_selectable_to_hdfs_csv(*args, **kwargs):
        # Explicitly unsupported: MDNotImplementedError lets
        # multipledispatch fall back to other registered implementations.
        raise MDNotImplementedError()
| {
"content_hash": "b273459f931f0a27d19149c33bc56110",
"timestamp": "",
"source": "github",
"line_count": 747,
"max_line_length": 113,
"avg_line_length": 33.572958500669344,
"alnum_prop": 0.6080385980302245,
"repo_name": "cpcloud/odo",
"id": "802d95c9712e754803773f466b2733d3a611e321",
"size": "25079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odo/backends/sql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "429963"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, unicode_literals, absolute_import
from .base import MittensBaseInterface, IFLOGGER
import os.path as op
import numpy as np
from nipype.interfaces.base import (traits, File, isdefined,
BaseInterfaceInputSpec, TraitedSpec)
from glob import glob
class MittensTransitionProbabilityCalcInputSpec(BaseInterfaceInputSpec):
    """Input traits for MittensTransitionProbabilityCalc; forwarded
    almost verbatim to the ``mittens.MITTENS`` constructor."""
    fibgz_file = File(exists=True, mandatory=True,
                      desc=('fib.gz (with ODF data) file from DSI Studio'))
    odf_resolution = traits.Enum(("odf8", "odf6", "odf4"), usedefault=True)
    nifti_prefix = traits.Str("mittens", usedefault=True,
                              desc=('output prefix for file names'))
    real_affine_image = File(exists=True, mandatory=False,
                             desc=('NIfTI image with real affine to use'))
    mask_image = File(exists=True, mandatory=False,
                      desc=('Only include non-zero voxels from this mask'))
    step_size = traits.Float(np.sqrt(3)/2, usedefault=True,
                             desc=('Step size (in voxels)'))
    angle_max = traits.Float(35., usedefault=True,
                             desc=('Turning angle maximum (in degrees)'))
    angle_weights = traits.Enum(("flat", "weighted"), usedefault=True,
                                desc=('How to weight sequential turning angles'))
    angle_weighting_power = traits.Float(1., usedefault=True, desc=(
        'Sharpness of conditional turning angle probability distribution(in degrees)'))
    normalize_doubleODF = traits.Bool(True,usedefault=True,desc=("This should be True"))
class MittensTransitionProbabilityCalcOutputSpec(TraitedSpec):
    """Output traits: CoDI/CoAsy NIfTI maps plus per-direction
    transition-probability volumes (paths derived from nifti_prefix)."""
    singleODF_CoDI = File(desc='')
    doubleODF_CoDI = File(desc='')
    doubleODF_CoAsy = File(desc='')
    singleODF_probabilities = traits.List(desc=(''))
    doubleODF_probabilities = traits.List(desc=(''))
    nifti_prefix = traits.Str('')
class MittensTransitionProbabilityCalc(MittensBaseInterface):
    """
    Calculates inter-voxel tract transition expectations (transition probabilities)
    [Cieslak2017]_
    .. [Cieslak2017] Cieslak, M., et al. NeuroImage 2017?.
      Analytic tractography: A closed-form solution for estimating local white matter
      connectivity with diffusion MRI
    Example
    -------
    >>> from mittens.interfaces import MittensTransitionProbabilityCalc
    >>> mtpc = MittensTransitionProbabilityCalc()
    >>> mtpc.inputs.fibgz_file = 'something.odf8.fib.gz'
    >>> res = mtpc.run() # doctest: +SKIP
    """
    input_spec = MittensTransitionProbabilityCalcInputSpec
    output_spec = MittensTransitionProbabilityCalcOutputSpec
    def _run_interface(self, runtime):
        from mittens import MITTENS
        # Optional inputs fall back to "" -- presumably MITTENS treats an
        # empty string as "not provided" (TODO confirm in mittens).
        mask_image = self.inputs.mask_image if isdefined(self.inputs.mask_image) else ""
        aff_img = self.inputs.real_affine_image if isdefined(self.inputs.real_affine_image) else ""
        mitns = MITTENS(
            fibgz_file=self.inputs.fibgz_file,
            odf_resolution=self.inputs.odf_resolution,
            real_affine_image = aff_img,
            mask_image = mask_image,
            step_size = self.inputs.step_size,
            angle_max = self.inputs.angle_max,
            angle_weights = self.inputs.angle_weights,
            angle_weighting_power = self.inputs.angle_weighting_power,
            normalize_doubleODF= self.inputs.normalize_doubleODF
        )
        IFLOGGER.info('Calculating transition probabilities')
        mitns.calculate_transition_probabilities(output_prefix=self.inputs.nifti_prefix)
        return runtime
    def _list_outputs(self):
        # Output paths are derived from the prefix; the per-direction
        # probability volumes are collected via glob.
        outputs = self._outputs().get()
        prefix = op.abspath(self.inputs.nifti_prefix)
        outputs['singleODF_CoDI'] = prefix + '_singleODF_CoDI.nii.gz'
        outputs['doubleODF_CoDI'] = prefix + '_doubleODF_CoDI.nii.gz'
        outputs['doubleODF_CoAsy'] = prefix + '_doubleODF_CoAsy.nii.gz'
        outputs['singleODF_probabilities'] = glob(prefix+"*_singleODF_*_prob.nii.gz")
        outputs['doubleODF_probabilities'] = glob(prefix+"*_doubleODF_*_prob.nii.gz")
        outputs['nifti_prefix'] = prefix
        return outputs
| {
"content_hash": "c7aa1a48986136bcc887bdc0abe318bb",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 99,
"avg_line_length": 46.855555555555554,
"alnum_prop": 0.6540194451031539,
"repo_name": "mattcieslak/MITTENS",
"id": "380b6c0b721f6a831ef92637bc2037b1116e4caf",
"size": "4241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mittens/interfaces/transition_probabilities.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1911"
},
{
"name": "Fortran",
"bytes": "14662998"
},
{
"name": "Jupyter Notebook",
"bytes": "118778"
},
{
"name": "Python",
"bytes": "143506"
},
{
"name": "Shell",
"bytes": "710"
}
],
"symlink_target": ""
} |
"""
The `mrs` package of pyDelphin contains classes and methods related to
Minimal Recursion Semantics (Copestake et al. 2005). In addition to
MRS, there are the related formalisms Robust Minimal Recursion Semantics
(RMRS; Copestake, 2003) and Dependency Minimal Recursion Semantics
(DMRS; Copestake, 2009). In pyDelphin, the greater MRS formalism is
referred to as \*MRS (so "MRS" then refers to the original formalism),
and the |Xmrs| class is implemented to handle all of them (note: RMRS
support is postponed until a need is established).
Users will interact mostly with |Xmrs| objects, but will not often
instantiate them directly. Instead, they are created by deserializing
one of the various formats (such as :py:mod:`~delphin.mrs.simplemrs`,
:py:mod:`~delphin.mrs.mrx`, or :py:mod:`~delphin.mrs.dmrx`). No matter
what serialization format (or formalism) is used to load a \*MRS
structure, it will be stored the same way in memory, so any queries or
actions taken on these structures will use the same methods.
Internally, an |Xmrs| object may be built up of various component
classes, such as |ElementaryPredication|, |Node|, or
|HandleConstraint|.
"""
# notes for future documentation:
# ivs: the variable of the intrinsic
# argument of an |EP|. Conventionally, the sort of an IV is either
# ``e`` or ``x``. IVs are sometimes called "distinguished
# variables", "characteristic variables", or "ARG0s"
# labels: Every |EP| has a label, which is used to define quantifier
# scope. When more than one |EP| share a label, they share a scope,
# and are said to be in an **EP Conjunction**.
# check dependencies
#
# The `imp` module was deprecated since Python 3.4 and removed in 3.12;
# `importlib.util.find_spec` is the supported way to probe for a module
# without importing it (it returns None when the module is absent).
import importlib.util
if importlib.util.find_spec('networkx') is None:
    msg = '''\n
The `networkx` package is required for the `delphin.mrs` package.
You can install `networkx` in several ways:
  * With your operating system\'s package manager (e.g.
    `apt-get install python3-networkx` or `pacman -S python-networkx)
  * With PIP (e.g. `pip install networkx`); make sure you're installing
    for Python3 (you may need to use `pip3` instead of `pip`)
  * Or from the project homepage: http://networkx.github.io'''
    raise ImportError(msg)
# these may be order-sensitive
from .components import (
    Hook, Lnk, Node, ElementaryPredication, MrsVariable, Argument,
    HandleConstraint, Pred, Link
)
from .xmrs import Xmrs, Mrs, Dmrs, Rmrs
# __all__ entries must be *strings* naming the public attributes; listing the
# objects themselves makes `from delphin.mrs import *` raise a TypeError.
# (Rmrs is imported above but deliberately left out of the public list.)
__all__ = ['Hook', 'Lnk', 'Node', 'ElementaryPredication', 'MrsVariable',
           'Argument', 'HandleConstraint', 'Pred', 'Link', 'Xmrs', 'Mrs',
           'Dmrs']
def convert(txt, src_fmt, tgt_fmt, single=True, **kwargs):
    r"""
    Convert a textual representation of \*MRS from the src_fmt
    representation to the tgt_fmt representation. By default, only
    read and convert a single \*MRS object (e.g. for `mrx` this
    starts at <mrs> and not <mrs-list>), but changing the `mode`
    argument to `corpus` (alternatively: `list`) reads and converts
    multiple \*MRSs.

    Args:
        txt: A string of semantic data.
        src_fmt: The original representation format of txt.
        tgt_fmt: The representation format to convert to.
        single: If True, assume txt represents a single \*MRS, otherwise
            read it as a corpus (or list) of \*MRSs.
        kwargs: Any other keyword arguments to pass to the serializer
            of the target format. Some options may include:

            ============  ====================================
            option        description
            ============  ====================================
            pretty_print  print with newlines and indentation
            color         print with syntax highlighting
            ============  ====================================
    Returns:
        A string in the target format.
    Raises:
        ImportError: if no serializer module exists for src_fmt or tgt_fmt.
    Formats:
        src_fmt and tgt_fmt may be one of the following:

        =========  ============================
        format     description
        =========  ============================
        simplemrs  The popular SimpleMRS format
        mrx        The XML format of MRS
        dmrx       The XML format of DMRS
        =========  ============================
    """
    from importlib import import_module

    def _load_serializer(fmt):
        # Serializer modules live alongside this package, one per format
        # (e.g. delphin.mrs.simplemrs); resolve them lazily by name so that
        # only the formats actually used need to be importable.
        try:
            return import_module('{}.{}'.format('delphin.mrs', fmt.lower()))
        except ImportError as ex:
            msg = '\nCannot find serializer: {}'.format(fmt.lower())
            raise ImportError(msg) from ex

    # The same lookup-and-fail logic applies to both ends of the conversion.
    reader = _load_serializer(src_fmt)
    writer = _load_serializer(tgt_fmt)
    return writer.dumps(
        reader.loads(txt, single=single),
        single=single,
        **kwargs
    )
#def load(fh, fmt, **kwargs):
# return loads(fh.read(), fmt, single=single)
#
#def loads(s, fmt, **kwargs):
# reader = serialization_formats[fmt.lower()]
# return reader.deserialize_corpus(s, **kwargs)
#
#def dump(fh, x, fmt, **kwargs):
# print(dumps(x, fmt, **kwargs), file=fh)
#
#def dumps(x, fmt, **kwargs):
# writer = serialization_formats[fmt.lower()]
# return writer.serialize_corpus(x, **kwargs)
#
#def load_one(fh, fmt, **kwargs):
# return loads_one(fh, fmt, **kwargs)
#
#def loads_one(s, fmt, **kwargs):
# reader = serialization_formats[fmt.lower()]
# return reader.deserialize_one(s, **kwargs)
#
#def dump_one(fh, x, fmt, **kwargs):
# print(dumps_one(x, fmt, **kwargs), file=fh)
#
#def dumps_one(x, fmt, **kwargs):
# writer = serialization_formats[fmt.lower()]
# return writer.serialize_one(x, **kwargs)
| {
"content_hash": "b49f971df4fd184b3cdbfa90c07f7304",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 39.785714285714285,
"alnum_prop": 0.6430879712746859,
"repo_name": "dantiston/pydelphin",
"id": "955852fb6d805ffba0bee00322ae7e92a4ac8154",
"size": "5571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "delphin/mrs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6774"
},
{
"name": "Python",
"bytes": "250136"
},
{
"name": "Shell",
"bytes": "6707"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from muser.models import UserProfile
import json
import logging
logger = logging.getLogger(__name__)
#@login_required
def luckyday(request):
    """Persist the lucky day posted by the client onto the user's profile.

    Expects ``POST['luck']`` to be a JSON array whose first element carries
    ``username`` and ``date`` keys.
    """
    payload = json.loads(request.POST.get('luck'))
    record = payload[0]
    # The user is looked up by the username embedded in the payload rather
    # than request.user, since the login_required decorator is disabled.
    user = User.objects.get(username=record["username"])
    profile = user.get_profile()
    profile.luckyday = record["date"]
    profile.save()
    return HttpResponse(200)
#@login_required
def getday(request):
    """Return the stored lucky day for the user named in ``POST['luck']``.

    The response body is a JSON array: ``[{"luckyday": "<date>"}]``.
    """
    payload = json.loads(request.POST.get('luck'))
    # Username comes from the payload because the login decorator is disabled.
    user = User.objects.get(username=payload[0]["username"])
    profile = user.get_profile()
    return HttpResponse(toJSON([
        {"luckyday": str(profile.luckyday)},
    ]))
def toJSON(object):
    """Dump the data represented by the object to JSON for wire transfer.

    ``ensure_ascii=False`` keeps non-ASCII characters literal instead of
    escaping them, so the wire format stays human-readable.
    """
    # NOTE: the parameter shadows the builtin ``object``; the name is kept
    # unchanged for backward compatibility with existing callers.
    return json.JSONEncoder(ensure_ascii=False).encode(object)
| {
"content_hash": "b9e8a1a6def2107f82468d3fda69cbcb",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.7532467532467533,
"repo_name": "youtaya/mothertree",
"id": "7cd7ef61669f3619cb4df5f9c90a64ee2bf250ea",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monthertree/muser/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83354"
},
{
"name": "HTML",
"bytes": "448"
},
{
"name": "JavaScript",
"bytes": "72342"
},
{
"name": "Python",
"bytes": "59011"
},
{
"name": "Shell",
"bytes": "50"
}
],
"symlink_target": ""
} |
"""Layers for Sparse Transformer models."""
import tensorflow.compat.v1 as tf
from sgk.sparse import ops
def preprocess_attention_component(x):
    """Collapse the two leading axes of a rank-4 tensor into one.

    Presumably the input is [batch, heads, length, depth]; the first two
    axes are merged (via -1) so the replicated sparse kernels receive the
    rank-3 layout they expect -- TODO confirm axis semantics with callers.
    """
    dims = x.shape.as_list()
    assert len(dims) == 4
    merged_shape = [-1] + dims[2:]
    return tf.reshape(x, merged_shape)
def sparse_dot_product_attention(q, k, v, topology, **_):
    """Dot-product attention restricted to a sparse attention topology."""
    # Collapse each rank-4 input to rank-3 for the replicated sparse kernels.
    flat_q = preprocess_attention_component(q)
    flat_k = preprocess_attention_component(k)
    flat_v = preprocess_attention_component(v)
    # q @ k^T evaluated only at the nonzeros of `topology` (SDDMM), softmax
    # over the same sparsity pattern, then sparse-times-dense with v.
    scores = ops.replicated_sddmm(flat_q, flat_k, topology, transpose_rhs=True)
    attention = ops.replicated_sparse_softmax(scores, topology)
    context = ops.replicated_spmm(attention, topology, flat_v)
    # Restore the caller's original rank-4 shape.
    return tf.reshape(context, tf.shape(q))
def dot_product_attention(q, k, v, bias, **_):
    """Dot product attention with our memory efficient softmax."""
    # Scores are q @ k^T plus an additive bias (e.g. masking), normalized by
    # the fused softmax kernel before weighting the values.
    scores = tf.math.add(tf.matmul(q, k, transpose_b=True), bias)
    return tf.matmul(ops.fused_softmax(scores), v)
| {
"content_hash": "6a125e040901b81a3ab0ce3b868169e9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 33.34615384615385,
"alnum_prop": 0.6943483275663207,
"repo_name": "google-research/google-research",
"id": "1057b38e839212ac3a14e3ff0e20f187e06244c8",
"size": "1475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgk/transformer/layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import atexit
import fcntl
import logging
import os
import signal
import sys
from types import FrameType, TracebackType
from typing import NoReturn, Optional, Type
def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -> None:
    """daemonize the current process

    This calls fork(), and has the main process exit. When it returns we will be
    running in the child process.

    Args:
        pid_file: path to the pidfile; it is exclusively locked and holds this
            daemon's pid for the lifetime of the process, then removed at exit.
        logger: logger used for lifecycle messages and uncaught exceptions.
        chdir: directory to change into after detaching, so the daemon does
            not pin whatever directory it was started from.
    """
    # If a pidfile already exists, remember its contents up front: we open it
    # with mode "w" below (which truncates it), so if the lock attempt then
    # fails we must restore the previous pid we just clobbered.
    if os.path.isfile(pid_file):
        with open(pid_file) as pid_fh:
            old_pid = pid_fh.read()
    # Create a lockfile so that only one instance of this daemon is running at any time.
    try:
        lock_fh = open(pid_file, "w")
    except OSError:
        print("Unable to create the pidfile.")
        sys.exit(1)
    try:
        # Try to get an exclusive lock on the file. This will fail if another process
        # has the file locked.
        fcntl.flock(lock_fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        print("Unable to lock on the pidfile.")
        # We need to overwrite the pidfile if we got here.
        #
        # XXX better to avoid overwriting it, surely. this looks racey as the pid file
        # could be created between us trying to read it and us trying to lock it.
        with open(pid_file, "w") as pid_fh:
            pid_fh.write(old_pid)
        sys.exit(1)
    # Fork, creating a new process for the child.
    process_id = os.fork()
    if process_id != 0:
        # parent process: exit.
        # we use os._exit to avoid running the atexit handlers. In particular, that
        # means we don't flush the logs. This is important because if we are using
        # a MemoryHandler, we could have logs buffered which are now buffered in both
        # the main and the child process, so if we let the main process flush the logs,
        # we'll get two copies.
        os._exit(0)
    # This is the child process. Continue.
    # Stop listening for signals that the parent process receives.
    # This is done by getting a new process id.
    # setpgrp() is an alternative to setsid().
    # setsid puts the process in a new parent group and detaches its controlling
    # terminal.
    os.setsid()
    # point stdin, stdout, stderr at /dev/null
    devnull = "/dev/null"
    if hasattr(os, "devnull"):
        # Python has set os.devnull on this system, use it instead as it might be
        # different than /dev/null.
        devnull = os.devnull
    # dup2 onto fds 0/1/2 atomically replaces the standard streams; the extra
    # descriptor is then closed since the three dups keep /dev/null open.
    devnull_fd = os.open(devnull, os.O_RDWR)
    os.dup2(devnull_fd, 0)
    os.dup2(devnull_fd, 1)
    os.dup2(devnull_fd, 2)
    os.close(devnull_fd)
    # now that we have redirected stderr to /dev/null, any uncaught exceptions will
    # get sent to /dev/null, so make sure we log them.
    #
    # (we don't normally expect reactor.run to raise any exceptions, but this will
    # also catch any other uncaught exceptions before we get that far.)
    def excepthook(
        type_: Type[BaseException],
        value: BaseException,
        traceback: Optional[TracebackType],
    ) -> None:
        # Logged at critical since the process is about to die unexpectedly.
        logger.critical("Unhanded exception", exc_info=(type_, value, traceback))
    sys.excepthook = excepthook
    # Set umask to default to safe file permissions when running as a root daemon. 027
    # is an octal number which we are typing as 0o27 for Python3 compatibility.
    os.umask(0o27)
    # Change to a known directory. If this isn't done, starting a daemon in a
    # subdirectory that needs to be deleted results in "directory busy" errors.
    os.chdir(chdir)
    # Record the (child) daemon's pid in the still-locked pidfile.
    try:
        lock_fh.write("%s" % (os.getpid()))
        lock_fh.flush()
    except OSError:
        logger.error("Unable to write pid to the pidfile.")
        print("Unable to write pid to the pidfile.")
        sys.exit(1)
    # write a log line on SIGTERM.
    def sigterm(signum: int, frame: Optional[FrameType]) -> NoReturn:
        logger.warning("Caught signal %s. Stopping daemon." % signum)
        sys.exit(0)
    signal.signal(signal.SIGTERM, sigterm)
    # Cleanup pid file at exit.
    def exit() -> None:
        logger.warning("Stopping daemon.")
        os.remove(pid_file)
        sys.exit(0)
    atexit.register(exit)
    logger.warning("Starting daemon.")
| {
"content_hash": "7d35fbf24c02385c9dea8f4bc7bd52f1",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 88,
"avg_line_length": 34.44094488188976,
"alnum_prop": 0.6499771376314586,
"repo_name": "matrix-org/synapse",
"id": "031880ec39e13bce185872f16e770307b8b7ac00",
"size": "5047",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "synapse/util/daemonize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7229"
},
{
"name": "Dockerfile",
"bytes": "9316"
},
{
"name": "Gherkin",
"bytes": "441"
},
{
"name": "HTML",
"bytes": "66000"
},
{
"name": "JavaScript",
"bytes": "15635"
},
{
"name": "Jinja",
"bytes": "7687"
},
{
"name": "Lua",
"bytes": "241"
},
{
"name": "Perl",
"bytes": "28191"
},
{
"name": "Python",
"bytes": "10632037"
},
{
"name": "Rust",
"bytes": "57034"
},
{
"name": "Shell",
"bytes": "53124"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.comments.forms import CommentForm
from comments.models import CommentWithRating
from comments.widgets import StarsRadioFieldRenderer
# Ratings are whole stars from 1 to 5; each choice uses the numeric value as
# both the stored value and the displayed label.
RATING_CHOICES = tuple((star, star) for star in range(1, 6))
class CommentFormWithRating(CommentForm):
    """Comment form extended with a star-rating field."""

    rating = forms.CharField(
        widget=forms.RadioSelect(
            renderer=StarsRadioFieldRenderer,
            attrs={'class': 'star'},
            choices=RATING_CHOICES,
        )
    )

    def get_comment_model(self):
        """Return the comment model to instantiate for this form."""
        # Use our custom comment model instead of the built-in one.
        return CommentWithRating

    def get_comment_create_data(self):
        """Augment the base creation data with the submitted rating."""
        data = super(CommentFormWithRating, self).get_comment_create_data()
        data['rating'] = self.cleaned_data['rating']
        return data
| {
"content_hash": "4d190df9d7d7f700082380bacf538f22",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 136,
"avg_line_length": 32.34615384615385,
"alnum_prop": 0.6932223543400713,
"repo_name": "copelco/Durham-Open-Data-Catalog",
"id": "3e21d60523ebdceefa78eb9fe7fb9f313ce82ebd",
"size": "841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenDataCatalog/comments/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "45"
},
{
"name": "JavaScript",
"bytes": "36551"
},
{
"name": "Python",
"bytes": "136150"
},
{
"name": "Ruby",
"bytes": "1417"
},
{
"name": "Scheme",
"bytes": "5830"
}
],
"symlink_target": ""
} |
import logging
import asyncio
import datetime
import warnings
from typing import Any, TYPE_CHECKING, Union, List, Optional, Mapping, cast
import uamqp
from uamqp import SendClientAsync, types
from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential
from .._common.message import (
ServiceBusMessage,
ServiceBusMessageBatch,
)
from ..amqp import AmqpAnnotatedMessage
from .._servicebus_sender import SenderMixin
from ._base_handler_async import BaseHandler
from .._common.constants import (
REQUEST_RESPONSE_SCHEDULE_MESSAGE_OPERATION,
REQUEST_RESPONSE_CANCEL_SCHEDULED_MESSAGE_OPERATION,
MGMT_REQUEST_SEQUENCE_NUMBERS,
SPAN_NAME_SCHEDULE,
)
from .._common import mgmt_handlers
from .._common.utils import (
transform_messages_if_needed,
send_trace_context_manager,
trace_message,
)
from ._async_utils import create_authentication
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
# Accepted input types for APIs taking "a message or messages": plain mappings,
# ServiceBusMessage, AmqpAnnotatedMessage, or a list mixing any of those.
MessageTypes = Union[
    Mapping[str, Any],
    ServiceBusMessage,
    AmqpAnnotatedMessage,
    List[Union[Mapping[str, Any], ServiceBusMessage, AmqpAnnotatedMessage]],
]
# Message types after normalization (no plain mappings); batches are allowed.
MessageObjTypes = Union[
    ServiceBusMessage,
    ServiceBusMessageBatch,
    AmqpAnnotatedMessage,
    List[Union[ServiceBusMessage, AmqpAnnotatedMessage]],
]
# Module-level logger named after this module, per logging convention.
_LOGGER = logging.getLogger(__name__)
class ServiceBusSender(BaseHandler, SenderMixin):
    """The ServiceBusSender class defines a high level interface for
    sending messages to the Azure Service Bus Queue or Topic.

    **Please use the `get_<queue/topic>_sender` method of ~azure.servicebus.aio.ServiceBusClient to create a
    ServiceBusSender instance.**

    :ivar fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
        The namespace format is: `<yournamespace>.servicebus.windows.net`.
    :vartype fully_qualified_namespace: str
    :ivar entity_name: The name of the entity that the client connects to.
    :vartype entity_name: str
    :param str fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
        The namespace format is: `<yournamespace>.servicebus.windows.net`.
    :param credential: The credential object used for authentication which
        implements a particular interface for getting tokens. It accepts
        credential objects generated by the azure-identity library and objects that implement the
        `get_token(self, *scopes)` method, or alternatively, an AzureSasCredential can be provided too.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential or ~azure.core.credentials.AzureSasCredential
        or ~azure.core.credentials.AzureNamedKeyCredential
    :keyword str queue_name: The path of specific Service Bus Queue the client connects to.
        Only one of queue_name or topic_name can be provided.
    :keyword str topic_name: The path of specific Service Bus Topic the client connects to.
        Only one of queue_name or topic_name can be provided.
    :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
    :keyword transport_type: The type of transport protocol that will be used for communicating with
        the Service Bus service. Default is `TransportType.Amqp`.
    :paramtype transport_type: ~azure.servicebus.TransportType
    :keyword Dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
        keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
        Additionally the following keys may also be present: `'username', 'password'`.
    :keyword str user_agent: If specified, this will be added in front of the built-in user agent string.
    :keyword str client_identifier: A string-based identifier to uniquely identify the client instance.
        Service Bus will associate it with some error messages for easier correlation of errors.
        If not specified, a unique id will be generated.
    """
    def __init__(
        self,
        fully_qualified_namespace: str,
        credential: Union[
            "AsyncTokenCredential", AzureSasCredential, AzureNamedKeyCredential
        ],
        *,
        queue_name: Optional[str] = None,
        topic_name: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        # An "entity_name" kwarg indicates an internal construction path where
        # the entity is already resolved; otherwise exactly one of
        # queue_name/topic_name must be supplied by the caller.
        if kwargs.get("entity_name"):
            super(ServiceBusSender, self).__init__(
                fully_qualified_namespace=fully_qualified_namespace,
                credential=credential,
                **kwargs,
            )
        else:
            if queue_name and topic_name:
                raise ValueError(
                    "Queue/Topic name can not be specified simultaneously."
                )
            if not (queue_name or topic_name):
                raise ValueError(
                    "Queue/Topic name is missing. Please specify queue_name/topic_name."
                )
            entity_name = queue_name or topic_name
            super(ServiceBusSender, self).__init__(
                fully_qualified_namespace=fully_qualified_namespace,
                credential=credential,
                entity_name=str(entity_name),
                queue_name=queue_name,
                topic_name=topic_name,
                **kwargs,
            )
        # 0 means "not yet negotiated"; the real value is read off the AMQP
        # link after the first successful _open().
        self._max_message_size_on_link = 0
        self._create_attribute(**kwargs)
        self._connection = kwargs.get("connection")
    @classmethod
    def _from_connection_string(
        cls, conn_str: str, **kwargs: Any
    ) -> "ServiceBusSender":
        """Create a ServiceBusSender from a connection string.

        :param str conn_str: The connection string of a Service Bus.
        :keyword str queue_name: The path of specific Service Bus Queue the client connects to.
        :keyword str topic_name: The path of specific Service Bus Topic the client connects to.
        :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
        :keyword transport_type: The type of transport protocol that will be used for communicating with
            the Service Bus service. Default is `TransportType.Amqp`.
        :paramtype transport_type: ~azure.servicebus.TransportType
        :keyword Dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
            keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
            Additionally the following keys may also be present: `'username', 'password'`.
        :keyword str user_agent: If specified, this will be added in front of the built-in user agent string.
        :rtype: ~azure.servicebus.aio.ServiceBusSender
        :raises ~azure.servicebus.ServiceBusAuthenticationError: Indicates an issue in token/identity validity.
        :raises ~azure.servicebus.ServiceBusAuthorizationError: Indicates an access/rights related failure.

        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START create_servicebus_sender_from_conn_str_async]
                :end-before: [END create_servicebus_sender_from_conn_str_async]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusSender from connection string.
        """
        constructor_args = cls._convert_connection_string_to_kwargs(conn_str, **kwargs)
        return cls(**constructor_args)
    def _create_handler(self, auth):
        """Build the uAMQP SendClientAsync for this sender's entity."""
        self._handler = SendClientAsync(
            self._entity_uri,
            auth=auth,
            debug=self._config.logging_enable,
            properties=self._properties,
            error_policy=self._error_policy,
            client_name=self._name,
            keep_alive_interval=self._config.keep_alive,
            encoding=self._config.encoding,
        )
    async def _open(self):
        """Open the AMQP client and wait until the link is ready to send.

        No-op when already running; tears the handler down on any failure.
        """
        # pylint: disable=protected-access
        if self._running:
            return
        if self._handler:
            await self._handler.close_async()
        # When reusing a shared connection, auth is established elsewhere.
        auth = None if self._connection else (await create_authentication(self))
        self._create_handler(auth)
        try:
            await self._handler.open_async(connection=self._connection)
            # Poll until the client reports ready; client_ready_async returns
            # falsy while the link handshake is still in flight.
            while not await self._handler.client_ready_async():
                await asyncio.sleep(0.05)
            self._running = True
            self._max_message_size_on_link = (
                self._handler.message_handler._link.peer_max_message_size
                or uamqp.constants.MAX_MESSAGE_LENGTH_BYTES
            )
        except:
            # Ensure a half-open handler is cleaned up before surfacing the error.
            await self._close_handler()
            raise
    async def _send(self, message, timeout=None, last_exception=None):
        """Send one message/batch, applying the per-call timeout to the handler."""
        await self._open()
        default_timeout = self._handler._msg_timeout  # pylint: disable=protected-access
        try:
            self._set_msg_timeout(timeout, last_exception)
            await self._handler.send_message_async(message.message)
        finally:  # reset the timeout of the handler back to the default value
            self._set_msg_timeout(default_timeout, None)
    async def schedule_messages(
        self,
        messages: MessageTypes,
        schedule_time_utc: datetime.datetime,
        *,
        timeout: Optional[float] = None,
        **kwargs: Any,
    ) -> List[int]:
        """Send Message or multiple Messages to be enqueued at a specific time by the service.
        Returns a list of the sequence numbers of the enqueued messages.

        :param messages: The message or list of messages to schedule.
        :type messages: Union[~azure.servicebus.ServiceBusMessage, ~azure.servicebus.amqp.AmqpAnnotatedMessage,
            List[Union[~azure.servicebus.ServiceBusMessage, ~azure.servicebus.amqp.AmqpAnnotatedMessage]]]
        :param schedule_time_utc: The utc date and time to enqueue the messages.
        :type schedule_time_utc: ~datetime.datetime
        :keyword float timeout: The total operation timeout in seconds including all the retries. The value must be
            greater than 0 if specified. The default value is None, meaning no timeout.
        :rtype: list[int]

        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START scheduling_messages_async]
                :end-before: [END scheduling_messages_async]
                :language: python
                :dedent: 4
                :caption: Schedule a message to be sent in future
        """
        if kwargs:
            warnings.warn(f"Unsupported keyword args: {kwargs}")
        # pylint: disable=protected-access
        self._check_live()
        obj_messages = transform_messages_if_needed(messages, ServiceBusMessage)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        with send_trace_context_manager(span_name=SPAN_NAME_SCHEDULE) as send_span:
            if isinstance(obj_messages, ServiceBusMessage):
                request_body = self._build_schedule_request(
                    schedule_time_utc, send_span, obj_messages
                )
            else:
                if len(obj_messages) == 0:
                    return []  # No-op on empty list.
                request_body = self._build_schedule_request(
                    schedule_time_utc, send_span, *obj_messages
                )
            if send_span:
                await self._add_span_request_attributes(send_span)
            # The schedule request is a management operation; the handler
            # returns the service-assigned sequence numbers.
            return await self._mgmt_request_response_with_retry(
                REQUEST_RESPONSE_SCHEDULE_MESSAGE_OPERATION,
                request_body,
                mgmt_handlers.schedule_op,
                timeout=timeout,
            )
    async def cancel_scheduled_messages(
        self,
        sequence_numbers: Union[int, List[int]],
        *,
        timeout: Optional[float] = None,
        **kwargs: Any,
    ) -> None:
        """
        Cancel one or more messages that have previously been scheduled and are still pending.

        :param sequence_numbers: The sequence numbers of the scheduled messages.
        :type sequence_numbers: int or list[int]
        :keyword float timeout: The total operation timeout in seconds including all the retries. The value must be
            greater than 0 if specified. The default value is None, meaning no timeout.
        :rtype: None
        :raises: ~azure.servicebus.exceptions.ServiceBusError if messages cancellation failed due to message already
            cancelled or enqueued.

        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START cancel_scheduled_messages_async]
                :end-before: [END cancel_scheduled_messages_async]
                :language: python
                :dedent: 4
                :caption: Cancelling messages scheduled to be sent in future
        """
        if kwargs:
            warnings.warn(f"Unsupported keyword args: {kwargs}")
        self._check_live()
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        # Sequence numbers cross the wire as AMQP long values.
        if isinstance(sequence_numbers, int):
            numbers = [types.AMQPLong(sequence_numbers)]
        else:
            numbers = [types.AMQPLong(s) for s in sequence_numbers]
        if len(numbers) == 0:
            return None  # no-op on empty list.
        request_body = {MGMT_REQUEST_SEQUENCE_NUMBERS: types.AMQPArray(numbers)}
        return await self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_CANCEL_SCHEDULED_MESSAGE_OPERATION,
            request_body,
            mgmt_handlers.default,
            timeout=timeout,
        )
    async def send_messages(
        self,
        message: Union[MessageTypes, ServiceBusMessageBatch],
        *,
        timeout: Optional[float] = None,
        **kwargs: Any,
    ) -> None:
        """Sends message and blocks until acknowledgement is received or operation times out.

        If a list of messages was provided, attempts to send them as a single batch, throwing a
        `ValueError` if they cannot fit in a single batch.

        :param message: The ServiceBus message to be sent.
        :type message: Union[~azure.servicebus.ServiceBusMessage, ~azure.servicebus.ServiceBusMessageBatch,
            ~azure.servicebus.amqp.AmqpAnnotatedMessage, List[Union[~azure.servicebus.ServiceBusMessage,
            ~azure.servicebus.amqp.AmqpAnnotatedMessage]]]
        :keyword Optional[float] timeout: The total operation timeout in seconds including all the retries.
            The value must be greater than 0 if specified. The default value is None, meaning no timeout.
        :rtype: None
        :raises:
            :class: ~azure.servicebus.exceptions.OperationTimeoutError if sending times out.
            :class: ~azure.servicebus.exceptions.MessageSizeExceededError if the size of the message is over
              service bus frame size limit.
            :class: ~azure.servicebus.exceptions.ServiceBusError when other errors happen such as connection
              error, authentication error, and any unexpected errors.
              It's also the top-level root class of above errors.

        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START send_async]
                :end-before: [END send_async]
                :language: python
                :dedent: 4
                :caption: Send message.
        """
        if kwargs:
            warnings.warn(f"Unsupported keyword args: {kwargs}")
        self._check_live()
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        with send_trace_context_manager() as send_span:
            if isinstance(message, ServiceBusMessageBatch):
                obj_message = message  # type: MessageObjTypes
            else:
                obj_message = transform_messages_if_needed(  # type: ignore
                    message, ServiceBusMessage
                )
                # A list/generator of messages is packed into a single batch;
                # a single message falls through to the TypeError branch and
                # is traced individually.
                try:
                    batch = await self.create_message_batch()
                    batch._from_list(obj_message, send_span)  # type: ignore # pylint: disable=protected-access
                    obj_message = batch
                except TypeError:  # Message was not a list or generator.
                    trace_message(cast(ServiceBusMessage, obj_message), send_span)
            if (
                isinstance(obj_message, ServiceBusMessageBatch)
                and len(obj_message) == 0
            ):  # pylint: disable=len-as-condition
                return  # Short circuit noop if an empty list or batch is provided.
            if send_span:
                await self._add_span_request_attributes(send_span)
            await self._do_retryable_operation(
                self._send,
                message=obj_message,
                timeout=timeout,
                operation_requires_timeout=True,
                require_last_exception=True,
            )
    async def create_message_batch(
        self, max_size_in_bytes: Optional[int] = None
    ) -> ServiceBusMessageBatch:
        """Create a ServiceBusMessageBatch object with the max size of all content being constrained by
        max_size_in_bytes. The max_size should be no greater than the max allowed message size defined by the service.

        :param Optional[int] max_size_in_bytes: The maximum size of bytes data that a ServiceBusMessageBatch object can
            hold. By default, the value is determined by your Service Bus tier.
        :rtype: ~azure.servicebus.ServiceBusMessageBatch

        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START create_batch_async]
                :end-before: [END create_batch_async]
                :language: python
                :dedent: 4
                :caption: Create ServiceBusMessageBatch object within limited size
        """
        self._check_live()
        # The link must be opened at least once to learn the negotiated
        # maximum message size before a batch limit can be validated.
        if not self._max_message_size_on_link:
            await self._open_with_retry()
        if max_size_in_bytes and max_size_in_bytes > self._max_message_size_on_link:
            raise ValueError(
                "Max message size: {} is too large, acceptable max batch size is: {} bytes.".format(
                    max_size_in_bytes, self._max_message_size_on_link
                )
            )
        return ServiceBusMessageBatch(
            max_size_in_bytes=(max_size_in_bytes or self._max_message_size_on_link)
        )
    @property
    def client_identifier(self) -> str:
        """
        Get the ServiceBusSender client identifier associated with the sender instance.

        :rtype: str
        """
        return self._name
    def __str__(self) -> str:
        return f"Sender client id: {self.client_identifier}, entity: {self.entity_name}"
| {
"content_hash": "e5c298e520587958ecfcb91a824af029",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 119,
"avg_line_length": 44.51981351981352,
"alnum_prop": 0.6376250065448453,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ccbdd6e4ab20855afad70ec1e48359ac8e25f5cc",
"size": "19444",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicebus/azure-servicebus/azure/servicebus/aio/_servicebus_sender_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
__author__ = 'ajpina'
from uffema.pockets import PMPocket
from uffema.misc.constants import *
class PMUPocket(PMPocket):
    """U-shaped permanent-magnet pocket geometry.

    Dimensions are read from the ``pockets_settings`` / ``magnets_settings``
    dictionaries (keys 'Pe1', 'Pe2', 'Pl', 'FBl', 'Bl', 'Ua' and, for
    rectangular magnets, 'Ml', 'Mw', 'Mr').
    """

    @property
    def type(self):
        # Accumulated type string, extended with 'U' by __init__.
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def get_type(self):
        """Return the short code identifying this pocket shape."""
        return 'U'

    def __init__(self, pockets_settings, magnets_settings, magnet_type='rectangular'):
        """Read the U-pocket dimensions from the settings dictionaries."""
        PMPocket.__init__(self, pockets_settings)
        # Pocket outline dimensions.
        self.extension1 = pockets_settings['Pe1']
        self.extension2 = pockets_settings['Pe2']
        self.length = pockets_settings['Pl']
        self.flux_barrier_length = pockets_settings['FBl']
        self.bridge_length = pockets_settings['Bl']
        self.pocket_angle = pockets_settings['Ua'] * DEG2RAD
        # Magnet dimensions; only the rectangular shape is handled here.
        if magnet_type == 'rectangular':
            self.magnet_length = magnets_settings['Ml']
            self.magnet_width = magnets_settings['Mw']
            self.magnet_radius = magnets_settings['Mr']
        self.type += 'U'

    def get_pocket_geometry(self, outer_radius):
        """Return ``(points, lines)`` describing one half of the pocket outline.

        ``points`` holds one dict mapping point ids ('900'..'906') to
        ``[x, y, z]`` coordinates; ``lines`` holds one dict mapping line ids
        to ``[start_point, end_point]`` pairs closing the outline.
        """
        half_magnet = 0.5 * self.magnet_length
        half_width = 0.5 * self.magnet_width
        half_pocket = 0.5 * self.length
        half_angle = 0.5 * self.pocket_angle
        wall = outer_radius - self.bridge_length
        # Radius of the corner points sitting just inside the rotor bridge and
        # the angular offset of those points about the pocket axis.
        bridge_radius = np.sqrt(wall**2 + (0.5 * self.flux_barrier_length)**2)
        bridge_beta = np.arctan2(self.flux_barrier_length, 2 * wall)

        points = []
        lines = []
        points.append({
            '900': [self.magnet_radius + half_magnet, -half_width, 0],
            '901': [self.magnet_radius + half_pocket, -(half_width + self.extension1), 0],
            '902': [bridge_radius * np.cos(-half_angle + bridge_beta),
                    bridge_radius * np.sin(-half_angle + bridge_beta), 0],
            '903': [bridge_radius * np.cos(-half_angle - bridge_beta),
                    bridge_radius * np.sin(-half_angle - bridge_beta), 0],
            '904': [self.magnet_radius - half_pocket,
                    -(half_width + self.extension1 + self.extension2), 0],
            '905': [self.magnet_radius - half_pocket, -(half_width + self.extension1), 0],
            '906': [self.magnet_radius - half_magnet, -half_width, 0]
        })
        # Connect the points in order and close the loop 906 -> 900.
        ids = [900, 901, 902, 903, 904, 905, 906]
        lines.append({
            str(a): [a, b] for a, b in zip(ids, ids[1:] + ids[:1])
        })
        return points, lines
| {
"content_hash": "599bbb19d4197d76975aa98b28c62a9f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 127,
"avg_line_length": 42.38333333333333,
"alnum_prop": 0.569799449469131,
"repo_name": "ajpina/uffema",
"id": "1436fd7c5b6f349948c8e1153551a6ae15890712",
"size": "3341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uffema/pockets/pm_upocket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "178150"
}
],
"symlink_target": ""
} |
from django import forms
from markymark.fields import MarkdownFormField
class MarkdownForm(forms.Form):
    """Example form exposing a single ``MarkdownFormField`` named ``content``."""
    # The field class comes from markymark.fields (imported at module top).
    content = MarkdownFormField()
| {
"content_hash": "c4b32725850acdd2d01c16219070ffaf",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 20.142857142857142,
"alnum_prop": 0.8014184397163121,
"repo_name": "moccu/django-markymark",
"id": "32ec0cdd028f237e1e28fd1c75827c0e52d1b7c8",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/app/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6033"
},
{
"name": "HTML",
"bytes": "477"
},
{
"name": "JavaScript",
"bytes": "55352"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Python",
"bytes": "19361"
}
],
"symlink_target": ""
} |
from basic import Basic, S, C
from operations import AssocOp
from cache import cacheit
from symbol import Symbol
class Add(AssocOp):
    """Represents a sum of expressions, e.g. ``x + y + 3``.

    Construction goes through ``AssocOp.__new__``, which calls ``flatten``
    below to canonicalize the arguments (like terms collected, all Number
    terms folded into a single coefficient kept in slot 0).
    """

    __slots__ = []

    is_Add = True

    @classmethod
    def flatten(cls, seq):
        """
        Takes the sequence "seq" of nested Adds and returns a flatten list.

        Returns: (commutative_part, noncommutative_part, order_symbols)

        Applies associativity, all terms are commutable with respect to
        addition.
        """
        terms = {}      # term -> coeff
                        # e.g. x**2 -> 5   for ... + 5*x**2 + ...

        coeff = S.Zero  # standalone term (a Number)
                        # e.g. 3 + ...

        order_factors = []

        for o in seq:

            # O(x)
            if o.is_Order:
                # Drop this O() if an already-seen O() absorbs it; otherwise
                # keep it and drop any previous O() that it absorbs.
                for o1 in order_factors:
                    if o1.contains(o):
                        o = None
                        break
                if o is None:
                    continue
                order_factors = [o]+[o1 for o1 in order_factors if not o.contains(o1)]
                continue

            # 3
            elif o.is_Number:
                coeff += o
                continue

            # Add([...])
            elif o.is_Add:
                # NB: here we assume Add is always commutative
                seq.extend(o.args)  # TODO zerocopy?
                continue

            # Mul([...])
            elif o.is_Mul:
                c = o.args[0]

                # 3*...
                if c.is_Number:
                    if c is S.One:
                        s = o
                    else:
                        # strip the numeric factor: o == c*s
                        s = o.as_two_terms()[1]

                else:
                    c = S.One
                    s = o

            # everything else
            else:
                c = S.One
                s = o

            # now we have:
            # o = c*s, where
            #
            # c is a Number
            # s is an expression with number factor extracted

            # let's collect terms with the same s, so e.g.
            # 2*x**2 + 3*x**2  ->  5*x**2
            if s in terms:
                terms[s] += c
            else:
                terms[s] = c

        # now let's construct new args:
        # [2*x**2, x**3, 7*x**4, pi, ...]
        newseq = []
        noncommutative = False
        for s,c in terms.items():
            # 0*s
            if c is S.Zero:
                continue
            # 1*s
            elif c is S.One:
                newseq.append(s)
            # c*s
            else:
                if s.is_Mul:
                    # Mul, already keeps it's arguments in perfect order.
                    # so we can simply put c in slot0 and go the fast way.
                    cs = s._new_rawargs(*((c,) + s.args))
                    newseq.append(cs)
                else:
                    # alternatively we have to call all Mul's machinery (slow)
                    newseq.append(Mul(c,s))

            noncommutative = noncommutative or not s.is_commutative

        # nan
        if coeff is S.NaN:
            # we know for sure the result will be nan
            return [S.NaN], [], None

        # oo, -oo
        elif (coeff is S.Infinity) or (coeff is S.NegativeInfinity):
            # infinity swallows every bounded real term
            newseq = [f for f in newseq if not f.is_real]

        # process O(x)
        if order_factors:
            newseq2 = []
            for t in newseq:
                for o in order_factors:
                    # x + O(x) -> O(x)
                    if o.contains(t):
                        t = None
                        break
                # x + O(x**2) -> x + O(x**2)
                if t is not None:
                    newseq2.append(t)

            newseq = newseq2 + order_factors

            # 1 + O(1) -> O(1)
            for o in order_factors:
                if o.contains(coeff):
                    coeff = S.Zero
                    break

        # order args canonically
        # Currently we sort things using hashes, as it is quite fast. A better
        # solution is not to sort things at all - but this needs some more
        # fixing.
        newseq.sort(key=hash)

        # current code expects coeff to be always in slot-0
        if coeff is not S.Zero:
            newseq.insert(0, coeff)

        # we are done
        if noncommutative:
            return [], newseq, None
        else:
            return newseq, [], None

    @cacheit
    def as_coeff_factors(self, x=None):
        """Split into (coefficient, factors).

        With ``x``: (sum of terms free of x, tuple of terms containing x).
        Without ``x``: (leading Number or S.Zero, tuple of remaining terms).
        """
        if x is not None:
            l1 = []
            l2 = []
            for f in self.args:
                if f.has(x):
                    l2.append(f)
                else:
                    l1.append(f)
            return Add(*l1), tuple(l2)
        coeff = self.args[0]
        if coeff.is_Number:
            return coeff, self.args[1:]
        return S.Zero, self.args

    def _eval_derivative(self, s):
        """Differentiate term by term (linearity of the derivative)."""
        return Add(*[f.diff(s) for f in self.args])

    def _eval_nseries(self, x, x0, n):
        """Series-expand term by term."""
        terms = [t.nseries(x, x0, n) for t in self.args]
        return Add(*terms)

    def _matches_simple(pattern, expr, repl_dict):
        # handle (w+3).matches('x+5') -> {w: x+2}
        coeff, factors = pattern.as_coeff_factors()
        if len(factors)==1:
            return factors[0].matches(expr - coeff, repl_dict)
        return

    # Matching of Add delegates to the generic commutative matcher.
    matches = AssocOp._matches_commutative

    @staticmethod
    def _combine_inverse(lhs, rhs):
        """
        Returns lhs - rhs, but treats arguments like symbols, so things like
        oo - oo return 0, instead of a nan.
        """
        from sympy import oo, I
        if lhs == oo and rhs == oo or lhs == oo*I and rhs == oo*I:
            return S.Zero
        return lhs - rhs

    @cacheit
    def as_two_terms(self):
        """Return (first arg, Add of the rest); (0, self) for a single arg."""
        if len(self.args) == 1:
            return S.Zero, self
        return self.args[0], Add(*self.args[1:])

    def as_numer_denom(self):
        """Combine over a common denominator: sum(n_i/d_i) -> (num, prod(d_i))."""
        numers, denoms = [],[]
        for n,d in [f.as_numer_denom() for f in self.args]:
            numers.append(n)
            denoms.append(d)
        r = xrange(len(numers))
        # numerator: sum of each n_i multiplied by all the other denominators
        return Add(*[Mul(*(denoms[:i]+[numers[i]]+denoms[i+1:])) for i in r]),Mul(*denoms)

    def count_ops(self, symbolic=True):
        """Count operations; each '+' counts as Symbol('ADD') or 1."""
        if symbolic:
            return Add(*[t.count_ops(symbolic) for t in self.args]) + \
                Symbol('ADD') * (len(self.args) - 1)
        return Add(*[t.count_ops(symbolic) for t in self.args]) + \
            (len(self.args) - 1)

    def _eval_is_polynomial(self, syms):
        """A sum is a polynomial iff every term is."""
        for term in self.args:
            if not term._eval_is_polynomial(syms):
                return False
        return True

    # assumption methods
    # These hold for a sum when they hold for every term.
    _eval_is_real = lambda self: self._eval_template_is_attr('is_real')
    _eval_is_bounded = lambda self: self._eval_template_is_attr('is_bounded')
    _eval_is_commutative = lambda self: self._eval_template_is_attr('is_commutative')
    _eval_is_integer = lambda self: self._eval_template_is_attr('is_integer')
    _eval_is_comparable = lambda self: self._eval_template_is_attr('is_comparable')

    def _eval_is_odd(self):
        """Drop even terms; the sum is odd iff the rest is odd."""
        l = [f for f in self.args if not (f.is_even==True)]
        if not l:
            return False
        if l[0].is_odd:
            return Add(*l[1:]).is_even

    def _eval_is_irrational(self):
        """Irrational if exactly one term is irrational and the rest are not."""
        for t in self.args:
            a = t.is_irrational
            if a: return True
            if a is None: return
        return False

    def _eval_is_positive(self):
        """Decide positivity from the leading coefficient and the rest."""
        c = self.args[0]
        r = Add(*self.args[1:])
        if c.is_positive and r.is_positive:
            return True
        if c.is_unbounded:
            if r.is_unbounded:
                # either c or r is negative
                return
            else:
                return c.is_positive
        elif r.is_unbounded:
            return r.is_positive
        if c.is_nonnegative and r.is_positive:
            return True
        if r.is_nonnegative and c.is_positive:
            return True
        if c.is_nonpositive and r.is_nonpositive:
            return False

    def _eval_is_negative(self):
        """Mirror of _eval_is_positive."""
        c = self.args[0]
        r = Add(*self.args[1:])
        if c.is_negative and r.is_negative:
            return True
        if c.is_unbounded:
            if r.is_unbounded:
                # either c or r is positive
                return
            else:
                return c.is_negative
        elif r.is_unbounded:
            return r.is_negative
        if c.is_nonpositive and r.is_negative:
            return True
        if r.is_nonpositive and c.is_negative:
            return True
        if c.is_nonnegative and r.is_nonnegative:
            return False

    def as_coeff_terms(self, x=None):
        """Pull out an overall sign: (-1, (-self,)) if the lead Number is negative."""
        # -2 + 2 * a -> -1, 2-2*a
        if self.args[0].is_Number and self.args[0].is_negative:
            return -S.One,(-self,)
        return S.One,(self,)

    def _eval_subs(self, old, new):
        """Substitute, handling partial matches of Add patterns.

        e.g. (2+a).subs(3+a, y) -> 2-3+y and (a+b+c).subs(b+c, x) -> a+x.
        """
        if self == old: return new
        if isinstance(old, FunctionClass):
            return self.__class__(*[s._eval_subs(old, new) for s in self.args ])
        coeff_self, factors_self = self.as_coeff_factors()
        coeff_old, factors_old = old.as_coeff_factors()
        if factors_self == factors_old: # (2+a).subs(3+a,y) -> 2-3+y
            return Add(new, coeff_self, -coeff_old)
        if old.is_Add:
            if len(factors_old) < len(factors_self): # (a+b+c+d).subs(b+c,x) -> a+x+d
                self_set = set(factors_self)
                old_set = set(factors_old)
                if old_set < self_set:
                    ret_set = self_set - old_set
                    return Add(new, coeff_self, -coeff_old, *[s._eval_subs(old, new) for s in ret_set])
        return self.__class__(*[s._eval_subs(old, new) for s in self.args])

    @cacheit
    def extract_leading_order(self, *symbols):
        """
        Returns the leading term and it's order.

        Examples:

        >>> (x+1+1/x**5).extract_leading_order(x)
        ((1/x**5, O(1/x**5)),)
        >>> (1+x).extract_leading_order(x)
        ((1, O(1, x)),)
        >>> (x+x**2).extract_leading_order(x)
        ((x, O(x)),)
        """
        lst = []
        seq = [(f, C.Order(f, *symbols)) for f in self.args]
        for ef,of in seq:
            # skip terms strictly dominated by something already kept
            for e,o in lst:
                if o.contains(of) and o != of:
                    of = None
                    break
            if of is None:
                continue
            # keep this term and drop previously-kept dominated terms
            new_lst = [(ef,of)]
            for e,o in lst:
                if of.contains(o) and o != of:
                    continue
                new_lst.append((e,o))
            lst = new_lst
        return tuple(lst)

    def _eval_as_leading_term(self, x):
        """Leading term of the sum for x -> 0."""
        coeff, factors = self.as_coeff_factors(x)
        has_unbounded = bool([f for f in self.args if f.is_unbounded])
        if has_unbounded:
            if isinstance(factors, Basic):
                factors = factors.args
            factors = [f for f in factors if not f.is_bounded]
        if coeff is not S.Zero:
            o = C.Order(x)
        else:
            o = C.Order(factors[0]*x,x)
        # expand to successively higher order until something survives the O()
        n = 1
        s = self.nseries(x, 0, n)
        while s.is_Order:
            n +=1
            s = self.nseries(x, 0, n)
        if s.is_Add:
            s = s.removeO()
        if s.is_Add:
            lst = s.extract_leading_order(x)
            return Add(*[e for (e,f) in lst])
        return s.as_leading_term(x)

    def _eval_power(self, other):
        """Extract a numeric factor before exponentiation:

              n          n        n
        (-3+y)  -> (-1) * (3 - y)       (similar to Mul.flatten)
        """
        c, t = self.as_coeff_terms()
        if c.is_negative and not other.is_integer:
            if c is not S.NegativeOne:
                coeff = (-c) ** other
                assert len(t) == 1, 't'
                b = -t[0]
                return coeff*b**other
        elif c is not S.One:
            coeff = c ** other
            assert len(t) == 1, 't'
            b = t[0]
            return coeff*b**other
        return

    def _eval_conjugate(self):
        """Conjugate term by term."""
        return Add(*[t.conjugate() for t in self.args])

    def _eval_expand_basic(self, deep=True, **hints):
        """Apply the 'basic' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_basic'):
                newterm = term._eval_expand_basic(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def _eval_expand_power_exp(self, deep=True, **hints):
        """Apply the 'power_exp' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_power_exp'):
                newterm = term._eval_expand_power_exp(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def _eval_expand_power_base(self, deep=True, **hints):
        """Apply the 'power_base' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_power_base'):
                newterm = term._eval_expand_power_base(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def _eval_expand_mul(self, deep=True, **hints):
        """Apply the 'mul' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_mul'):
                newterm = term._eval_expand_mul(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def _eval_expand_multinomial(self, deep=True, **hints):
        """Apply the 'multinomial' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_multinomial'):
                newterm = term._eval_expand_multinomial(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def _eval_expand_log(self, deep=True, **hints):
        """Apply the 'log' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_log'):
                newterm = term._eval_expand_log(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def _eval_expand_complex(self, deep=True, **hints):
        """Apply the 'complex' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_complex'):
                newterm = term._eval_expand_complex(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def _eval_expand_trig(self, deep=True, **hints):
        """Apply the 'trig' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_trig'):
                newterm = term._eval_expand_trig(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def _eval_expand_func(self, deep=True, **hints):
        """Apply the 'func' expansion hint to each term."""
        sargs, terms = self.args[:], []
        for term in sargs:
            if hasattr(term, '_eval_expand_func'):
                newterm = term._eval_expand_func(deep=deep, **hints)
            else:
                newterm = term
            terms.append(newterm)
        return self.new(*terms)

    def __neg__(self):
        """Negate term by term."""
        return Add(*[-t for t in self.args])

    def _sage_(self):
        """Convert to a Sage expression by summing converted terms."""
        s = 0
        for x in self.args:
            s += x._sage_()
        return s
from mul import Mul
from function import FunctionClass
| {
"content_hash": "6c5558a18bf3de4486d7d29124fee1c4",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 103,
"avg_line_length": 31.948875255623722,
"alnum_prop": 0.4790373167765474,
"repo_name": "hazelnusse/sympy-old",
"id": "69ec47345bfb38a142124781b89b1b3258ad477a",
"size": "15623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/core/add.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6607956"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "2728"
}
],
"symlink_target": ""
} |
from django import forms
from django.db import models
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
# portlets imports
from lfs.catalog.settings import VARIANT
from portlets.models import Portlet
# lfs imports
from lfs.catalog.models import Category, Product
class LatestPortlet(Portlet):
    """A portlet for displaying the latest (most recently created) products.

    Fields:
        limit: maximum number of products displayed.
        current_category: if True, restrict products to the category found in
            the rendering context (via ``category`` or the current product).
        slideshow: forwarded to the template to toggle slideshow markup.
    """
    class Meta:
        app_label = 'portlet'

    name = _("Latest products")

    limit = models.IntegerField(_(u"Limit"), default=5)
    current_category = models.BooleanField(_(u"Use current category"), default=False)
    slideshow = models.BooleanField(_(u"Slideshow"), default=False)

    @property
    def rendered_title(self):
        # Fall back to the portlet type name when no explicit title is set.
        return self.title or self.name

    def render(self, context):
        """Renders the portlet as html.

        Expects ``request`` (and optionally ``category``/``product``) in the
        template context.
        """
        request = context.get("request")
        products = Product.objects.filter(active=True).exclude(sub_type=VARIANT)

        if self.current_category:
            obj = context.get("category") or context.get("product")
            if obj:
                category = obj if isinstance(obj, Category) else obj.get_current_category(request)
                # Guard: get_current_category may return None; previously that
                # crashed on category.get_all_children().
                if category is not None:
                    categories = [category]
                    categories.extend(category.get_all_children())
                    products = products.filter(product__categories__in=categories)

        # Bug fix: always apply ordering and the limit. Previously, when
        # "use current category" was enabled but no category could be resolved
        # from the context, the queryset was left unordered and unlimited.
        products = products.order_by('-creation_date')[:self.limit]

        latest_products = []
        for product in products:
            # Display the default variant in place of a product-with-variants.
            if product.is_product_with_variants() and product.has_variants():
                latest_products.append(product.get_default_variant())
            else:
                latest_products.append(product)

        return render_to_string("lfs/portlets/latest.html", RequestContext(request, {
            "title": self.rendered_title,
            "slideshow": self.slideshow,
            "products": latest_products
        }))

    def form(self, **kwargs):
        """Returns the form used to edit this portlet instance."""
        return LatestForm(instance=self, **kwargs)

    def __unicode__(self):
        return u"%s" % self.id
class LatestForm(forms.ModelForm):
    """Model form for editing a ``LatestPortlet`` instance."""
    class Meta:
        model = LatestPortlet
| {
"content_hash": "8642014ae0e5fda234e884ee41f28fc2",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 98,
"avg_line_length": 32.04054054054054,
"alnum_prop": 0.6284268241248419,
"repo_name": "lichong012245/django-lfs-0.7.8",
"id": "ba5e3f35822c73aafc2114384e5efd25a00bcd26",
"size": "2388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lfs/portlet/models/latest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "109402"
},
{
"name": "JavaScript",
"bytes": "681332"
},
{
"name": "PHP",
"bytes": "1531"
},
{
"name": "Python",
"bytes": "1180906"
}
],
"symlink_target": ""
} |
"""
WSGI config for makegoalsdaily project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "makegoalsdaily.settings.test")
application = get_wsgi_application()
| {
"content_hash": "91e1e8b3ed830feb35d3422db665bc0a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 79,
"avg_line_length": 25.6875,
"alnum_prop": 0.7785888077858881,
"repo_name": "sundeep-co-in/makegoalsdaily",
"id": "2a5dddb0424e229561bf4af3e805ff323dd07739",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makegoalsdaily/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6599"
},
{
"name": "Dockerfile",
"bytes": "418"
},
{
"name": "HTML",
"bytes": "34938"
},
{
"name": "JavaScript",
"bytes": "3255"
},
{
"name": "Makefile",
"bytes": "735"
},
{
"name": "Python",
"bytes": "44053"
},
{
"name": "Roff",
"bytes": "121200"
},
{
"name": "Shell",
"bytes": "197"
}
],
"symlink_target": ""
} |
"""Classes for reading and writing VCF files.
The VCF format is described at
https://samtools.github.io/hts-specs/VCFv4.3.pdf
API for reading:
```python
from third_party.nucleus.io import vcf
with vcf.VcfReader(input_path) as reader:
for variant in reader:
print(variant)
```
API for writing:
```python
from third_party.nucleus.io import vcf
# variants is an iterable of nucleus.genomics.v1.Variant protocol buffers.
variants = ...
with vcf.VcfWriter(output_path, header=header) as writer:
for variant in variants:
writer.write(variant)
```
The class attempts to infer the file format (`TFRecord` vs VCF) from the file
path provided to the constructor.
1. For files that end with '.tfrecord' and '.tfrecord.gz' (a gzipped version),
a `TFRecord` file is assumed and is attempted to be read or written.
2. For all other files, the VCF format will be used.
VCF format used in writing is inferred from file paths:
- ending in '.bcf.gz': BGZF compressed BCF format will be written;
- ending in '.bcf': uncompressed BCF format will be written;
- ending in '.gz' and not in '.bcf.gz': BGZF compressed VCF format will be
written;
- all other suffixes: uncompressed VCF format will be written.
VCF format used in reading is inferred from the contents of the file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from third_party.nucleus.io import genomics_reader
from third_party.nucleus.io import genomics_writer
from third_party.nucleus.io.python import vcf_reader
from third_party.nucleus.io.python import vcf_writer
from third_party.nucleus.protos import variants_pb2
from third_party.nucleus.util import ranges
from third_party.nucleus.util import variant_utils
from third_party.nucleus.util import vcf_constants
def _create_get_fn_cache(fields):
  """Builds a dict mapping each field's ID to a getter callable for it."""
  cache = {}
  for field in fields:
    cache[field.id] = vcf_constants.create_get_fn(field.type, field.number)
  return cache
def _create_set_fn_cache(fields):
  """Builds a dict mapping each field's ID to the setter for its type."""
  return dict(
      (field.id, vcf_constants.SET_FN_LOOKUP[field.type]) for field in fields)
class VcfHeaderCache(object):
  """Cache of typed accessor callables for INFO and FORMAT fields.

  The INFO and FORMAT fields within Variant protos are structured and typed,
  with types defined by the corresponding VCF header. This cache provides
  {info,format}_field_{get,set}_fn functions that can be used to extract
  information from the structured Variant protos based on the types defined
  therein.

  NOTE: Users should not need to interact with this class at all. It is used
  by the variant_utils.{get,set}_info and variantcall_utils.{get,set}_format
  functions for interacting with the INFO and FORMAT fields in a Variant
  proto.
  """

  def __init__(self, header):
    """Builds the four accessor caches from `header`.

    Args:
      header: nucleus.genomics.v1.VcfHeader proto, or None for an empty one.
    """
    if header is None:
      header = variants_pb2.VcfHeader()
    infos, formats = header.infos, header.formats
    self._info_get_cache = _create_get_fn_cache(infos)
    self._info_set_cache = _create_set_fn_cache(infos)
    self._format_get_cache = _create_get_fn_cache(formats)
    self._format_set_cache = _create_set_fn_cache(formats)

  def info_field_get_fn(self, field_name):
    """Returns the typed getter for the INFO field `field_name` (e.g. 'AF')."""
    return self._info_get_cache[field_name]

  def info_field_set_fn(self, field_name):
    """Returns the typed setter for the INFO field `field_name`."""
    return self._info_set_cache[field_name]

  def format_field_get_fn(self, field_name):
    """Returns the typed getter for the FORMAT field `field_name`."""
    return self._format_get_cache[field_name]

  def format_field_set_fn(self, field_name):
    """Returns the typed setter for the FORMAT field `field_name`."""
    return self._format_set_cache[field_name]
class NativeVcfReader(genomics_reader.GenomicsReader):
  """Reader for variants stored in native VCF files.

  Most users should use VcfReader instead, which dispatches between native
  VCF and TFRecord inputs based on the filename's extensions.
  """

  def __init__(self,
               input_path,
               excluded_info_fields=None,
               excluded_format_fields=None,
               store_gl_and_pl_in_info_map=False,
               header=None):
    """Creates the reader.

    Args:
      input_path: str. Path of the VCF file to read.
      excluded_info_fields: list(str). INFO field IDs that should not be
        parsed into the Variants; None parses all INFO fields.
      excluded_format_fields: list(str). FORMAT field IDs that should not be
        parsed into the Variants; None parses all FORMAT fields.
      store_gl_and_pl_in_info_map: bool. If True, the "GL" and "PL" FORMAT
        fields are stored in the VariantCall.info map rather than as
        top-level values in the VariantCall.genotype_likelihood field.
      header: optional variants_pb2.VcfHeader to use. When given, the file at
        input_path must not contain any header information.
    """
    super(NativeVcfReader, self).__init__()
    options = variants_pb2.VcfReaderOptions(
        excluded_info_fields=excluded_info_fields,
        excluded_format_fields=excluded_format_fields,
        store_gl_and_pl_in_info_map=store_gl_and_pl_in_info_map)
    encoded_path = input_path.encode('utf8')
    if header is None:
      self._reader = vcf_reader.VcfReader.from_file(encoded_path, options)
    else:
      self._reader = vcf_reader.VcfReader.from_file_with_header(
          encoded_path, options, header)
    self.header = self._reader.header
    self.field_access_cache = VcfHeaderCache(self.header)

  def iterate(self):
    """Returns an iterable over all Variant protos in the file."""
    return self._reader.iterate()

  def query(self, region):
    """Returns an iterator over the variants overlapping `region`."""
    return self._reader.query(region)

  def __exit__(self, exit_type, exit_value, exit_traceback):
    self._reader.__exit__(exit_type, exit_value, exit_traceback)

  @property
  def c_reader(self):
    """The underlying C++ reader."""
    return self._reader
class VcfReader(genomics_reader.DispatchingGenomicsReader):
  """Reads Variant protos from either VCF or TFRecord files."""

  def _native_reader(self, input_path, **kwargs):
    return NativeVcfReader(input_path, **kwargs)

  def _record_proto(self):
    return variants_pb2.Variant

  def _post_init_hook(self):
    # Reuse the dispatched reader's field_access_cache when it has one
    # (native VCF); otherwise build a fresh cache from our header.
    self.field_access_cache = getattr(
        self._reader, 'field_access_cache', VcfHeaderCache(self.header))

  @property
  def c_reader(self):
    """The underlying C++ reader.

    Depending on input_path's extension this is a VcfReader or a
    TFRecordReader.
    """
    return self._reader.c_reader
class NativeVcfWriter(genomics_writer.GenomicsWriter):
  """Writer for variants in native VCF format.

  Most users should use VcfWriter, which dispatches between native VCF and
  TFRecord outputs based on the output filename's extensions.
  """

  def __init__(self,
               output_path,
               header=None,
               round_qualities=False,
               excluded_info_fields=None,
               excluded_format_fields=None,
               retrieve_gl_and_pl_from_info_map=False,
               exclude_header=False):
    """Creates the writer.

    Args:
      output_path: str. Path of the VCF file to write.
      header: nucleus.genomics.v1.VcfHeader describing the output (contigs,
        FILTER/INFO/FORMAT fields, samples, other header lines); None means
        an empty header.
      round_qualities: bool. If True, QUAL values are rounded to one decimal
        place.
      excluded_info_fields: list(str). INFO field IDs omitted from the
        output; None writes all INFO fields.
      excluded_format_fields: list(str). FORMAT field IDs omitted from the
        output; None writes all FORMAT fields.
      retrieve_gl_and_pl_from_info_map: bool. If True, "GL" and "PL" FORMAT
        values are taken from the VariantCall.info map rather than from the
        VariantCall.genotype_likelihood field.
      exclude_header: bool. If True, a headerless VCF is written.
    """
    super(NativeVcfWriter, self).__init__()
    effective_header = variants_pb2.VcfHeader() if header is None else header
    options = variants_pb2.VcfWriterOptions(
        round_qual_values=round_qualities,
        excluded_info_fields=excluded_info_fields,
        excluded_format_fields=excluded_format_fields,
        retrieve_gl_and_pl_from_info_map=retrieve_gl_and_pl_from_info_map,
        exclude_header=exclude_header,
    )
    self._writer = vcf_writer.VcfWriter.to_file(
        output_path, effective_header, options)
    self.field_access_cache = VcfHeaderCache(effective_header)

  def write(self, proto):
    """Writes the given Variant proto to the output."""
    self._writer.write(proto)

  def __exit__(self, exit_type, exit_value, exit_traceback):
    self._writer.__exit__(exit_type, exit_value, exit_traceback)
class VcfWriter(genomics_writer.DispatchingGenomicsWriter):
  """Writes Variant protos to either VCF or TFRecord files."""

  def _native_writer(self,
                     output_path,
                     header,
                     round_qualities=False,
                     excluded_info_fields=None,
                     excluded_format_fields=None,
                     retrieve_gl_and_pl_from_info_map=False,
                     exclude_header=False):
    # Forward every option unchanged to the native writer.
    return NativeVcfWriter(
        output_path,
        header=header,
        round_qualities=round_qualities,
        excluded_info_fields=excluded_info_fields,
        excluded_format_fields=excluded_format_fields,
        retrieve_gl_and_pl_from_info_map=retrieve_gl_and_pl_from_info_map,
        exclude_header=exclude_header)

  def _post_init_hook(self):
    # Reuse the dispatched writer's field_access_cache when present (native
    # VCF); otherwise build one from our own header.
    self.field_access_cache = getattr(
        self._writer, 'field_access_cache', VcfHeaderCache(self.header))
class InMemoryVcfReader(genomics_reader.GenomicsReader):
  """Variant "reader" backed by an in-memory list instead of a file.

  ```python
  from third_party.nucleus.io import vcf
  from third_party.nucleus.protos import variants_pb2

  variants = [... Variant protos ...]
  header = variants_pb2.VcfHeader()

  with vcf.InMemoryVcfReader(variants, header) as reader:
    for variant in reader:
      print(variant)
  ```

  Provides the standard VcfReader API, but iterate() and query() serve
  variants straight from the collection supplied at construction time, in
  that collection's order. The input variants are not checked or sorted in
  any way, which makes this class convenient for tests that need
  less-than-perfectly-formed inputs; to fully meet the contract of a
  standard VcfReader, callers should pre-sort variants by contig, then
  start, then end.

  Implementation note:
    query() does an O(n) scan of the cache per call. If this is ever needed
    for large variant collections, the obvious upgrades are (a) sorting plus
    binary search, or (b) partitioning the variants per contig so other
    contigs are never considered. Neither is worth the complexity while
    len(variants) stays small.
  """

  def __init__(self, variants, header=None):
    """Creates a VCFReader backed by a collection of variants.

    Args:
      variants: list of nucleus.genomics.v1.Variant protos this reader will
        serve.
      header: a VCFHeader object to return from self.header, or None to
        indicate no header is associated with this reader.
    """
    super(InMemoryVcfReader, self).__init__()
    # Copy into a list so later mutation of the caller's iterable cannot
    # change what we serve.
    self.variants = list(variants)
    self.header = header

  def iterate(self):
    """Returns an iterator over the cached variants, in input order."""
    return iter(self.variants)

  def query(self, region):
    """Lazily yields every cached variant overlapping `region`."""
    def overlaps(variant):
      return ranges.ranges_overlap(variant_utils.variant_range(variant),
                                   region)

    return iter(
        variant for variant in self.variants if overlaps(variant))
| {
"content_hash": "47b5356ef4b1ab4ecf9b89b094295b72",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 80,
"avg_line_length": 38.205555555555556,
"alnum_prop": 0.696524647375309,
"repo_name": "google/deepvariant",
"id": "be3b38d364617fdc5281ec99579d8fe0ac31363d",
"size": "15276",
"binary": false,
"copies": "1",
"ref": "refs/heads/r1.4",
"path": "third_party/nucleus/io/vcf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "587559"
},
{
"name": "Dockerfile",
"bytes": "9270"
},
{
"name": "Python",
"bytes": "1617393"
},
{
"name": "Shell",
"bytes": "91210"
},
{
"name": "Starlark",
"bytes": "75694"
}
],
"symlink_target": ""
} |
from distutils.core import setup

# Packaging metadata for the tvhc HTSP client library.
setup(
    name='tvhc',
    version='0.1',
    description='Simple python implementation of HTSP-Protocol (tvheadend)',
    url='http://www.github.com/chwiede/tvhc',
    packages=['tvhc'],
)
| {
"content_hash": "a5ff552fabf33b3444f1512000f8e1e5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 78,
"avg_line_length": 28.375,
"alnum_prop": 0.6696035242290749,
"repo_name": "chwiede/tvhc",
"id": "8ba873c5eac80585d1aadffb0b8e9cf4c7fc6a16",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38220"
},
{
"name": "Shell",
"bytes": "1109"
}
],
"symlink_target": ""
} |
import colander
from ichnaea.api.schema import (
InternalMappingSchema,
InternalSchemaNode,
InternalSequenceSchema,
)
from ichnaea.api.locate.schema import (
BaseLocateSchema,
FallbackSchema,
)
# Radio network types accepted by the `radio` schema fields below.
RADIO_STRINGS = ['gsm', 'cdma', 'umts', 'wcdma', 'lte']
class CellsSchema(InternalSequenceSchema):
    """Sequence schema for the ``cell`` list of a locate v1 request."""

    @colander.instantiate()
    class SequenceItem(InternalMappingSchema):
        # Every field is optional and deserializes to None when not provided.
        radio = InternalSchemaNode(
            colander.String(),
            validator=colander.OneOf(RADIO_STRINGS), missing=None)

        mcc = InternalSchemaNode(colander.Integer(), missing=None)
        mnc = InternalSchemaNode(colander.Integer(), missing=None)
        lac = InternalSchemaNode(colander.Integer(), missing=None)
        cid = InternalSchemaNode(colander.Integer(), missing=None)

        asu = InternalSchemaNode(colander.Integer(), missing=None)
        psc = InternalSchemaNode(colander.Integer(), missing=None)
        signal = InternalSchemaNode(colander.Integer(), missing=None)
        ta = InternalSchemaNode(colander.Integer(), missing=None)
class WifisSchema(InternalSequenceSchema):
    """Sequence schema for the ``wifi`` list of a locate v1 request."""

    @colander.instantiate()
    class SequenceItem(InternalMappingSchema):
        # External 'key' is mapped to the internal field name 'mac'.
        key = InternalSchemaNode(
            colander.String(), missing=None, internal_name='mac')

        frequency = InternalSchemaNode(colander.Integer(), missing=None)
        channel = InternalSchemaNode(colander.Integer(), missing=None)
        signal = InternalSchemaNode(colander.Integer(), missing=None)
        # External 'signalToNoiseRatio' is mapped internally to 'snr'.
        signalToNoiseRatio = InternalSchemaNode(
            colander.Integer(), missing=None, internal_name='snr')
class LocateV1Schema(BaseLocateSchema):
    """Top-level request schema for the locate v1 API."""

    radio = InternalSchemaNode(
        colander.String(),
        validator=colander.OneOf(RADIO_STRINGS), missing=None)

    # Observation lists default to empty tuples when absent.
    cell = CellsSchema(missing=())
    wifi = WifisSchema(missing=())

    fallbacks = FallbackSchema(missing=None)
# Single shared instance of the request schema.
LOCATE_V1_SCHEMA = LocateV1Schema()
| {
"content_hash": "8bcc34913aa2b4fd947e374efcbc9339",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 72,
"avg_line_length": 32.83050847457627,
"alnum_prop": 0.701600413009809,
"repo_name": "therewillbecode/ichnaea",
"id": "4a8ebf40af50eb791960e1ce65ee3c4c65b751bb",
"size": "1937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ichnaea/api/locate/locate_v1/schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "64264"
},
{
"name": "JavaScript",
"bytes": "1621672"
},
{
"name": "Makefile",
"bytes": "6964"
},
{
"name": "Mako",
"bytes": "432"
},
{
"name": "Python",
"bytes": "691003"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
"""
ghostly.django.testcase
~~~~~~~~~~~~~~~~~~~~~~~
Module containing GhostlyDjangoTestCase.
"""
from __future__ import absolute_import, print_function, unicode_literals
try:
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
except ImportError:
# Django < 1.7
from django.test import LiveServerTestCase as StaticLiveServerTestCase
from ghostly import Ghostly
class GhostlyDjangoTestCase(StaticLiveServerTestCase):
    """
    Django TestCase that allows you to define your Ghostly tests pragmatically.

    This class is mostly a light weight wrapper around Ghostly.
    """
    # Name of the Selenium WebDriver handed to Ghostly.
    driver = 'PhantomJS'
    # Whether Ghostly should maximise the browser window on start-up.
    maximise_window = True

    def setUp(self):
        super(GhostlyDjangoTestCase, self).setUp()
        self.ghostly = Ghostly(self.driver, maximise_window=self.maximise_window)
        # Always shut the browser down, even when the test fails.
        self.addCleanup(self.ghostly.end)

    def goto(self, url):
        """
        Helper method to perform a HTTP GET with support for relative URLs.

        :param url: The URL to retrieve; if relative, the test server's URL
            is prepended.
        :type url: str
        """
        if url.startswith('/'):
            # Append the server URL to the url
            url = self.live_server_url + url
        self.ghostly.driver.get(url)

    def assertCurrentUrl(self, expected):
        """
        Assert the current URL is equal to expected.

        :param expected: Expected URL; if relative, the test server's URL is
            prepended.
        """
        if expected.startswith('/'):
            # Append the server URL to the url
            expected = self.live_server_url + expected
        self.assertEqual(self.ghostly.driver.current_url, expected)

    def assertXpathEqual(self, xpath, expected):
        """Assert that the text of the element at `xpath` equals `expected`."""
        element = self.ghostly.xpath(xpath)
        self.assertEqual(
            element.text,
            expected,
            "Expected xpath '%s' to be equal to '%s' not '%s'." % (
                xpath,
                expected,
                element.text))

    def assertXpathVisible(self, xpath):
        """Assert that the element at `xpath` is displayed."""
        element = self.ghostly.xpath(xpath)
        self.assertTrue(element.is_displayed(),
                        "Expected xpath '%s' to be visible." % xpath)

    def assertXpathNotVisible(self, xpath):
        """Assert that the element at `xpath` is not displayed."""
        element = self.ghostly.xpath(xpath)
        self.assertFalse(element.is_displayed(),
                         "Expected xpath '%s' to not be visible." % xpath)
| {
"content_hash": "6e3c511a9fa4dba0194bebfe791d954a",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 81,
"avg_line_length": 31.59036144578313,
"alnum_prop": 0.6147978642257819,
"repo_name": "alexhayes/ghostly",
"id": "84b88c2cdb0a8297effffdbf7af24c5c32b6af76",
"size": "2646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ghostly/django/testcase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1787"
},
{
"name": "Python",
"bytes": "23801"
},
{
"name": "Shell",
"bytes": "774"
}
],
"symlink_target": ""
} |
"""
A module of handy plotting functions and shortcuts for Time-Distance diagrams
"""
from __future__ import absolute_import, division, print_function
import os
import glob
import itertools
import numpy as np
from scipy.interpolate import interp1d
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.image import NonUniformImage
from mpl_toolkits.axes_grid1 import make_axes_locatable
__all__ = ['glob_files', 'calc_avgs', 'single_plot', 'triple_plot', 'triple_triple_plot',
           'get_single_percentage_flux', 'get_single_velocity', 'get_single_bpert',
           'get_triple', 'get_xy']

# Slice limit applied to the time axis inside single_plot; -1 drops the
# final column of each T-D array before plotting.
xxlim = -1
def betaswap(b, pos):
    """Format a plasma-beta value as a two-decimal LaTeX label.

    ``pos`` is unused but required by the matplotlib FuncFormatter call
    signature (see the clabel call in single_plot).
    """
    return "${0:3.2f}$".format(b)
def glob_files(cfg, tube_r, search):
    """
    Search in the configs data directory for the following pattern:
    {data_dir}+{tube_r}+{search}

    Parameters
    ----------
    cfg : object
        Config providing the ``data_dir`` attribute.
    tube_r : string
        The tube radius string to search for.
    search : string
        The rest of the search string.

    Returns
    -------
    files : list
        A sorted list of matching file paths.
    """
    pattern = os.path.join(cfg.data_dir, tube_r, search)
    return sorted(glob.glob(pattern))
def calc_avgs(cfg, tube_r, periods, amps, runtime=600.):
    """
    Calculate the average values of Fpar, Fperp and Fphi over all time for an
    integer number of periods.

    Parameters
    ----------
    cfg : scripts.sacconfig.SACConfig instance
        The repo config to use
    tube_r : string
        The tube radius to use
    periods : ndarray
        List of all periods
    amps : list of strings
        List of all corresponding amplitudes
    runtime : float
        Total runtime of the simulation (to calculate integer periods)

    Returns
    -------
    AvgsP : ndarray
        A 3 x len(periods) array of average fluxes (Par, Perp, Phi)
    """
    # Largest whole number of wave periods that fits in the run time, per run.
    int_periods = np.floor(runtime/periods)*periods

    AvgsP = np.zeros([3, len(periods)])
    # BUG FIX: the original used itertools.izip, which does not exist on
    # Python 3; the builtin zip is equivalent here on both Python 2 and 3.
    for i, (period, amp) in enumerate(zip(periods, amps)):
        cfg.period = period
        cfg.amp = amp

        times = np.load(os.path.join(cfg.data_dir, 'Times_{}.npy'.format(cfg.get_identifier())))
        # Index of the last sample inside the integer number of periods.
        max_index = np.argmin(np.abs(int_periods[i] - times))

        Fpar, Fperp, Fphi = map(np.load, glob_files(cfg, tube_r, 'LineFlux*Fp*npy'))
        # Suppress numerical noise before normalising.
        Fpar[np.abs(Fpar)<1e-5], Fperp[np.abs(Fperp)<1e-5], Fphi[np.abs(Fphi)<1e-5] = 0., 0., 0.
        Fpar, Fperp, Fphi = Fpar[:max_index,:], Fperp[:max_index,:], Fphi[:max_index,:]

        Ftot2 = (Fpar**2 + Fperp**2 + Fphi**2)
        Fpar2, Fperp2, Fphi2 = np.array([Fpar, Fperp, Fphi])**2
        # Percentage contribution of each component to the total flux.
        FparP, FperpP, FphiP = (Fpar2/Ftot2)*100, (Fperp2/Ftot2)*100, (Fphi2/Ftot2)*100
        # Mask the NaNs (arising where Ftot2 == 0) out of the averages.
        FparP = np.mean(np.ma.masked_array(FparP,np.isnan(FparP)))
        FperpP = np.mean(np.ma.masked_array(FperpP,np.isnan(FperpP)))
        FphiP = np.mean(np.ma.masked_array(FphiP,np.isnan(FphiP)))

        AvgsP[:, i] = FparP, FperpP, FphiP
    return AvgsP
def calc_avgs_period(cfg, tube_r, maxtime):
    """
    Calculate the average values of Fpar, Fperp and Fphi for one state of the
    configuration object.

    Parameters
    ----------
    cfg : scripts.sacconfig.SACConfig instance
        The repo config to use
    tube_r : string
        The tube radius to use
    maxtime : `int`
        The maximum time to calculate the average to.

    Returns
    -------
    FparP, FperpP, FphiP : ndarray
        Three arrays with the components of the wave fulx averaged.
    """
    times = np.load(os.path.join(cfg.data_dir, 'Times_{}.npy'.format(cfg.get_identifier())))
    # Index of the last saved sample at or before maxtime.
    max_index = np.argmin(np.abs(maxtime - times))

    Fpar, Fperp, Fphi = map(np.load,glob_files(cfg, tube_r, 'LineFlux*Fp*npy'))
    # Suppress numerical noise before normalising.
    Fpar[np.abs(Fpar)<1e-5], Fperp[np.abs(Fperp)<1e-5], Fphi[np.abs(Fphi)<1e-5] = 0., 0., 0.
    Fpar, Fperp, Fphi = Fpar[:max_index,:], Fperp[:max_index,:], Fphi[:max_index,:]

    Ftot2 = (Fpar**2 + Fperp**2 + Fphi**2)
    Fpar2, Fperp2, Fphi2 = np.array([Fpar, Fperp, Fphi])**2
    # Percentage contribution of each component to the total flux.
    FparP, FperpP, FphiP = (Fpar2/Ftot2)*100, (Fperp2/Ftot2)*100, (Fphi2/Ftot2)*100
    # Mask the NaNs (arising where Ftot2 == 0) out of the averages.
    FparP = np.mean(np.ma.masked_array(FparP,np.isnan(FparP)))
    FperpP = np.mean(np.ma.masked_array(FperpP,np.isnan(FperpP)))
    FphiP = np.mean(np.ma.masked_array(FphiP,np.isnan(FphiP)))

    return FparP, FperpP, FphiP
def get_all_avgs(cfg, tube_r, sim_params):
    """
    Calculate a set of wave flux averages for different states of the config
    object, as given by the sim_params namedtuple

    Parameters
    ----------
    cfg : scripts.sacconfig.SACConfig instance
        The repo config to use
    tube_r : string
        The tube radius to use.
    sim_params : iterable of namedtuple
        Named tuples with attributes matching the names of configuration
        parameters.

    Returns
    -------
    avgs : ndarray
        A 3 x len(sim_params) array of average wave fluxes. Ordered (Par, Perp, Phi)
    """
    avgs = np.zeros([3, len(sim_params)])
    for i, pfa in enumerate(sim_params):
        # Copy this parameter set onto the config object. (Plain loop: the
        # original abused a list comprehension for its setattr side effects.)
        for field in pfa._fields:
            setattr(cfg, field, getattr(pfa, field))
        # Average over the largest whole number of periods in the run time.
        int_periods = np.floor(cfg.runtime/cfg.period)*cfg.period
        avgs[:, i] = calc_avgs_period(cfg, tube_r, int_periods)
    return avgs
def single_plot(data, x, y, axes=None, beta=None, cbar_label='',
                cmap=plt.get_cmap('RdBu'), vmin=None, vmax=None,
                phase_speeds=True, manual_locations=False, **kwargs):
    """
    Plot a single frame Time-Distance Diagram on physical axes.

    This function uses mpl NonUniformImage to plot a image using x and y arrays,
    it will also optionally over plot in contours beta lines.

    Parameters
    ----------
    data: np.ndarray
        The 2D image to plot
    x: np.ndarray
        The x coordinates
    y: np.ndarray
        The y coordinates
    axes: matplotlib axes instance [*optional*]
        The axes to plot the data on, if None, use plt.gca().
    beta: np.ndarray [*optional*]
        The array to contour over the top, default to none.
    cbar_label: string [*optional*]
        The title label for the colour bar, default to none.
    cmap: A matplotlib colour map instance [*optional*]
        The colourmap to use, default to 'RdBu'
    vmin: float [*optional*]
        The min scaling for the image, default to the image limits.
    vmax: float [*optional*]
        The max scaling for the image, default to the image limits.
    phase_speeds : bool
        Add phase speed lines to the plot.
        NOTE(review): this argument is never referenced in the body; confirm
        whether add_phase_speeds() was meant to be called from here.
    manual_locations : bool
        Array for clabel locations.

    Returns
    -------
    None
    """
    if axes is None:
        axes = plt.gca()

    # Trim the time axis with the module-level xxlim (-1 drops the final
    # column of the array).
    x = x[:xxlim]
    data = data[:,:xxlim]

    im = NonUniformImage(axes,interpolation='nearest',
                         extent=[x.min(),x.max(),y.min(),y.max()],rasterized=False)
    im.set_cmap(cmap)
    if vmin is None and vmax is None:
        # Symmetric colour limits about zero so the diverging colormap is
        # centred correctly.
        lim = np.max([np.nanmax(data),
                      np.abs(np.nanmin(data))])
        im.set_clim(vmax=lim,vmin=-lim)
    else:
        im.set_clim(vmax=vmax,vmin=vmin)
    im.set_data(x,y,data)
    im.set_interpolation('nearest')
    axes.images.append(im)
    axes.set_xlim(x.min(),x.max())
    axes.set_ylim(y.min(),y.max())

    # Attach a colour bar of matching height to the right of the axes.
    cax0 = make_axes_locatable(axes).append_axes("right", size="5%", pad=0.05)
    cbar0 = plt.colorbar(im, cax=cax0, ticks=mpl.ticker.MaxNLocator(7))
    cbar0.set_label(cbar_label)
    cbar0.solids.set_edgecolor("face")

    # Default beta contour levels; callers may override via **kwargs.
    kwergs = {'levels': [1., 1/3., 1/5., 1/10., 1/20.]}
    kwergs.update(kwargs)
    if beta is not None:
        ct = axes.contour(x,y,beta[:,:xxlim],colors=['k'], **kwergs)
        plt.clabel(ct,fontsize=14,inline_spacing=3, manual=manual_locations,
                   fmt=mpl.ticker.FuncFormatter(betaswap))

    axes.set_xlabel("Time [s]")
    axes.set_ylabel("Height [Mm]")
def triple_plot(ax, x, y, par_line, perp_line, phi_line, beta_line=None,
                par_label='', perp_label='', phi_label='', title='', **kwargs):
    """
    Draw the three T-D components onto a column of three axes.

    Parameters
    ----------
    ax : np.ndarray
        A 1D array of three matplotlib axes, top to bottom.
    x : np.ndarray
        The x coordinates.
    y : np.ndarray
        The y coordinates.
    par_line : np.ndarray
        The first T-D array (top panel).
    perp_line : np.ndarray
        The second T-D array (middle panel).
    phi_line : np.ndarray
        The third T-D array (bottom panel).
    beta_line : np.ndarray [*optional*]
        The beta array contoured over every panel.
    par_label : string [*optional*]
        Colour bar label for the first array.
    perp_label : string [*optional*]
        Colour bar label for the second array.
    phi_label : string [*optional*]
        Colour bar label for the third array.
    title : string [*optional*]
        Title placed above the top panel.
    """
    ax[0].set_title(title)
    panels = zip(ax,
                 (par_line, perp_line, phi_line),
                 (par_label, perp_label, phi_label))
    for axis, line, label in panels:
        # single_plot expects each array transposed and flipped vertically.
        single_plot(line.T[::-1, :], x, y, axes=axis,
                    cbar_label=label, beta=beta_line, **kwargs)
def triple_triple_plot(title,
                       x_r10, y_r10, beta_line_r10, par_line_r10, perp_line_r10, phi_line_r10,
                       x_r30, y_r30, beta_line_r30, par_line_r30, perp_line_r30, phi_line_r30,
                       x_r60, y_r60, beta_line_r60, par_line_r60, perp_line_r60, phi_line_r60,
                       par_label = r'', perp_label = r'', phi_label = r'',**kwargs):
    """
    Plot all three T-D components for all three tube radii.

    Creates and returns a figure with a 3x3 grid of subplots: one column per
    flux-surface radius (r10, r30, r60), one row per component.

    Parameters
    ----------
    title : string
        The whole figure title.
    x_rNN, y_rNN : np.ndarray
        Coordinate arrays for the column of radius NN.
    beta_line_rNN : np.ndarray
        Beta array contoured over the column of radius NN.
    par_line_rNN, perp_line_rNN, phi_line_rNN : np.ndarray
        The three T-D arrays for the column of radius NN.
    par_label, perp_label, phi_label : string
        Colour bar labels shared by all columns.

    Returns
    -------
    figure : matplotlib.figure.Figure
        The figure containing all the plots, plus the 3x3 axes array.
    """
    fig, ax = plt.subplots(nrows=3, ncols=3, figsize=(15,6))
    fig.suptitle(title, y=1.05)

    # One entry per column: coordinates, beta, the three components, title.
    columns = (
        (x_r10, y_r10, beta_line_r10, par_line_r10, perp_line_r10,
         phi_line_r10, "Flux Surface Radius $= 158$ km"),
        (x_r30, y_r30, beta_line_r30, par_line_r30, perp_line_r30,
         phi_line_r30, "Flux Surface Radius $= 468$ km"),
        (x_r60, y_r60, beta_line_r60, par_line_r60, perp_line_r60,
         phi_line_r60, "Flux Surface Radius $= 936$ km"),
    )
    for col, (cx, cy, cbeta, cpar, cperp, cphi, ctitle) in enumerate(columns):
        triple_plot(ax[:, col], cx, cy, cpar, cperp, cphi,
                    beta_line=cbeta, par_label=par_label,
                    perp_label=perp_label, phi_label=phi_label,
                    title=ctitle, **kwargs)

    plt.tight_layout()
    return fig, ax
def add_phase_speeds(ax, x, y, va_line, cs_line, x_shift=60, color='k', dx_scale=1.):
    """
    Plot the four phase speed lines (Alfven, sound, fast, slow) on an axes.

    ``x`` is unused but kept for signature compatibility. Each track is the
    cumulative travel time up the height axis at the given speed, shifted
    right by ``x_shift``.
    """
    # Height steps (optionally rescaled); the first entry stays zero.
    delta_x = np.zeros(y.shape)
    delta_x[1:] = y[1:] - y[:-1]
    delta_x *= dx_scale

    alfven = va_line[:, 0]
    sound = cs_line[:, 0]
    fast = np.sqrt(sound**2 + alfven**2)
    slow = np.sqrt(sound**-2 + alfven**-2)**-1

    # Keep the original plotting order so legend entries are unchanged.
    tracks = ((alfven, r"$V_A$", ':'),
              (sound, r"$C_s$", '--'),
              (fast, r"$V_f$", '-.'),
              (slow, r"$V_s$", '-'))
    for speed, label, linestyle in tracks:
        arrival = np.cumsum(delta_x / speed) + x_shift
        ax.plot(arrival, y, label=label, linewidth=2, linestyle=linestyle,
                color=color)
def get_phase_speeds(cfg, tube_r):
    """
    Read in the data for phase speed plotting.

    Returns a dict whose keys match the keyword arguments of
    add_phase_speeds.
    """
    x, y = get_xy(cfg, tube_r)
    # Speeds are scaled by 1e-6 (presumably m/s -> Mm/s; confirm against the
    # units of the saved arrays).
    va_line = np.load(glob_files(cfg, tube_r, '*va.npy')[0]).T * 1e-6
    cs_line = np.load(glob_files(cfg, tube_r, '*cs.npy')[0]).T * 1e-6
    # Start the tracks a quarter of the driver period into the run.
    x_shift = cfg.period / 4.
    return {'x':x, 'y':y, 'va_line':va_line,
            'cs_line':cs_line, 'x_shift': x_shift}
def get_xy(cfg, tube_r):
    """
    Read in from file and interpolate the x (time) and y (height) coords.
    """
    #There is a bug in the gdf files that mean the x array is wrong:
    height_Mm = np.linspace(0.03664122,1.5877863,128)
    # Map grid index -> physical height in Mm.
    # NOTE(review): np.linspace(0, 128, 128) samples at spacing 128/127, not
    # at the integer indices 0..127; confirm np.arange(128) was not intended.
    f = interp1d(np.linspace(0,128,128),height_Mm)
    all_spoints = np.load(glob_files(cfg, tube_r,'*points*npy')[0])[:,::-1,:]
    all_times = np.load(glob_files(cfg, tube_r,'*times*npy')[0])
    # Heights of the first time step's surface points.
    y = f(all_spoints[0,:,2])
    # Drop the final time sample.
    x = all_times[:-1]
    return x,y
def get_single_velocity(cfg, tube_r, search='*_vp*npy', beta=True,
                        triple_triple=False):
    """
    Load the three velocity-component T-D arrays and build the keyword
    dict consumed by the plotting helpers.

    When ``triple_triple`` is True each key gains a ``_<tube_r>`` suffix so
    the result can be merged into the triple_triple_plot argument dict.
    """
    suffix = '_' + tube_r if triple_triple else ''

    vpar, vperp, vphi = map(np.load, glob_files(cfg, tube_r, search))
    x, y = get_xy(cfg, tube_r)

    kwargs = {
        'par_line%s' % suffix: vpar,
        'perp_line%s' % suffix: vperp,
        'phi_line%s' % suffix: vphi,
        'x%s' % suffix: x,
        'y%s' % suffix: y,
    }
    if beta:
        kwargs['beta_line%s' % suffix] = np.load(
            glob_files(cfg, tube_r, '*beta*npy')[0]).T
    else:
        kwargs['beta_line%s' % suffix] = None
    return kwargs
def get_single_bpert(cfg, tube_r, search='*bpert*npy', beta=True,
                     triple_triple=False):
    """
    Load the three magnetic-perturbation T-D arrays (scaled by 1e-11) and
    build the keyword dict consumed by the plotting helpers.

    When ``triple_triple`` is True each key gains a ``_<tube_r>`` suffix so
    the result can be merged into the triple_triple_plot argument dict.
    """
    suffix = '_' + tube_r if triple_triple else ''

    bppar, bpperp, bpphi = map(np.load, glob_files(cfg, tube_r, search))
    x, y = get_xy(cfg, tube_r)

    kwargs = {
        'par_line%s' % suffix: bppar / 1e11,
        'perp_line%s' % suffix: bpperp / 1e11,
        'phi_line%s' % suffix: bpphi / 1e11,
        'x%s' % suffix: x,
        'y%s' % suffix: y,
    }
    if beta:
        kwargs['beta_line%s' % suffix] = np.load(
            glob_files(cfg, tube_r, '*beta*npy')[0]).T
    else:
        kwargs['beta_line%s' % suffix] = None
    return kwargs
def get_single_percentage_flux(cfg, tube_r, search='LineFlux*Fp*npy', beta=True,
                               triple_triple=False):
    """
    Load the wave-flux components, normalise each by the total flux, and
    build the keyword dict consumed by the plotting helpers.

    When ``triple_triple`` is True each key gains a ``_<tube_r>`` suffix so
    the result can be merged into the triple_triple_plot argument dict.
    """
    suffix = '_' + tube_r if triple_triple else ''

    Fpar, Fperp, Fphi = map(np.load, glob_files(cfg, tube_r, search))
    # Zero the components wherever the total flux is negligible so the
    # normalised values are not dominated by noise.
    Ftot = np.sqrt(Fpar**2 + Fperp**2 + Fphi**2)
    small = Ftot < 1e-5
    Fpar[small], Fperp[small], Fphi[small] = 0., 0., 0.

    x, y = get_xy(cfg, tube_r)
    kwargs = {
        'par_line%s' % suffix: Fpar / Ftot,
        'perp_line%s' % suffix: Fperp / Ftot,
        'phi_line%s' % suffix: Fphi / Ftot,
        'x%s' % suffix: x,
        'y%s' % suffix: y,
    }
    if beta:
        kwargs['beta_line%s' % suffix] = np.load(
            glob_files(cfg, tube_r, '*beta*npy')[0]).T
    else:
        kwargs['beta_line%s' % suffix] = None
    return kwargs
def get_triple(cfg, single='velocity', **kwergs):
    """
    Read in the args for a triple triple plot.

    Parameters
    ----------
    cfg : scripts.sacconfig.SACConfig instance
        The repo config to use.
    single : {'velocity', 'percentage_flux', 'bpert'}
        Which single-plot loader to use for every tube radius.
    **kwergs :
        Passed through to the chosen loader (e.g. ``search`` or ``beta``).

    Returns
    -------
    kwargs : dict
        Keyword arguments for triple_triple_plot covering the r10, r30 and
        r60 flux surfaces.

    Raises
    ------
    ValueError
        If ``single`` is not a recognised loader name. (Previously an
        unknown value fell through to an UnboundLocalError.)
    """
    getters = {
        'velocity': get_single_velocity,
        'percentage_flux': get_single_percentage_flux,
        'bpert': get_single_bpert,
    }
    if single not in getters:
        raise ValueError(
            "single must be one of %s, got %r" % (sorted(getters), single))
    get_single = getters[single]

    kwargs = {}
    for tube_r in ('r10', 'r30', 'r60'):
        kwargs.update(get_single(cfg, tube_r, triple_triple=True, **kwergs))
    return kwargs
| {
"content_hash": "a0d5005f293867759d7850be3c9c104d",
"timestamp": "",
"source": "github",
"line_count": 520,
"max_line_length": 96,
"avg_line_length": 33.11730769230769,
"alnum_prop": 0.6003135706404971,
"repo_name": "Cadair/Thesis",
"id": "7d4ea2fae0a5733f76f60f9bcc6a18e75b692976",
"size": "17245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thesis/Python/td_plotting_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136664"
},
{
"name": "Python",
"bytes": "73629"
},
{
"name": "TeX",
"bytes": "487132"
}
],
"symlink_target": ""
} |
from django.conf import settings
# Optional Piwik configuration pulled from the Django settings; each value
# falls back to False when the corresponding setting is absent.
PIWIK_TRACKING_API_URL = getattr(settings, 'PIWIK_TRACKING_API_URL', False)
PIWIK_SITE_ID = getattr(settings, 'PIWIK_SITE_ID', False)
PIWIK_TOKEN_AUTH = getattr(settings, 'PIWIK_TOKEN_AUTH', False)
"content_hash": "19bcd291278965ebeb84bae6bf4a88fc",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 75,
"avg_line_length": 46.2,
"alnum_prop": 0.7619047619047619,
"repo_name": "daddz/django-piwik-middleware",
"id": "8d6f097ab18f8ae16ce0ed119aa225014ffce1d5",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "piwik/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2272"
}
],
"symlink_target": ""
} |
import requests
# Root of the public SoundCloud HTTP API.
BASE_URL = 'http://api.soundcloud.com'
# NOTE(review): API client id is hard-coded in source; consider moving it to
# configuration or an environment variable.
CLIENT_ID = 'ce8f07daeb00017dc254362a3f083b22'
def get_comments(tracks):
    """Fetch up to 200 comments per track and index them by comment id.

    Args:
        tracks: iterable of track dicts, each carrying an 'id' key.

    Returns:
        dict mapping comment id -> comment payload, aggregated across all
        tracks.
    """
    comment_dict = {}
    for track in tracks:
        url = '{}/comments?'.format(BASE_URL)
        params = {
            'client_id': CLIENT_ID,
            'track_id': track['id'],
            'limit': 200,
        }
        payload = requests.get(url, params=params)
        comments = payload.json()
        # An error response decodes to a dict (with an 'errors' key) instead
        # of a list; report it and skip this track.
        if isinstance(comments, dict):
            print(comments.get('errors'))
            continue
        for comment in comments:
            comment_dict[comment['id']] = comment
    return comment_dict
def get_tracks(offset=0):
    """Fetch up to 200 hot, downloadable tracks from the SoundCloud API.

    Args:
        offset: pagination offset passed straight through to the API.

    Returns:
        dict mapping track id -> track payload, restricted to tracks that
        are both downloadable and commentable (see filter_tracks).
    """
    url = '{}/tracks?'.format(BASE_URL)
    params = {
        'client_id': CLIENT_ID,
        'order': 'hotness',
        'filter': 'downloadable',
        'limit': 200,
        'offset': offset,
    }
    payload = requests.get(url, params=params)
    tracks = payload.json()
    # Build the id -> track map with a comprehension instead of the manual
    # append loop.
    return {track['id']: track for track in filter_tracks(tracks)}
def filter_tracks(tracks):
    """Yield only the tracks that are both downloadable and commentable."""
    for candidate in tracks:
        if not (candidate['downloadable'] and candidate['commentable']):
            continue
        yield candidate
| {
"content_hash": "cc6d07ad7e9a8d8a034b197b25bab6d7",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 58,
"avg_line_length": 26.574468085106382,
"alnum_prop": 0.5724579663730984,
"repo_name": "MIR-focus/sampletime",
"id": "0803ee880495567d8c94c55b606582e0a897fbf2",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soundcloud/contextual_tagging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2707"
},
{
"name": "HTML",
"bytes": "1332"
},
{
"name": "JavaScript",
"bytes": "1473288"
},
{
"name": "Python",
"bytes": "1249"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Pull the long description from the README, keeping only the part after the
# 'split here' marker.
with open('README.rst') as readme:
    long_description = readme.read().strip()
long_description = long_description.split('split here', 1)[1]

# Requirements to install buffet plugins and engines
_extra_genshi = ["Genshi >= 0.3.5"]
_extra_mako = ["Mako >= 0.1.1"]

setup(
    name='tw2.jqplugins.gritter',
    version='2.0.3',
    description='toscawidgets2 wrapper for jquery gritter plugin.',
    long_description=long_description,
    author='Ralph Bean',
    author_email='rbean@redhat.com',
    license='MIT',
    url='http://github.com/toscawidgets/tw2.jqplugins.gritter',
    install_requires=[
        "simplejson",
        "tw2.core>=2.0b2",
        "tw2.jquery",
        "tw2.jqplugins.ui>=2.0b7",
    ],
    extras_require={
        'genshi': _extra_genshi,
        'mako': _extra_mako,
    },
    packages=['tw2', 'tw2.jqplugins', 'tw2.jqplugins.gritter'],
    namespace_packages=['tw2', 'tw2.jqplugins'],
    zip_safe=False,
    include_package_data=True,
    entry_points="""
        [tw2.widgets]
        # Register your widgets so they can be listed in the WidgetBrowser
        widgets = tw2.jqplugins.gritter
    """,
    keywords=[
        'toscawidgets.widgets',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Environment :: Web Environment :: ToscaWidgets',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Widget Sets',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
)
| {
"content_hash": "167fba2dc73b334b970d2ad258ee3f31",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 31.641509433962263,
"alnum_prop": 0.616577221228384,
"repo_name": "toscawidgets/tw2.jqplugins.gritter",
"id": "612b27ca0bd449df4f357877c5f32eccf951c278",
"size": "1677",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "11475"
},
{
"name": "Python",
"bytes": "2829"
}
],
"symlink_target": ""
} |
"""Simple report that displays a graph of the CDF of the given
distribution."""
from matplotlib import pyplot
from model import node_plot
# Number of samples from distribution to be plotted, pre-interpolation.
NUM_SAMPLES = 100
# Number of interpolated values in plot.
GRAPH_RESOLUTION = 1000
# NOTE(review): neither constant is referenced in this module's visible code;
# confirm whether node_plot consumes them before removing.
def report(node, args):
    """Generate and display the graph.

    `node` and `args` are forwarded unchanged to node_plot.multi_cdf_prep,
    which prepares the CDF figure; pyplot.show() then displays it.
    """
    node_plot.multi_cdf_prep(node, args)
    pyplot.show()
| {
"content_hash": "f1cde9edfab1b979b87a95490026670b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 23.555555555555557,
"alnum_prop": 0.7358490566037735,
"repo_name": "ggould256/libpmp",
"id": "64af6d4c90e5f695fbdc138e2fc5e43de4e0d857",
"size": "1014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "report/display_cdf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1923"
},
{
"name": "Python",
"bytes": "63087"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.