| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mekkablue/Glyphs-Scripts
|
Build Glyphs/Quote Manager.py
|
1
|
20549
|
#MenuTitle: Quote Manager
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Build double quotes from single quotes, and insert #exit and #entry anchors in the single quotes for auto-alignment.
"""
import vanilla
from Foundation import NSPoint
names = {
"quotesinglbase": "quotedblbase",
"quoteleft": "quotedblleft",
"quoteright": "quotedblright",
"quotesingle": "quotedbl",
}
class QuoteManager( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 480
windowHeight = 275
windowWidthResize = 400 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Quote Manager: build and align quotes", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.QuoteManager.mainwindow" # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 24
self.w.descriptionText = vanilla.TextBox( (inset, linePos+2, -inset, 14), "Syncs single and double quotes with cursive attachment. Reports in Macro Window.", sizeStyle='small', selectable=True )
linePos += lineHeight
self.w.defaultQuoteText = vanilla.TextBox( (inset, linePos+2, 90, 14), "Default quotes:", sizeStyle='small', selectable=True )
self.w.defaultQuote = vanilla.PopUpButton( (inset+90, linePos, -inset, 17), ["%s/%s" % (name,names[name]) for name in names], sizeStyle='small', callback=self.SavePreferences )
linePos += lineHeight
self.w.syncWithDefaultQuote = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Sync all quotes with default quotes (metrics keys, anchor placement)", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.syncWithDefaultQuote.getNSButton().setToolTip_("If enabled, the default quotes will be taken as reference for metrics keys and distance between #exit and #entry anchors.")
linePos += lineHeight
self.w.excludeDumbQuotes = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Ignore straight dumb quotes (quotesingle, quotedbl)", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.excludeDumbQuotes.getNSButton().setToolTip_("For most actions, this option allows you to ignore the (straight) dumb quotes. The Kerning Group button will ignore this setting and always set the groups for the straight quote.")
linePos += lineHeight
self.w.suffixText = vanilla.TextBox( (inset, linePos+2, 270, 14), "Suffix for all quotes involved (leave blank if none):", sizeStyle='small', selectable=True )
self.w.suffix = vanilla.EditText( (inset+270, linePos-1, -inset, 19), "", callback=self.SavePreferences, sizeStyle='small' )
self.w.suffix.getNSTextField().setToolTip_(u"E.g., ‘case’ for .case variants. Enter it with or without the leading period. Leave blank for the default quotes (without dot suffixes).")
linePos += lineHeight
self.w.openTabWithAffectedGlyphs = vanilla.CheckBox( (inset, linePos-1, 200, 20), "Open tab with affected glyphs", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.openTabWithAffectedGlyphs.getNSButton().setToolTip_("Whatever action you take, this option makes sure a new tab will be opened with all the glyphs affected.")
self.w.reuseTab = vanilla.CheckBox( (inset+200, linePos-1, -inset, 20), u"Reuse current tab", value=True, callback=self.SavePreferences, sizeStyle='small' )
self.w.reuseTab.getNSButton().setToolTip_(u"Instead of opening a new tab, will reuse the current tab. Highly recommended.")
linePos += lineHeight
self.w.buildDoublesButton = vanilla.Button( (inset, linePos, 130, 18), "Add Components", sizeStyle='small', callback=self.buildDoublesMain )
self.w.buildDoublesText = vanilla.TextBox( (inset+135, linePos+2, -inset, 14), "Insert single quotes as components in double quotes", sizeStyle='small', selectable=True )
tooltip = "Do this first. Then adjust the position of the second component in the default double quote. Inserting anchors (the next button) will take the distance between the components into account. Or follow the instructions in the tooltip of the next button. Then press the Insert Anchors button."
self.w.buildDoublesButton.getNSButton().setToolTip_(tooltip)
self.w.buildDoublesText.getNSTextField().setToolTip_(tooltip)
linePos += lineHeight
self.w.insertAnchorsButton = vanilla.Button( (inset, linePos, 130, 18), "Insert Anchors", sizeStyle='small', callback=self.insertAnchorsMain )
self.w.insertAnchorsText = vanilla.TextBox( (inset+135, linePos+2, -inset, 14), "Insert #exit and #entry anchors in single quotes", sizeStyle='small', selectable=True )
tooltip = "Hint: After you have done the previous steps, FIRST press button to insert the anchors, THEN adjust the width between the anchors in your default quote, THEN press this button again to sync all other #exit and #entry anchors with the default quotes."
self.w.insertAnchorsButton.getNSButton().setToolTip_(tooltip)
self.w.insertAnchorsText.getNSTextField().setToolTip_(tooltip)
linePos += lineHeight
self.w.metricKeyButton = vanilla.Button( (inset, linePos, 130, 18), "Add Keys", sizeStyle='small', callback=self.metricKeyMain )
self.w.metricKeyText = vanilla.TextBox( (inset+135, linePos+2, -inset, 14), "Apply metrics keys to single quotes", sizeStyle='small', selectable=True )
tooltip = "Adds Metrics Keys to single quotes, so your quotes are all in sync and have the same width. Double quotes should use automatic alignment by now."
self.w.metricKeyButton.getNSButton().setToolTip_(tooltip)
self.w.metricKeyText.getNSTextField().setToolTip_(tooltip)
linePos += lineHeight
self.w.kernGroupButton = vanilla.Button( (inset, linePos, 130, 18), "Set Groups", sizeStyle='small', callback=self.kernGroupMain )
self.w.kernGroupText = vanilla.TextBox( (inset+135, linePos+2, -inset, 14), "Set kern groups (based on singles)", sizeStyle='small', selectable=True )
tooltip = "Sync kern groups between double and single quotes."
self.w.kernGroupButton.getNSButton().setToolTip_(tooltip)
self.w.kernGroupText.getNSTextField().setToolTip_(tooltip)
linePos += lineHeight
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Build and align quotes' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def updateUI(self, sender=None):
self.w.reuseTab.enable(self.w.openTabWithAffectedGlyphs.get())
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.QuoteManager.defaultQuote"] = self.w.defaultQuote.get()
Glyphs.defaults["com.mekkablue.QuoteManager.syncWithDefaultQuote"] = self.w.syncWithDefaultQuote.get()
Glyphs.defaults["com.mekkablue.QuoteManager.suffix"] = self.w.suffix.get()
Glyphs.defaults["com.mekkablue.QuoteManager.excludeDumbQuotes"] = self.w.excludeDumbQuotes.get()
Glyphs.defaults["com.mekkablue.QuoteManager.openTabWithAffectedGlyphs"] = self.w.openTabWithAffectedGlyphs.get()
Glyphs.defaults["com.mekkablue.QuoteManager.reuseTab"] = self.w.reuseTab.get()
self.updateUI()
except:
return False
return True
def LoadPreferences( self ):
try:
Glyphs.registerDefault("com.mekkablue.QuoteManager.defaultQuote", 0)
Glyphs.registerDefault("com.mekkablue.QuoteManager.syncWithDefaultQuote", 0)
Glyphs.registerDefault("com.mekkablue.QuoteManager.suffix", "")
Glyphs.registerDefault("com.mekkablue.QuoteManager.excludeDumbQuotes", 0)
Glyphs.registerDefault("com.mekkablue.QuoteManager.openTabWithAffectedGlyphs", 0)
Glyphs.registerDefault("com.mekkablue.QuoteManager.reuseTab", 1)
self.w.defaultQuote.set( Glyphs.defaults["com.mekkablue.QuoteManager.defaultQuote"] )
self.w.syncWithDefaultQuote.set( Glyphs.defaults["com.mekkablue.QuoteManager.syncWithDefaultQuote"] )
self.w.suffix.set( Glyphs.defaults["com.mekkablue.QuoteManager.suffix"] )
self.w.excludeDumbQuotes.set( Glyphs.defaults["com.mekkablue.QuoteManager.excludeDumbQuotes"] )
self.w.openTabWithAffectedGlyphs.set( Glyphs.defaults["com.mekkablue.QuoteManager.openTabWithAffectedGlyphs"] )
self.w.reuseTab.set( Glyphs.defaults["com.mekkablue.QuoteManager.reuseTab"] )
self.updateUI()
except:
return False
return True
def getDotSuffix(self):
dotSuffix = Glyphs.defaults["com.mekkablue.QuoteManager.suffix"].strip().lstrip(".")
# clean up:
if dotSuffix:
dotSuffix = ".%s" % dotSuffix
return dotSuffix
def openTabIfRequested(self):
if Glyphs.defaults["com.mekkablue.QuoteManager.openTabWithAffectedGlyphs"]:
Font = Glyphs.font
suffix = self.getDotSuffix()
tabString = ""
for name in names:
for singleOrDoubleName in (name, names[name]):
suffixedName = singleOrDoubleName+suffix
if Font.glyphs[suffixedName]:
tabString += "/%s" % suffixedName
if tabString:
if Font.currentTab and Glyphs.defaults["com.mekkablue.QuoteManager.reuseTab"]:
Font.currentTab.text = tabString
else:
Font.newTab(tabString)
else:
print(u"⚠️ WARNING: None of the required glyphs in the font. No new tab opened.")
def reportFont(self):
Font = Glyphs.font
print("Font: %s" % Font.familyName)
print("Path: %s\n" % Font.filepath)
def reportMissingGlyph(self, glyphName):
print(u"⚠️ WARNING: %s not in font. Skipping." % glyphName)
def reportMetricKeys(self, glyphName):
print(u"✅ Updated Metrics Keys for: %s" % glyphName)
def defaultQuotes(self, dotSuffix=""):
if Glyphs.defaults["com.mekkablue.QuoteManager.syncWithDefaultQuote"]:
defaultSingle = self.w.defaultQuote.getItem()
defaultSingle = defaultSingle[:defaultSingle.find("/")]
defaultDouble = names[defaultSingle]
if dotSuffix:
defaultSingle += dotSuffix
defaultDouble += dotSuffix
print("\nReference quotes: %s/%s" % (defaultSingle, defaultDouble))
else:
defaultSingle = None
defaultDouble = None
return defaultSingle, defaultDouble
def kernGroupMain( self, sender ):
try:
# update settings to the latest user input:
if not self.SavePreferences( self ):
print("Note: 'Quote Manager' could not write preferences.")
Glyphs.clearLog()
Font = Glyphs.font # frontmost font
dotSuffix = self.getDotSuffix()
# report:
self.reportFont()
for keyGlyphName in names:
singleQuoteName = "%s%s" % (keyGlyphName, dotSuffix)
singleQuote = Font.glyphs[singleQuoteName]
doubleQuoteName = "%s%s" % (names[keyGlyphName], dotSuffix)
doubleQuote = Font.glyphs[doubleQuoteName]
print("\nSetting kern groups for: %s, %s" % (singleQuoteName, doubleQuoteName))
if not singleQuote:
self.reportMissingGlyph(singleQuoteName)
elif not doubleQuote:
self.reportMissingGlyph(doubleQuoteName)
else:
for glyph in (singleQuote, doubleQuote):
glyph.leftKerningGroup = singleQuoteName
glyph.rightKerningGroup = singleQuoteName
print(u"✅ Successfully set kerning groups to: '%s'" % singleQuoteName)
self.openTabIfRequested()
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Quote Manager Error: %s" % e)
import traceback
print(traceback.format_exc())
def insertAnchorsMain( self, sender ):
try:
# update settings to the latest user input:
if not self.SavePreferences( self ):
print("Note: 'Quote Manager' could not write preferences.")
Glyphs.clearLog()
Font = Glyphs.font # frontmost font
# query suffix
dotSuffix = self.getDotSuffix()
# report:
self.reportFont()
print("Inserting cursive attachment anchors in single quotes, and auto-aligning double quotes%s." % (
" with suffix '%s'"%dotSuffix if dotSuffix else ""
))
defaultSingle, defaultDouble = self.defaultQuotes(dotSuffix)
if defaultSingle and not Font.glyphs[defaultSingle]:
self.reportMissingGlyph(defaultSingle)
elif defaultDouble and not Font.glyphs[defaultDouble]:
self.reportMissingGlyph(defaultDouble)
else:
for singleName in names:
doubleName = names[singleName]
if dotSuffix:
doubleName += dotSuffix
singleName += dotSuffix
if singleName=="quotesingle" and Glyphs.defaults["com.mekkablue.QuoteManager.excludeDumbQuotes"]:
print(u"\n⚠️ Skipping %s/%s" % (singleName, doubleName))
else:
print("\n%s/%s:" % (singleName, doubleName))
g = Font.glyphs[singleName] # single quote glyph
gg = Font.glyphs[doubleName] # double quote glyph
if not g:
self.reportMissingGlyph(singleName)
elif not gg:
self.reportMissingGlyph(doubleName)
else:
for master in Font.masters:
mID = master.id
gl = g.layers[mID] # single quote layer
ggl = gg.layers[mID] # double quote layer
# check if a default quote has been determined by the user:
if defaultSingle:
referenceGlyph = Font.glyphs[defaultDouble]
referenceLayer = referenceGlyph.layers[mID]
else:
referenceGlyph = gg
referenceLayer = ggl # layer for measuring, depends on user input
# measure referenceLayer:
xPos = [c.position.x for c in referenceLayer.components]
if xPos and len(xPos)==2:
# add anchors in single quote:
print(xPos[1]-xPos[0], master.name)
dist = abs(xPos[1]-xPos[0])
for aName in ("entry","exit"):
if aName == "exit":
x = dist
else:
x = 0
newAnchor = GSAnchor( "#%s"%aName, NSPoint(x,0) )
gl.anchors.append(newAnchor)
print(u" ✅ %s: Added #exit and #entry anchors." % g.name)
# auto align components
for comp in ggl.components:
comp.automaticAlignment = True
# update metrics:
ggl.updateMetrics()
ggl.syncMetrics()
print(u" ✅ %s: Auto-aligned components." % gg.name)
else:
print(u" ⚠️ WARNING: No components in %s, layer '%s'. Cannot add anchors." % ( referenceLayer.parent.name, referenceLayer.name ))
self.openTabIfRequested()
try:
Font.updateInterface()
except:
pass
Font.currentTab.redraw()
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Quote Manager Error: %s" % e)
import traceback
print(traceback.format_exc())
def metricKeyMain( self, sender ):
try:
# update settings to the latest user input:
if not self.SavePreferences( self ):
print("Note: 'Quote Manager' could not write preferences.")
# brings macro window to front and clears its log:
Glyphs.clearLog()
Font = Glyphs.font # frontmost font
# query suffix
dotSuffix = self.getDotSuffix()
# report:
self.reportFont()
print("Inserting metric keys in single quotes%s." % (
" with suffix '%s'" % dotSuffix if dotSuffix else ""
))
quotesinglbaseName = "quotesinglbase"+dotSuffix
quotesinglbase = Font.glyphs[ quotesinglbaseName ]
quoteleftName = "quoteleft"+dotSuffix
quoteleft = Font.glyphs[ quoteleftName ]
quoterightName = "quoteright"+dotSuffix
quoteright = Font.glyphs[ quoterightName ]
quotesingleName = "quotesingle"+dotSuffix
quotesingle = Font.glyphs[ quotesingleName ]
defaultSingle, defaultDouble = self.defaultQuotes(dotSuffix)
equals = "=%s" % defaultSingle
reverse = "=|%s" % defaultSingle
if quotesingle:
if defaultSingle == quotesingleName:
# dumb quote, all the same:
quotesinglbase.leftMetricsKey = equals
quotesinglbase.rightMetricsKey = equals
quoteleft.leftMetricsKey = equals
quoteleft.rightMetricsKey = equals
quoteright.leftMetricsKey = equals
quoteright.rightMetricsKey = equals
print(u"✅ Updated Metrics Keys for: %s, %s, %s" % (quotesinglbaseName, quoteleftName, quoterightName))
elif not Glyphs.defaults["com.mekkablue.QuoteManager.excludeDumbQuotes"]:
# set dumb quote metric keys:
quotesingle.leftMetricsKey = equals
quotesingle.rightMetricsKey = "=|"
print(u"✅ Updated Metrics Keys for: %s" % (quotesingleName))
else:
print(u"\n⚠️ Skipping %s" % (quotesingleName))
else:
self.reportMissingGlyph(quotesingleName)
if quotesinglbase and defaultSingle == quotesinglbaseName:
if quoteleft:
quoteleft.leftMetricsKey = reverse
quoteleft.rightMetricsKey = reverse
self.reportMetricKeys(quoteleftName)
else:
self.reportMissingGlyph(quoteleftName)
if quoteright:
quoteright.leftMetricsKey = equals
quoteright.rightMetricsKey = equals
self.reportMetricKeys(quoterightName)
else:
self.reportMissingGlyph(quoterightName)
if quoteleft and defaultSingle == quoteleftName:
if quotesinglbase:
quotesinglbase.leftMetricsKey = reverse
quotesinglbase.rightMetricsKey = reverse
self.reportMetricKeys(quotesinglbaseName)
else:
self.reportMissingGlyph(quotesinglbaseName)
if quoteright:
quoteright.leftMetricsKey = reverse
quoteright.rightMetricsKey = reverse
self.reportMetricKeys(quoterightName)
else:
self.reportMissingGlyph(quoterightName)
if quoteright and defaultSingle == quoterightName:
if quotesinglbase:
quotesinglbase.leftMetricsKey = equals
quotesinglbase.rightMetricsKey = equals
self.reportMetricKeys(quotesinglbaseName)
else:
self.reportMissingGlyph(quotesinglbaseName)
if quoteleft:
quoteleft.leftMetricsKey = reverse
quoteleft.rightMetricsKey = reverse
self.reportMetricKeys(quoteleftName)
else:
self.reportMissingGlyph(quoteleftName)
# update metrics:
for thisGlyph in (quotesinglbase, quoteleft, quoteright, quotesingle):
if thisGlyph:
for thisLayer in thisGlyph.layers:
thisLayer.updateMetrics()
thisLayer.syncMetrics()
self.openTabIfRequested()
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Quote Manager Error: %s" % e)
import traceback
print(traceback.format_exc())
def buildDoublesMain( self, sender ):
try:
# update settings to the latest user input:
if not self.SavePreferences( self ):
print("Note: 'Quote Manager' could not write preferences.")
# brings macro window to front and clears its log:
Glyphs.clearLog()
Font = Glyphs.font # frontmost font
# query suffix
dotSuffix = self.getDotSuffix()
# report:
self.reportFont()
print("Inserting single quote components in double quotes%s." % (
" with suffix '%s'"%dotSuffix if dotSuffix else ""
))
for singleName in names:
doubleName = names[singleName]
if dotSuffix:
doubleName += dotSuffix
singleName += dotSuffix
if singleName=="quotesingle" and Glyphs.defaults["com.mekkablue.QuoteManager.excludeDumbQuotes"]:
print(u"\n⚠️ Skipping %s/%s" % (singleName, doubleName))
else:
print("\n%s/%s:" % (singleName, doubleName))
if not Font.glyphs[singleName]:
self.reportMissingGlyph(singleName)
elif not Font.glyphs[doubleName]:
self.reportMissingGlyph(doubleName)
else:
g = Font.glyphs[singleName] # single quote glyph
gg = Font.glyphs[doubleName] # double quote glyph
for master in Font.masters:
mID = master.id
gl = g.layers[mID] # single quote layer
ggl = gg.layers[mID] # double quote layer
# backup and clear layer:
ggl.swapForegroundWithBackground()
ggl.clear()
# add components:
for i in range(2):
newComponent = GSComponent(singleName)
try:
ggl.shapes.append(newComponent)
except:
ggl.components.append(newComponent)
newComponent.automaticAlignment = True
print(u"✅ %s: Added 2 %s components." % (doubleName, singleName))
self.openTabIfRequested()
Font.updateInterface()
Font.currentTab.redraw()
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Quote Manager Error: %s" % e)
import traceback
print(traceback.format_exc())
QuoteManager()
|
apache-2.0
| 2,217,539,419,846,548,700
| 39.133072
| 302
| 0.708246
| false
| 3.3585
| false
| false
| false
|
openstack/oslo.cache
|
oslo_cache/core.py
|
1
|
16849
|
# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Caching Layer Implementation.
To use this library:
You must call :func:`configure`.
Inside your application code, decorate the methods that you want the results
to be cached with a memoization decorator created with
:func:`get_memoization_decorator`. This function takes a group name from the
config. Register [`group`] ``caching`` and [`group`] ``cache_time`` options
for the groups that your decorators use so that caching can be configured.
This library's configuration options must be registered in your application's
:class:`oslo_config.cfg.ConfigOpts` instance. Do this by passing the ConfigOpts
instance to :func:`configure`.
The library has a special public value for nonexistent or expired keys called
:data:`NO_VALUE`. To use this value you should import it from oslo_cache.core::
from oslo_cache import core
NO_VALUE = core.NO_VALUE
"""
import ssl
import dogpile.cache
from dogpile.cache import api
from dogpile.cache import proxy
from dogpile.cache import util
from oslo_log import log
from oslo_utils import importutils
from oslo_cache._i18n import _
from oslo_cache import _opts
from oslo_cache import exception
__all__ = [
'configure',
'configure_cache_region',
'create_region',
'get_memoization_decorator',
'NO_VALUE',
]
NO_VALUE = api.NO_VALUE
"""Value returned for nonexistent or expired keys."""
_LOG = log.getLogger(__name__)
class _DebugProxy(proxy.ProxyBackend):
"""Extra Logging ProxyBackend."""
# NOTE(morganfainberg): Pass all key/values through repr to ensure we have
# a clean description of the information. Without use of repr, it might
# be possible to run into encode/decode error(s). For logging/debugging
# purposes encode/decode is irrelevant and we should be looking at the
# data exactly as it stands.
def get(self, key):
value = self.proxied.get(key)
_LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
{'key': key, 'value': value})
return value
def get_multi(self, keys):
values = self.proxied.get_multi(keys)
_LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
{'keys': keys, 'values': values})
return values
def set(self, key, value):
_LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
{'key': key, 'value': value})
return self.proxied.set(key, value)
def set_multi(self, keys):
_LOG.debug('CACHE_SET_MULTI: "%r"', keys)
self.proxied.set_multi(keys)
def delete(self, key):
self.proxied.delete(key)
_LOG.debug('CACHE_DELETE: "%r"', key)
def delete_multi(self, keys):
_LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
self.proxied.delete_multi(keys)
def _build_cache_config(conf):
"""Build the cache region dictionary configuration.
:returns: dict
"""
prefix = conf.cache.config_prefix
conf_dict = {}
conf_dict['%s.backend' % prefix] = _opts._DEFAULT_BACKEND
if conf.cache.enabled is True:
conf_dict['%s.backend' % prefix] = conf.cache.backend
conf_dict['%s.expiration_time' % prefix] = conf.cache.expiration_time
for argument in conf.cache.backend_argument:
try:
(argname, argvalue) = argument.split(':', 1)
except ValueError:
msg = ('Unable to build cache config-key. Expected format '
'"<argname>:<value>". Skipping unknown format: %s')
_LOG.error(msg, argument)
continue
arg_key = '.'.join([prefix, 'arguments', argname])
# NOTE(morgan): The handling of the URL data in memcache is bad and
# only takes cases where the values are a list. This explicitly
# checks for the base dogpile.cache.memcached backend and does the
# split if needed. Other backends such as redis get the same
# previous behavior. Overall the fact that the backends opaquely
# take data and do not handle processing/validation as expected
# directly makes for odd behaviors when wrapping dogpile.cache in
# a library like oslo.cache
if (conf.cache.backend
in ('dogpile.cache.memcached', 'oslo_cache.memcache_pool') and
argname == 'url'):
argvalue = argvalue.split(',')
conf_dict[arg_key] = argvalue
_LOG.debug('Oslo Cache Config: %s', conf_dict)
# NOTE(yorik-sar): these arguments will be used for memcache-related
# backends. Use setdefault for url to support old-style setting through
# backend_argument=url:127.0.0.1:11211
#
# NOTE(morgan): If requested by config, 'flush_on_reconnect' will be set
# for pooled connections. This can ensure that stale data is never
# consumed from a server that pops in/out due to a network partition
# or disconnect.
#
# See the help from python-memcached:
#
# param flush_on_reconnect: optional flag which prevents a
# scenario that can cause stale data to be read: If there's more
# than one memcached server and the connection to one is
# interrupted, keys that mapped to that server will get
# reassigned to another. If the first server comes back, those
# keys will map to it again. If it still has its data, get()s
# can read stale data that was overwritten on another
# server. This flag is off by default for backwards
# compatibility.
#
# The normal non-pooled clients connect explicitly on each use and
# does not need the explicit flush_on_reconnect
conf_dict.setdefault('%s.arguments.url' % prefix,
conf.cache.memcache_servers)
for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
'pool_unused_timeout', 'pool_connection_get_timeout',
'pool_flush_on_reconnect'):
value = getattr(conf.cache, 'memcache_' + arg)
conf_dict['%s.arguments.%s' % (prefix, arg)] = value
if conf.cache.tls_enabled:
_LOG.debug('Oslo Cache TLS - CA: %s', conf.cache.tls_cafile)
tls_context = ssl.create_default_context(cafile=conf.cache.tls_cafile)
if conf.cache.tls_certfile is not None:
_LOG.debug('Oslo Cache TLS - cert: %s', conf.cache.tls_certfile)
_LOG.debug('Oslo Cache TLS - key: %s', conf.cache.tls_keyfile)
tls_context.load_cert_chain(
conf.cache.tls_certfile,
conf.cache.tls_keyfile,
)
if conf.cache.tls_allowed_ciphers is not None:
_LOG.debug(
'Oslo Cache TLS - ciphers: %s',
conf.cache.tls_allowed_ciphers,
)
tls_context.set_ciphers(conf.cache.tls_allowed_ciphers)
conf_dict['%s.arguments.tls_context' % prefix] = tls_context
return conf_dict
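# Illustrative shape of the dictionary built above (a sketch, not exact
# defaults: '<prefix>' comes from [cache] config_prefix, the backend name and
# values are placeholders, and the tls_context key appears only when
# [cache] tls_enabled is set):
#     {'<prefix>.backend': 'dogpile.cache.memcached',
#      '<prefix>.expiration_time': 600,
#      '<prefix>.arguments.url': ['127.0.0.1:11211'],
#      '<prefix>.arguments.tls_context': <ssl.SSLContext>}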
def _sha1_mangle_key(key):
"""Wrapper for dogpile's sha1_mangle_key.
dogpile's sha1_mangle_key function expects an encoded string, so we
should take steps to properly handle multiple inputs before passing
the key through.
"""
try:
key = key.encode('utf-8', errors='xmlcharrefreplace')
except (UnicodeError, AttributeError):
# NOTE(stevemar): if encoding fails just continue anyway.
pass
return util.sha1_mangle_key(key)
def _key_generate_to_str(s):
# NOTE(morganfainberg): Since we need to stringify all arguments, attempt
# to stringify and handle the Unicode error explicitly as needed.
try:
return str(s)
except UnicodeEncodeError:
return s.encode('utf-8')
def function_key_generator(namespace, fn, to_str=_key_generate_to_str):
# NOTE(morganfainberg): This wraps dogpile.cache's default
# function_key_generator to change the default to_str mechanism.
return util.function_key_generator(namespace, fn, to_str=to_str)
def kwarg_function_key_generator(namespace, fn, to_str=_key_generate_to_str):
# NOTE(ralonsoh): This wraps dogpile.cache's default
# kwarg_function_key_generator to change the default to_str mechanism.
return util.kwarg_function_key_generator(namespace, fn, to_str=to_str)
def create_region(function=function_key_generator):
"""Create a region.
This is just dogpile.cache.make_region, but the key generator has a
different to_str mechanism.
.. note::
You must call :func:`configure_cache_region` with this region before
a memoized method is called.
:param function: function used to generate a unique key depending on the
arguments of the decorated function
:type function: function
:returns: The new region.
:rtype: :class:`dogpile.cache.region.CacheRegion`
"""
return dogpile.cache.make_region(function_key_generator=function)
def configure_cache_region(conf, region):
"""Configure a cache region.
If the cache region is already configured, this function does nothing.
Otherwise, the region is configured.
:param conf: config object, must have had :func:`configure` called on it.
:type conf: oslo_config.cfg.ConfigOpts
:param region: Cache region to configure (see :func:`create_region`).
:type region: dogpile.cache.region.CacheRegion
:raises oslo_cache.exception.ConfigurationError: If the region parameter is
not a dogpile.cache.CacheRegion.
:returns: The region.
:rtype: :class:`dogpile.cache.region.CacheRegion`
"""
if not isinstance(region, dogpile.cache.CacheRegion):
raise exception.ConfigurationError(
_('region not type dogpile.cache.CacheRegion'))
if not region.is_configured:
# NOTE(morganfainberg): this is how you tell if a region is configured.
# There is a request logged with dogpile.cache upstream to make this
# easier / less ugly.
config_dict = _build_cache_config(conf)
region.configure_from_config(config_dict,
'%s.' % conf.cache.config_prefix)
if conf.cache.debug_cache_backend:
region.wrap(_DebugProxy)
# NOTE(morganfainberg): if the backend requests the use of a
# key_mangler, we should respect that key_mangler function. If a
# key_mangler is not defined by the backend, use the sha1_mangle_key
# mangler provided by dogpile.cache. This ensures we always use a fixed
# size cache-key.
if region.key_mangler is None:
region.key_mangler = _sha1_mangle_key
for class_path in conf.cache.proxies:
# NOTE(morganfainberg): if we have any proxy wrappers, we should
# ensure they are added to the cache region's backend. Since
# configure_from_config doesn't handle the wrap argument, we need
# to manually add the Proxies. For information on how the
# ProxyBackends work, see the dogpile.cache documents on
# "changing-backend-behavior"
cls = importutils.import_class(class_path)
_LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
region.wrap(cls)
return region
def _get_should_cache_fn(conf, group):
"""Build a function that returns a config group's caching status.
For any given object that has caching capabilities, a boolean config option
for that object's group should exist and default to ``True``. This
function will use that value to tell the caching decorator if caching for
that object is enabled. To properly use this with the decorator, pass this
function the configuration group and assign the result to a variable.
Pass the new variable to the caching decorator as the named argument
``should_cache_fn``.
:param conf: config object, must have had :func:`configure` called on it.
:type conf: oslo_config.cfg.ConfigOpts
:param group: name of the configuration group to examine
:type group: string
:returns: function reference
"""
def should_cache(value):
if not conf.cache.enabled:
return False
conf_group = getattr(conf, group)
return getattr(conf_group, 'caching', True)
return should_cache
def _get_expiration_time_fn(conf, group):
"""Build a function that returns a config group's expiration time status.
For any given object that has caching capabilities, an int config option
called ``cache_time`` for that driver's group should exist and typically
default to ``None``. This function will use that value to tell the caching
decorator of the TTL override for caching the resulting objects. If the
value of the config option is ``None`` the default value provided in the
``[cache] expiration_time`` option will be used by the decorator. The
default may be set to something other than ``None`` in cases where the
caching TTL should not be tied to the global default(s).
To properly use this with the decorator, pass this function the
configuration group and assign the result to a variable. Pass the new
variable to the caching decorator as the named argument
``expiration_time``.
:param group: name of the configuration group to examine
:type group: string
:rtype: function reference
"""
def get_expiration_time():
conf_group = getattr(conf, group)
return getattr(conf_group, 'cache_time', None)
return get_expiration_time
def get_memoization_decorator(conf, region, group, expiration_group=None):
"""Build a function based on the `cache_on_arguments` decorator.
The memoization decorator that gets created by this function is a
:meth:`dogpile.cache.region.CacheRegion.cache_on_arguments` decorator,
where
* The ``should_cache_fn`` is set to a function that returns True if both
the ``[cache] enabled`` option is true and [`group`] ``caching`` is
True.
* The ``expiration_time`` is set from the
[`expiration_group`] ``cache_time`` option if ``expiration_group``
is passed in and the value is set, or [`group`] ``cache_time`` if
``expiration_group`` is not passed in and the value is set, or
``[cache] expiration_time`` otherwise.
Example usage::
import oslo_cache.core
MEMOIZE = oslo_cache.core.get_memoization_decorator(
conf, region, group='group1')
@MEMOIZE
def function(arg1, arg2):
...
ALTERNATE_MEMOIZE = oslo_cache.core.get_memoization_decorator(
conf, region, group='group2', expiration_group='group3')
@ALTERNATE_MEMOIZE
def function2(arg1, arg2):
...
:param conf: config object, must have had :func:`configure` called on it.
:type conf: oslo_config.cfg.ConfigOpts
:param region: region as created by :func:`create_region`.
:type region: dogpile.cache.region.CacheRegion
:param group: name of the configuration group to examine
:type group: string
:param expiration_group: name of the configuration group to examine
for the expiration option. This will fall back to
using ``group`` if the value is unspecified or
``None``
:type expiration_group: string
:rtype: function reference
"""
if expiration_group is None:
expiration_group = group
should_cache = _get_should_cache_fn(conf, group)
expiration_time = _get_expiration_time_fn(conf, expiration_group)
memoize = region.cache_on_arguments(should_cache_fn=should_cache,
expiration_time=expiration_time)
# Make sure the actual "should_cache" and "expiration_time" methods are
# available. This is potentially interesting/useful to pre-seed cache
# values.
memoize.should_cache = should_cache
memoize.get_expiration_time = expiration_time
return memoize
def configure(conf):
"""Configure the library.
Register the required oslo.cache config options into an oslo.config CONF
object.
This must be called before :py:func:`configure_cache_region`.
:param conf: The configuration object.
:type conf: oslo_config.cfg.ConfigOpts
"""
_opts.configure(conf)
|
apache-2.0
| -3,830,316,320,941,976,000
| 37.822581
| 79
| 0.664312
| false
| 4.017406
| true
| false
| false
|
matichorvat/pydelphin
|
delphin/mrs/dmrx.py
|
1
|
8902
|
# DMRX codec
# Summary: This module implements serialization and deserialization of the
# XML encoding of Dependency Minimal Recursion Semantics (DMRS). It
# provides standard Pickle API calls of load, loads, dump, and dumps
# for serializing and deserializing DMRX corpora. Further,
# load_one, loads_one, dump_one, and dumps_one operate on a single
# DMRX/DMRS.
#
# Author: Michael Wayne Goodman <goodmami@uw.edu>
from __future__ import print_function
from collections import OrderedDict
import re
import xml.etree.ElementTree as etree
from delphin.mrs import (Dmrs, Node, Link, Pred, Lnk)
from delphin.mrs.components import (nodes, links)
from delphin.mrs.config import QUANTIFIER_POS
from delphin.mrs.util import etree_tostring
##############################################################################
##############################################################################
# Pickle-API methods
def load(fh, single=False):
ms = deserialize(fh)
if single:
ms = next(ms)
return ms
def loads(s, single=False):
corpus = etree.fromstring(s)
if single:
ds = _deserialize_dmrs(next(iter(corpus)))
else:
ds = (_deserialize_dmrs(dmrs_elem) for dmrs_elem in corpus)
return ds
def dump(fh, ms, **kwargs):
print(dumps(ms, **kwargs), file=fh)
def dumps(ms, single=False, pretty_print=False, **kwargs):
if single:
ms = [ms]
return serialize(ms, pretty_print=pretty_print)
# for convenience
load_one = lambda fh: load(fh, single=True)
loads_one = lambda s: loads(s, single=True)
dump_one = lambda fh, m, **kwargs: dump(fh, m, single=True, **kwargs)
dumps_one = lambda m, **kwargs: dumps(m, single=True, **kwargs)
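# A minimal round-trip sketch (the variable `dmrx_string` is illustrative and
# assumed to hold a <dmrs-list> document with at least one <dmrs> element):
#
#     d = loads_one(dmrx_string)                 # -> a single Dmrs object
#     xml_out = dumps_one(d, pretty_print=True)  # -> DMRX string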
##############################################################################
##############################################################################
# Decoding
def deserialize(fh):
"""Deserialize a DMRX-encoded DMRS structure."""
# <!ELEMENT dmrs-list (dmrs)*>
# if memory becomes a big problem, consider catching start events,
# get the root element (later start events can be ignored), and
# root.clear() after decoding each mrs
for event, elem in etree.iterparse(fh, events=('end',)):
if elem.tag == 'dmrs':
yield _deserialize_dmrs(elem)
elem.clear()
def _deserialize_dmrs(elem):
# <!ELEMENT dmrs (node|link)*>
# <!ATTLIST dmrs
# cfrom CDATA #REQUIRED
# cto CDATA #REQUIRED
# surface CDATA #IMPLIED
# ident CDATA #IMPLIED >
elem = elem.find('.') # in case elem is an ElementTree rather than Element
return Dmrs(nodes=list(map(_decode_node, elem.iter('node'))),
links=list(map(_decode_link, elem.iter('link'))),
lnk=_decode_lnk(elem),
surface=elem.get('surface'),
identifier=elem.get('ident'))
def _decode_node(elem):
# <!ELEMENT node ((realpred|gpred), sortinfo)>
# <!ATTLIST node
# nodeid CDATA #REQUIRED
# cfrom CDATA #REQUIRED
# cto CDATA #REQUIRED
# surface CDATA #IMPLIED
# base CDATA #IMPLIED
# carg CDATA #IMPLIED >
return Node(pred=_decode_pred(elem.find('*[1]')),
nodeid=int(elem.get('nodeid')),
sortinfo=_decode_sortinfo(elem.find('sortinfo')),
lnk=_decode_lnk(elem),
surface=elem.get('surface'),
base=elem.get('base'),
carg=elem.get('carg'))
def _decode_pred(elem):
# <!ELEMENT realpred EMPTY>
# <!ATTLIST realpred
# lemma CDATA #REQUIRED
# pos (v|n|j|r|p|q|c|x|u|a|s) #REQUIRED
# sense CDATA #IMPLIED >
# <!ELEMENT gpred (#PCDATA)>
if elem.tag == 'gpred':
return Pred.grammarpred(elem.text)
elif elem.tag == 'realpred':
return Pred.realpred(elem.get('lemma'),
elem.get('pos'),
elem.get('sense'))
def _decode_sortinfo(elem):
# <!ELEMENT sortinfo EMPTY>
# <!ATTLIST sortinfo
# cvarsort (x|e|i|u) #IMPLIED
# num (sg|pl|u) #IMPLIED
# pers (1|2|3|1-or-3|u) #IMPLIED
# gend (m|f|n|m-or-f|u) #IMPLIED
# sf (prop|ques|comm|prop-or-ques|u) #IMPLIED
# tense (past|pres|fut|tensed|untensed|u) #IMPLIED
# mood (indicative|subjunctive|u) #IMPLIED
# prontype (std_pron|zero_pron|refl|u) #IMPLIED
# prog (plus|minus|u) #IMPLIED
# perf (plus|minus|u) #IMPLIED
# ind (plus|minus|u) #IMPLIED >
# note: Just accept any properties, since these are ERG-specific
return elem.attrib
def _decode_link(elem):
# <!ELEMENT link (rargname, post)>
# <!ATTLIST link
# from CDATA #REQUIRED
# to CDATA #REQUIRED >
# <!ELEMENT rargname (#PCDATA)>
# <!ELEMENT post (#PCDATA)>
return Link(start=elem.get('from'),
end=elem.get('to'),
rargname=getattr(elem.find('rargname'), 'text', None),
post=getattr(elem.find('post'), 'text', None))
def _decode_lnk(elem):
return Lnk.charspan(elem.get('cfrom', '-1'), elem.get('cto', '-1'))
##############################################################################
##############################################################################
# Encoding
_strict = False
def serialize(ms, strict=False, encoding='unicode', pretty_print=False):
e = etree.Element('dmrs-list')
for m in ms:
e.append(_encode_dmrs(m, strict=strict))
# for now, pretty_print=True is the same as pretty_print='LKB'
if pretty_print in ('LKB', 'lkb', 'Lkb', True):
lkb_pprint_re = re.compile(r'(<dmrs[^>]+>|</node>|</link>|</dmrs>)')
string = etree_tostring(e, encoding=encoding)
return lkb_pprint_re.sub(r'\1\n', string)
# pretty_print is only lxml. Look into tostringlist, maybe?
# return etree.tostring(e, pretty_print=pretty_print, encoding='unicode')
return etree_tostring(e, encoding=encoding)
def _encode_dmrs(m, strict=False):
_strict = strict
attributes = OrderedDict([('cfrom', str(m.cfrom)),
('cto', str(m.cto))])
if m.surface is not None:
attributes['surface'] = m.surface
if m.identifier is not None:
attributes['ident'] = m.identifier
# if not _strict and m.index is not None:
# # index corresponds to a variable, so link it to a nodeid
# index_nodeid = m.get_nodeid(m.index)
# if index_nodeid is not None:
# attributes['index'] = str(index_nodeid)
e = etree.Element('dmrs', attrib=attributes)
for node in nodes(m):
e.append(_encode_node(node))
for link in links(m):
e.append(_encode_link(link))
return e
def _encode_node(node):
attributes = OrderedDict([('nodeid', str(node.nodeid)),
('cfrom', str(node.cfrom)),
('cto', str(node.cto))])
if node.surface is not None:
attributes['surface'] = node.surface
if node.base is not None:
attributes['base'] = node.base
if node.carg is not None:
attributes['carg'] = node.carg
e = etree.Element('node', attrib=attributes)
e.append(_encode_pred(node.pred))
e.append(_encode_sortinfo(node))
return e
def _encode_pred(pred):
if pred.type == Pred.GRAMMARPRED:
e = etree.Element('gpred')
e.text = pred.string.strip('"\'')
elif pred.type in (Pred.REALPRED, Pred.STRINGPRED):
attributes = {}
if pred.lemma is not None:
attributes['lemma'] = pred.lemma
if pred.pos is not None:
attributes['pos'] = pred.pos
if pred.sense is not None:
attributes['sense'] = str(pred.sense)
e = etree.Element('realpred', attrib=attributes)
return e
def _encode_sortinfo(node):
attributes = OrderedDict()
# return empty <sortinfo/> for quantifiers
if node.pred.pos == QUANTIFIER_POS:
return etree.Element('sortinfo') # return empty <sortinfo/>
if node.sortinfo:
if not _strict:
for k, v in node.sortinfo.items():
attributes[k.lower()] = str(v)
else:
pass # TODO add strict sortinfo
e = etree.Element('sortinfo', attrib=attributes or {})
return e
def _encode_link(link):
e = etree.Element('link', attrib={'from': str(link.start),
'to': str(link.end)})
rargname = etree.Element('rargname')
rargname.text = link.rargname
post = etree.Element('post')
post.text = link.post
e.append(rargname)
e.append(post)
return e
|
mit
| -8,480,411,096,646,843,000
| 33.909804
| 79
| 0.552572
| false
| 3.595315
| false
| false
| false
|
tectronics/yellowbox
|
yblib/statparser.py
|
1
|
2423
|
"""A simple parser library for statistics messages produced by Yellowbox.
This module exports a function and an exception raised by that function.
Function:
statparse - Parses a Yellowbox stat message
Exception:
InvalidMessageException - Raised when statparse is passed something
that isn't a Yellowbox stat message.
"""
import datetime
__all__ = ('statparse', 'InvalidMessageException')
# Type converter table.
TYPEXFORM = {
# For some reason datetime.fromtimestamp() wants the timestamps
# to be in floating point. So, we give it what it wants.
'start': lambda x: datetime.datetime.fromtimestamp(float(x)),
'end': lambda x: datetime.datetime.fromtimestamp(float(x)),
'size': int,
# ASCII format is day:second so we split the string,
# convert the pieces to integers, and feed the unpacked
# tuple to datetime.timedelta
'oldest': lambda age: datetime.timedelta(seconds=int(age))
}
class InvalidMessageException(Exception):
"""Indicates an attempt to parse an incorrectly-formatted message."""
pass
def statparse(smsg):
"""Take a stats message from Yellowbox and turn it into a dictionary."""
# Convert the message into a tuple, split about 'stats ->' and stripped
# of spaces and paired delimiters.
# The first element of the tuple contains the name of the facility,
# the second contains the flattened dict.
try:
(name, data) = (x.strip('<>(){}: ') for x in smsg.split("stats ->"))
name = name[name.index('['):].strip('[]')
except ValueError:
raise InvalidMessageException('Can not parse "%s"' % smsg)
# Convert the semicolon-delimited key-value pairs into a tuple of
# key-value pairs
data = (x.split(': ') for x in (x.strip() for x in data.split(';')))
# Do type coercion and return a tuple mapping the name of the sender
# to a dictionary containing the key-value pairings with the types
# corrected.
return (name, dict((k, TYPEXFORM.get(k, lambda x: x)(v)) for (k, v) in data))
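# For the first sample message in main() below, the returned value looks
# roughly like this (the datetime values depend on the local timezone):
#     ('wired_ethernet', {'size': 37,
#                         'start': datetime.datetime(...),
#                         'end': datetime.datetime(...)})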
def main():
"""Testing function."""
sample = (
'CaptureManager[wired_ethernet]: stats ->'
' {size: 37; start: 1241575692; end: 1241575723}',
'CaptureManager[loopback]: stats ->'
' {size: 34; start: 1241575692; end: 1241575723}',
'PoolManager[pretend-foo]: stats ->'
' {free: 531628032; thresh: 3865470566;'
' poolsize: 4294967296; oldest: 33358}'
)
import pprint
cooked = dict([statparse(s) for s in sample])
pprint.pprint(cooked)
if __name__ == '__main__':
main()
|
bsd-3-clause
| -835,843,933,377,879,400
| 32.652778
| 78
| 0.707388
| false
| 3.355956
| false
| false
| false
|
macmanes-lab/GeosmithiaComparativeGenomics
|
Scripts4PAML/6_removehashtag_TA.py
|
1
|
1423
|
#!/usr/bin/python3
# A program for removing hashtags from the "foreground" species headers.
# USAGE: ./6_removehashtag_TA.py --input path_to_input_directory
# Author: Taruna Aggarwal
# Contact: ta2007@wildcats.unh.edu
# Affiliation: University of New Hampshire, Durham, NH, USA
# Date: 1/27/2016
# Purpose is to remove '#1' from the species header that is considered the foreground branch
# for the branch-site model in codeml of PAML
# The script will generate new files in the same directory as itself.
# The new files will be appended with '.fixed.clean'
import argparse
import os
parser = argparse.ArgumentParser(description="This script renames files and their headers in a directory.")
parser.add_argument('--input', help="PATH to the directory with input files.", required=True)
args = parser.parse_args()
for file in os.listdir(args.input):
if file.endswith(".clean"):
working_file = open(args.input + '/' + file, "r")
new_file = open(file[:-6] + ".fixed.clean", "w")
for currentLine in working_file:
currentLine = currentLine.rstrip()
if currentLine.startswith(">geos_morb"):
new_file.write("{0}{1}\n".format(currentLine[:-2]))
#elif currentLine[0]==">":
# new_file.write("{0}\n".format(currentLine[0:10]))
else:
new_file.write("{0}\n".format(currentLine))
working_file.close()
new_file.close()
|
cc0-1.0
| 5,111,911,443,499,973,000
| 38.527778
| 107
| 0.665495
| false
| 3.531017
| false
| false
| false
|
Xi-Plus/Xiplus-Wikipedia-Bot
|
push/edit.py
|
1
|
4521
|
# -*- coding: utf-8 -*-
import argparse
import os
from config import cfg # pylint: disable=E0611,W0614
from func import file_get_contents
os.environ['TZ'] = 'UTC'
parser = argparse.ArgumentParser()
parser.add_argument('--auto', action='store_true')
parser.add_argument('--no-diff', action='store_true')
parser.add_argument('--pull', action='store_true')
parser.set_defaults(auto=False, no_diff=False, pull=False)
args = parser.parse_args()
print(args)
print('===== project =====')
project = None
while project is None:
for key, val in enumerate(cfg['project'], 1):
print('\t', key, val)
project = input('select a project:')
try:
project = int(project)
project = list(cfg['project'].values())[project - 1]
break
except Exception as e:
print(e)
project = None
print('project', project)
print()
print('===== web =====')
web = None
while web is None:
for key, val in enumerate(project['web'], 1):
print('\t', key, val)
web = input('select a web:')
try:
web = int(web)
webname = project['web'][web - 1]
web = cfg['web'][webname]
break
except Exception as e:
print(e)
web = None
print('web', web)
print()
os.environ['PYWIKIBOT_DIR'] = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'user-config',
webname
)
import pywikibot
site = pywikibot.Site()
site.login()
print('===== source =====')
source = None
while source is None:
for key, val in enumerate(project['source'], 1):
print('\t', key, val)
source = input('select a source:')
try:
source = int(source)
source = cfg['source'][project['source'][source - 1]]
break
except Exception as e:
print(e)
source = None
print('source', source)
print()
print('===== target =====')
target = None
while target is None:
for key, val in enumerate(project['target'], 1):
print('\t', key, val)
target = input('select a target:')
try:
target = int(target)
target = cfg['target'][project['target'][target - 1]]
break
except Exception as e:
print(e)
target = None
print('target', target)
print()
print('===== files =====')
files = {}
while len(files) == 0:
cnt = 0
for fromname in project['files']:
cnt += 1
print('\t', cnt, '\t', fromname, '\t', project['files'][fromname])
temp = input('select a files:')
idxs = []
try:
for idx in temp.split():
idx = int(idx)
idxs.append(idx)
except Exception as e:
print(e)
continue
if any([idx < 0 for idx in idxs]):
for fromname in project['files']:
files[fromname] = project['files'][fromname]
try:
for idx in temp.split():
idx = int(idx)
if idx > 0:
files[list(project['files'].keys())[idx - 1]] = list(project['files'].values())[idx - 1]
else:
del files[list(project['files'].keys())[(-idx) - 1]]
break
except Exception as e:
print(e)
files = {}
if len(files) == 0:
for fromname in project['files']:
files[fromname] = project['files'][fromname]
print('files', files)
print()
summary = project['summary']
print('summary:', summary)
temp = input('new summary:').strip()
if temp != '':
summary = temp
print('summary:', summary)
print()
for fromname in files:
toname = files[fromname]
fromname = source + fromname
toname = target + toname
if args.pull:
print(fromname, '<-', toname)
page = pywikibot.Page(site, toname)
if not page.exists():
print('Page is not exists')
continue
with open(fromname, 'w', encoding='utf8') as f:
f.write(page.text)
continue
print(fromname, '->', toname)
try:
text = file_get_contents(fromname)
except Exception as e:
print(e)
continue
page = pywikibot.Page(site, toname)
if page.text == '':
print('New page')
elif page.text == text.rstrip():
print('Nothing changed. Skipped.')
continue
else:
if not args.no_diff:
pywikibot.showDiff(page.text, text)
if args.auto:
save = 'yes'
else:
save = input('Save?')
if save.lower() in ['', 'y', 'yes']:
page.text = text
page.save(summary=summary, minor=web['minor'], botflag=web['bot'], nocreate=web['nocreate'])
|
mit
| -5,548,971,069,188,324,000
| 24.542373
| 104
| 0.561823
| false
| 3.64303
| false
| false
| false
|
GabrielDumbrava/django-categories
|
categories/base.py
|
1
|
5807
|
"""
This is the base class on which to build a hierarchical category-like model
with customizable metadata and its own name space.
"""
from django.contrib import admin
from django.db import models
from django import forms
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from mptt.models import MPTTModel
from mptt.fields import TreeForeignKey
from mptt.managers import TreeManager
from slugify import slugify
from .editor.tree_editor import TreeEditor
from .settings import ALLOW_SLUG_CHANGE, SLUG_TRANSLITERATOR
class CategoryManager(models.Manager):
"""
A manager that adds an "active()" method for all active categories
"""
def active(self):
"""
Only categories that are active
"""
return self.get_queryset().filter(active=True)
class CategoryBase(MPTTModel):
"""
This base model includes the absolute bare bones fields and methods. One
could simply subclass this model and do nothing else and it should work.
"""
parent = TreeForeignKey('self',
blank=True,
null=True,
related_name='children',
verbose_name=_('parent'))
name = models.CharField(max_length=100, verbose_name=_('name'))
slug = models.SlugField(verbose_name=_('slug'))
active = models.BooleanField(default=True, verbose_name=_('active'))
objects = CategoryManager()
tree = TreeManager()
def save(self, *args, **kwargs):
"""
While you can activate an item without activating its descendants,
it doesn't make sense that you can deactivate an item and have its
descendants remain active.
"""
if not self.slug:
self.slug = slugify(SLUG_TRANSLITERATOR(self.name))[:50]
super(CategoryBase, self).save(*args, **kwargs)
if not self.active:
for item in self.get_descendants():
if item.active != self.active:
item.active = self.active
item.save()
def __unicode__(self):
ancestors = self.get_ancestors()
return ' > '.join([force_unicode(i.name) for i in ancestors] + [self.name, ])
class Meta:
abstract = True
unique_together = ('parent', 'name')
ordering = ('tree_id', 'lft')
class MPTTMeta:
order_insertion_by = 'name'
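# A minimal subclass sketch, as suggested by the module docstring above (the
# model name is illustrative and not part of this module):
#
#     class SimpleCategory(CategoryBase):
#         """A concrete category tree with no extra fields."""
#         pass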
class CategoryBaseAdminForm(forms.ModelForm):
def clean_slug(self):
if not self.cleaned_data.get('slug', None):
if self.instance is None or not ALLOW_SLUG_CHANGE:
self.cleaned_data['slug'] = slugify(SLUG_TRANSLITERATOR(self.cleaned_data['name']))
return self.cleaned_data['slug'][:50]
def clean(self):
super(CategoryBaseAdminForm, self).clean()
if not self.is_valid():
return self.cleaned_data
opts = self._meta
# Validate slug is valid in that level
kwargs = {}
if self.cleaned_data.get('parent', None) is None:
kwargs['parent__isnull'] = True
else:
kwargs['parent__pk'] = int(self.cleaned_data['parent'].id)
this_level_slugs = [c['slug'] for c in opts.model.objects.filter(
**kwargs).values('id', 'slug'
) if c['id'] != self.instance.id]
if self.cleaned_data['slug'] in this_level_slugs:
raise forms.ValidationError(_('The slug must be unique among '
'the items at its level.'))
# Validate Category Parent
# Make sure the category doesn't set itself or any of its children as
# its parent.
decendant_ids = self.instance.get_descendants().values_list('id', flat=True) if self.instance.id else None
if self.cleaned_data.get('parent', None) is None or self.instance.id is None:
return self.cleaned_data
elif self.cleaned_data['parent'].id == self.instance.id:
raise forms.ValidationError(_("You can't set the parent of the "
"item to itself."))
elif self.cleaned_data['parent'].id in decendant_ids:
raise forms.ValidationError(_("You can't set the parent of the "
"item to a descendant."))
return self.cleaned_data
class CategoryBaseAdmin(TreeEditor, admin.ModelAdmin):
form = CategoryBaseAdminForm
list_display = ('name', 'active')
search_fields = ('name',)
prepopulated_fields = {'slug': ('name',)}
actions = ['activate', 'deactivate']
def get_actions(self, request):
actions = super(CategoryBaseAdmin, self).get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def deactivate(self, request, queryset):
"""
Set active to False for selected items
"""
selected_cats = self.model.objects.filter(
pk__in=[int(x) for x in request.POST.getlist('_selected_action')])
for item in selected_cats:
if item.active:
item.active = False
item.save()
item.children.all().update(active=False)
deactivate.short_description = _('Deactivate selected categories and their children')
def activate(self, request, queryset):
"""
Set active to True for selected items
"""
selected_cats = self.model.objects.filter(
pk__in=[int(x) for x in request.POST.getlist('_selected_action')])
for item in selected_cats:
item.active = True
item.save()
item.children.all().update(active=True)
activate.short_description = _('Activate selected categories and their children')
|
apache-2.0
| -402,155,265,483,884,540
| 34.845679
| 114
| 0.608404
| false
| 4.317472
| false
| false
| false
|
DMAshura/porcupyne
|
resources.py
|
1
|
1891
|
from pyglet import resource, media, image
import os
import string
class Resource:
def __init__(self):
self.sound_dict = {}
self.image_dict = {}
self.image_filetypes = ('.jpg', '.gif', '.bmp', '.png')
self.sound_filetypes = ('.wav', '.mp3')
self.filetypes = []
self.filetypes.extend(self.image_filetypes)
self.filetypes.extend(self.sound_filetypes)
def load_directory(self, path):
resource.path.append(path)
print resource.path
osPath = ''
for _ in resource.path:
osPath += _
osPath += os.sep
osPath = osPath[:-1]
print osPath
dirList = os.listdir(osPath)
print "Entering directory %s.\n" % path
resource.reindex()
for fname in dirList:
ext = ''
print fname
if string.rfind(fname,".") != -1:
name = fname[:string.rfind(fname,".")]
ext = fname[string.rfind(fname,"."):]
else:
name = fname
print "name = %s" % name
print "ext = %s" % ext
if ( ext ) and (ext in self.filetypes):
self.load_file(name, ext, osPath)
if not ext:
self.load_directory(name)
print "Leaving directory %s.\n" % resource.path.pop()
def load_file(self, name, ext, path):
if ext in self.image_filetypes:
self.image_dict[name + ext] = image.load(os.path.join(path,
'%s%s' % (name, ext))).get_texture()
print "Image '%s' loaded!" % (name + ext)
if ext in self.sound_filetypes:
self.sound_dict[name + ext] = media.load(path + os.sep + name + ext,
streaming = False)
print "Sound '%s' loaded!" % (name + ext)
|
gpl-3.0
| -3,408,408,742,811,772,000
| 32.175439
| 80
| 0.495505
| false
| 3.898969
| false
| false
| false
|
sjlehtin/django
|
django/core/management/__init__.py
|
1
|
15287
|
import functools
import os
import pkgutil
import sys
from collections import OrderedDict, defaultdict
from contextlib import suppress
from importlib import import_module
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (
BaseCommand, CommandError, CommandParser, handle_default_options,
)
from django.core.management.color import color_style
from django.utils import autoreload
from django.utils.encoding import force_text
def find_commands(management_dir):
"""
Given a path to a management directory, return a list of all the command
names that are available.
"""
command_dir = os.path.join(management_dir, 'commands')
return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])
if not is_pkg and not name.startswith('_')]
def load_command_class(app_name, name):
"""
Given a command name and an application name, return the Command
class instance. Allow all errors raised by the import process
(ImportError, AttributeError) to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
@functools.lru_cache(maxsize=None)
def get_commands():
"""
Return a dictionary mapping command names to their callback applications.
Look for a management.commands package in django.core, and in each
installed application -- if a commands package exists, register all
commands in that package.
Core commands are always included. If a settings module has been
specified, also include user-defined commands.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
commands = {name: 'django.core' for name in find_commands(__path__[0])}
if not settings.configured:
return commands
for app_config in reversed(list(apps.get_app_configs())):
path = os.path.join(app_config.path, 'management')
commands.update({name: app_config.name for name in find_commands(path)})
return commands
def call_command(command_name, *args, **options):
"""
Call the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
`command_name` may be a string or a command object. Using a string is
preferred unless the command object is required for further processing or
testing.
Some examples:
call_command('migrate')
call_command('shell', plain=True)
call_command('sqlmigrate', 'myapp')
from django.core.management.commands import flush
cmd = flush.Command()
call_command(cmd, verbosity=0, interactive=False)
# Do something with cmd ...
"""
if isinstance(command_name, BaseCommand):
# Command object passed in.
command = command_name
command_name = command.__class__.__module__.split('.')[-1]
else:
# Load the command object by name.
try:
app_name = get_commands()[command_name]
except KeyError:
raise CommandError("Unknown command: %r" % command_name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
command = app_name
else:
command = load_command_class(app_name, command_name)
# Simulate argument parsing to get the option defaults (see #10080 for details).
parser = command.create_parser('', command_name)
# Use the `dest` option name from the parser option
opt_mapping = {
min(s_opt.option_strings).lstrip('-').replace('-', '_'): s_opt.dest
for s_opt in parser._actions if s_opt.option_strings
}
arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
defaults = parser.parse_args(args=[force_text(a) for a in args])
defaults = dict(defaults._get_kwargs(), **arg_options)
# Raise an error if any unknown options were passed.
stealth_options = set(command.base_stealth_options + command.stealth_options)
dest_parameters = {action.dest for action in parser._actions}
valid_options = (dest_parameters | stealth_options).union(opt_mapping)
unknown_options = set(options) - valid_options
if unknown_options:
raise TypeError(
"Unknown option(s) for %s command: %s. "
"Valid options are: %s." % (
command_name,
', '.join(sorted(unknown_options)),
', '.join(sorted(valid_options)),
)
)
# Move positional args out of options to mimic legacy optparse
args = defaults.pop('args', ())
if 'skip_checks' not in options:
defaults['skip_checks'] = True
return command.execute(*args, **defaults)
class ManagementUtility:
"""
Encapsulate the logic of the django-admin and manage.py utilities.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
if self.prog_name == '__main__.py':
self.prog_name = 'python -m django'
self.settings_exception = None
def main_help_text(self, commands_only=False):
"""Return the script's main help text, as a string."""
if commands_only:
usage = sorted(get_commands())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
"",
"Available subcommands:",
]
commands_dict = defaultdict(lambda: [])
for name, app in get_commands().items():
if app == 'django.core':
app = 'django'
else:
app = app.rpartition('.')[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
# Output an extra note if settings are not properly configured
if self.settings_exception is not None:
usage.append(style.NOTICE(
"Note that only Django core commands are listed "
"as settings are not properly configured (error: %s)."
% self.settings_exception))
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Try to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin" or "manage.py") if it can't be found.
"""
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
if os.environ.get('DJANGO_SETTINGS_MODULE'):
# If `subcommand` is missing due to misconfigured settings, the
# following line will retrigger an ImproperlyConfigured exception
# (get_commands() swallows the original one) so the user is
# informed about it.
settings.INSTALLED_APPS
else:
sys.stderr.write("No Django settings specified.\n")
sys.stderr.write(
"Unknown command: %r\nType '%s help' for usage.\n"
% (subcommand, self.prog_name)
)
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMPREPLY` variable and
        treated as completion suggestions. `COMPREPLY` expects a space
        separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
        man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', False)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: add the names of installed apps to options
if cwords[0] in ('dumpdata', 'sqlmigrate', 'sqlsequencereset', 'test'):
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
with suppress(ImportError):
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options.extend((app_config.label, 0) for app_config in app_configs)
parser = subcommand_cls.create_parser('', cwords[0])
options.extend(
(min(s_opt.option_strings), s_opt.nargs != 0)
for s_opt in parser._actions if s_opt.option_strings
)
# filter out previously specified options from available options
prev_opts = {x.split('=')[0] for x in cwords[1:cword - 1]}
options = (opt for opt in options if opt[0] not in prev_opts)
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for opt_label, require_arg in options:
# append '=' to options which require args
if require_arg:
opt_label += '='
print(opt_label)
# Exit code of the bash completion function is never passed back to
# the user, so it's safe to always exit with 0.
# For more details see #25420.
sys.exit(0)
def execute(self):
"""
Given the command-line arguments, figure out which subcommand is being
run, create a parser appropriate to that command, and run it.
"""
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
parser.add_argument('--settings')
parser.add_argument('--pythonpath')
parser.add_argument('args', nargs='*') # catch-all
with suppress(CommandError): # Ignore any option errors at this point.
options, args = parser.parse_known_args(self.argv[2:])
handle_default_options(options)
try:
settings.INSTALLED_APPS
except ImproperlyConfigured as exc:
self.settings_exception = exc
if settings.configured:
# Start the auto-reloading dev server even if the code is broken.
# The hardcoded condition is a code smell but we can't rely on a
# flag on the command class because we haven't located it yet.
if subcommand == 'runserver' and '--noreload' not in self.argv:
try:
autoreload.check_errors(django.setup)()
except Exception:
# The exception will be raised later in the child process
# started by the autoreloader. Pretend it didn't happen by
# loading an empty list of applications.
apps.all_models = defaultdict(OrderedDict)
apps.app_configs = OrderedDict()
apps.apps_ready = apps.models_ready = apps.ready = True
# Remove options not compatible with the built-in runserver
# (e.g. options for the contrib.staticfiles' runserver).
# Changes here require manually testing as described in
# #27522.
_parser = self.fetch_command('runserver').create_parser('django', 'runserver')
_options, _args = _parser.parse_known_args(self.argv[2:])
for _arg in _args:
self.argv.remove(_arg)
# In all other cases, django.setup() is required to succeed.
else:
django.setup()
self.autocomplete()
if subcommand == 'help':
if '--commands' in args:
sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
elif len(options.args) < 1:
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
# Special-cases: We want 'django-admin --version' and
# 'django-admin --help' to work, for backwards compatibility.
elif subcommand == 'version' or self.argv[1:] == ['--version']:
sys.stdout.write(django.get_version() + '\n')
elif self.argv[1:] in (['--help'], ['-h']):
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
"""Run a ManagementUtility."""
utility = ManagementUtility(argv)
utility.execute()
|
bsd-3-clause
| -3,926,000,415,800,669,700
| 40.540761
| 98
| 0.604501
| false
| 4.436158
| true
| false
| false
|
NCBI-Hackathons/Cancer_Epitopes_CSHL
|
src/imm_predict/fred2_background.py
|
1
|
3414
|
#!/usr/bin/env python2
"""
Given a protein FASTA file, compute immunogenicity for all possible 9-mer peptides.
Usage:
fred2_background.py [--alleles=<alleles_list> --top_N=N] --input=FILE_IN --output=FILE_OUT
fred2_background.py -h | --help
Arguments:
--input=FILE_IN Input fasta file, can be retrieved from:
ftp://ftp.ensembl.org/pub/release-86/fasta/homo_sapiens/pep/Homo_sapiens.GRCh38.pep.all.fa.gz
--output=FILE_OUT Output csv file
Options:
--top_N=N Number of top N proteins to compute the background for. [Default all].
--alleles=<alleles_list> Comma separated list of target alleles [Default use all]:
                            --alleles="B*27:20,B*83:01,A*32:15"
"""
# read in the vcf file
import sys, os
# sys.path.append("/home/avsec/Cancer_Epitopes_CSHL/src")
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../")
from Fred2.Core import Allele, Peptide, Protein, generate_peptides_from_proteins
from Fred2.IO import read_lines, read_fasta
from Fred2.EpitopePrediction import EpitopePredictorFactory
from imm_predict import fred2wrap
from subprocess import call
from urllib import urlretrieve
import pandas as pd
from docopt import docopt
# download and extract from ensemble
# save_directory = "/home/data/peptides/"
# filename = "Homo_sapiens.GRCh38.pep.all.fixheader.fa"
# final_file = save_directory + filename
# if not os.path.isfile(final_file):
# print("downloading the protein sequences")
# urlretrieve("ftp://ftp.ensembl.org/pub/release-86/fasta/homo_sapiens/pep/" + filename + ".gz",
# final_file + ".gz")
# call(["gunzip", final_file + ".gz"])
# download the fasta file
if __name__ == "__main__":
arguments = docopt(__doc__)
PEPTIDE_LENGTH = 9
# get arguments
if arguments["--alleles"]:
alleles = arguments["--alleles"].split(",")
else:
alleles = None
file_in = arguments["--input"]
file_out = arguments["--output"]
print("read fasta")
proteins = read_fasta(file_in, id_position=0, in_type=Protein)
# restrict to only top N proteins if provided
if arguments["--top_N"]:
Nargs = int(arguments["--top_N"])
N = min(Nargs, len(proteins))
proteins = proteins[0:N]
# parse peptide/protein information from Peptide list and Protein list
print("setup peptide/protein information table")
peptides = generate_peptides_from_proteins(proteins, PEPTIDE_LENGTH)
peptides_list = [peptide for peptide in peptides]
proteins_list = [peptide.proteins.keys()[0] for peptide in peptides_list]
peptides_str_list = [peptide.tostring() for peptide in peptides_list]
peptides_position_list = [peptide.proteinPos.items()[0][1][0] for peptide in peptides_list]
dt_peptides = pd.DataFrame({"peptide": peptides_str_list,
"peptide_position": peptides_position_list,
"transcript_id": proteins_list}
)
# predict the effect for each unique peptide
print("predict the effects")
res = fred2wrap.predict_peptide_effects(peptides_list, alleles = alleles)
res["peptide"] = [peptide.tostring() for peptide in res["peptide"]]
# map peptides back to proteins
full = pd.merge(dt_peptides, res, how = 'left', on = "peptide")
print("write to csv")
full.to_csv(file_out, index = False)
|
mit
| -4,794,265,332,252,624,000
| 39.164706
| 108
| 0.660808
| false
| 3.120658
| false
| false
| false
|
dougiefresh1233/MyoTurtle
|
Turtle1.py
|
1
|
1796
|
#!/usr/bin/python
''' DON'T TOUCH THESE FIRST LINES! '''
''' ============================== '''
from PyoConnect import *
myo = Myo(sys.argv[1] if len(sys.argv) >= 2 else None)
''' ============================== '''
''' OK, edit below to make your own fancy script ^.^ '''
# Edit here:
import time
import os
import turtle
myo.unlock("hold")
def onPoseEdge(pose,edge):
if(pose=="fist") and (edge=='on'):
myo.vibrate(1)
speed=myo.getPitch()*25
turtle.forward(speed)
#print('Fist')
elif(pose=="waveIn")and (edge=='on'):
myo.vibrate(1)
angle=abs(myo.getRoll())*25
turtle.left(angle)
#print('Wave In')
#print(myo.getRoll())
elif(pose=='waveOut')and (edge=='on'):
myo.vibrate(1)
angle=-(myo.getRoll()*25)
turtle.right(angle)
#print('Wave Out')
elif(pose=="fingersSpread")and (edge=='on'):
myo.vibrate(2)
myo.rotSetCenter();
#print('Spread')
elif(pose=='doubleTap')and (edge=='on'):
myo.vibrate(1)
loc=myo.getBox()
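                # getBox() is assumed to report one of nine arm-position boxes
                # (0 = centre, 1-8 around it); each box maps to a fixed turtle
                # coordinate below.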
if (loc==0):
coordx=0
coordy=0
elif(loc==1):
coordx=0
coordy=-100
elif(loc==2):
coordx=100
coordy=-100
elif(loc==3):
coordx=100
coordy=0
elif(loc==4):
coordx=100
coordy=100
elif(loc==5):
coordx=0
coordy=100
elif(loc==6):
coordx=-100
coordy=100
elif(loc==7):
coordx=-100
coordy=0
elif(loc==8):
coordx=-100
coordy=-100
turtle.setposition(coordx,coordy)
#print('Double Tap')
# Stop editing
# Comment out the events below that you are not using
#myo.onLock = onLock
#myo.onUnlock = onUnlock
myo.onPoseEdge = onPoseEdge
#myo.onPeriodic = onPeriodic
#myo.onWear = onWear
#myo.onUnwear = onUnwear
#myo.onEMG = onEMG
#myo.onBoxChange = onBoxChange
''' DON'T TOUCH BELOW THIS LINE! '''
''' ============================ '''
myo.connect()
while True:
myo.run()
myo.tick()
|
mit
| 5,862,014,549,615,499,000
| 19.179775
| 56
| 0.603007
| false
| 2.420485
| false
| false
| false
|
1200wd/1200wd_addons
|
account_bank_match/models/account_bank_match.py
|
1
|
10826
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Account Bank Match
# Copyright (C) 2016 May
# 1200 Web Development
# http://1200wd.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# TODO: Do not open old reconcile view when importing bank statements
from openerp.tools.translate import _
import logging
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
from openerp.exceptions import ValidationError
import re
from datetime import date, timedelta
_logger = logging.getLogger(__name__)
# Object to store reference patterns of orders and invoices to look for in statement lines
class AccountBankMatchReference(models.Model):
_name = "account.bank.match.reference"
_order = "sequence,name"
name = fields.Char(string="Reference Pattern", size=256,
help="Regular expression pattern to match reference")
model = fields.Selection(
[
('sale.order', 'Sale Order'),
('account.invoice', 'Invoice'),
('account.account', 'Account'),
('res.partner', 'Partner'),
], select=True, required=True
)
sequence = fields.Integer('Sequence')
active = fields.Boolean('Active', default=True, help='Set to inactive to disable Match Reference')
account_journal_id = fields.Many2one('account.journal', string='Journal Filter',
help='Match only applies to selected journal. Leave empty to match all journals.', ondelete="cascade")
score = fields.Integer("Score to Share", default=0, required=True, help="Total score to share among all matches of this rule. If 3 matches are found and the score to share is 30 then every match gets a score of 10.")
score_item = fields.Integer("Score per Match", default=0, required=True, help="Score for each match. Will be added to the shared score.")
company_id = fields.Many2one('res.company', string='Company', required=True, ondelete="cascade")
account_account_id = fields.Many2one('account.account', string="Resulting Account", ondelete="cascade",
domain="[('type', '=', 'other'), ('company_id', '=', company_id)]")
partner_bank_account = fields.Char(string="Partner Bank Account", size=64, help="Remote owner bank account number to match")
# FIXME: Disabled because it causes problems when matching with account_journal_id and empty names
# _sql_constraints = [
# ('reference_pattern_name_company_unique', 'unique (name, model, company_id)', 'Use reference pattern only once for each model and for each Company')
# ]
@api.one
@api.constrains('name')
def _check_name_format(self):
if self.name and re.search(r"\s", self.name):
raise ValidationError('Please enter reference pattern without any whitespace character such as space or tab')
@api.one
def copy(self, default=None):
default = dict(default or {})
default['name'] = _('%s_copy') % self.name
return super(AccountBankMatchReference, self).copy(default)
class AccountBankMatchReferenceCreate(models.TransientModel):
_name = "account.bank.match.reference.create"
name = fields.Char(string="Reference Pattern", size=256,
help="Regular expression pattern to match reference. Leave emtpy to only match on Bank Account")
partner_bank_account = fields.Char(string="Partner Bank Account", size=64, help="Remote owner bank account number to match")
account_journal_id = fields.Many2one('account.journal', string='Journal Filter', ondelete="cascade",
help='Match only applies to selected journal. Leave empty to match all journals.')
company_id = fields.Many2one('res.company', string='Company', required=True, ondelete="cascade")
account_account_id = fields.Many2one('account.account', string="Resulting Account", ondelete="cascade",
domain="[('type', 'in', ['other','receivable','liquidity','payable']), ('company_id', '=', company_id)]")
@api.multi
def action_match_reference_save(self):
data = {
'name': self.name,
'model': 'account.account',
'sequence': 50,
'account_journal_id': self.account_journal_id.id,
'score_item': 100,
'company_id': self.company_id.id,
'account_account_id': self.account_account_id.id,
'partner_bank_account': self.partner_bank_account,
}
self.env['account.bank.match.reference'].create(data)
# Object to store found matches to orders/invoices in statement lines
class AccountBankMatch(models.Model):
_name = "account.bank.match"
@api.model
def _get_default_writeoff(self):
configs = self.env['account.config.settings'].get_default_bank_match_configuration(self)
return configs.get('match_writeoff_journal_id') or 0
name = fields.Char(string="Reference", size=32, required=True,
help="Reference of match to order, invoice or account")
so_ref = fields.Char('Sale Order Reference')
model = fields.Selection(
[
('sale.order', 'Sale Order'),
('account.invoice', 'Invoice'),
('account.account', 'Account'),
], select=True, required=True
)
statement_line_id = fields.Many2one('account.bank.statement.line', string="Bank Statement Line",
required=True, index=True, ondelete="cascade")
description = fields.Char(string="Description", size=256)
score = fields.Integer("Score")
writeoff_journal_id = fields.Many2one('account.journal', string="Write-off Journal", ondelete="cascade",
default=_get_default_writeoff)
writeoff_difference = fields.Boolean("Write-off Payment Difference", default=True)
match_selected = fields.Boolean("Winning match", default=False)
# TODO: Add constraint statement_line_id and name must be unique
@api.multi
def cron_cleanup_matches(self):
try:
datestr = (date.today() - timedelta(days=7)).__str__()
self._cr.execute("DELETE FROM account_bank_match abm WHERE abm.create_date < %s", (datestr,))
except AttributeError:
return False
return True
@api.multi
def compute_payment_difference(self):
for m in self:
if m.model == 'account.invoice':
SIGN = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
invoice = self.env[m.model].search([('number', '=', m.name)])
if not invoice:
_logger.debug("1200wd - compute_payment_difference - invoice %s not found" % m.name)
m.payment_difference = 0
else:
direction = SIGN[invoice.type]
m.payment_difference = invoice.residual + (direction * m.statement_line_id.amount)
else:
# TODO: Add difference calculation for sale.order model
m.payment_difference = 0
payment_difference = fields.Float(string="Payment Difference", digits=dp.get_precision('Account'),
readonly=True, compute='compute_payment_difference')
@api.multi
def action_match_confirm(self):
self.ensure_one()
self.statement_line_id.show_errors = True
self.match_selected = True
vals = {}
if self.model == 'sale.order':
vals['so_ref'] = self.name
vals['name'] = '/'
elif self.model == 'account.invoice':
vals['name'] = self.name or '/'
elif self.model == 'account.account':
account_id = int(self.name) or 0
self.statement_line_id.create_account_move(account_id)
vals = self.statement_line_id.order_invoice_lookup(vals)
self.statement_line_id.write(vals)
if self.model != 'account.account':
self.statement_line_id.auto_reconcile(type='manual')
return True
# Object to store found matches to orders/invoices in statement lines
class AccountBankMatchRule(models.Model):
"""
    Example Rule:
    { 'name': "Sale Order amount match",
      'score_per_match': 100,
      'rule': "[('amount', '>', '@sale_order.amount-0.01@'), ('amount', '<', '@sale_order.amount+0.01@')]",
      'type': "sale.order" }
"""
_name = "account.bank.match.rule"
name = fields.Char(string="Title", size=256, required=True)
model = fields.Selection(
[
('sale.order', 'Sale Order'),
('account.invoice', 'Invoice'),
# ('account.move.line', 'Account Move'),
('res.partner', 'Partner'),
('account.bank.statement.line','Bank Statement Line'),
], select=True, required=True, help="Model used for search rule"
)
score = fields.Integer("Score to Share", default=0, required=True, help="Total score to share among all matches of this rule. If 3 matches are found and the score to share is 30 then every match gets a score of 10.")
score_item = fields.Integer("Score per Match", default=0, required=True, help="Score for each match. Will be added to the shared score.")
active = fields.Boolean('Active', default=True, help='Set to inactive to disable rule')
type = fields.Selection(
[
('extraction', 'Extraction'),
('bonus', 'Bonus'),
], select=True, required=True, default='extraction')
rule = fields.Text(string="Match Rule", required=True,
help="Rule to match a bank statement line to a sale order, invoice or account move. The rules should follow the Odoo style domain format.")
script = fields.Text(string="Run Script",
help="Run Python code after rule matched. Be carefull what you enter here, wrong code could damage your Odoo database")
company_id = fields.Many2one('res.company', string='Company', ondelete="cascade", required=False)
|
agpl-3.0
| -1,092,523,175,278,956,900
| 46.69163
| 220
| 0.626362
| false
| 4.189628
| false
| false
| false
|
teampopong/pokr.kr
|
pokr/scripts/meeting.py
|
1
|
5785
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import argparse
from datetime import datetime
import hashlib
import json
import logging
import re
from popong_models import Base
from popong_data_utils import guess_person
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from pokr.database import db_session, transaction
from pokr.models import Meeting, Person, Statement
from utils.command import Command
attendance_re = re.compile(r'(출|참)석\s*(감사위|의|위)원')
class MeetingCommand(Command):
__command__ = 'meeting'
class InsertMeetingCommand(Command):
__command__ = 'insert'
__parent__ = MeetingCommand
@classmethod
def init_parser_options(cls):
cls.parser.add_argument('files', type=argparse.FileType('r'), nargs='+')
cls.parser.add_argument('-r', dest='region_id', required=True)
@classmethod
def run(cls, files, region_id, **kwargs):
Base.query = db_session.query_property()
for file_ in files:
obj = json.load(file_)
insert_meetings(region_id, obj)
class UpdateMeetingCommand(Command):
__command__ = 'update'
__parent__ = MeetingCommand
@classmethod
def init_parser_options(cls):
cls.parser.add_argument('files', type=argparse.FileType('r'), nargs='+')
cls.parser.add_argument('-r', dest='region_id', required=True)
@classmethod
def run(cls, files, region_id, **kwargs):
Base.query = db_session.query_property()
for file_ in files:
obj = json.load(file_)
insert_meetings(region_id, obj, update=True)
def insert_meetings(region_id, obj, update=False):
try:
if isinstance(obj, dict):
insert_meeting(region_id, obj, update)
elif isinstance(obj, list):
for o in obj:
insert_meeting(region_id, o, update)
else:
raise Exception()
except KeyError, e:
logging.warn('KeyError: %s' % e)
def strhash(s, len=4):
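    # Reduces a string to a small int: the first `len` hex digits of its md5,
    # parsed base 16 (0..65535 with the default len=4). Used below to fold the
    # committee name into the numeric meeting id.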
if isinstance(s, unicode):
s = s.encode('utf-8')
return int(hashlib.md5(s).hexdigest()[:len], 16)
def create_or_get_meeting(session, region_id, obj, update=False):
date = datetime.strptime(obj['date'], '%Y-%m-%d').date()
session_id = obj['session_id'] or ''
meeting_id = obj['meeting_id'] or ''
id = int('{region_id}{assembly_id}{session_id}{meeting_id}{md5}'.format(
region_id=region_id,
assembly_id=obj['assembly_id'],
session_id=int(session_id) if session_id.isdigit() else strhash(session_id),
meeting_id=int(meeting_id) if meeting_id.isdigit() else strhash(meeting_id),
md5=strhash(obj['committee'])))
meeting = session.query(Meeting).filter_by(id=id).first()
if not update and meeting:
logging.info('Skip {id}'.format(id=id))
return
if not meeting:
meeting = Meeting(
id=id,
region_id=region_id,
committee=obj['committee'],
parliament_id=obj['assembly_id'],
session_id=session_id,
sitting_id=meeting_id,
date=date,
issues=obj['issues'],
url='',
pdf_url=obj['pdf'],
)
return meeting
def insert_meeting(region_id, obj, update=False):
with transaction() as session:
meeting = create_or_get_meeting(session, region_id, obj, update)
if not meeting:
return
session.add(meeting)
# 'meeting_attendee' table
attendee_names = get_attendee_names(obj)
attendees = list(get_attendees(meeting, attendee_names, session))
meeting.attendees = attendees
# clear the meeting's statements
for statement in meeting.statements:
session.delete(statement)
session.flush()
meeting.statements = []
# 'statement' table
statements = (stmt for stmt in obj['dialogue']
if stmt['type'] == 'statement')
for seq, statement in enumerate(statements):
item = create_statement(meeting, seq, statement, attendees)
session.add(item)
session.flush()
statement['person_id'] = item.person_id
statement['id'] = item.id
meeting.statements.append(item)
# Updated dialog field of meeting table
meeting.dialogue = obj['dialogue']
# TODO: votes = obj['votes']
def get_attendee_names(obj):
for key, val in obj.get('attendance', {}).iteritems():
if attendance_re.match(key):
return val['names']
logging.warning('Attendance not found {date}-{committee}'.format(**obj))
return []
def get_attendees(meeting, names, session=None):
for name in names:
try:
person = guess_person(name=name, assembly_id=meeting.parliament_id,
is_elected=True)
# FIXME: workaround different session binding problem
yield session.query(Person).filter_by(id=person.id).one()
except MultipleResultsFound as e:
logging.warning('Multiple result found for: %s' % name)
except NoResultFound as e:
logging.warning('No result found for: %s' % name)
def create_statement(meeting, seq, statement, attendees):
person = guess_attendee(attendees, statement['person'])
item = Statement(
meeting_id=meeting.id,
person_id=person.id if person else None,
sequence=seq,
speaker=statement['person'],
content=statement['content'],
)
return item
def guess_attendee(attendees, name):
for attendee in attendees:
if attendee.name in name:
return attendee
return None
|
apache-2.0
| 7,019,730,062,019,335,000
| 30.005376
| 89
| 0.608635
| false
| 3.744805
| false
| false
| false
|
sofianehaddad/gosa
|
pygosa/tests/ishigami_uc.py
|
1
|
1769
|
# -*- Python -*-
#
# @file ishigami_uc.py
# @brief Ishigami use case, probabilistic and functions models
#
# Copyright (C) 2017 Airbus-IMACS
#
# Written by Sofiane Haddad, haddad@imacs.polytechnique.fr
# Nabil Rachdi, nabil.rachdi@airbus.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
import openturns as ot
from math import pi
def define_ishigami_function():
"""
Model function of Ishigami use case
"""
# The Ishigami function
# Problem parameters
dimension = 3
a = 7.0
b = 0.1
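    # The symbolic expression built below is the classical Ishigami benchmark
    # f(x) = sin(x1) + a*sin(x2)^2 + b*x3^4*sin(x1) on [-pi, pi]^3.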
# Create the Ishigami function
input_variables = ["xi1","xi2","xi3"]
formula = ["sin(xi1) + (" + str(a) + ") * (sin(xi2)) ^ 2 + (" + str(b) + ") * xi3^4 * sin(xi1)"]
ishigami_model = ot.SymbolicFunction(input_variables, formula)
ishigami_model.setName("Ishigami")
return ishigami_model
def define_ishigami_distribution():
"""
Probabilistic model of Ishigami use case
"""
# Create the input distribution
marginals = 3 * [ot.Uniform(-pi, pi)]
distribution_ishigami = ot.ComposedDistribution(marginals)
return distribution_ishigami
|
lgpl-3.0
| 7,691,787,042,618,716,000
| 33.019231
| 100
| 0.66987
| false
| 3.363118
| false
| false
| false
|
kennel209/bilibili-download-helper
|
bilibili_download_helper.py
|
1
|
10476
|
#! /usr/bin/env python3
# coding: utf8
import sys
import os
import argparse
from helpers.utils import debug,set_debug
from helpers.utils import show_size
from helpers.url_generater import generate_urls, get_url_index
from helpers import downloaders
from helpers import bilibili_info_extractor
from helpers import video_process
from time import sleep
DEBUG=False
def download(baseurl,
range_=0,
start=0,
name_prefix="",
info_extract=None,
downloader=None,
dry_run=False,
src_format='flv',
to_ext='mp4',
titles=False,
retry_time=3):
    u'''Main function: generate the URLs in batch and fetch them with the chosen downloader'''
# correct start
if start <= 0:
start = 1
if range_ <= 0:
fixed_prefix = True
range_ = 1
else:
fixed_prefix = False
url_gen = generate_urls(baseurl,range_,start)
for url in url_gen:
# Flags
RETRY = retry_time
SUCC = False
while RETRY > 0 and SUCC == False:
# prevent overspeed
sleep(0.5)
info = info_extract(url,src_format)
            # Build the output file name depending on the situation
ext = info[1]
index = get_url_index(url)
if index is None:
print("ERROR in extract INDEX, EXIT")
sys.exit(1)
if titles:
title_name = titles[int(index)-1]
if name_prefix == "":
filename = index
elif fixed_prefix:
filename = name_prefix
else:
if titles:
filename = os.sep.join([name_prefix,title_name])
else:
filename = "_".join([name_prefix,index])
file_name = ".".join([filename,ext])
# first run
if RETRY == retry_time:
# print INFO
print("-"*40)
print("{} -> {}".format(url,file_name))
print("Split URL part: {}".format(len(info[0])))
if info[2]:
print("Size: {}".format(show_size(info[2])))
print("-"*40)
print("")
if len(info[0]) > 1:
                # Multiple segments
                # If the merged target file already exists,
                # treat it as downloaded and skip
if to_ext != ext:
file_name = ".".join([filename,to_ext])
if os.path.exists(file_name):
print("{} has downloaded, skip".format(file_name))
SUCC = True
continue
parts=[]
for part,part_url in enumerate(info[0]):
part_index = "[{:02d}]".format(part)
part_name = ".".join([filename,part_index,ext])
parts.append(part_name)
debug("URL part: {} -> {}".format(part_index,part_name))
if dry_run:
SUCC = True
continue
res = downloader.download(info[0],parts)
if not res:
RETRY -= 1
print("Retrying...{} Left".format(RETRY))
continue
else:
SUCC = True
# POST process, merge & convert
print("Try Merging: {}".format(file_name))
result = video_process.merge_video(ext,parts,filename,to_ext)
# successful merged, delete parts_file
if result:
for f in parts:
debug("removing {}".format(f))
os.remove(f)
else:
                # Single segment
                # NOTE: duplicate-file handling is left to the external downloader
                # FIXME: No parts?
if len(info[0]) == 0:
print("No urls found")
RETRY -= 1
print("Retrying...{} Left".format(RETRY))
continue
if dry_run:
SUCC = True
continue
# support auto ext converter, check downloaded file
if to_ext != ext:
new_name = ".".join([filename,to_ext])
if os.path.exists(new_name):
print("{} has downloaded, skip".format(new_name))
SUCC = True
continue
res = downloader.download(info[0],[file_name])
if not res:
RETRY -= 1
print("Retrying...{} Left".format(RETRY))
continue
else:
SUCC = True
# POST process, convert
if to_ext != ext:
old_name = file_name
file_name = ".".join([filename,to_ext])
print("Try converting: {} -> {}".format(old_name,file_name))
result = video_process.merge_video(ext,[old_name],filename,to_ext)
# successful converted
if result:
debug("removing {}".format(old_name))
os.remove(old_name)
# print INFO
print("")
print("-"*40)
print("Done: {}".format(file_name))
print("-"*40)
print("")
if SUCC == False:
# TODO: auto skip?
print("Retry used up. Please retry manully")
sys.exit(1)
def do_work(args):
    u'''Dispatch the parsed options and call the main download function'''
    # URL extractor and downloader backends
extractor = url_handler.handler
downloader = downloaders.DOWNLOADERS[args.downloader]
if args.auto:
# auto mode
titles,index = bilibili_info_extractor.extract_info(args.baseurl)
# print INFO
print("-"*40)
print("Title: {}".format(titles[0]))
print("Parts: {}".format(1 if index == 0 else index))
pages=[]
for p_i in range(index):
print("Part {:02}: {}".format(p_i+1,titles[p_i+1]))
if args.add_index_prefix:
pages.append("{:02} {}".format(p_i+1, titles[p_i+1]))
else:
pages.append(titles[p_i+1])
print("-"*40)
print("")
# add start selector
if index == 0:
# do not worry about single part
range_ = index
start = 1
else:
start = args.start
if args.range > 0:
range_ = args.range
else:
range_ = index-start+1
download(args.baseurl,
range_=range_,
start=start,
name_prefix=titles[0],
info_extract=extractor,
downloader=downloader,
dry_run=args.dry_run,
src_format=args.src_format,
to_ext=args.to_ext,
titles=pages,
retry_time=args.retry)
else:
# normal mode
download(args.baseurl,
range_=args.range,
start=args.start,
name_prefix=args.prefix,
info_extract=extractor,
downloader=downloader,
dry_run=args.dry_run,
src_format=args.src_format,
to_ext=args.to_ext,
retry_time=args.retry)
def main():
    u'''Parse command-line arguments'''
    parser = argparse.ArgumentParser(description=u"A small script to help download Bilibili videos via you-get & aria2")
parser.add_argument("baseurl",
help="bash to generate bilibili urls")
parser.add_argument("-a","--auto",
action="store_true",
help="automatic download all")
parser.add_argument("-f","--add-index-prefix",
action="store_true",
help="add index to Page auto naming")
parser.add_argument("-i","--range",
type=int,
default=0,
help="range to generate, 1 to index, 0 for current, no auto naming, default 0")
parser.add_argument("-s","--start",
type=int,
default=1,
help="start point, int, Default: +1")
parser.add_argument("-o","--prefix",
default="",
help="output filename prefix")
parser.add_argument("-g","--src-format",
default="flv",
help="prefer src format, may NOT work, ONLY FOR native backend, default flv, [flv, mp4]")
parser.add_argument("-t","--to-ext",
default="mp4",
help="output file extension, auto converted, default mp4")
parser.add_argument("-d","--downloader",
default="aria2",
help="external downloader, default aria2, [aria2,wget,fake]")
parser.add_argument("-n","--dry-run",
action="store_true",
help="just print info, do not actually downdloading")
parser.add_argument("-b","--backend",
default="native",
help="info extractor, default native, [native,youtube-dl,you-get]")
parser.add_argument("-r","--retry",
type=int,
default=3,
help="retry counts when download failed, default 3")
parser.add_argument("-v","--verbose",
action="store_true",
help="more info")
args = parser.parse_args()
assert args.start >= 1
assert args.range >= 0
# FIXME: quick hack
global url_handler
if args.backend == "you-get":
from helpers import you_get_json_handler as url_handler
elif args.backend == "youtube-dl":
from helpers import youtube_dl_handler as url_handler
else:
from helpers import native_json_handler as url_handler
#debug(repr(url_handler))
    # Debug-mode flag propagated to all helper modules
mod = [url_handler, downloaders, video_process, bilibili_info_extractor]
set_debug( args.verbose, mod)
debug(args)
do_work(args)
if __name__=="__main__":
main()
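# Example run (URL and flags are illustrative):
#   ./bilibili_download_helper.py -a -d aria2 "https://www.bilibili.com/video/av170001"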
|
gpl-3.0
| 6,170,788,742,443,519,000
| 31.211838
| 122
| 0.46441
| false
| 4.383213
| false
| false
| false
|
siavashk/pycpd
|
pycpd/emregistration.py
|
1
|
6331
|
from __future__ import division
import numpy as np
import numbers
from warnings import warn
def initialize_sigma2(X, Y):
(N, D) = X.shape
(M, _) = Y.shape
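    # Mean squared distance over every target/source point pair; used as the
    # initial isotropic variance of the Gaussian mixture when sigma2 is not given.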
diff = X[None, :, :] - Y[:, None, :]
err = diff ** 2
return np.sum(err) / (D * M * N)
class EMRegistration(object):
"""
Expectation maximization point cloud registration.
Attributes
----------
X: numpy array
NxD array of target points.
Y: numpy array
MxD array of source points.
TY: numpy array
MxD array of transformed source points.
sigma2: float (positive)
Initial variance of the Gaussian mixture model.
N: int
Number of target points.
M: int
Number of source points.
D: int
Dimensionality of source and target points
iteration: int
The current iteration throughout registration.
max_iterations: int
Registration will terminate once the algorithm has taken this
many iterations.
tolerance: float (positive)
Registration will terminate once the difference between
consecutive objective function values falls within this tolerance.
w: float (between 0 and 1)
Contribution of the uniform distribution to account for outliers.
Valid values span 0 (inclusive) and 1 (exclusive).
q: float
The objective function value that represents the misalignment between source
and target point clouds.
diff: float (positive)
The absolute difference between the current and previous objective function values.
P: numpy array
MxN array of probabilities.
P[m, n] represents the probability that the m-th source point
corresponds to the n-th target point.
Pt1: numpy array
Nx1 column array.
Multiplication result between the transpose of P and a column vector of all 1s.
P1: numpy array
Mx1 column array.
Multiplication result between P and a column vector of all 1s.
Np: float (positive)
The sum of all elements in P.
"""
def __init__(self, X, Y, sigma2=None, max_iterations=None, tolerance=None, w=None, *args, **kwargs):
if type(X) is not np.ndarray or X.ndim != 2:
raise ValueError(
"The target point cloud (X) must be at a 2D numpy array.")
if type(Y) is not np.ndarray or Y.ndim != 2:
raise ValueError(
"The source point cloud (Y) must be a 2D numpy array.")
if X.shape[1] != Y.shape[1]:
raise ValueError(
"Both point clouds need to have the same number of dimensions.")
if sigma2 is not None and (not isinstance(sigma2, numbers.Number) or sigma2 <= 0):
raise ValueError(
"Expected a positive value for sigma2 instead got: {}".format(sigma2))
if max_iterations is not None and (not isinstance(max_iterations, numbers.Number) or max_iterations < 0):
raise ValueError(
"Expected a positive integer for max_iterations instead got: {}".format(max_iterations))
elif isinstance(max_iterations, numbers.Number) and not isinstance(max_iterations, int):
warn("Received a non-integer value for max_iterations: {}. Casting to integer.".format(max_iterations))
max_iterations = int(max_iterations)
if tolerance is not None and (not isinstance(tolerance, numbers.Number) or tolerance < 0):
raise ValueError(
"Expected a positive float for tolerance instead got: {}".format(tolerance))
if w is not None and (not isinstance(w, numbers.Number) or w < 0 or w >= 1):
raise ValueError(
"Expected a value between 0 (inclusive) and 1 (exclusive) for w instead got: {}".format(w))
self.X = X
self.Y = Y
self.TY = Y
self.sigma2 = initialize_sigma2(X, Y) if sigma2 is None else sigma2
(self.N, self.D) = self.X.shape
(self.M, _) = self.Y.shape
self.tolerance = 0.001 if tolerance is None else tolerance
self.w = 0.0 if w is None else w
self.max_iterations = 100 if max_iterations is None else max_iterations
self.iteration = 0
self.diff = np.inf
self.q = np.inf
self.P = np.zeros((self.M, self.N))
self.Pt1 = np.zeros((self.N, ))
self.P1 = np.zeros((self.M, ))
self.Np = 0
def register(self, callback=lambda **kwargs: None):
self.transform_point_cloud()
while self.iteration < self.max_iterations and self.diff > self.tolerance:
self.iterate()
if callable(callback):
kwargs = {'iteration': self.iteration,
'error': self.q, 'X': self.X, 'Y': self.TY}
callback(**kwargs)
return self.TY, self.get_registration_parameters()
def get_registration_parameters(self):
raise NotImplementedError(
"Registration parameters should be defined in child classes.")
def update_transform(self):
raise NotImplementedError(
"Updating transform parameters should be defined in child classes.")
def transform_point_cloud(self):
raise NotImplementedError(
"Updating the source point cloud should be defined in child classes.")
def update_variance(self):
raise NotImplementedError(
"Updating the Gaussian variance for the mixture model should be defined in child classes.")
def iterate(self):
self.expectation()
self.maximization()
self.iteration += 1
    def expectation(self):
        # E-step: squared distances between every transformed source point and
        # every target point, shape (M, N).
        P = np.sum((self.X[None, :, :] - self.TY[:, None, :]) ** 2, axis=2)
        # Constant accounting for the uniform outlier component with weight w.
        c = (2 * np.pi * self.sigma2) ** (self.D / 2)
        c = c * self.w / (1 - self.w)
        c = c * self.M / self.N
        # Gaussian responsibilities, normalised per target point (columns).
        P = np.exp(-P / (2 * self.sigma2))
        den = np.sum(P, axis=0)
        den = np.tile(den, (self.M, 1))
        den[den == 0] = np.finfo(float).eps
        den += c
        self.P = np.divide(P, den)
        # Sufficient statistics consumed by the M-step.
        self.Pt1 = np.sum(self.P, axis=0)
        self.P1 = np.sum(self.P, axis=1)
        self.Np = np.sum(self.P1)
def maximization(self):
self.update_transform()
self.transform_point_cloud()
self.update_variance()
|
mit
| 3,315,548,524,244,623,000
| 33.407609
| 115
| 0.607013
| false
| 4.119063
| false
| false
| false
|
etingof/pysnmp
|
pysnmp/carrier/asyncore/dispatch.py
|
1
|
1809
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
from asyncore import loop
from asyncore import socket_map
from sys import exc_info
from time import time
from traceback import format_exception
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsyncoreDispatcher(AbstractTransportDispatcher):
def __init__(self):
# use own map for MT safety
self.__sockMap = {}
AbstractTransportDispatcher.__init__(self)
def getSocketMap(self):
return self.__sockMap
def setSocketMap(self, sockMap=socket_map):
self.__sockMap = sockMap
def registerTransport(self, transportDomain, transport):
AbstractTransportDispatcher.registerTransport(
self, transportDomain, transport)
transport.registerSocket(self.__sockMap)
def unregisterTransport(self, transportDomain):
self.getTransport(transportDomain).unregisterSocket(self.__sockMap)
AbstractTransportDispatcher.unregisterTransport(self, transportDomain)
def transportsAreWorking(self):
for transport in self.__sockMap.values():
if transport.writable():
return True
def runDispatcher(self, timeout=0.0):
while self.jobsArePending() or self.transportsAreWorking():
try:
loop(timeout or self.getTimerResolution(),
use_poll=True, map=self.__sockMap, count=1)
except KeyboardInterrupt:
raise
except Exception:
raise PySnmpError(
'poll error: %s' % ';'.join(format_exception(*exc_info())))
self.handleTimerTick(time())
|
bsd-2-clause
| 8,699,179,269,854,278,000
| 31.303571
| 79
| 0.666667
| false
| 4.380145
| false
| false
| false
|
thinkst/canarytokend
|
canarytokend/canarytoken.py
|
1
|
1648
|
import sys
import requests
class Canarytoken(object):
def _setup_token(self,module=None, email=None):
print "Generating a new canarytoken for {module}".format(module=module)
while True:
if not email:
print "Please enter an email address for receiving alerts: ",
self.email = sys.stdin.readline().strip()
else:
print "Please enter an email address for receiving alerts "+\
"({email}): ".format(email=email),
self.email = sys.stdin.readline().strip()
if len(self.email) == 0:
self.email = email
if len(self.email) > 0:
break
print "Please enter a short description to remind you about this token: ",
self.memo = sys.stdin.readline().strip()
def _request_token(self,):
resp = requests.post('http://canarytokens.org/generate',
params={'email':self.email,'memo':self.memo})
resp = resp.json()
if resp['Error'] is not None:
raise Exception('An error occurred requesting token: {errorcode}'\
.format(errorcode=resp['Error']))
if resp['Token'] == '':
            raise Exception('An error occurred requesting token: {error}'\
.format(error='No token was returned'))
self.dns_token = resp['Hostname']
self.canary_token = resp['Token']
def fetch_token(self, module=None, email=None):
self._setup_token(module=module, email=email)
self._request_token()
return self
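# Illustrative usage (module name and email address are hypothetical):
#   token = Canarytoken().fetch_token(module='dns', email='alerts@example.com')
#   print token.dns_token, token.canary_token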
|
bsd-3-clause
| -7,144,678,589,862,736,000
| 40.2
| 82
| 0.551578
| false
| 4.490463
| false
| false
| false
|
idlesign/django-sitetree
|
sitetree/tests/test_other.py
|
1
|
3701
|
from sitetree.settings import ALIAS_TRUNK
def test_stress(template_render_tag, template_context, template_strip_tags, build_tree, common_tree):
build_tree(
{'alias': 'othertree'},
[{'title': 'Root', 'url': '/', 'children': [
{'title': 'Other title', 'url': '/contacts/russia/web/private/'},
{'title': 'Title_{{ myvar }}', 'url': '/some/'}
]}],
)
context = template_context(context_dict={'myvar': 'myval'}, request='/contacts/russia/web/private/')
title = template_render_tag('sitetree', 'sitetree_page_title from "mytree"', context)
title_other = template_render_tag('sitetree', 'sitetree_page_title from "othertree"', context)
hint = template_render_tag('sitetree', 'sitetree_page_hint from "mytree"', context)
description = template_render_tag('sitetree', 'sitetree_page_description from "mytree"', context)
tree = template_strip_tags(template_render_tag('sitetree', 'sitetree_tree from "mytree"', context))
breadcrumbs = template_strip_tags(template_render_tag('sitetree', 'sitetree_breadcrumbs from "mytree"', context))
menu = template_render_tag('sitetree', f'sitetree_menu from "mytree" include "{ALIAS_TRUNK}"', context)
menu_other = template_render_tag('sitetree', f'sitetree_menu from "othertree" include "{ALIAS_TRUNK}"', context)
assert title == 'Private'
assert title_other == 'Other title'
assert hint == 'Private Area Hint'
assert description == 'Private Area Description'
assert breadcrumbs == 'Home|>|Russia|>|Web|>|Private'
assert template_strip_tags(menu) == 'Home|Users|Moderators|Ordinary|Articles|About cats|Good|Bad|Ugly|About dogs|' \
'Contacts|Russia|Web|Public|Private|Postal|Australia|Darwin|China'
assert 'current_item current_branch">Private' in menu
assert template_strip_tags(menu_other) == 'Root|Other title|Title_myval'
assert 'current_item current_branch">Other title' in menu_other
assert tree == 'Home|Users|Moderators|Ordinary|Articles|About cats|Good|Bad|Ugly|About dogs|About mice|Contacts|' \
'Russia|Web|Public|Private|Australia|Darwin|China'
def test_lazy_title(template_context):
from sitetree.sitetreeapp import LazyTitle, get_sitetree
assert LazyTitle('one') == 'one'
title = LazyTitle('here{% no_way %}there')
get_sitetree().current_page_context = template_context()
assert title == 'herethere'
def test_customized_tree_handler(template_context):
from sitetree.sitetreeapp import get_sitetree
assert get_sitetree().customized # see MySiteTree
def test_techincal_view_exception_unmasked(request_client, settings):
    # We expect that a customized 500 template using sitetree is handled as expected.
client = request_client(raise_exceptions=False)
response = client.get('/raiser/')
assert response.content == b'\n\n<ul>\n\t\n</ul>'
def test_urlquote(request_client, build_tree, template_render_tag, template_strip_tags, template_context, request_get):
build_tree(
{'alias': 'bogustree'},
[{'title': 'HOME', 'url': '/', 'children': [
{'title': 'Reports', 'url': '/reports', 'children': [
{'title': 'Devices {{ grp }}', 'urlaspattern': True, 'url': 'devices_grp grp'},
]},
]}],
)
    name = 'Устройство10x 45.9:(2)=S+5'  # handle both non-ASCII and special characters such as ) and (
context = template_context(context_dict={'grp': name}, request=f'/devices/{name}')
breadcrumbs = template_strip_tags(
template_render_tag('sitetree', 'sitetree_breadcrumbs from "bogustree"', context))
assert name in breadcrumbs
|
bsd-3-clause
| 5,155,784,559,334,490,000
| 40.943182
| 120
| 0.665673
| false
| 3.478794
| false
| false
| false
|
unho/translate
|
translate/tools/build_tmdb.py
|
1
|
3300
|
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Import units from translations files into tmdb."""
import logging
import os
from argparse import ArgumentParser
from translate.storage import factory, tmdb
logger = logging.getLogger(__name__)
class Builder:
def __init__(self, tmdbfile, source_lang, target_lang, filenames):
self.tmdb = tmdb.TMDB(tmdbfile)
self.source_lang = source_lang
self.target_lang = target_lang
for filename in filenames:
if not os.path.exists(filename):
logger.error("cannot process %s: does not exist", filename)
continue
elif os.path.isdir(filename):
self.handledir(filename)
else:
self.handlefile(filename)
self.tmdb.connection.commit()
def handlefile(self, filename):
try:
store = factory.getobject(filename)
except Exception as e:
logger.error(str(e))
return
# do something useful with the store and db
try:
self.tmdb.add_store(store, self.source_lang, self.target_lang, commit=False)
except Exception as e:
print(e)
print("File added:", filename)
def handlefiles(self, dirname, filenames):
for filename in filenames:
pathname = os.path.join(dirname, filename)
if os.path.isdir(pathname):
self.handledir(pathname)
else:
self.handlefile(pathname)
def handledir(self, dirname):
path, name = os.path.split(dirname)
if name in ["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"]:
return
entries = os.listdir(dirname)
self.handlefiles(dirname, entries)
def main():
parser = ArgumentParser()
parser.add_argument(
"-d", "--tmdb", dest="tmdb_file", default="tm.db",
help="translation memory database file (default: %(default)s)")
parser.add_argument(
"-s", "--import-source-lang", dest="source_lang", default="en",
help="source language of translation files (default: %(default)s)")
parser.add_argument(
"-t", "--import-target-lang", dest="target_lang",
help="target language of translation files", required=True)
parser.add_argument(
"files", metavar="input files", nargs="+"
)
args = parser.parse_args()
logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
Builder(args.tmdb_file, args.source_lang, args.target_lang, args.files)
if __name__ == '__main__':
main()
|
gpl-2.0
| -8,015,180,154,348,064,000
| 32
| 88
| 0.630909
| false
| 4.04908
| false
| false
| false
|
imh/gnss-analysis
|
gnss_analysis/constants.py
|
1
|
1307
|
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Ian Horn <ian@swiftnav.com>
# Bhaskar Mookerji <mookerji@swiftnav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
Shared constants.
"""
# Unit conversions (length, time, and 8-bit fractional cycles)
MAX_SATS = 32
MSEC_TO_SECONDS = 1000.
MM_TO_M = 1000.
CM_TO_M = 100.
Q32_WIDTH = 256.
# Constants and settings ported from Piksi firmware
# Solution constants
TIME_MATCH_THRESHOLD = 2e-3
OBS_PROPAGATION_LIMIT = 10e-3
MAX_AGE_OF_DIFFERENTIAL = 1.0
OBS_N_BUFF = 5
# Solution state
SOLN_MODE_LOW_LATENCY = 0
SOLN_MODE_TIME_MATCHED = 1
DGNSS_SOLUTION_MODE = SOLN_MODE_LOW_LATENCY
# RTK filter state
FILTER_FLOAT = 0
FILTER_FIXED = 1
dgnss_filter_state = FILTER_FLOAT
# RTK constants
MIN_SATS = 4
# Use Ephemeris from the last four hours
EPHEMERIS_TOL = 3600 * 4
# Constants from libswiftnav (include/libswiftnav/constants.h)
MAX_CHANNELS = 11
MAX_SATS = 32
GPS_C = 299792458.0
|
lgpl-3.0
| 2,165,102,039,560,454,100
| 23.660377
| 78
| 0.739097
| false
| 2.910913
| false
| false
| false
|
mjabri/holoviews
|
holoviews/plotting/bokeh/annotation.py
|
1
|
1889
|
import numpy as np
from ...element import HLine, VLine
from .element import ElementPlot, text_properties, line_properties
class TextPlot(ElementPlot):
style_opts = text_properties
_plot_method = 'text'
def get_data(self, element, ranges=None):
mapping = dict(x='x', y='y', text='text')
return (dict(x=[element.x], y=[element.y],
text=[element.text]), mapping)
def get_extents(self, element, ranges=None):
return None, None, None, None
class LineAnnotationPlot(ElementPlot):
style_opts = line_properties
_plot_method = 'segment'
def get_data(self, element, ranges=None):
plot = self.handles['plot']
if isinstance(element, HLine):
x0 = plot.x_range.start
y0 = element.data
x1 = plot.x_range.end
y1 = element.data
elif isinstance(element, VLine):
x0 = element.data
y0 = plot.y_range.start
x1 = element.data
y1 = plot.y_range.end
return (dict(x0=[x0], y0=[y0], x1=[x1], y1=[y1]),
dict(x0='x0', y0='y0', x1='x1', y1='y1'))
def get_extents(self, element, ranges=None):
return None, None, None, None
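# Illustrative note (not part of the original module): for an HLine the generated
# segment spans the plot's full x_range at the line's y value, e.g. HLine(3) with an
# x_range running from 0 to 10 yields a segment from (0, 3) to (10, 3); a VLine is
# handled symmetrically along the y_range.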
class SplinePlot(ElementPlot):
"""
Draw the supplied Spline annotation (see Spline docstring).
Does not support matplotlib Path codes.
"""
style_opts = line_properties
_plot_method = 'bezier'
def get_data(self, element, ranges=None):
plot = self.handles['plot']
verts = np.array(element.data[0])
xs, ys = verts[:, 0], verts[:, 1]
return (dict(x0=[xs[0]], y0=[ys[0]], x1=[xs[-1]], y1=[ys[-1]],
cx0=[xs[1]], cy0=[ys[1]], cx1=[xs[2]], cy1=[ys[2]]),
dict(x0='x0', y0='y0', x1='x1', y1='y1',
cx0='cx0', cx1='cx1', cy0='cy0', cy1='cy1'))
|
bsd-3-clause
| -4,677,658,642,166,076,000
| 28.984127
| 73
| 0.553732
| false
| 3.268166
| false
| false
| false
|
kingrodriguez/oni-optimizer
|
settings.py
|
1
|
1579
|
import json
import sys

# NOTE: 'debug' is referenced below but never defined in this file; a module-level
# default is assumed here so the function can run (assumption, not from the original).
debug = False

def settings(dictionary):
with open('data/settings.json') as data_file:
settingsjsonold = json.load(data_file)
settingsjsonnew = {}
answerwrong = True
while answerwrong:
settingsanswer = input('Run Settingsprogramm? (yes/no/exit)')
if settingsanswer == "exit":
sys.exit("aborted by user")
elif settingsanswer == "yes":
for key in dictionary:
answerwrong = True
if debug == True:
settingsjsonnew[key] = ">"
else:
while answerwrong:
settingsjsonnew[key] = input("Should {} be greater, less or equal 0? Currently: '{}' (<,>,=,exit)".format(key,">" if settingsjsonold[key] else "<"))
if settingsjsonnew[key] == "exit":
sys.exit("aborted by user")
elif settingsjsonnew[key] == "<":
settingsjsonnew[key] = "<"
answerwrong = False
elif settingsjsonnew[key] == ">":
settingsjsonnew[key] = ">"
answerwrong = False
elif settingsjsonnew[key] == "=":
settingsjsonnew[key] = "="
answerwrong = False
answerwrong = False
f = open('data/settings.json', 'w')
f.write(json.dumps(settingsjsonnew, ensure_ascii=True))
elif settingsanswer == "no":
answerwrong = False
|
gpl-3.0
| -1,534,550,448,599,295,200
| 46.878788
| 173
| 0.474351
| false
| 4.888545
| false
| false
| false
|
micmc/compta
|
lib/compta/server/api/compte.py
|
1
|
4506
|
#!/usr/bin/python
# -*- coding: utf8 -*-
""" Application to create server for compta """
from compta.server.api.bottle import response, request, abort
from json import dumps, loads
#from sqlalchemy import desc
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import IntegrityError
#from sqlalchemy.sql import func
#from compta.db.base import Base
from compta.db.compte import Compte
from compta.server.api.server import App
app = App().server
@app.get('/compte')
@app.get(r'/compte/<id:int>')
@app.get(r'/compte/<nom:re:[a-zA-Z\ ]+>')
@app.get(r'/banque/<banque_id:int>/compte')
@app.get(r'/banque/<banque_id:int>/compte/<id:int>')
@app.get(r'/banque/<banque_id:int>/compte/<nom:re:[a-zA-Z\ ]+>')
def list_compte(db, id=None, nom=None, banque_id=None):
""" List compte """
filter = {}
if id:
filter['id'] = id
elif nom:
filter['nom'] = nom
elif banque_id:
filter['banque_id'] = banque_id
else:
filter = App.get_filter(request.query.filter)
sort = App.get_sort(request.query.sort)
comptes = db.query(Compte)
if filter:
for column, value in filter.iteritems():
if not isinstance(value, list):
comptes = comptes.filter(getattr(Compte, column) == value)
else:
comptes = comptes.filter(getattr(Compte, column).in_(value))
if sort:
for column in sort:
comptes = comptes.order_by(getattr(Compte, column))
else:
comptes = comptes.order_by(Compte.nom)
try:
comptes = comptes.all()
except NoResultFound:
abort(404, "ID not found")
if not comptes:
abort(404, "ID not found")
list_comptes = []
attributs = App.get_attribut(request.query.attribut)
if attributs:
for compte in comptes:
dict_attributs = {}
for attribut in attributs:
dict_attributs[attribut] = getattr(compte, attribut)
list_comptes.append(dict_attributs)
else:
for compte in comptes:
list_comptes.append({'id': compte.id,
'nom': compte.nom,
'numero': compte.numero,
'cle': compte.cle,
'type': compte.type,
'archive': compte.archive,
'banque_id': compte.banque_id,
})
return dumps(list_comptes)
@app.post('/jtable/ListCompte')
def list_compte_jtable(db):
json_list = list_compte(db)
data_list = loads(json_list)
data = {
"Result": "OK",
"Records": data_list
}
return dumps(data)
@app.post('/compte')
def insert_compte(db):
""" Create a compte """
entity = App.check_data(Compte, request.body.readline())
if entity:
compte = Compte()
for column, value in entity.iteritems():
setattr(compte, column, value)
db.add(compte)
try:
db.commit()
except IntegrityError as ex:
abort(404, ex.args)
response.status = 201
response.headers["Location"] = "/compte/%s" % (compte.id,)
compte = loads(list_compte(db, compte.id))
return compte[0]
@app.put(r'/compte/<id:int>')
def update_compte(db, id=None):
""" Update information for a compte """
entity = App.check_data(Compte, request.body.readline())
if entity:
try:
compte = db.query(Compte).\
filter(Compte.id == id).\
one()
except NoResultFound:
abort(404, "ID not found")
for column, value in entity.iteritems():
if column == 'archive':
setattr(compte,
"archive",
App.convert_value(value)
)
else:
setattr(compte, column, value)
try:
db.commit()
compte = loads(list_compte(db, compte.id))
return compte[0]
except IntegrityError as ex:
abort(404, ex.args)
@app.delete(r'/compte/<id:int>')
def delete_compte(db, id=None):
""" Delete a compte """
try:
compte = db.query(Compte).\
filter(Compte.id == id).\
one()
except NoResultFound:
abort(404, "ID not found")
db.delete(compte)
db.commit()
return dumps({'id': id})
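# Illustrative route summary (not part of the original module; derived from the
# decorators above):
#   GET    /compte                 list all accounts, ordered by name
#   GET    /compte/<id>            fetch one account by id (also /compte/<name>)
#   GET    /banque/<id>/compte     list the accounts of one bank
#   POST   /compte                 create an account from a JSON body
#   PUT    /compte/<id>            update an account
#   DELETE /compte/<id>            delete an account and return its id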
|
mit
| -1,812,105,595,531,268,000
| 29.445946
| 76
| 0.550155
| false
| 3.57619
| false
| false
| false
|
quantling/pyndl
|
setup.py
|
1
|
2509
|
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
import sys
# bootstrap numpy
# https://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
pkg = __import__('pyndl')
author = pkg.__author__
email = pkg.__author_email__
version = pkg.__version__
classifiers = pkg.__classifiers__
description = pkg.__description__
def load_requirements(fn):
"""Read a requirements file and create a list that can be used in setup."""
with open(fn, 'r') as f:
return [x.rstrip() for x in list(f) if x and not x.startswith('#')]
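# Illustrative example (not part of the original setup script): a requirements.txt
# whose lines are 'numpy', '# build helpers' and 'cython>=0.28' yields
# ['numpy', 'cython>=0.28']; comment lines are dropped and trailing newlines stripped.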
ndl_parallel = Extension("pyndl.ndl_parallel", ["pyndl/ndl_parallel.pyx"])
ndl_openmp = Extension("pyndl.ndl_openmp", ["pyndl/ndl_openmp.pyx"],
extra_compile_args=['-fopenmp'], extra_link_args=['-fopenmp'])
corr_parallel = Extension("pyndl.correlation_openmp", ["pyndl/correlation_openmp.pyx"],
extra_compile_args=['-fopenmp'], extra_link_args=['-fopenmp'])
# by giving ``cython`` as ``install_requires`` this will be ``cythonized``
# automagically
ext_modules = []
if sys.platform.startswith('linux'):
ext_modules = [ndl_parallel, ndl_openmp, corr_parallel]
elif sys.platform.startswith('win32'):
ext_modules = [ndl_parallel] # skip openmp installation on windows for now
elif sys.platform.startswith('darwin'):
ext_modules = [ndl_parallel] # skip openmp installation on macos for now
setup(
name='pyndl',
version=version,
license='MIT',
description=description,
long_description=open('README.rst', encoding="utf-8").read(),
author=author,
author_email=email,
url='https://github.com/quantling/pyndl',
classifiers=classifiers,
platforms='Linux',
packages=['pyndl'],
setup_requires=['numpy', 'cython'],
install_requires=load_requirements('requirements.txt'),
extras_require={
'tests': [
'pylint',
'pytest',
'pycodestyle'],
'docs': [
'sphinx >= 1.4',
'sphinx_rtd_theme',
'numpydoc',
'easydev==0.9.35']},
ext_modules=ext_modules,
cmdclass={'build_ext': build_ext}
)
|
mit
| 1,888,463,503,035,786,200
| 32.453333
| 94
| 0.644081
| false
| 3.538787
| false
| false
| false
|
patrickwestphal/owlapy
|
owlapy/vocab/namespaces.py
|
1
|
4322
|
from enum import Enum
import owlapy.model
class Status(Enum):
LEGACY = 0,
IN_USE = 1
class BuiltIn(Enum):
BUILT_IN = 0,
NOT_BUILT_IN = 1
class Namespaces(Enum):
OWL2 = ("owl2", "http://www.w3.org/2006/12/owl2#", Status.LEGACY)
OWL11XML = ("owl11xml", "http://www.w3.org/2006/12/owl11-xml#",
Status.LEGACY)
OWL11 = ("owl11", "http://www.w3.org/2006/12/owl11#", Status.LEGACY)
OWL = ("owl", "http://www.w3.org/2002/07/owl#", Status.IN_USE)
RDFS = ("rdfs", "http://www.w3.org/2000/01/rdf-schema#", Status.IN_USE)
RDF = ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#", Status.IN_USE)
XSD = ("xsd", "http://www.w3.org/2001/XMLSchema#", Status.IN_USE)
XML = ("xml", "http://www.w3.org/XML/1998/namespace")
SWRL = ("swrl", "http://www.w3.org/2003/11/swrl#")
SWRLB = ("swrlb", "http://www.w3.org/2003/11/swrlb#")
SKOS = ("skos", "http://www.w3.org/2004/02/skos/core#")
GRDDL = ("grddl", "http://www.w3.org/2003/g/data-view#")
MA = ("ma", "http://www.w3.org/ns/ma-ont#")
PROV = ("prov", "http://www.w3.org/ns/prov#")
RDFA = ("rdfa", "http://www.w3.org/ns/rdfa#")
RIF = ("rif", "http://www.w3.org/2007/rif#")
R2RML = ("rr", "http://www.w3.org/ns/r2rml#")
SD = ("sd", "http://www.w3.org/ns/sparql-service-description#")
SKOSXL = ("skosxl", "http://www.w3.org/2008/05/skos-xl#")
POWDER = ("wdr", "http://www.w3.org/2007/05/powder#")
VOID = ("void", "http://rdfs.org/ns/void#")
POWDERS = ("wdrs", "http://www.w3.org/2007/05/powder-s#")
XHV = ("xhv", "http://www.w3.org/1999/xhtml/vocab#")
ORG = ("org", "http://www.w3.org/ns/org#")
GLDP = ("gldp", "http://www.w3.org/ns/people#")
CNT = ("cnt", "http://www.w3.org/2008/content#")
DCAT = ("dcat", "http://www.w3.org/ns/dcat#")
EARL = ("earl", "http://www.w3.org/ns/earl#")
HT = ("ht", "http://www.w3.org/2006/http#")
PTR = ("ptr", "http://www.w3.org/2009/pointers#")
CC = ("cc", "http://creativecommons.org/ns#")
CTAG = ("ctag", "http://commontag.org/ns#")
DCTERMS = ("dcterms", "http://purl.org/dc/terms/")
DC = ("dc", "http://purl.org/dc/elements/1.1/")
FOAF = ("foaf", "http://xmlns.com/foaf/0.1/")
GR = ("gr", "http://purl.org/goodrelations/v1#")
ICAL = ("ical", "http://www.w3.org/2002/12/cal/icaltzd#")
OG = ("og", "http://ogp.me/ns#")
REV = ("rev", "http://purl.org/stuff/rev#")
SIOC = ("sioc", "http://rdfs.org/sioc/ns#")
VCARD = ("vcard", "http://www.w3.org/2006/vcard/ns#")
SCHEMA = ("schema", "http://schema.org/")
GEO = ("geo", "http://www.w3.org/2003/01/geo/wgs84_pos#")
SC = ("sc", "http://purl.org/science/owl/sciencecommons/")
FB = ("fb", "http://rdf.freebase.com/ns/", Status.LEGACY)
GEONAMES = ("geonames", "http://www.geonames.org/ontology#", Status.LEGACY)
DBPEDIA = ("dbpedia", "http://dbpedia.org/resource/")
DBP = ("dbp", "http://dbpedia.org/property/")
DBO = ("dbo", "http://dbpedia.org/ontology/")
YAGO = ("yago", "http://dbpedia.org/class/yago/")
DOAP = ("doap", "http://usefulinc.com/ns/doap#")
def __init__(self, prefix, ns, status=Status.IN_USE,
built_in=BuiltIn.NOT_BUILT_IN):
"""
:param prefix: A short, human-readable, prefix name that matches, and
expands to the full IRI.
:param ns: The prefix IRI which matches the prefix name.
:param status: one of the values of the Status enum
:param built_in: one of the values of the BuiltIn enum
"""
self.prefix = prefix
self.prefix_name = prefix
self.ns = ns
self.prefix_iri = ns
self.status = status
self.built_in = built_in
def __str__(self):
return self.ns
def is_in_use(self):
return self.status == Status.IN_USE
def is_built_in(self):
return self.built_in == BuiltIn.BUILT_IN
def in_namespace(self, ns_str_or_iri):
"""
:param ns_str_or_iri: a string or owlapy.model.IRI to check
:return: boolean indicating whether str equals this namespace
"""
if isinstance(ns_str_or_iri, owlapy.model.IRI):
# return ns_str_or_iri.
return ns_str_or_iri.namespace == self.ns
else:
return ns_str_or_iri == self.ns
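# Illustrative usage (not part of the original module):
#   Namespaces.OWL.ns                                              -> 'http://www.w3.org/2002/07/owl#'
#   Namespaces.OWL.in_namespace('http://www.w3.org/2002/07/owl#')  -> True
#   Namespaces.OWL2.is_in_use()                                    -> False (marked Status.LEGACY above)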
|
gpl-3.0
| -180,400,546,160,131,170
| 39.392523
| 79
| 0.568024
| false
| 2.577221
| false
| false
| false
|
Bilingual-Annotation-Task-Force/Scripts
|
Retagger.py
|
1
|
1875
|
# This code modifies a language-identified gold standard from a 2-tag system (Eng|Span) to a 3-tag system (Eng|Span|Other)
#INPUT csv file with TOKEN, POS, LANG
##Lang = Eng | Span
##delimiter= , quotechar= "
#OUTPUT csv with TOKEN, POS, Lang
##Lang = Eng | Span | Other
##delimiter= , quotechar= "
##file name = input_file_name + "-retagged"
###USER input###
#select directory
directory = "/Users/jacqueline/Google Drive/Bullock Serigos Toribio/Bilingual Annotation/Data/"
#select input file (must be within the directory)
input_filename = "Solorio_GoldSt_7k.csv"
import os
import csv
from string import punctuation
import codecs
#change directory
os.chdir(directory)
#name for output file
output_filename = input_filename.replace(".csv", "-retagged.csv")
with open(input_filename, 'rU') as input, open(output_filename, 'wb') as output:
corpus_input = csv.reader(input, delimiter=',', quotechar='"', dialect=csv.excel_tab)
corpus_output = csv.writer(output, delimiter=',', quotechar='"')
for row in corpus_input:
if row[0] in punctuation:
row[2] = "Other"
if row[0].startswith("est"):
for x in row:
print x.decode("utf-8")
corpus_output.writerow(row)
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
###only successful printing of text in terminal
#save excel file as UTF 16 txt file
#open with: f = codecs.open("/Users/jacqueline/Desktop/Solorio_GoldSt_7k.txt", encoding = "latin_1").readlines()
|
mit
| 2,357,846,533,083,225,000
| 33.090909
| 120
| 0.693867
| false
| 3.194208
| false
| false
| false
|
hellodmp/segmentnet
|
utilities.py
|
1
|
5019
|
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
#interp_t_values = np.zeros_like(source,dtype=float)
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
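# Illustrative usage sketch (not part of the original module):
#   import numpy as np
#   source = np.random.rand(64, 64)         # image whose pixel values get remapped
#   template = np.random.rand(128, 128)     # image whose histogram is the target; shape may differ
#   matched = hist_match(source, template)  # same shape as source, template-like histogram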
def sitk_show(nda, title=None, margin=0.0, dpi=40):
figsize = (1 + margin) * nda.shape[0] / dpi, (1 + margin) * nda.shape[1] / dpi
extent = (0, nda.shape[1], nda.shape[0], 0)
fig = plt.figure(figsize=figsize, dpi=dpi)
ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])
for k in range(0, nda.shape[2]):
print "printing slice " + str(k)
ax.imshow(np.squeeze(nda[:, :, k]),cmap ='gray', extent=extent, interpolation=None)
plt.draw()
plt.pause(0.3)
#plt.waitforbuttonpress()
def computeQualityMeasures(lP,lT):
quality=dict()
labelPred=sitk.GetImageFromArray(lP, isVector=False)
labelTrue=sitk.GetImageFromArray(lT, isVector=False)
hausdorffcomputer=sitk.HausdorffDistanceImageFilter()
hausdorffcomputer.Execute(labelTrue>0.5,labelPred>0.5)
quality["avgHausdorff"]=hausdorffcomputer.GetAverageHausdorffDistance()
quality["Hausdorff"]=hausdorffcomputer.GetHausdorffDistance()
dicecomputer=sitk.LabelOverlapMeasuresImageFilter()
dicecomputer.Execute(labelTrue>0.5,labelPred>0.5)
quality["dice"]=dicecomputer.GetDiceCoefficient()
return quality
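# Illustrative usage sketch (not part of the original module): lP and lT are binary
# label volumes as numpy arrays (prediction and ground truth):
#   quality = computeQualityMeasures(predicted_labels, true_labels)
#   # quality['dice'], quality['Hausdorff'], quality['avgHausdorff']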
def produceRandomlyDeformedImage(image, label, numcontrolpoints, stdDef):
sitkImage=sitk.GetImageFromArray(image, isVector=False)
sitklabel=sitk.GetImageFromArray(label, isVector=False)
transfromDomainMeshSize=[numcontrolpoints]*sitkImage.GetDimension()
tx = sitk.BSplineTransformInitializer(sitkImage,transfromDomainMeshSize)
params = tx.GetParameters()
paramsNp=np.asarray(params,dtype=float)
paramsNp = paramsNp + np.random.randn(paramsNp.shape[0])*stdDef
paramsNp[0:int(len(params)/3)]=0 #remove z deformations! The resolution in z is too bad
params=tuple(paramsNp)
tx.SetParameters(params)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(sitkImage)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(0)
resampler.SetTransform(tx)
resampler.SetDefaultPixelValue(0)
outimgsitk = resampler.Execute(sitkImage)
outlabsitk = resampler.Execute(sitklabel)
outimg = sitk.GetArrayFromImage(outimgsitk)
outimg = outimg.astype(dtype=np.float32)
outlbl = sitk.GetArrayFromImage(outlabsitk)
outlbl = (outlbl>0.5).astype(dtype=np.float32)
return outimg,outlbl
def produceRandomlyTranslatedImage(image, label):
sitkImage = sitk.GetImageFromArray(image, isVector=False)
sitklabel = sitk.GetImageFromArray(label, isVector=False)
itemindex = np.where(label > 0)
randTrans = (0,np.random.randint(-np.min(itemindex[1])/2,(image.shape[1]-np.max(itemindex[1]))/2),np.random.randint(-np.min(itemindex[0])/2,(image.shape[0]-np.max(itemindex[0]))/2))
translation = sitk.TranslationTransform(3, randTrans)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(sitkImage)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(0)
resampler.SetTransform(translation)
outimgsitk = resampler.Execute(sitkImage)
outlabsitk = resampler.Execute(sitklabel)
outimg = sitk.GetArrayFromImage(outimgsitk)
outimg = outimg.astype(dtype=float)
outlbl = sitk.GetArrayFromImage(outlabsitk) > 0
outlbl = outlbl.astype(dtype=float)
return outimg, outlbl
|
gpl-3.0
| 7,049,072,567,237,776,000
| 35.107914
| 185
| 0.706316
| false
| 3.321641
| false
| false
| false
|
jenfly/atmos-read
|
scripts/fram/run1_2014.py
|
1
|
9884
|
"""
3-D variables:
--------------
Instantaneous:
['U', 'V', 'OMEGA', 'T', 'QV', 'H']
Time-average:
['DUDTANA']
2-D variables:
--------------
Time-average surface fluxes:
['PRECTOT', 'EVAP', 'EFLUX', 'HFLUX', 'QLML', 'TLML']
Time-average vertically integrated fluxes:
['UFLXQV', 'VFLXQV', 'VFLXCPT', 'VFLXPHI']
Instantaneous vertically integrated fluxes:
['TQV']
Single-level atmospheric variables:
['PS', 'SLP']
"""
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import xray
import numpy as np
import collections
import time
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
# Download daily data
#version = 'merra'
#years = range(2006, 2016, 2)
version = 'merra2'
years = [2014]
months = np.arange(10, 13)
datadir = atm.homedir() + 'eady/datastore/' + version + '/daily/'
filestr = datadir + '%s_%s.nc'
varnms = ['U', 'V', 'OMEGA', 'T', 'QV', 'H', 'DUDTANA', 'PS',
'UFLXCPT', 'VFLXCPT', 'UFLXPHI', 'VFLXPHI']
latlon=(-90, 90, 40, 120)
plevs = [1000,925,850,775,700,600,500,400,300,250,200,150,100,70,50,30,20]
sector_lons=(60, 100)
dp_vars = []
def group_variables(varnms, version):
"""Group variables together according to URL."""
def get_group(varnm, version):
opts = merra.url_opts(varnm, version)
group = '%s%s_%s_%s' % (opts['res'], opts['vertical'], opts['kind'],
opts['time_kind'])
return group
groups = {nm : get_group(nm, version) for nm in varnms}
keys = set(groups.values())
vargroups = collections.defaultdict(list)
for nm, key in groups.iteritems():
vargroups[key] += [nm]
return vargroups
def get_filename(var, version, datadir, year, month=None, day=None):
"""Return a filename for a variable."""
filenm = datadir + version + '_' + var.attrs['filestr'] + '_%d' % year
if month is not None:
filenm = filenm + '%02d' % month
if day is not None:
filenm = filenm + '%02d' % day
filenm = filenm + '.nc'
return filenm
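# Illustrative example (not part of the original script; the filestr value is
# hypothetical): for a variable whose attrs['filestr'] is, say, 'U200_sector', with
# version 'merra2', year 2014 and no month/day, this builds
#   <datadir>merra2_U200_sector_2014.nc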
def latlon_filestr(lat1, lat2, lon1, lon2):
"""Return nicely formatted string for lat-lon range."""
latstr = atm.latlon_str(lat1, lat2, 'lat')
lonstr = atm.latlon_str(lon1, lon2, 'lon')
return lonstr + '_' + latstr
def latlon_data(var, lat1, lat2, lon1, lon2, plev=None):
"""Extract lat-lon subset of data."""
name = var.name
varnm = name
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
if plev is not None:
name = name + '%d' % plev
subset_dict['plev'] = (plev, plev)
var = atm.subset(var, subset_dict, copy=False, squeeze=True)
var.name = name
var.attrs['filestr'] = '%s_%s' % (name, latlonstr)
var.attrs['varnm'] = varnm
return var
def pgradient(var, lat1, lat2, lon1, lon2, plev):
"""Return d/dp of a lat-lon variable."""
pwidth = 100
p1, p2 = plev - pwidth, plev + pwidth
var = atm.subset(var, {'lat' : (lat1, lat2), 'lon' : (lon1, lon2),
'plev' : (p1, p2)}, copy=False)
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
attrs = var.attrs
pname = atm.get_coord(var, 'plev', 'name')
pdim = atm.get_coord(var, 'plev', 'dim')
pres = var[pname]
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dvar_dp = atm.gradient(var, pres, axis=pdim)
dvar_dp = atm.subset(dvar_dp, {pname : (plev, plev)}, copy=False,
squeeze=True)
varnm = 'D%sDP' % var.name
name = '%s%d' % (varnm, plev)
dvar_dp.name = name
attrs['long_name'] = 'd/dp of ' + var.attrs['long_name']
attrs['standard_name'] = 'd/dp of ' + var.attrs['standard_name']
attrs['units'] = ('(%s)/Pa' % attrs['units'])
attrs[pname] = plev
attrs['filestr'] = '%s_%s' % (name, latlonstr)
attrs['varnm'] = varnm
dvar_dp.attrs = attrs
return dvar_dp
def sector_mean(var, lon1, lon2):
"""Return the sector mean of a variable."""
name = var.name
lonstr = atm.latlon_str(lon1, lon2, 'lon')
if (lon2 - lon1) == 360:
lon1, lon2 = None, None
name_out = name + '_ZON'
else:
name_out = name + '_SEC'
varbar = atm.dim_mean(var, 'lon', lon1, lon2)
varbar.name = name_out
varbar.attrs['varnm'] = name
varbar.attrs['lonstr'] = lonstr
varbar.attrs['filestr'] = '%s_sector_%s' % (name, lonstr)
return varbar
def var_calcs(var, jday=0, latlon=(-90, 90, 40, 120), plevs=(850, 200),
dp_vars=['U', 'OMEGA'], sector_lons=(60, 100)):
"""Process a single variable from a single day."""
lat1, lat2, lon1, lon2 = latlon
opts = merra.url_opts(var.name)
vertical = opts['vertical']
if vertical == 'X':
plevs = [None]
if dp_vars is not None and var.name in dp_vars:
dp = True
else:
dp = False
data = xray.Dataset()
# Lat-lon data
print('Lat-lon data')
for plev in plevs:
print('plev', plev)
var_out = latlon_data(var, lat1, lat2, lon1, lon2, plev)
data[var_out.name] = var_out
if dp:
print('Computing d/dp')
var_out = pgradient(var, lat1, lat2, lon1, lon2, plev)
data[var_out.name] = var_out
# Sector and zonal mean data
print('Computing zonal mean')
var_out = sector_mean(var, 0, 360)
data[var_out.name] = var_out
if vertical == 'P':
print('Computing sector mean')
var_out = sector_mean(var, sector_lons[0], sector_lons[1])
data[var_out.name] = var_out
# Compute daily data from subdaily data
nperday = len(atm.get_coord(data, 'time'))
data = atm.daily_from_subdaily(data, nperday, dayname='day',
dayvals=[jday])
# Make sure output is in a Dataset
if isinstance(data, xray.DataArray):
data = data.to_dataset()
return data
def all_data(ds, varnms, datadir, year, month, day, jday, calc_kw, nc_kw):
"""Process selected variables in a dataset and save each to file."""
files = {}
for nm in varnms:
print(nm)
data = var_calcs(ds[nm], jday, **calc_kw)
filenm = '%s%s_%d%02d%02d.nc' % (datadir, nm, year, month, day)
print('Saving to ' + filenm)
atm.disptime()
data.to_netcdf(filenm, **nc_kw)
files[nm] = filenm
return files
def read_url(url, varnms, datadir, year, month, day, jday, calc_kw, nc_kw):
"""Open url and process selected variables."""
# Number of times to attempt opening url (in case of server problems)
NMAX = 3
# Wait time (seconds) between attempts
WAIT = 5
print('Loading ' + url)
attempt = 0
while attempt < NMAX:
try:
with xray.open_dataset(url) as ds:
files = all_data(ds, varnms, datadir, year, month, day, jday,
calc_kw, nc_kw)
attempt = NMAX
except RuntimeError as err:
attempt += 1
if attempt < NMAX:
print('Error reading file. Attempting again in %d s' % WAIT)
time.sleep(WAIT)
else:
raise err
return files
def read_groups(url_dict, vargroups, datadir, year, month, day, jday, calc_kw,
nc_kw):
"""Process variables for a day, grouped by URL."""
files = {}
for key, varids in vargroups.iteritems():
url = url_dict[key]['%d%02d%02d' % (year, month, day)]
datafiles = read_url(url, varids, datadir, year, month, day, jday,
calc_kw, nc_kw)
files.update(datafiles)
return files
def get_url_dict(year, month, version, vargroups):
"""Return dict of urls for the variable groups."""
url_dict = {}
for key in vargroups:
nm = vargroups[key][0]
url_dict[key] = merra.get_urls(year, month, version, nm)
return url_dict
# Initial setup
vargroups = group_variables(varnms, version)
calc_kw = {'latlon' : latlon, 'plevs' : plevs, 'dp_vars' : dp_vars,
'sector_lons' : sector_lons}
nc_kw = { 'merra2' : {'format' : 'NETCDF4_classic', 'engine' : 'netcdf4'},
'merra' : {'format' : None, 'engine' : None}}[version]
# Read daily data and save to daily files
for year in years:
dailyfiles = collections.defaultdict(list)
for month in months:
url_dict = get_url_dict(year, month, version, vargroups)
days = range(1, atm.days_this_month(year, month) + 1)
jdays = atm.season_days(atm.month_str(month), atm.isleap(year))
for day, jday in zip(days, jdays):
files = read_groups(url_dict, vargroups, datadir, year, month, day,
jday, calc_kw, nc_kw)
for nm in files:
dailyfiles[nm] += [files[nm]]
# Concatenate daily data to yearly files
for year in years:
dates = []
dailyfiles = {}
for month in range(1, 13):
days = range(1, atm.days_this_month(year, month) + 1)
dates = dates + ['%d%02d%02d' % (year, month, d) for d in days]
for nm in varnms:
dailyfiles[nm] = [filestr % (nm, date) for date in dates]
# Consolidate daily files into yearly files and delete daily files
for nm in dailyfiles:
data = atm.load_concat(dailyfiles[nm], concat_dim='day')
for varnm in data.data_vars:
var = data[varnm]
filenm = get_filename(var, version, datadir, year)
var.name = var.attrs.get('varnm', varnm)
print('Saving to ' + filenm)
atm.save_nc(filenm, var)
#print('Deleting daily files')
#for filenm in dailyfiles[nm]:
# print(filenm)
# os.remove(filenm)
|
mit
| -3,528,073,911,942,544,400
| 32.733788
| 79
| 0.578207
| false
| 3.044041
| false
| false
| false
|
ypid/subuser
|
logic/subuserlib/classes/installedImages.py
|
1
|
2797
|
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
"""
This is the set of installed images that belongs to a given user.
"""
#external imports
import os,json,collections,sys
#internal imports
import subuserlib.classes.installedImage,subuserlib.classes.fileBackedObject, subuserlib.classes.userOwnedObject
class InstalledImages(dict,subuserlib.classes.userOwnedObject.UserOwnedObject,subuserlib.classes.fileBackedObject.FileBackedObject):
def reloadInstalledImagesList(self):
""" Reload the installed images list from disk, discarding the current in-memory version. """
self.clear()
installedImagesPath = self.getUser().getConfig()["installed-images-list"]
if os.path.exists(installedImagesPath):
with open(installedImagesPath, 'r') as file_f:
try:
installedImagesDict = json.load(file_f, object_pairs_hook=collections.OrderedDict)
except ValueError:
sys.exit("Error: installed-images.json is not a valid JSON file. Perhaps it is corrupted.")
else:
installedImagesDict = {}
# Create the InstalledImage objects.
for imageId,imageAttributes in installedImagesDict.items():
image = subuserlib.classes.installedImage.InstalledImage(
user=self.getUser(),
imageId=imageId,
imageSourceName=imageAttributes["image-source"],
sourceRepoId=imageAttributes["source-repo"],
lastUpdateTime=imageAttributes["last-update-time"])
self[imageId]=image
def save(self):
""" Save attributes of the installed images to disk. """
# Build a dictionary of installed images.
installedImagesDict = {}
for _,installedImage in self.items():
imageAttributes = {}
imageAttributes["last-update-time"] = installedImage.getLastUpdateTime()
imageAttributes["image-source"] = installedImage.getImageSourceName()
imageAttributes["source-repo"] = installedImage.getSourceRepoId()
installedImagesDict[installedImage.getImageId()] = imageAttributes
# Write that dictionary to disk.
installedImagesPath = self.getUser().getConfig()["installed-images-list"]
with open(installedImagesPath, 'w') as file_f:
json.dump(installedImagesDict, file_f, indent=1, separators=(',', ': '))
def __init__(self,user):
subuserlib.classes.userOwnedObject.UserOwnedObject.__init__(self,user)
self.reloadInstalledImagesList()
def unregisterNonExistantImages(self):
"""
Go through the installed images list and unregister any images that aren't actually installed.
"""
keysToDelete = []
for imageId,image in self.items():
if not image.isDockerImageThere():
keysToDelete.append(imageId)
for key in keysToDelete:
del self[key]
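# Illustrative usage sketch (not part of the original module):
#   installed_images = InstalledImages(user)        # reads the installed-images list from disk
#   installed_images.unregisterNonExistantImages()  # drop entries whose docker image is gone
#   installed_images.save()                         # write the pruned list back to disk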
|
lgpl-3.0
| 7,330,101,416,160,461,000
| 40.132353
| 132
| 0.720772
| false
| 4.041908
| false
| false
| false
|
dhcrzf/zulip
|
tools/zulint/printer.py
|
1
|
1070
|
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
from itertools import cycle
if False:
# See https://zulip.readthedocs.io/en/latest/testing/mypy.html#mypy-in-production-scripts
from typing import Union, Text
# Terminal Color codes for use in differentiating linters
BOLDRED = '\x1B[1;31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
BLUE = '\x1b[34m'
MAGENTA = '\x1b[35m'
CYAN = '\x1b[36m'
ENDC = '\033[0m'
colors = cycle([GREEN, YELLOW, BLUE, MAGENTA, CYAN])
def print_err(name, color, line):
# type: (str, str, Union[Text, bytes]) -> None
# Decode with UTF-8 if in Python 3 and `line` is of bytes type.
# (Python 2 does this automatically)
if sys.version_info[0] == 3 and isinstance(line, bytes):
line = line.decode('utf-8')
print('{}{}{}|{end} {}{}{end}'.format(
color,
name,
' ' * max(0, 10 - len(name)),
BOLDRED,
line.rstrip(),
end=ENDC)
)
# Python 2's print function does not have a `flush` option.
sys.stdout.flush()
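# Illustrative usage sketch (not part of the original module): each linter takes the
# next color from the cycle and tags its output lines with a padded, colored name:
#   color = next(colors)
#   print_err('pyflakes', color, b'foo.py:1: undefined name')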
|
apache-2.0
| 7,560,309,869,632,877,000
| 25.75
| 93
| 0.627103
| false
| 3.005618
| false
| false
| false
|
matthewwardrop/formulaic
|
tests/parser/types/test_token.py
|
1
|
2289
|
import pytest
from formulaic.parser.types import Term, Token
class TestToken:
@pytest.fixture
def token_a(self):
return Token('a', kind='name')
@pytest.fixture
def token_b(self):
return Token('log(x)', kind='python', source='y ~ log(x)', source_start=4, source_end=9)
@pytest.fixture
def token_c(self):
return Token('+', kind='operator')
def test_update(self, token_a):
token_a.update('+', 1, kind='python')
assert token_a.token == 'a+'
assert token_a.kind.value == 'python'
assert token_a.source_start == 1
assert token_a.source_end == 1
assert token_a.source_loc == (1, 1)
def test_equality(self, token_a, token_b, token_c):
assert token_a == 'a'
assert token_b == 'log(x)'
assert token_c == '+'
assert token_a == token_a
assert token_b != token_a
assert token_c != token_a
assert token_a != 1
assert bool(token_a) == True
assert bool(Token()) == False
def test_hash(self, token_a, token_b, token_c):
assert hash(token_a) == hash('a')
def test_ranking(self, token_a, token_b, token_c):
assert token_a < token_b
assert token_a > token_c
with pytest.raises(TypeError):
token_a < 1
def test_to_factor(self, token_a, token_b, token_c):
f_a = token_a.to_factor()
assert f_a.expr == token_a.token
assert f_a.eval_method.value == 'lookup'
f_b = token_b.to_factor()
assert f_b.expr == token_b.token
assert f_b.eval_method.value == 'python'
with pytest.raises(KeyError):
token_c.to_factor()
def test_to_terms(self, token_a):
assert token_a.to_terms() == {Term([token_a.to_factor()])}
def test_flatten(self, token_a):
assert token_a.flatten(str_args=False) is token_a
assert token_a.flatten(str_args=True) is 'a'
def test_get_source_context(self, token_a, token_b, token_c):
assert token_a.get_source_context() is None
assert token_b.get_source_context() == 'y ~ ⧛log(x)⧚'
assert token_c.get_source_context() is None
assert token_b.get_source_context(colorize=True) == 'y ~ ⧛\x1b[1;31mlog(x)\x1b[0m⧚'
|
mit
| -7,858,089,015,185,385,000
| 29.413333
| 96
| 0.577817
| false
| 3.194678
| true
| false
| false
|
UnitedThruAction/Data
|
Tools/SuffolkParser.py
|
1
|
8507
|
"""Parse Suffolk County data file and emit OpenElections CSV.
@author n.o.franklin@gmail.com
@date 2017-07-11
"""
import sys
def parse_information_record(line):
"""Type I. Read and parse information record data."""
information = line[5:].rstrip()
return {'information': information}
def parse_office_record(line):
"""Type R. Read and parse office record data."""
office_title = line[5:45].rstrip()
office_title_std = office_title.replace(',', '')
office_district_type = line[45:46].rstrip()
try:
office_district_number = int(line[46:50])
except ValueError:
office_district_number = ''
if office_district_number == 0:
office_district_number = ''
opp_to_ballot = line[50:51]
num_election_districts = int(line[51:55])
count_eligible_voters = int(line[55:62])
try:
num_candidates = int(line[62:64])
except ValueError:
num_candidates = 0
opp_to_ballot_lookup = {'Y': True,
'N': False,
' ': 'Unknown',
'O': 'Unknown',
'0': 'Unknown',
'2': 'Unknown'}
district_type_lookup = {'U': 'United States',
'N': 'New York State',
'K': 'Suffolk County',
'A': 'Unknown',
'L': 'Unknown',
'T': 'Unknown',
'W': 'Unknown',
'S': 'Unknown',
'J': 'Unknown',
'X': 'Unknown',
'C': 'Unknown'}
return {'office_title': office_title,
'office_title_std': office_title_std,
'office_district_type': district_type_lookup[office_district_type],
'office_district_number': office_district_number,
'opp_to_ballot': opp_to_ballot_lookup[opp_to_ballot],
'num_election_districts': num_election_districts,
'count_eligible_voters': count_eligible_voters,
'num_candidates': num_candidates}
def parse_candidate_record(line):
"""Type C. Read and parse candidate record data."""
candidate_name = line[5:30].rstrip().title()
candidate_name_std = candidate_name
if ', ' in candidate_name:
# Re-order 'Smith, Bob' as 'Bob Smith'
names = candidate_name.split(', ')
candidate_name_std = "{} {}".format(names[1], names[0])
party_code = line[30:33].rstrip()
write_in_flag = line[33:34]
write_in_lookup = {'S': True, ' ': 'Unknown'}
total_votes = int(line[34:41])
row_lever_on_ballot = line[41:44].rstrip()
return {'candidate_name': candidate_name,
'candidate_name_std': candidate_name_std,
'party_code': party_code,
'write_in_flag': write_in_lookup[write_in_flag],
'total_votes': total_votes,
'row_lever_on_ballot': row_lever_on_ballot}
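# Illustrative column layout for a candidate ('C') record, as sliced above
# (values hypothetical):
#   line[5:30]  candidate name, e.g. 'SMITH, BOB' -> normalized to 'Bob Smith'
#   line[30:33] party code, e.g. 'DEM'
#   line[33:34] write-in flag ('S' = write-in)
#   line[34:41] total votes, zero-padded
#   line[41:44] row/lever position on the ballot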
def parse_ed_record(line):
"""Type E. Read ED-result record data."""
record_length = int(line[:4])
town_code = line[5:6]
town_code_lookup = {'0': 'Shelter Island',
'1': 'Brookhaven',
'2': 'Huntington',
'3': 'Islip',
'4': 'Babylon',
'5': 'Smithtown',
'6': 'Southampton',
'7': 'East Hampton',
'8': 'Southold',
'9': 'Riverhead'}
ed_number = int(line[6:9])
reported_status = line[9:10].rstrip()
eligible_voters = int(line[10:14])
try:
whole_number = int(line[14:20])
except ValueError:
whole_number = 0
congress_district = int(line[34:35])
senate_district = int(line[35:36])
assembly_district = int(line[36:38])
legislative_district = int(line[38:40])
towncouncil_district = line[40:42].rstrip()
try:
blank_votes = int(line[42:46])
except ValueError:
blank_votes = 0
void_votes = int(line[46:49])
try:
scattering_votes = int(line[49:52])
except ValueError:
scattering_votes = 0
# Handle variable-length candidate fields
num_candidates = (record_length - 52) / 4
if num_candidates % 1 != 0:
raise ValueError("Incorrect number of characters on line.")
votes = []
try:
for i in range(int(num_candidates)):
start_index = 52 + (4 * i)
end_index = 56 + (4 * i)
votes.append(int(line[start_index:end_index]))
except TypeError as t:
print("Caught TypeError with num_candidates {}, record_length {}, "
"line '{}'".format(num_candidates, record_length, line))
# Generate Suffolk-specific precinct code
precinct_code = "{} #: {:>3}".format(town_code_lookup[town_code].title(),
"{:02.0f}".format(ed_number))
return {'town_name': town_code_lookup[town_code],
'ed_number': ed_number,
'reported_status': reported_status,
'eligible_voters': eligible_voters,
'whole_number': whole_number,
'congress_district': congress_district,
'senate_district': senate_district,
'assembly_district': assembly_district,
'legislative_district': legislative_district,
'towncouncil_district': towncouncil_district,
'blank_votes': blank_votes,
'void_votes': void_votes,
'scattering_votes': scattering_votes,
'num_candidates': num_candidates,
'votes': votes,
'precinct_code': precinct_code}
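# Illustrative precinct code (not part of the original script): town 'Brookhaven'
# with ED number 7 becomes 'Brookhaven #:  07' (town name, the literal '#: ', and the
# ED number zero-padded to two digits then right-aligned in a 3-character field,
# hence the double space).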
def process_file(filename):
"""Read the whole file and emit output in standard OE format."""
out_handle = open("{}-output.csv".format(filename), 'w')
out_handle.write('county,precinct,office,district,party,candidate,votes\n')
candidates = None
office = None
in_handle = open(filename, 'r')
for line in in_handle:
if line[4:5] == 'I':
# Information
print(parse_information_record(line))
if line[4:5] == 'R':
# Office
office = parse_office_record(line)
# Reset candidates
candidates = []
if line[4:5] == 'C':
# Candidate
candidates.append(parse_candidate_record(line))
if line[4:5] == 'E':
# ED Breakdown
election_district = parse_ed_record(line)
for i, vote in enumerate(election_district['votes']):
# County
output = ['Suffolk']
# Precinct
output.append(election_district['precinct_code'])
# Office
output.append(office['office_title_std'])
# District
output.append(str(office['office_district_number']))
# Party
try:
output.append(candidates[i]['party_code'])
except IndexError:
output.append('')
# Candidate
try:
output.append(candidates[i]['candidate_name_std'])
except IndexError:
output.append('')
# Votes
output.append(str(vote))
out_handle.write(",".join(output))
out_handle.write("\n")
# Append ED void/scattering votes
special_types = {'Scattering': 'scattering_votes',
'Void': 'void_votes',
'Blank': 'blank_votes'}
for name in special_types:
if election_district[special_types[name]] > 0:
output = ['Suffolk',
election_district['precinct_code'],
office['office_title_std'],
str(office['office_district_number']),
'',
name,
str(election_district[special_types[name]])]
out_handle.write(",".join(output))
out_handle.write("\n")
in_handle.close()
out_handle.close()
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit("Provide filename on command-line.")
else:
process_file(sys.argv[1])
|
apache-2.0
| -8,484,805,993,741,460,000
| 36.977679
| 79
| 0.508522
| false
| 3.895147
| false
| false
| false
|
AhmedHani/Kaggle-Machine-Learning-Competitions
|
Easy/What's Cooking/get_data.py
|
1
|
2057
|
__author__ = 'Ahmed Hani Ibrahim'
import json
import scipy as sc
import numpy as np
def get_train_data():
with open('./train.json') as r:
data = json.load(r)
r.close()
return data
def get_test_data():
with open('./test.json') as r:
data = json.load(r)
r.close()
return data
def encode_data(data):
labels = [item['cuisine'] for item in data]
unique_labels = set(labels)
labels_dictionary = {}
count = 0
for label in unique_labels:
labels_dictionary[label] = count
count += 1
ingredients = [item['ingredients'] for item in data]
unique_ingredients = set(inner_item for outer_item in ingredients for inner_item in outer_item)
ingredients_dictionary = {}
count = 0
for ingredient in unique_ingredients:
ingredients_dictionary[ingredient] = count
count += 1
return labels, labels_dictionary, ingredients, ingredients_dictionary, data
def vectorize_data(labels, labels_dictionary, ingredients, ingredients_dictionary, data):
labels_list = []
ingredients_list = []
for item in data:
if u'cuisine' in item :
label = str(item[u'cuisine'])
if label in labels_dictionary:
labels_list.append(labels_dictionary[label])
if u'ingredients' in item:
temp_ingredients = item[u'ingredients']
temp_numerical_ingredients = []
for ingredient in temp_ingredients:
if ingredient in ingredients_dictionary:
index = ingredients_dictionary[ingredient]
temp_numerical_ingredients.append(index)
ingredients_list.append(temp_numerical_ingredients)
print(len(ingredients_list), len(labels_list))
return (np.array(ingredients_list), np.array(labels_list))
#labels, labels_dictionary, ingredients, ingredients_dictionary, data = encode_data(get_train_data())
#features, classes = vectorize_data(labels, labels_dictionary, ingredients, ingredients_dictionary, data)
|
mit
| 2,965,865,717,847,519,000
| 27.971831
| 105
| 0.644628
| false
| 3.439799
| false
| false
| false
|
qiqi/fds
|
tests/test_autonomous_system/test_vida_automomous_system.py
|
1
|
1891
|
import os
import sys
import shutil
import string
import subprocess
from numpy import *
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(my_path, '../..'))
from fds.cti_restart_io import *
ref_fname = os.path.join(my_path, '..', 'data', 'cti-sample-restart-file.les')
initial_state = load_les(ref_fname, verbose=True)
base_dir = os.path.join(my_path, 'vida')
def run_vida_in(run_dir, state, steps):
print('Running {0} steps'.format(steps))
os.mkdir(run_dir)
template = open(os.path.join(my_path, 'vida.template')).read()
template = string.Template(template)
fname = os.path.join(run_dir, 'initial.les')
shutil.copy(ref_fname, fname)
state['STEP'] = 1
save_les(fname, state, verbose=True)
with open(os.path.join(run_dir, 'vida.in'), 'w') as f:
f.write(template.substitute(NSTEPS=str(steps+1)))
with open(os.path.join(run_dir, 'vida.out'), 'w') as f:
subprocess.check_call('/home/qiqi/BulletBody-ref/vida.exe',
cwd=run_dir, stdout=f, stderr=f)
fname = os.path.join(run_dir, 'result.les')
return load_les(fname, verbose=True)
if __name__ == '__main__':
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.mkdir(base_dir)
intermediate_state = run_vida_in(os.path.join(base_dir, 'first_50_steps'),
initial_state, 50)
final_state_1 = run_vida_in(os.path.join(base_dir, 'second_50_steps'),
intermediate_state, 50)
final_state_2 = run_vida_in(os.path.join(base_dir, 'all_100_steps_at_once'),
initial_state, 100)
for k in final_state_1:
if k != 'STEP':
if (final_state_1[k] == final_state_2[k]).all():
print(k, ' matches')
else:
print(k, ' does not match')
|
gpl-3.0
| 3,876,874,120,806,105,000
| 36.82
| 80
| 0.586991
| false
| 3.05
| false
| false
| false
|
sga001/cinch
|
exploits/CVE-2016-3138.py
|
1
|
1952
|
#!/usr/bin/env python3
from USB import *
from USBDevice import *
from USBConfiguration import *
from USBInterface import *
from USBEndpoint import *
class PwnUSBDevice(USBDevice):
name = "USB device"
def handle_buffer_available(self, lll):
return
def __init__(self, maxusb_app, verbose=0):
interface = USBInterface(
0, # interface number
0, # alternate setting
255, # interface class
0, # subclass
0, # protocol
0, # string index
verbose,
[],
{}
)
config = USBConfiguration(
1, # index
"Emulated Device", # string desc
[ interface ] # interfaces
)
USBDevice.__init__(
self,
maxusb_app,
0, # device class
0, # device subclass
0, # protocol release number
64, # max packet size for endpoint 0
0x0482, # vendor id
0x0203, # product id
0, # device revision
"Kyocera Corp.", # manufacturer string
"AH-K3001V", # product string
"?", # serial number string
[ config ],
verbose=verbose
)
from Facedancer import *
from MAXUSBApp import *
sp = GoodFETSerialPort()
fd = Facedancer(sp, verbose=1)
u = MAXUSBApp(fd, verbose=1)
d = PwnUSBDevice(u, verbose=4)
d.connect()
try:
d.run()
except KeyboardInterrupt:
d.disconnect()
|
cc0-1.0
| 1,698,712,239,874,196,700
| 28.575758
| 72
| 0.412398
| false
| 5.083333
| false
| false
| false
|
nion-software/nionswift
|
nion/swift/model/Project.py
|
1
|
19701
|
# standard libraries
import functools
import logging
import pathlib
import typing
import uuid
import weakref
# local libraries
from nion.swift.model import Changes
from nion.swift.model import Connection
from nion.swift.model import DataGroup
from nion.swift.model import Symbolic
from nion.swift.model import DataItem
from nion.swift.model import DataStructure
from nion.swift.model import DisplayItem
from nion.swift.model import FileStorageSystem
from nion.swift.model import Persistence
from nion.swift.model import WorkspaceLayout
from nion.utils import Converter
from nion.utils import ListModel
from nion.utils import Observable
ProjectItemType = typing.Union[DataItem.DataItem, DisplayItem.DisplayItem, DataStructure.DataStructure, Connection.Connection, Symbolic.Computation]
class Project(Observable.Observable, Persistence.PersistentObject):
"""A project manages raw data items, display items, computations, data structures, and connections.
Projects are stored in project indexes, which are files that describe how to find data and and tracks the other
project relationships (display items, computations, data structures, connections).
Projects manage reading, writing, and data migration.
"""
PROJECT_VERSION = 3
_processing_descriptions = dict()
def __init__(self, storage_system: FileStorageSystem.ProjectStorageSystem):
super().__init__()
self.define_type("project")
self.define_property("title", str())
self.define_relationship("data_items", data_item_factory, insert=self.__data_item_inserted, remove=self.__data_item_removed)
self.define_relationship("display_items", display_item_factory, insert=self.__display_item_inserted, remove=self.__display_item_removed)
self.define_relationship("computations", computation_factory, insert=self.__computation_inserted, remove=self.__computation_removed)
self.define_relationship("data_structures", data_structure_factory, insert=self.__data_structure_inserted, remove=self.__data_structure_removed)
self.define_relationship("connections", Connection.connection_factory, insert=self.__connection_inserted, remove=self.__connection_removed)
self.define_relationship("data_groups", DataGroup.data_group_factory, insert=self.__data_group_inserted, remove=self.__data_group_removed)
self.define_relationship("workspaces", WorkspaceLayout.factory)
self.define_property("workspace_uuid", converter=Converter.UuidToStringConverter())
self.define_property("data_item_references", dict(), hidden=True, changed=self.__property_changed) # map string key to data item, used for data acquisition channels
self.define_property("mapped_items", list(), changed=self.__property_changed) # list of item references, used for shortcut variables in scripts
self.handle_start_read = None
self.handle_insert_model_item = None
self.handle_remove_model_item = None
self.handle_finish_read = None
self.__has_been_read = False
self._raw_properties = None # debugging
self.__storage_system = storage_system
self.set_storage_system(self.__storage_system)
def close(self) -> None:
self.handle_start_read = None
self.handle_insert_model_item = None
self.handle_remove_model_item = None
self.handle_finish_read = None
self.__storage_system.close()
self.__storage_system = None
super().close()
def open(self) -> None:
self.__storage_system.reset() # this makes storage reusable during tests
def create_proxy(self) -> Persistence.PersistentObjectProxy:
return self.container.create_item_proxy(item=self)
@property
def item_specifier(self) -> Persistence.PersistentObjectSpecifier:
return Persistence.PersistentObjectSpecifier(item_uuid=self.uuid)
def create_specifier(self, item: Persistence.PersistentObject) -> Persistence.PersistentObjectSpecifier:
return Persistence.PersistentObjectSpecifier(item=item)
def insert_model_item(self, container, name, before_index, item) -> None:
# special handling to pass on to the document model
assert callable(self.handle_insert_model_item)
self.handle_insert_model_item(container, name, before_index, item)
def remove_model_item(self, container, name, item, *, safe: bool=False) -> Changes.UndeleteLog:
# special handling to pass on to the document model
assert callable(self.handle_remove_model_item)
return self.handle_remove_model_item(container, name, item, safe=safe)
@property
def storage_system_path(self) -> pathlib.Path:
return pathlib.Path(self.__storage_system.get_identifier())
@property
def project_uuid(self) -> typing.Optional[uuid.UUID]:
properties = self.__storage_system.get_storage_properties()
try:
return uuid.UUID(properties.get("uuid", str(uuid.uuid4()))) if properties else None
except Exception:
return None
@property
def project_state(self) -> str:
project_uuid = self.project_uuid
project_version = self.project_version
if project_uuid is not None and project_version is not None:
if project_version == FileStorageSystem.PROJECT_VERSION:
return "loaded" if self.__has_been_read else "unloaded"
else:
return "needs_upgrade"
return "invalid"
@property
def project_version(self) -> typing.Optional[int]:
properties = self.__storage_system.get_storage_properties()
try:
return properties.get("version", None) if properties else None
except Exception:
return None
@property
def project_filter(self) -> ListModel.Filter:
def is_display_item_active(project_weak_ref, display_item: DisplayItem.DisplayItem) -> bool:
return display_item.project == project_weak_ref()
# use a weak reference to avoid circular references loops that prevent garbage collection
return ListModel.PredicateFilter(functools.partial(is_display_item_active, weakref.ref(self)))
@property
def project_storage_system(self) -> FileStorageSystem.ProjectStorageSystem:
return self.__storage_system
def __data_item_inserted(self, name: str, before_index: int, data_item: DataItem.DataItem) -> None:
self.notify_insert_item("data_items", data_item, before_index)
def __data_item_removed(self, name: str, index: int, data_item: DataItem.DataItem) -> None:
self.notify_remove_item("data_items", data_item, index)
def __display_item_inserted(self, name: str, before_index: int, display_item: DisplayItem.DisplayItem) -> None:
self.notify_insert_item("display_items", display_item, before_index)
def __display_item_removed(self, name: str, index: int, display_item: DisplayItem.DisplayItem) -> None:
self.notify_remove_item("display_items", display_item, index)
def __data_structure_inserted(self, name: str, before_index: int, data_structure: DataStructure.DataStructure) -> None:
self.notify_insert_item("data_structures", data_structure, before_index)
def __data_structure_removed(self, name: str, index: int, data_structure: DataStructure.DataStructure) -> None:
self.notify_remove_item("data_structures", data_structure, index)
def __computation_inserted(self, name: str, before_index: int, computation: Symbolic.Computation) -> None:
self.notify_insert_item("computations", computation, before_index)
def __computation_removed(self, name: str, index: int, computation: Symbolic.Computation) -> None:
self.notify_remove_item("computations", computation, index)
def __connection_inserted(self, name: str, before_index: int, connection: Connection.Connection) -> None:
self.notify_insert_item("connections", connection, before_index)
def __connection_removed(self, name: str, index: int, connection: Connection.Connection) -> None:
self.notify_remove_item("connections", connection, index)
def __data_group_inserted(self, name: str, before_index: int, data_group: DataGroup.DataGroup) -> None:
self.notify_insert_item("data_groups", data_group, before_index)
def __data_group_removed(self, name: str, index: int, data_group: DataGroup.DataGroup) -> None:
self.notify_remove_item("data_groups", data_group, index)
def _get_relationship_persistent_dict(self, item, key: str, index: int) -> typing.Dict:
if key == "data_items":
return self.__storage_system.get_persistent_dict("data_items", item.uuid)
else:
return super()._get_relationship_persistent_dict(item, key, index)
def _get_relationship_persistent_dict_by_uuid(self, item, key: str) -> typing.Optional[typing.Dict]:
if key == "data_items":
return self.__storage_system.get_persistent_dict("data_items", item.uuid)
else:
return super()._get_relationship_persistent_dict_by_uuid(item, key)
def prepare_read_project(self) -> None:
logging.getLogger("loader").info(f"Loading project {self.__storage_system.get_identifier()}")
self._raw_properties = self.__storage_system.read_project_properties() # combines library and data item properties
self.uuid = uuid.UUID(self._raw_properties.get("uuid", str(uuid.uuid4())))
def read_project(self) -> None:
if callable(self.handle_start_read):
self.handle_start_read()
properties = self._raw_properties
if properties:
project_version = properties.get("version", None)
if project_version is not None and project_version == FileStorageSystem.PROJECT_VERSION:
for item_d in properties.get("data_items", list()):
data_item = DataItem.DataItem()
data_item.begin_reading()
data_item.read_from_dict(item_d)
data_item.finish_reading()
if not self.get_item_by_uuid("data_items", data_item.uuid):
self.load_item("data_items", len(self.data_items), data_item)
else:
data_item.close()
for item_d in properties.get("display_items", list()):
display_item = DisplayItem.DisplayItem()
display_item.begin_reading()
display_item.read_from_dict(item_d)
display_item.finish_reading()
if not self.get_item_by_uuid("display_items", display_item.uuid):
self.load_item("display_items", len(self.display_items), display_item)
else:
display_item.close()
for item_d in properties.get("data_structures", list()):
data_structure = DataStructure.DataStructure()
data_structure.begin_reading()
data_structure.read_from_dict(item_d)
data_structure.finish_reading()
if not self.get_item_by_uuid("data_structures", data_structure.uuid):
self.load_item("data_structures", len(self.data_structures), data_structure)
else:
data_structure.close()
for item_d in properties.get("computations", list()):
computation = Symbolic.Computation()
computation.begin_reading()
computation.read_from_dict(item_d)
computation.finish_reading()
if not self.get_item_by_uuid("computations", computation.uuid):
self.load_item("computations", len(self.computations), computation)
# TODO: handle update script and bind after reload in document model
computation.update_script(Project._processing_descriptions)
computation.reset()
else:
computation.close()
for item_d in properties.get("connections", list()):
connection = Connection.connection_factory(item_d.get)
connection.begin_reading()
connection.read_from_dict(item_d)
connection.finish_reading()
if not self.get_item_by_uuid("connections", connection.uuid):
self.load_item("connections", len(self.connections), connection)
else:
connection.close()
for item_d in properties.get("data_groups", list()):
data_group = DataGroup.data_group_factory(item_d.get)
data_group.begin_reading()
data_group.read_from_dict(item_d)
data_group.finish_reading()
if not self.get_item_by_uuid("data_groups", data_group.uuid):
self.load_item("data_groups", len(self.data_groups), data_group)
else:
data_group.close()
for item_d in properties.get("workspaces", list()):
workspace = WorkspaceLayout.factory(item_d.get)
workspace.begin_reading()
workspace.read_from_dict(item_d)
workspace.finish_reading()
if not self.get_item_by_uuid("workspaces", workspace.uuid):
self.load_item("workspaces", len(self.workspaces), workspace)
else:
workspace.close()
workspace_uuid_str = properties.get("workspace_uuid", None)
if workspace_uuid_str:
self._set_persistent_property_value("workspace_uuid", uuid.UUID(workspace_uuid_str))
self._set_persistent_property_value("data_item_references", properties.get("data_item_references", dict()))
self._set_persistent_property_value("mapped_items", properties.get("mapped_items", list()))
self.__has_been_read = True
if callable(self.handle_finish_read):
self.handle_finish_read()
def __property_changed(self, name, value):
self.notify_property_changed(name)
def append_data_item(self, data_item: DataItem.DataItem) -> None:
assert not self.get_item_by_uuid("data_items", data_item.uuid)
self.append_item("data_items", data_item)
data_item.write_data_if_not_delayed() # initially write to disk
def remove_data_item(self, data_item: DataItem.DataItem) -> None:
self.remove_item("data_items", data_item)
def restore_data_item(self, data_item_uuid: uuid.UUID) -> typing.Optional[DataItem.DataItem]:
item_d = self.__storage_system.restore_item(data_item_uuid)
if item_d is not None:
data_item_uuid = uuid.UUID(item_d.get("uuid"))
large_format = item_d.get("__large_format", False)
data_item = DataItem.DataItem(item_uuid=data_item_uuid, large_format=large_format)
data_item.begin_reading()
data_item.read_from_dict(item_d)
data_item.finish_reading()
assert not self.get_item_by_uuid("data_items", data_item.uuid)
self.append_item("data_items", data_item)
assert data_item.container == self
return data_item
return None
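    # Editor's note (illustrative, not part of the original source): restored
    # items follow the same three-phase read pattern used throughout
    # read_project() above:
    #
    #   item.begin_reading()          # start the read
    #   item.read_from_dict(item_d)   # populate from the persisted dict
    #   item.finish_reading()         # complete the read
    #
    # and an item whose uuid is already present is closed rather than loaded.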
def append_display_item(self, display_item: DisplayItem.DisplayItem) -> None:
assert not self.get_item_by_uuid("display_items", display_item.uuid)
self.append_item("display_items", display_item)
def remove_display_item(self, display_item: DisplayItem.DisplayItem) -> None:
self.remove_item("display_items", display_item)
def append_data_structure(self, data_structure: DataStructure.DataStructure) -> None:
assert not self.get_item_by_uuid("data_structures", data_structure.uuid)
self.append_item("data_structures", data_structure)
def remove_data_structure(self, data_structure: DataStructure.DataStructure) -> None:
self.remove_item("data_structures", data_structure)
def append_computation(self, computation: Symbolic.Computation) -> None:
assert not self.get_item_by_uuid("computations", computation.uuid)
self.append_item("computations", computation)
def remove_computation(self, computation: Symbolic.Computation) -> None:
self.remove_item("computations", computation)
def append_connection(self, connection: Connection.Connection) -> None:
assert not self.get_item_by_uuid("connections", connection.uuid)
self.append_item("connections", connection)
def remove_connection(self, connection: Connection.Connection) -> None:
self.remove_item("connections", connection)
@property
def data_item_references(self) -> typing.Dict[str, uuid.UUID]:
return dict(self._get_persistent_property_value("data_item_references").items())
def set_data_item_reference(self, key: str, data_item: DataItem.DataItem) -> None:
data_item_references = self.data_item_references
data_item_references[key] = data_item.item_specifier.write()
self._set_persistent_property_value("data_item_references", {k: v for k, v in data_item_references.items()})
def clear_data_item_reference(self, key: str) -> None:
data_item_references = self.data_item_references
del data_item_references[key]
self._set_persistent_property_value("data_item_references", {k: v for k, v in data_item_references.items()})
@property
def mapped_items(self) -> typing.List[typing.Union[typing.Mapping, str]]:
return list(self._get_persistent_property_value("mapped_items"))
@mapped_items.setter
def mapped_items(self, value: typing.List[typing.Union[typing.Mapping, str]]) -> None:
self._set_persistent_property_value("mapped_items", value)
def prune(self) -> None:
self.__storage_system.prune()
def migrate_to_latest(self) -> None:
self.__storage_system.migrate_to_latest()
self.__storage_system.load_properties()
self.update_storage_system() # reload the properties
self.prepare_read_project()
self.read_project()
def unmount(self) -> None:
while len(self.data_groups) > 0:
self.unload_item("data_groups", len(self.data_groups) - 1)
while len(self.connections) > 0:
self.unload_item("connections", len(self.connections) - 1)
while len(self.computations) > 0:
self.unload_item("computations", len(self.computations) - 1)
while len(self.data_structures) > 0:
self.unload_item("data_structures", len(self.data_structures) - 1)
while len(self.display_items) > 0:
self.unload_item("display_items", len(self.display_items) - 1)
while len(self.data_items) > 0:
self.unload_item("data_items", len(self.data_items) - 1)
def data_item_factory(lookup_id):
data_item_uuid = uuid.UUID(lookup_id("uuid"))
large_format = lookup_id("__large_format", False)
return DataItem.DataItem(item_uuid=data_item_uuid, large_format=large_format)
def display_item_factory(lookup_id):
display_item_uuid = uuid.UUID(lookup_id("uuid"))
return DisplayItem.DisplayItem(item_uuid=display_item_uuid)
def computation_factory(lookup_id):
return Symbolic.Computation()
def data_structure_factory(lookup_id):
return DataStructure.DataStructure()
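# Editor's note (illustrative, not part of the original source): the factory
# functions above take a lookup callable; in practice callers pass a persisted
# dict's `.get` method (as read_project() does for connections, data groups
# and workspaces), so e.g.
#
#     data_item_factory(item_d.get)
#
# reads item_d["uuid"] and the optional item_d["__large_format"] flag to build
# the DataItem.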
|
gpl-3.0
| 7,469,101,055,885,551,000
| 48.5
| 173
| 0.649662
| false
| 4.032133
| false
| false
| false
|
richrd/bx
|
modules/autochanmode.py
|
1
|
1311
|
from mod_base import*
class AutoChanMode(Listener):
"""Automatically manage channel modes according to config."""
def init(self):
self.events = [
IRC_EVT_CHAN_USER_MODE_CHANGE,
IRC_EVT_CHAN_MODE_CHANGE,
]
self.all_modes = "cCDilmMnNoprstTuv"
def RepairModes(self, event):
if event.id == IRC_EVT_CHAN_MODE_CHANGE and event.user == self.bot.me:
return False
valid = self.bot.config.GetChannelModes(event.win)
if valid == None:
return False
del_modes = self.all_modes
for mode in valid:
del_modes = del_modes.replace(mode, "")
event.win.SetModes("-" + del_modes)
event.win.SetModes(valid)
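    # Editor's worked example (added for illustration): with
    # all_modes = "cCDilmMnNoprstTuv" and a configured mode string of "nt",
    # del_modes becomes "cCDilmMNoprsTuv", so the channel first gets
    # SetModes("-cCDilmMNoprsTuv") to clear everything unwanted and then
    # SetModes("nt") to (re)apply the configured modes.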
def event(self, event):
m = self.bot.config.GetChannelModes(event.win)
if m == None:
return False
if event.id == IRC_EVT_CHAN_MODE_CHANGE:
if self.bot.me.HasOP(event.win):
self.RepairModes(event)
if event.id == IRC_EVT_CHAN_USER_MODE_CHANGE:
if event.user == self.bot.me:
if self.bot.me.HasOP(event.win):
self.RepairModes(event)
module = {
"class": AutoChanMode,
"type": MOD_LISTENER,
}
|
apache-2.0
| 7,036,446,650,438,043,000
| 30.238095
| 78
| 0.546911
| false
| 3.601648
| false
| false
| false
|
mompiou/stereo-proj
|
schmidUI.py
|
1
|
3558
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'schmidUI.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Schmid(object):
def setupUi(self, Schmid):
Schmid.setObjectName(_fromUtf8("Schmid"))
Schmid.resize(343, 320)
self.layoutWidget = QtGui.QWidget(Schmid)
self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 318, 298))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.layoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.schmid_text = QtGui.QTextEdit(self.layoutWidget)
self.schmid_text.setObjectName(_fromUtf8("schmid_text"))
self.gridLayout.addWidget(self.schmid_text, 6, 0, 1, 3)
self.b_label = QtGui.QLabel(self.layoutWidget)
self.b_label.setObjectName(_fromUtf8("b_label"))
self.gridLayout.addWidget(self.b_label, 0, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(self.layoutWidget)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout.addWidget(self.buttonBox, 7, 0, 1, 2)
self.T_label = QtGui.QLabel(self.layoutWidget)
self.T_label.setObjectName(_fromUtf8("T_label"))
self.gridLayout.addWidget(self.T_label, 3, 0, 1, 1)
self.b_entry = QtGui.QLineEdit(self.layoutWidget)
self.b_entry.setObjectName(_fromUtf8("b_entry"))
self.gridLayout.addWidget(self.b_entry, 0, 1, 1, 1)
self.T_entry = QtGui.QLineEdit(self.layoutWidget)
self.T_entry.setObjectName(_fromUtf8("T_entry"))
self.gridLayout.addWidget(self.T_entry, 3, 1, 1, 1)
self.n_entry = QtGui.QLineEdit(self.layoutWidget)
self.n_entry.setObjectName(_fromUtf8("n_entry"))
self.gridLayout.addWidget(self.n_entry, 2, 1, 1, 1)
self.n_label = QtGui.QLabel(self.layoutWidget)
self.n_label.setObjectName(_fromUtf8("n_label"))
self.gridLayout.addWidget(self.n_label, 2, 0, 1, 1)
self.schmid_factor_label = QtGui.QLabel(self.layoutWidget)
self.schmid_factor_label.setText(_fromUtf8(""))
self.schmid_factor_label.setObjectName(_fromUtf8("schmid_factor_label"))
self.gridLayout.addWidget(self.schmid_factor_label, 4, 1, 1, 1)
self.retranslateUi(Schmid)
QtCore.QMetaObject.connectSlotsByName(Schmid)
Schmid.setTabOrder(self.b_entry, self.n_entry)
Schmid.setTabOrder(self.n_entry, self.T_entry)
Schmid.setTabOrder(self.T_entry, self.schmid_text)
Schmid.setTabOrder(self.schmid_text, self.buttonBox)
def retranslateUi(self, Schmid):
Schmid.setWindowTitle(_translate("Schmid", "Schmid Factor", None))
self.b_label.setText(_translate("Schmid", "b", None))
self.T_label.setText(_translate("Schmid", "T", None))
self.n_label.setText(_translate("Schmid", "n", None))
|
gpl-2.0
| -1,922,772,828,520,830,500
| 45.207792
| 98
| 0.681001
| false
| 3.451018
| false
| false
| false
|
fnurl/alot
|
alot/helper.py
|
1
|
20813
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# Copyright © 2017 Dylan Baker
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
from __future__ import division
from datetime import timedelta
from datetime import datetime
from collections import deque
from cStringIO import StringIO
import logging
import mimetypes
import os
import re
import shlex
import subprocess
import email
from email.generator import Generator
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import urwid
import magic
from twisted.internet import reactor
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
def split_commandline(s, comments=False, posix=True):
"""
splits semi-colon separated commandlines
"""
# shlex seems to remove unescaped quotes and backslashes
s = s.replace('\\', '\\\\')
s = s.replace('\'', '\\\'')
s = s.replace('\"', '\\\"')
# encode s to utf-8 for shlex
if isinstance(s, unicode):
s = s.encode('utf-8')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
lex.whitespace = ';'
if not comments:
lex.commenters = ''
return list(lex)
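# Editor's illustrative usage (not part of the upstream module):
#
#     split_commandline('search tag:todo; sort date')
#     # -> ['search tag:todo', ' sort date']
#
# Only ';' counts as whitespace for the underlying shlex instance, which is
# why the space after the semicolon survives in the second entry.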
def split_commandstring(cmdstring):
"""
split command string into a list of strings to pass on to subprocess.Popen
and the like. This simply calls shlex.split but works also with unicode
bytestrings.
"""
if isinstance(cmdstring, unicode):
cmdstring = cmdstring.encode('utf-8', errors='ignore')
return shlex.split(cmdstring)
def string_sanitize(string, tab_width=8):
r"""
strips, and replaces non-printable characters
:param tab_width: number of spaces to replace tabs with. Read from
`globals.tabwidth` setting if `None`
:type tab_width: int or `None`
>>> string_sanitize(' foo\rbar ', 8)
' foobar '
>>> string_sanitize('foo\tbar', 8)
'foo bar'
>>> string_sanitize('foo\t\tbar', 8)
'foo bar'
"""
string = string.replace('\r', '')
lines = list()
for line in string.split('\n'):
tab_count = line.count('\t')
if tab_count > 0:
line_length = 0
new_line = list()
for i, chunk in enumerate(line.split('\t')):
line_length += len(chunk)
new_line.append(chunk)
if i < tab_count:
next_tab_stop_in = tab_width - (line_length % tab_width)
new_line.append(' ' * next_tab_stop_in)
line_length += next_tab_stop_in
lines.append(''.join(new_line))
else:
lines.append(line)
return '\n'.join(lines)
def string_decode(string, enc='ascii'):
"""
    safely decodes a bytestring into a unicode string, respecting `enc` as a hint.
"""
if enc is None:
enc = 'ascii'
try:
string = unicode(string, enc, errors='replace')
except LookupError: # malformed enc string
string = string.decode('ascii', errors='replace')
except TypeError: # already unicode
pass
return string
def shorten(string, maxlen):
"""shortens string if longer than maxlen, appending ellipsis"""
if 1 < maxlen < len(string):
string = string[:maxlen - 1] + u'\u2026'
return string[:maxlen]
def shorten_author_string(authors_string, maxlength):
"""
Parse a list of authors concatenated as a text string (comma
separated) and smartly adjust them to maxlength.
1) If the complete list of sender names does not fit in maxlength, it
tries to shorten names by using only the first part of each.
2) If the list is still too long, hide authors according to the
following priority:
    - First author is always shown (if too long, it is shortened with an ellipsis)
- If possible, last author is also shown (if too long, uses ellipsis)
- If there are more than 2 authors in the thread, show the
maximum of them. More recent senders have higher priority.
- If it is finally necessary to hide any author, an ellipsis
between first and next authors is added.
"""
# I will create a list of authors by parsing author_string. I use
# deque to do popleft without performance penalties
authors = deque()
# If author list is too long, it uses only the first part of each
# name (gmail style)
short_names = len(authors_string) > maxlength
for au in authors_string.split(", "):
if short_names:
author_as_list = au.split()
if len(author_as_list) > 0:
authors.append(author_as_list[0])
else:
authors.append(au)
# Author chain will contain the list of author strings to be
# concatenated using commas for the final formatted author_string.
authors_chain = deque()
if len(authors) == 0:
return u''
# reserve space for first author
first_au = shorten(authors.popleft(), maxlength)
remaining_length = maxlength - len(first_au)
# Tries to add an ellipsis if no space to show more than 1 author
if authors and maxlength > 3 and remaining_length < 3:
first_au = shorten(first_au, maxlength - 3)
remaining_length += 3
    # Tries to add as many authors as possible. It takes into account
    # that if any author will be hidden, an ellipsis should be added
while authors and remaining_length >= 3:
au = authors.pop()
if len(au) > 1 and (remaining_length == 3 or (authors and
remaining_length < 7)):
authors_chain.appendleft(u'\u2026')
break
else:
if authors:
# 5= ellipsis + 2 x comma and space used as separators
au_string = shorten(au, remaining_length - 5)
else:
# 2 = comma and space used as separator
au_string = shorten(au, remaining_length - 2)
remaining_length -= len(au_string) + 2
authors_chain.appendleft(au_string)
# Add the first author to the list and concatenate list
authors_chain.appendleft(first_au)
authorsstring = ', '.join(authors_chain)
return authorsstring
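# Editor's illustrative usage (not part of the upstream module). With this
# implementation:
#
#     shorten_author_string(u'Alice Smith, Bob Jones, Carol White, Dave Brown', 25)
#     # -> u'Alice, Bob, Carol, Dave'   (list too long, so first names only)
#     shorten_author_string(u'Alice Smith, Bob Jones, Carol White, Dave Brown', 15)
#     # -> u'Alice, \u2026, Dave'       (middle senders hidden behind an ellipsis)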
def pretty_datetime(d):
"""
translates :class:`datetime` `d` to a "sup-style" human readable string.
>>> now = datetime.now()
>>> now.strftime('%c')
'Sat 31 Mar 2012 14:47:26 '
>>> pretty_datetime(now)
u'just now'
>>> pretty_datetime(now - timedelta(minutes=1))
u'1min ago'
>>> pretty_datetime(now - timedelta(hours=5))
u'5h ago'
>>> pretty_datetime(now - timedelta(hours=12))
u'02:54am'
>>> pretty_datetime(now - timedelta(days=1))
u'yest 02pm'
>>> pretty_datetime(now - timedelta(days=2))
u'Thu 02pm'
>>> pretty_datetime(now - timedelta(days=7))
u'Mar 24'
>>> pretty_datetime(now - timedelta(days=356))
u'Apr 2011'
"""
ampm = d.strftime('%p').lower()
if len(ampm):
hourfmt = '%I' + ampm
hourminfmt = '%I:%M' + ampm
else:
hourfmt = '%Hh'
hourminfmt = '%H:%M'
now = datetime.now()
today = now.date()
if d.date() == today or d > now - timedelta(hours=6):
delta = datetime.now() - d
if delta.seconds < 60:
string = 'just now'
elif delta.seconds < 3600:
string = '%dmin ago' % (delta.seconds // 60)
elif delta.seconds < 6 * 3600:
string = '%dh ago' % (delta.seconds // 3600)
else:
string = d.strftime(hourminfmt)
elif d.date() == today - timedelta(1):
string = d.strftime('yest ' + hourfmt)
elif d.date() > today - timedelta(7):
string = d.strftime('%a ' + hourfmt)
elif d.year != today.year:
string = d.strftime('%b %Y')
else:
string = d.strftime('%b %d')
return string_decode(string, 'UTF-8')
def call_cmd(cmdlist, stdin=None):
"""
    get a shell command's output, error message and return value and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
    :param cmdlist: shell command to call, already split into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
"""
try:
proc = subprocess.Popen(
cmdlist,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin is not None else None)
out, err = proc.communicate(stdin)
ret = proc.returncode
except OSError as e:
out = b''
err = e.strerror
ret = e.errno
out = string_decode(out, urwid.util.detected_encoding)
err = string_decode(err, urwid.util.detected_encoding)
return out, err, ret
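# Editor's illustrative usage (not part of the upstream module):
#
#     out, err, code = call_cmd(['echo', 'hello'])
#     # out == u'hello\n', err == u'', code == 0
#
#     out, err, code = call_cmd(['cat'], stdin='piped input')
#     # the `stdin` string is written to the child's standard input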
def call_cmd_async(cmdlist, stdin=None, env=None):
"""
    get a shell command's output, error message and return value as a deferred.
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: deferred that calls back with triple of stdout, stderr and
return value of the shell command
:rtype: `twisted.internet.defer.Deferred`
"""
class _EverythingGetter(ProcessProtocol):
def __init__(self, deferred):
self.deferred = deferred
self.outBuf = StringIO()
self.errBuf = StringIO()
self.outReceived = self.outBuf.write
self.errReceived = self.errBuf.write
def processEnded(self, status):
termenc = urwid.util.detected_encoding
out = string_decode(self.outBuf.getvalue(), termenc)
err = string_decode(self.errBuf.getvalue(), termenc)
if status.value.exitCode == 0:
self.deferred.callback(out)
else:
terminated_obj = status.value
terminated_obj.stderr = err
self.deferred.errback(terminated_obj)
d = Deferred()
environment = os.environ
if env is not None:
environment.update(env)
logging.debug('ENV = %s', environment)
logging.debug('CMD = %s', cmdlist)
proc = reactor.spawnProcess(_EverythingGetter(d), executable=cmdlist[0],
env=environment,
args=cmdlist)
if stdin:
logging.debug('writing to stdin')
proc.write(stdin)
proc.closeStdin()
return d
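# Editor's illustrative usage (not part of the upstream module): the returned
# Deferred fires with the decoded stdout on success, and errbacks with the
# termination failure (stderr attached as `.stderr`) otherwise:
#
#     d = call_cmd_async(['ls', '/tmp'])
#     d.addCallback(lambda out: logging.info('output: %s', out))
#     d.addErrback(lambda failure: logging.error(failure.value.stderr))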
def guess_mimetype(blob):
"""
uses file magic to determine the mime-type of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: mime-type, falls back to 'application/octet-stream'
:rtype: str
"""
mimetype = 'application/octet-stream'
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_TYPE)
m.load()
magictype = m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
# cf. issue #841
        magictype = magic.from_buffer(blob, mime=True) or mimetype  # fall back to the default if libmagic returns nothing
else:
raise Exception('Unknown magic API')
# libmagic does not always return proper mimetype strings, cf. issue #459
if re.match(r'\w+\/\w+', magictype):
mimetype = magictype
return mimetype
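# Editor's illustrative usage (not part of the upstream module):
#
#     with open('photo.jpg', 'rb') as f:
#         guess_mimetype(f.read())    # typically 'image/jpeg'
#
# The 'application/octet-stream' default is only kept when libmagic does not
# return a well-formed "type/subtype" string.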
def guess_encoding(blob):
"""
uses file magic to determine the encoding of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str
"""
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_ENCODING)
m.load()
return m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
m = magic.Magic(mime_encoding=True)
return m.from_buffer(blob)
else:
raise Exception('Unknown magic API')
def libmagic_version_at_least(version):
"""
checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513
"""
if hasattr(magic, 'open'):
magic_wrapper = magic._libraries['magic']
elif hasattr(magic, 'from_buffer'):
magic_wrapper = magic.libmagic
else:
raise Exception('Unknown magic API')
if not hasattr(magic_wrapper, 'magic_version'):
# The magic_version function has been introduced in libmagic 5.13,
# if it's not present, we can't guess right, so let's assume False
return False
return magic_wrapper.magic_version >= version
# TODO: make this work on blobs, not paths
def mimewrap(path, filename=None, ctype=None):
"""Take the contents of the given path and wrap them into an email MIME
part according to the content type. The content type is auto detected from
the actual file contents and the file name if it is not given.
:param path: the path to the file contents
:type path: str
:param filename: the file name to use in the generated MIME part
:type filename: str or None
:param ctype: the content type of the file contents in path
:type ctype: str or None
:returns: the message MIME part storing the data from path
:rtype: subclasses of email.mime.base.MIMEBase
"""
with open(path, 'rb') as f:
content = f.read()
if not ctype:
ctype = guess_mimetype(content)
# libmagic < 5.12 incorrectly detects excel/powerpoint files as
# 'application/msword' (see #179 and #186 in libmagic bugtracker)
# This is a workaround, based on file extension, useful as long
# as distributions still ship libmagic 5.11.
if (ctype == 'application/msword' and
not libmagic_version_at_least(513)):
mimetype, _ = mimetypes.guess_type(path)
if mimetype:
ctype = mimetype
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
part = MIMEText(content.decode(guess_encoding(content), 'replace'),
_subtype=subtype,
_charset='utf-8')
elif maintype == 'image':
part = MIMEImage(content, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(content, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(content)
# Encode the payload using Base64
email.encoders.encode_base64(part)
# Set the filename parameter
if not filename:
filename = os.path.basename(path)
part.add_header('Content-Disposition', 'attachment',
filename=filename)
return part
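# Editor's illustrative usage (not part of the upstream module):
#
#     part = mimewrap('/tmp/report.pdf')
#     part.get_content_type()    # e.g. 'application/pdf' (detected via libmagic)
#     part.get_filename()        # 'report.pdf'
#
#     part = mimewrap('/tmp/notes.txt', filename='renamed.txt', ctype='text/plain')
#
# Text parts are re-encoded as utf-8; all other payloads are base64 encoded.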
def shell_quote(text):
"""Escape the given text for passing it to the shell for interpretation.
The resulting string will be parsed into one "word" (in the sense used in
the shell documentation, see sh(1)) by the shell.
:param text: the text to quote
:type text: str
:returns: the quoted text
:rtype: str
"""
return "'%s'" % text.replace("'", """'"'"'""")
def humanize_size(size):
"""Create a nice human readable representation of the given number
(understood as bytes) using the "KiB" and "MiB" suffixes to indicate
kibibytes and mebibytes. A kibibyte is defined as 1024 bytes (as opposed to
    a kilobyte which is 1000 bytes) and a mebibyte is 1024**2 bytes (as opposed
to a megabyte which is 1000**2 bytes).
:param size: the number to convert
:type size: int
:returns: the human readable representation of size
:rtype: str
"""
for factor, format_string in ((1, '%i'),
(1024, '%iKiB'),
(1024 * 1024, '%.1fMiB')):
if size / factor < 1024:
return format_string % (size / factor)
return format_string % (size / factor)
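# Editor's illustrative usage (doctest-style, not part of the upstream module):
#
#     >>> humanize_size(300)
#     '300'
#     >>> humanize_size(2048)
#     '2KiB'
#     >>> humanize_size(1536 * 1024)
#     '1.5MiB'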
def parse_mailcap_nametemplate(tmplate='%s'):
"""this returns a prefix and suffix to be used
in the tempfile module for a given mailcap nametemplate string"""
nt_list = tmplate.split('%s')
template_prefix = ''
template_suffix = ''
if len(nt_list) == 2:
template_suffix = nt_list[1]
template_prefix = nt_list[0]
else:
template_suffix = tmplate
return (template_prefix, template_suffix)
def parse_mailto(mailto_str):
"""
Interpret mailto-string
:param mailto_str: the string to interpret. Must conform to :rfc:2368.
:type mailto_str: str
:return: the header fields and the body found in the mailto link as a tuple
of length two
:rtype: tuple(dict(str->list(str)), str)
"""
if mailto_str.startswith('mailto:'):
import urllib
to_str, parms_str = mailto_str[7:].partition('?')[::2]
headers = {}
body = u''
to = urllib.unquote(to_str)
if to:
headers['To'] = [to]
for s in parms_str.split('&'):
key, value = s.partition('=')[::2]
key = key.capitalize()
if key == 'Body':
body = urllib.unquote(value)
elif value:
headers[key] = [urllib.unquote(value)]
return (headers, body)
else:
return (None, None)
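# Editor's illustrative usage (not part of the upstream module):
#
#     headers, body = parse_mailto(
#         'mailto:foo%40example.com?Subject=hi%20there&body=see%20you')
#     # headers == {'To': ['foo@example.com'], 'Subject': ['hi there']}
#     # body    == 'see you'
#
# Any string that does not start with 'mailto:' yields (None, None).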
def mailto_to_envelope(mailto_str):
"""
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
"""
from alot.db.envelope import Envelope
headers, body = parse_mailto(mailto_str)
return Envelope(bodytext=body, headers=headers)
def RFC3156_canonicalize(text):
"""
Canonicalizes plain text (MIME-encoded usually) according to RFC3156.
This function works as follows (in that order):
1. Convert all line endings to \\\\r\\\\n (DOS line endings).
2. Ensure the text ends with a newline (\\\\r\\\\n).
    3. Encode all occurrences of "From " at the beginning of a line
       to "From=20" in order to prevent other mail programs from replacing
       this with "> From" (to avoid MBox conflicts) and thus invalidating
the signature.
:param text: text to canonicalize (already encoded as quoted-printable)
:rtype: str
"""
text = re.sub("\r?\n", "\r\n", text)
if not text.endswith("\r\n"):
text += "\r\n"
text = re.sub("^From ", "From=20", text, flags=re.MULTILINE)
return text
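# Editor's illustrative usage (not part of the upstream module):
#
#     RFC3156_canonicalize("From me\nhello")
#     # -> "From=20me\r\nhello\r\n"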
def email_as_string(mail):
"""
Converts the given message to a string, without mangling "From" lines
(like as_string() does).
:param mail: email to convert to string
:rtype: str
"""
fp = StringIO()
g = Generator(fp, mangle_from_=False, maxheaderlen=78)
g.flatten(mail)
as_string = RFC3156_canonicalize(fp.getvalue())
if isinstance(mail, MIMEMultipart):
# Get the boundary for later
boundary = mail.get_boundary()
# Workaround for http://bugs.python.org/issue14983:
# Insert a newline before the outer mail boundary so that other mail
# clients can verify the signature when sending an email which contains
# attachments.
as_string = re.sub(r'--(\r\n)--' + boundary,
r'--\g<1>\g<1>--' + boundary,
as_string, flags=re.MULTILINE)
return as_string
|
gpl-3.0
| 5,192,336,678,170,296,000
| 32.567742
| 79
| 0.617769
| false
| 3.959665
| false
| false
| false
|
ArcherSys/ArcherSys
|
Lib/unittest/test/testmock/__init__.py
|
1
|
1535
|
<<<<<<< HEAD
<<<<<<< HEAD
import os
import sys
import unittest
here = os.path.dirname(__file__)
loader = unittest.defaultTestLoader
def load_tests(*args):
suite = unittest.TestSuite()
for fn in os.listdir(here):
if fn.startswith("test") and fn.endswith(".py"):
modname = "unittest.test.testmock." + fn[:-3]
__import__(modname)
module = sys.modules[modname]
suite.addTest(loader.loadTestsFromModule(module))
return suite
=======
import os
import sys
import unittest
here = os.path.dirname(__file__)
loader = unittest.defaultTestLoader
def load_tests(*args):
suite = unittest.TestSuite()
for fn in os.listdir(here):
if fn.startswith("test") and fn.endswith(".py"):
modname = "unittest.test.testmock." + fn[:-3]
__import__(modname)
module = sys.modules[modname]
suite.addTest(loader.loadTestsFromModule(module))
return suite
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
import os
import sys
import unittest
here = os.path.dirname(__file__)
loader = unittest.defaultTestLoader
def load_tests(*args):
suite = unittest.TestSuite()
for fn in os.listdir(here):
if fn.startswith("test") and fn.endswith(".py"):
modname = "unittest.test.testmock." + fn[:-3]
__import__(modname)
module = sys.modules[modname]
suite.addTest(loader.loadTestsFromModule(module))
return suite
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
mit
| -7,664,057,494,624,798,000
| 25.929825
| 61
| 0.631922
| false
| 3.753056
| true
| false
| false
|
Alaxe/judgeSystem
|
users/migrations/0003_auto_20150628_1423.py
|
1
|
1618
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150628_1254'),
]
operations = [
migrations.RemoveField(
model_name='userproblemdata',
name='problem',
),
migrations.RemoveField(
model_name='userproblemdata',
name='user',
),
migrations.RemoveField(
model_name='userstatts',
name='user',
),
migrations.AlterField(
model_name='confirmation',
name='code',
field=models.CharField(max_length=32, default='1S5YH6W2QZM6M2CAON7SRYVOHW3QGJ6L'),
),
migrations.AlterField(
model_name='confirmation',
name='created',
field=models.DateTimeField(default=datetime.datetime(2015, 6, 28, 11, 23, 5, 785908, tzinfo=utc)),
),
migrations.AlterField(
model_name='passreset',
name='code',
field=models.CharField(max_length=32, default='5OXTRMZ5U464J91IFWXJFTODJSWGI8YW'),
),
migrations.AlterField(
model_name='passreset',
name='created',
field=models.DateTimeField(default=datetime.datetime(2015, 6, 28, 11, 23, 5, 786551, tzinfo=utc)),
),
migrations.DeleteModel(
name='UserProblemData',
),
migrations.DeleteModel(
name='UserStatts',
),
]
|
gpl-2.0
| -1,409,407,466,053,704,200
| 28.962963
| 110
| 0.566749
| false
| 4.085859
| false
| false
| false
|
prestodb/presto-admin
|
tests/unit/test_topology.py
|
1
|
4727
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the presto topology config
"""
import unittest
from mock import patch
from fabric.state import env
from prestoadmin import topology
from prestoadmin.standalone import config
from prestoadmin.standalone.config import StandaloneConfig
from prestoadmin.util.exception import ConfigurationError
from tests.unit.base_unit_case import BaseUnitCase
class TestTopologyConfig(BaseUnitCase):
def setUp(self):
super(TestTopologyConfig, self).setUp(capture_output=True)
@patch('tests.unit.test_topology.StandaloneConfig._get_conf_from_file')
def test_fill_conf(self, get_conf_from_file_mock):
get_conf_from_file_mock.return_value = \
{"username": "john", "port": "100"}
config = StandaloneConfig()
conf = config.read_conf()
self.assertEqual(conf, {"username": "john", "port": 100,
"coordinator": "localhost",
"workers": ["localhost"]})
def test_invalid_property(self):
conf = {"username": "me",
"port": "1234",
"coordinator": "coordinator",
"workers": ["node1", "node2"],
"invalid property": "fake"}
self.assertRaisesRegexp(ConfigurationError,
"Invalid property: invalid property",
config.validate, conf)
def test_basic_valid_conf(self):
conf = {"username": "user",
"port": 1234,
"coordinator": "my.coordinator",
"workers": ["my.worker1", "my.worker2", "my.worker3"]}
self.assertEqual(config.validate(conf.copy()), conf)
def test_valid_string_port_to_int(self):
conf = {'username': 'john',
'port': '123',
'coordinator': 'master',
'workers': ['worker1', 'worker2']}
validated_conf = config.validate(conf.copy())
self.assertEqual(validated_conf['port'], 123)
def test_empty_host(self):
self.assertRaisesRegexp(ConfigurationError,
"'' is not a valid ip address or host name",
config.validate_coordinator, (""))
def test_valid_workers(self):
workers = ["172.16.1.10", "myslave", "FE80::0202:B3FF:FE1E:8329"]
self.assertEqual(config.validate_workers(workers), workers)
def test_no_workers(self):
self.assertRaisesRegexp(ConfigurationError,
"Must specify at least one worker",
config.validate_workers, ([]))
def test_invalid_workers_type(self):
self.assertRaisesRegexp(ConfigurationError,
"Workers must be of type list. "
"Found <type 'str'>",
config.validate_workers, ("not a list"))
def test_invalid_coordinator_type(self):
self.assertRaisesRegexp(ConfigurationError,
"Host must be of type string. "
"Found <type 'list'>",
config.validate_coordinator,
(["my", "list"]))
def test_validate_workers_for_prompt(self):
workers_input = "172.16.1.10 myslave FE80::0202:B3FF:FE1E:8329"
workers_list = ["172.16.1.10", "myslave", "FE80::0202:B3FF:FE1E:8329"]
self.assertEqual(config.validate_workers_for_prompt(workers_input),
workers_list)
def test_show(self):
env.roledefs = {'coordinator': ['hello'], 'worker': ['a', 'b'],
'all': ['a', 'b', 'hello']}
env.user = 'user'
env.port = '22'
self.remove_runs_once_flag(topology.show)
topology.show()
self.assertEqual("", self.test_stderr.getvalue())
self.assertEqual("{'coordinator': 'hello',\n 'port': '22',\n "
"'username': 'user',\n 'workers': ['a',\n"
" 'b']}\n",
self.test_stdout.getvalue())
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| -4,119,446,143,831,137,300
| 38.391667
| 78
| 0.562513
| false
| 4.231871
| true
| false
| false
|
lol/BCI-BO-old
|
plot_iii3b_old.py
|
1
|
4787
|
import numpy as np
import matplotlib.pyplot as plt
import math
from pylab import figure
from my_plotter import *
import os
import sys
sys.path.append('./BCI_Framework')
import Main
import Single_Job_runner as SJR
import os
import re
if __name__ == '__main__':
bciciv1 = Main.Main('BCI_Framework','BCICIII3b','RANDOM_FOREST', 'BP', 'ALL', -1, 'python')
res_path = bciciv1.config.configuration['results_opt_path_str']
classifiers_dict = {'Boosting':0, 'LogisticRegression':1, 'RANDOM_FOREST':2,'SVM':3, 'LDA':4, 'QDA':5 , 'MLP':6}
features_dict = {'BP':0, 'logbp':1, 'wackerman':2, 'morlet':3, 'AR':4}
results = np.zeros((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]))
discarded_periods = np.empty((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]), dtype='S10')
subjects_dict = {}
for ind, subj in enumerate(bciciv1.config.configuration["subject_names_str"]):
subjects_dict.update({subj:ind})
for dirname, dirnames, filenames in os.walk(res_path):
# for subdirname in dirnames:
# fold_name = os.path.join(dirname, subdirname)
# print fold_name
for filename in filenames:
# slash_indices = re.search('0', filename)
if filename[-4:] != '.pkl':
file_name = os.path.join(dirname, filename)
backslash_indices = [m.start() for m in re.finditer("\\\\", file_name)]
underline_indices = [m.start() for m in re.finditer("_", file_name)]
feature_ext_name = file_name[backslash_indices[-2]+1:backslash_indices[-1]]
classifier_name = file_name[backslash_indices[-3]+1:backslash_indices[-2]]
subj = file_name[underline_indices[-1]+1:-4]
# print feature_ext_name, classifier_name, subj
npzfile = np.load(file_name)
error = npzfile['error']
accuracy = 100 - error*100
results[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = accuracy
discarded_periods[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = file_name[backslash_indices[-1]+1:underline_indices[2]]
# with open(file_name,'r') as my_file:
#
# error = float(my_file.readline())
# accuracy = 100 - error*100
# results[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = accuracy
## print file_name[backslash_indices[-1]+1:underline_indices[1]]
# discarded_periods[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = file_name[backslash_indices[-1]+1:underline_indices[2]]
#
# print backslash_indices
for feature in features_dict.keys():
f_ind = features_dict[feature]
feature_ext_y = []
labels = []
for subject in subjects_dict.keys():
subj_ind = subjects_dict[subject]
feature_ext_y.append(tuple(results[:,f_ind,subj_ind]))
labels.append(feature + '_' + subject)
# plotter( feature_ext_y, math.floor(np.min(feature_ext_y) - 1), math.floor(np.max(feature_ext_y) + 1), feature, labels)
plotter( feature_ext_y, 46, 97, feature, labels)
for subject in subjects_dict.keys():
for feature in features_dict.keys():
print subject, feature, discarded_periods[:, features_dict[feature],subjects_dict[subject]]
# BP_y = [(72.96,78.62,78.62,76.11,79.25,79.88), (64.45,65.38,65.75,65.00,67.04,66.67), (69.45,71.86,74.26,72.04,69.75,72.6)]
# labels = ['BP_O3','BP_S4','BP_X11']
# plotter( BP_y, 64, 81, 'BP', labels)
# logBP_y = [(74.22,79.25,79.25,77.36,81.77,81.77), (62.23,66.49,66.30,65.38,66.86,66.86), (69.82,72.97,73.15,71.86,74.63,74.63)]
# labels = ['LOGBP_O3','LOGBP_S4','LOGBP_X11']
# plotter( logBP_y, 61, 84, 'logBP', labels)
# wackermann_y = [(56.61,57.24,58.24,54.72,54.72,59.75), (57.97,57.6,59.82,55.75,57.97,58.71), (60,50,57.24,61.49,60.56,62.23)]
# labels = ['wackerman_O3','wackerman_S4','wackerman_X11']
# plotter( wackermann_y, 49, 65, 'wackerman', labels)
# y_RF = [(77.98,76.72,76.72,79.87), (70.74,74.44,80.92,75.18),(75.92,73.51,77.03,78.33),(76.11,77.36,58.5, 54.72), (65,65.38,53.34,55.75), (72.04,71.86,60,61.49)]
# labels = ['BO_RF_O3','BO_RF_S4','BO_RF_X11','RF_grid_search_O3','RF_grid_search_S4','RF_grid_search_X11']
# BO_plotter( y_RF, 49, 83, 'BO_RF', labels)
plt.show()
|
gpl-3.0
| -2,834,472,925,079,466,500
| 46.88
| 180
| 0.597034
| false
| 2.97514
| true
| false
| false
|
coreknowledge2016/multi-agent-hrl
|
learning.py
|
1
|
7187
|
#from flat_game import carmunk
import carmunk
import numpy as np
import random
import csv
from nn import neural_net, LossHistory
import os.path
import timeit
NUM_INPUT = 6
GAMMA = 0.9 # Forgetting.
TUNING = False # If False, just use arbitrary, pre-selected params.
def train_net(model, params):
filename = params_to_filename(params)
observe = 1000 # Number of frames to observe before training.
epsilon = 1
train_frames = 1000000 # Number of frames to play.
batchSize = params['batchSize']
buffer = params['buffer']
# Just stuff used below.
max_car_distance = 0
car_distance = 0
t = 0
data_collect = []
replay = [] # stores tuples of (S, A, R, S').
loss_log = []
# Create a new game instance.
game_state = carmunk.GameState()
# Get initial state by doing nothing and getting the state.
_, state = game_state.frame_step(2,2)
# Let's time it.
start_time = timeit.default_timer()
# Run the frames.
while t < train_frames:
t += 1
car_distance += 1
# Choose an action.
if random.random() < epsilon or t < observe:
action = np.random.randint(0, 3) # random
action2 = np.random.randint(0, 3)
else:
# Get Q values for each action.
state = state.reshape(1,NUM_INPUT) # reshape
qval = model.predict(state, batch_size=1)
action = (np.argmax(qval)) # best
# Take action, observe new state and get our treat.
reward, new_state = game_state.frame_step(action, action2)
# Experience replay storage.
replay.append((state, action, action2, reward, new_state))
# If we're done observing, start training.
if t > observe:
# If we've stored enough in our buffer, pop the oldest.
if len(replay) > buffer:
replay.pop(0)
# Randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
# Get training values.
X_train, y_train = process_minibatch(minibatch, model)
# Train the model on this batch.
history = LossHistory()
model.fit(
X_train, y_train, batch_size=batchSize,
nb_epoch=1, verbose=0, callbacks=[history]
)
loss_log.append(history.losses)
# Update the starting state with S'.
state = new_state
# Decrement epsilon over time.
if epsilon > 0.1 and t > observe:
epsilon -= (1/train_frames)
# We died, so update stuff.
if reward == -500:
# Log the car's distance at this T.
data_collect.append([t, car_distance])
# Update max.
if car_distance > max_car_distance:
max_car_distance = car_distance
# Time it.
tot_time = timeit.default_timer() - start_time
fps = car_distance / tot_time
# Output some stuff so we can watch.
print("Max: %d at %d\tepsilon %f\t(%d)\t%f fps" %
(max_car_distance, t, epsilon, car_distance, fps))
# Reset.
car_distance = 0
start_time = timeit.default_timer()
# Save the model every 25,000 frames.
if t % 25000 == 0:
model.save_weights('saved-models/' + filename + '-' +
str(t) + '.h5',
overwrite=True)
print("Saving model %s - %d" % (filename, t))
# Log results after we're done all frames.
log_results(filename, data_collect, loss_log)
def log_results(filename, data_collect, loss_log):
# Save the results to a file so we can graph it later.
with open('results/sonar-frames/learn_data-' + filename + '.csv', 'w') as data_dump:
wr = csv.writer(data_dump)
wr.writerows(data_collect)
with open('results/sonar-frames/loss_data-' + filename + '.csv', 'w') as lf:
wr = csv.writer(lf)
for loss_item in loss_log:
wr.writerow(loss_item)
def process_minibatch(minibatch, model):
"""This does the heavy lifting, aka, the training. It's super jacked."""
X_train = []
y_train = []
# Loop through our batch and create arrays for X and y
# so that we can fit our model at every step.
for memory in minibatch:
# Get stored values.
old_state_m, action_m, action2_m, reward_m, new_state_m = memory
old_state_m = old_state_m.reshape(1,NUM_INPUT)
new_state_m = new_state_m.reshape(1,NUM_INPUT)
#print old_state_m,new_state_m
# Get prediction on old state.
old_qval = model.predict(old_state_m, batch_size=1)
# Get prediction on new state.
newQ = model.predict(new_state_m, batch_size=1)
# Get our best move. I think?
maxQ = np.max(newQ)
y = np.zeros((1, 3))
y[:] = old_qval[:]
# Check for terminal state.
if reward_m != -500: # non-terminal state
update = (reward_m + (GAMMA * maxQ))
else: # terminal state
update = reward_m
# Update the value for the action we took.
y[0][action_m] = update
X_train.append(old_state_m.reshape(NUM_INPUT,))
y_train.append(y.reshape(3,))
X_train = np.array(X_train)
y_train = np.array(y_train)
return X_train, y_train
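# Editor's worked example (added for illustration, not part of the original
# script). The update above is the standard Q-learning target:
#
#     target = reward                          if the transition is terminal
#     target = reward + GAMMA * max(newQ)      otherwise
#
# e.g. with reward_m = 10, GAMMA = 0.9 and newQ = [1.0, 4.0, 2.0], the target
# written into y[0][action_m] is 10 + 0.9 * 4.0 = 13.6, while the other two
# action values keep their old predictions.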
def params_to_filename(params):
return str(params['nn'][0]) + '-' + str(params['nn'][1]) + '-' + \
str(params['batchSize']) + '-' + str(params['buffer'])
def launch_learn(params):
filename = params_to_filename(params)
print("Trying %s" % filename)
# Make sure we haven't run this one.
if not os.path.isfile('results/sonar-frames/loss_data-' + filename + '.csv'):
# Create file so we don't double test when we run multiple
# instances of the script at the same time.
open('results/sonar-frames/loss_data-' + filename + '.csv', 'a').close()
print("Starting test.")
# Train.
model = neural_net(NUM_INPUT, params['nn'])
train_net(model, params)
else:
print("Already tested.")
if __name__ == "__main__":
if TUNING:
param_list = []
nn_params = [[164, 150], [256, 256],
[512, 512], [1000, 1000]]
batchSizes = [40, 100, 400]
buffers = [10000, 50000]
for nn_param in nn_params:
for batchSize in batchSizes:
for buffer in buffers:
params = {
"batchSize": batchSize,
"buffer": buffer,
"nn": nn_param
}
param_list.append(params)
for param_set in param_list:
launch_learn(param_set)
else:
nn_param = [164, 150]
params = {
"batchSize": 100,
"buffer": 200,
"nn": nn_param
}
model = neural_net(NUM_INPUT, nn_param)
train_net(model, params)
# keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0)
|
mit
| 8,669,900,673,622,070,000
| 30.384279
| 88
| 0.554056
| false
| 3.69132
| false
| false
| false
|
murych/lambdaweb
|
team/migrations/0001_initial.py
|
1
|
6857
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-09 17:59
from __future__ import unicode_literals
import ckeditor_uploader.fields
import colorfield.fields
import django.db.models.deletion
import filebrowser.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=254, unique=True)),
('git_username', models.CharField(max_length=300, verbose_name='Git username')),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True,
help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set', related_query_name='user', to='auth.Group',
verbose_name='Группа')),
('date_of_birth', models.DateField(blank=True, null=True, verbose_name='Дата рождения')),
('first_name', models.CharField(blank=True, max_length=300, null=True, verbose_name='Имя')),
('last_name', models.CharField(blank=True, max_length=300, null=True, verbose_name='Фамилия')),
('profile_image', filebrowser.fields.FileBrowseField(blank=True, max_length=200, null=True,
verbose_name='Изображения профиля')),
],
options={
'abstract': False,
'verbose_name': 'Участника',
'verbose_name_plural': 'Участники'
},
),
migrations.CreateModel(
name='SocialNetwork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(
choices=[('mdi-github-circle', 'GitHub'), ('mdi-twitter', 'Twitter'), ('mdi-gmail', 'Mail'),
('mdi-vk', 'Vk'), ('mdi-facebook', 'Facebook')], max_length=300,
verbose_name='Название социальной сети')),
('link', models.CharField(max_length=300, verbose_name='Ссылка на профиль')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'социальных сетей',
'verbose_name_plural': 'социальных сетей'
}
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, verbose_name='Название')),
('description', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Описание')),
('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Участники проекта')),
('git', models.URLField(blank=True, null=True, verbose_name='Cсылка на Git')),
('image', filebrowser.fields.FileBrowseField(blank=True, max_length=200, null=True,
verbose_name='Главное изображение')),
],
options={
'verbose_name': 'Проект',
'verbose_name_plural': 'Проекты'
}
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, verbose_name='Название партнера')),
('type_partner', models.CharField(
choices=[('info', 'Информационный'), ('finance', 'Финансовый'), ('general', 'Генеральный')],
max_length=300, verbose_name='Тип партнера')),
('description', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Описание')),
('address', models.CharField(blank=True, max_length=500, null=True, verbose_name='Адрес')),
('site', models.CharField(max_length=500, verbose_name='Сайт')),
('phone', models.CharField(blank=True, max_length=500, null=True, verbose_name='Телефон')),
('image', filebrowser.fields.FileBrowseField(blank=True, max_length=200, null=True,
verbose_name='Изображение')),
('slug', models.SlugField()),
],
options={
'verbose_name': 'Партнер',
'verbose_name_plural': 'Партнеры',
},
),
migrations.CreateModel(
name='SEO',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('seo_description', models.TextField(verbose_name='SEO Описание')),
('key_words', models.TextField(verbose_name='Ключ слова')),
],
options={
'verbose_name': 'SEO',
'verbose_name_plural': 'SEO',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, verbose_name='Название')),
('color', colorfield.fields.ColorField(default='#FF0000', max_length=10)),
],
options={
'verbose_name': 'Тэг',
'verbose_name_plural': 'Тэги',
},
),
]
|
mit
| 8,095,969,056,012,038,000
| 49.48062
| 160
| 0.534091
| false
| 3.808187
| false
| false
| false
|
MalmoUniversity-DA366A/calvin-base
|
calvin/tutorial/dist-1.py
|
1
|
1306
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.utilities.nodecontrol import dispatch_node
from calvin.utilities import utils
import time
# create one node
node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001",
attributes=["node/affiliation/owner/me", "node/affiliation/name/node-1"])
# send 'new actor' command to node
counter_id = utils.new_actor(node_1, 'std.Counter', 'counter')
# send 'new actor' command to node
output_id = utils.new_actor(node_1, 'io.StandardOut', 'output')
# send 'connect' command to node
utils.connect(node_1, output_id, 'token', node_1.id, counter_id, 'integer')
# run the app for 3 seconds
time.sleep(3)
# send quit to node
utils.quit(node_1)
|
apache-2.0
| 3,418,883,178,260,327,400
| 33.368421
| 96
| 0.725881
| false
| 3.455026
| false
| false
| false
|
arunkgupta/gramps
|
gramps/plugins/tool/extractcity.py
|
1
|
26002
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Tools/Database Processing/Extract Place Data from a Place Title"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
import re
from gramps.gen.ggettext import gettext as _
#-------------------------------------------------------------------------
#
# gnome/gtk
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.db import DbTxn
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.display import display_help
from gramps.gui.plug import tool
from gramps.gui.utils import ProgressMeter
from gramps.gui.glade import Glade
CITY_STATE_ZIP = re.compile("((\w|\s)+)\s*,\s*((\w|\s)+)\s*(,\s*((\d|-)+))", re.UNICODE)
CITY_STATE = re.compile("((?:\w|\s)+(?:-(?:\w|\s)+)*),((?:\w|\s)+)", re.UNICODE)
CITY_LAEN = re.compile("((?:\w|\s)+(?:-(?:\w|\s)+)*)\(((?:\w|\s)+)", re.UNICODE)
STATE_ZIP = re.compile("(.+)\s+([\d-]+)", re.UNICODE)
COUNTRY = ( _(u"United States of America"), _(u"Canada"), _(u"France"),_(u"Sweden"))
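# Editor's note (added for illustration, not part of the original plugin): the
# regular expressions above are intended to pick apart place titles such as
#
#     "Portland, Maine, 04101"   -> CITY_STATE_ZIP (city, state, zip)
#     "Portland, Maine"          -> CITY_STATE     (city, state)
#     "Lund (Skåne)"             -> CITY_LAEN      (city, laen/county)
#     "Maine 04101"              -> STATE_ZIP      (state, zip)
#
# STATE_MAP below normalises the extracted state/province/region name or
# abbreviation and records the COUNTRY index it belongs to.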
STATE_MAP = {
u"AL" : (u"Alabama", 0),
u"AL." : (u"Alabama", 0),
u"ALABAMA" : (u"Alabama", 0),
u"AK" : (u"Alaska" , 0),
u"AK." : (u"Alaska" , 0),
u"ALASKA" : (u"Alaska" , 0),
u"AS" : (u"American Samoa", 0),
u"AS." : (u"American Samoa", 0),
u"AMERICAN SAMOA": (u"American Samoa", 0),
u"AZ" : (u"Arizona", 0),
u"AZ." : (u"Arizona", 0),
u"ARIZONA" : (u"Arizona", 0),
u"AR" : (u"Arkansas" , 0),
u"AR." : (u"Arkansas" , 0),
u"ARKANSAS" : (u"Arkansas" , 0),
u"ARK." : (u"Arkansas" , 0),
u"ARK" : (u"Arkansas" , 0),
u"CA" : (u"California" , 0),
u"CA." : (u"California" , 0),
u"CALIFORNIA" : (u"California" , 0),
u"CO" : (u"Colorado" , 0),
u"COLO" : (u"Colorado" , 0),
u"COLO." : (u"Colorado" , 0),
u"COLORADO" : (u"Colorado" , 0),
u"CT" : (u"Connecticut" , 0),
u"CT." : (u"Connecticut" , 0),
u"CONNECTICUT" : (u"Connecticut" , 0),
u"DE" : (u"Delaware" , 0),
u"DE." : (u"Delaware" , 0),
u"DELAWARE" : (u"Delaware" , 0),
u"DC" : (u"District of Columbia" , 0),
u"D.C." : (u"District of Columbia" , 0),
u"DC." : (u"District of Columbia" , 0),
u"DISTRICT OF COLUMBIA" : (u"District of Columbia" , 0),
u"FL" : (u"Florida" , 0),
u"FL." : (u"Florida" , 0),
u"FLA" : (u"Florida" , 0),
u"FLA." : (u"Florida" , 0),
u"FLORIDA" : (u"Florida" , 0),
u"GA" : (u"Georgia" , 0),
u"GA." : (u"Georgia" , 0),
u"GEORGIA" : (u"Georgia" , 0),
u"GU" : (u"Guam" , 0),
u"GU." : (u"Guam" , 0),
u"GUAM" : (u"Guam" , 0),
u"HI" : (u"Hawaii" , 0),
u"HI." : (u"Hawaii" , 0),
u"HAWAII" : (u"Hawaii" , 0),
u"ID" : (u"Idaho" , 0),
u"ID." : (u"Idaho" , 0),
u"IDAHO" : (u"Idaho" , 0),
u"IL" : (u"Illinois" , 0),
u"IL." : (u"Illinois" , 0),
u"ILLINOIS" : (u"Illinois" , 0),
u"ILL" : (u"Illinois" , 0),
u"ILL." : (u"Illinois" , 0),
u"ILLS" : (u"Illinois" , 0),
u"ILLS." : (u"Illinois" , 0),
u"IN" : (u"Indiana" , 0),
u"IN." : (u"Indiana" , 0),
u"INDIANA" : (u"Indiana" , 0),
u"IA" : (u"Iowa" , 0),
u"IA." : (u"Iowa" , 0),
u"IOWA" : (u"Iowa" , 0),
u"KS" : (u"Kansas" , 0),
u"KS." : (u"Kansas" , 0),
u"KANSAS" : (u"Kansas" , 0),
u"KY" : (u"Kentucky" , 0),
u"KY." : (u"Kentucky" , 0),
u"KENTUCKY" : (u"Kentucky" , 0),
u"LA" : (u"Louisiana" , 0),
u"LA." : (u"Louisiana" , 0),
u"LOUISIANA" : (u"Louisiana" , 0),
u"ME" : (u"Maine" , 0),
u"ME." : (u"Maine" , 0),
u"MAINE" : (u"Maine" , 0),
u"MD" : (u"Maryland" , 0),
u"MD." : (u"Maryland" , 0),
u"MARYLAND" : (u"Maryland" , 0),
u"MA" : (u"Massachusetts" , 0),
u"MA." : (u"Massachusetts" , 0),
u"MASSACHUSETTS" : (u"Massachusetts" , 0),
u"MI" : (u"Michigan" , 0),
u"MI." : (u"Michigan" , 0),
u"MICH." : (u"Michigan" , 0),
u"MICH" : (u"Michigan" , 0),
u"MN" : (u"Minnesota" , 0),
u"MN." : (u"Minnesota" , 0),
u"MINNESOTA" : (u"Minnesota" , 0),
u"MS" : (u"Mississippi" , 0),
u"MS." : (u"Mississippi" , 0),
u"MISSISSIPPI" : (u"Mississippi" , 0),
u"MO" : (u"Missouri" , 0),
u"MO." : (u"Missouri" , 0),
u"MISSOURI" : (u"Missouri" , 0),
u"MT" : (u"Montana" , 0),
u"MT." : (u"Montana" , 0),
u"MONTANA" : (u"Montana" , 0),
u"NE" : (u"Nebraska" , 0),
u"NE." : (u"Nebraska" , 0),
u"NEBRASKA" : (u"Nebraska" , 0),
u"NV" : (u"Nevada" , 0),
u"NV." : (u"Nevada" , 0),
u"NEVADA" : (u"Nevada" , 0),
u"NH" : (u"New Hampshire" , 0),
u"NH." : (u"New Hampshire" , 0),
u"N.H." : (u"New Hampshire" , 0),
u"NEW HAMPSHIRE" : (u"New Hampshire" , 0),
u"NJ" : (u"New Jersey" , 0),
u"NJ." : (u"New Jersey" , 0),
u"N.J." : (u"New Jersey" , 0),
u"NEW JERSEY" : (u"New Jersey" , 0),
u"NM" : (u"New Mexico" , 0),
u"NM." : (u"New Mexico" , 0),
u"NEW MEXICO" : (u"New Mexico" , 0),
u"NY" : (u"New York" , 0),
u"N.Y." : (u"New York" , 0),
u"NY." : (u"New York" , 0),
u"NEW YORK" : (u"New York" , 0),
u"NC" : (u"North Carolina" , 0),
u"NC." : (u"North Carolina" , 0),
u"N.C." : (u"North Carolina" , 0),
u"NORTH CAROLINA": (u"North Carolina" , 0),
u"ND" : (u"North Dakota" , 0),
u"ND." : (u"North Dakota" , 0),
u"N.D." : (u"North Dakota" , 0),
u"NORTH DAKOTA" : (u"North Dakota" , 0),
u"OH" : (u"Ohio" , 0),
u"OH." : (u"Ohio" , 0),
u"OHIO" : (u"Ohio" , 0),
u"OK" : (u"Oklahoma" , 0),
u"OKLA" : (u"Oklahoma" , 0),
u"OKLA." : (u"Oklahoma" , 0),
u"OK." : (u"Oklahoma" , 0),
u"OKLAHOMA" : (u"Oklahoma" , 0),
u"OR" : (u"Oregon" , 0),
u"OR." : (u"Oregon" , 0),
u"OREGON" : (u"Oregon" , 0),
u"PA" : (u"Pennsylvania" , 0),
u"PA." : (u"Pennsylvania" , 0),
u"PENNSYLVANIA" : (u"Pennsylvania" , 0),
u"PR" : (u"Puerto Rico" , 0),
u"PUERTO RICO" : (u"Puerto Rico" , 0),
u"RI" : (u"Rhode Island" , 0),
u"RI." : (u"Rhode Island" , 0),
u"R.I." : (u"Rhode Island" , 0),
u"RHODE ISLAND" : (u"Rhode Island" , 0),
u"SC" : (u"South Carolina" , 0),
u"SC." : (u"South Carolina" , 0),
u"S.C." : (u"South Carolina" , 0),
u"SOUTH CAROLINA": (u"South Carolina" , 0),
u"SD" : (u"South Dakota" , 0),
u"SD." : (u"South Dakota" , 0),
u"S.D." : (u"South Dakota" , 0),
u"SOUTH DAKOTA" : (u"South Dakota" , 0),
u"TN" : (u"Tennessee" , 0),
u"TN." : (u"Tennessee" , 0),
u"TENNESSEE" : (u"Tennessee" , 0),
u"TENN." : (u"Tennessee" , 0),
u"TENN" : (u"Tennessee" , 0),
u"TX" : (u"Texas" , 0),
u"TX." : (u"Texas" , 0),
u"TEXAS" : (u"Texas" , 0),
u"UT" : (u"Utah" , 0),
u"UT." : (u"Utah" , 0),
u"UTAH" : (u"Utah" , 0),
u"VT" : (u"Vermont" , 0),
u"VT." : (u"Vermont" , 0),
u"VERMONT" : (u"Vermont" , 0),
u"VI" : (u"Virgin Islands" , 0),
u"VIRGIN ISLANDS": (u"Virgin Islands" , 0),
u"VA" : (u"Virginia" , 0),
u"VA." : (u"Virginia" , 0),
u"VIRGINIA" : (u"Virginia" , 0),
u"WA" : (u"Washington" , 0),
u"WA." : (u"Washington" , 0),
u"WASHINGTON" : (u"Washington" , 0),
u"WV" : (u"West Virginia" , 0),
u"WV." : (u"West Virginia" , 0),
u"W.V." : (u"West Virginia" , 0),
u"WEST VIRGINIA" : (u"West Virginia" , 0),
u"WI" : (u"Wisconsin" , 0),
u"WI." : (u"Wisconsin" , 0),
u"WISCONSIN" : (u"Wisconsin" , 0),
u"WY" : (u"Wyoming" , 0),
u"WY." : (u"Wyoming" , 0),
u"WYOMING" : (u"Wyoming" , 0),
u"AB" : (u"Alberta", 1),
u"AB." : (u"Alberta", 1),
u"ALBERTA" : (u"Alberta", 1),
u"BC" : (u"British Columbia", 1),
u"BC." : (u"British Columbia", 1),
u"B.C." : (u"British Columbia", 1),
u"MB" : (u"Manitoba", 1),
u"MB." : (u"Manitoba", 1),
u"MANITOBA" : (u"Manitoba", 1),
u"NB" : (u"New Brunswick", 1),
u"N.B." : (u"New Brunswick", 1),
u"NB." : (u"New Brunswick", 1),
u"NEW BRUNSWICK" : (u"New Brunswick", 1),
u"NL" : (u"Newfoundland and Labrador", 1),
u"NL." : (u"Newfoundland and Labrador", 1),
u"N.L." : (u"Newfoundland and Labrador", 1),
u"NEWFOUNDLAND" : (u"Newfoundland and Labrador", 1),
u"NEWFOUNDLAND AND LABRADOR" : (u"Newfoundland and Labrador", 1),
u"LABRADOR" : (u"Newfoundland and Labrador", 1),
u"NT" : (u"Northwest Territories", 1),
u"NT." : (u"Northwest Territories", 1),
u"N.T." : (u"Northwest Territories", 1),
u"NORTHWEST TERRITORIES" : (u"Northwest Territories", 1),
u"NS" : (u"Nova Scotia", 1),
u"NS." : (u"Nova Scotia", 1),
u"N.S." : (u"Nova Scotia", 1),
u"NOVA SCOTIA" : (u"Nova Scotia", 1),
u"NU" : (u"Nunavut", 1),
u"NU." : (u"Nunavut", 1),
u"NUNAVUT" : (u"Nunavut", 1),
u"ON" : (u"Ontario", 1),
u"ON." : (u"Ontario", 1),
u"ONTARIO" : (u"Ontario", 1),
u"PE" : (u"Prince Edward Island", 1),
u"PE." : (u"Prince Edward Island", 1),
u"PRINCE EDWARD ISLAND" : (u"Prince Edward Island", 1),
u"QC" : (u"Quebec", 1),
u"QC." : (u"Quebec", 1),
u"QUEBEC" : (u"Quebec", 1),
u"SK" : (u"Saskatchewan", 1),
u"SK." : (u"Saskatchewan", 1),
u"SASKATCHEWAN" : (u"Saskatchewan", 1),
u"YT" : (u"Yukon", 1),
u"YT." : (u"Yukon", 1),
u"YUKON" : (u"Yukon", 1),
u"ALSACE" : (u"Alsace", 2),
u"ALS" : (u"ALS-Alsace", 2),
u"AQUITAINE" : (u"Aquitaine", 2),
u"AQU" : (u"AQU-Aquitaine", 2),
u"AUVERGNE" : (u"Auvergne", 2),
u"AUV" : (u"AUV-Auvergne", 2),
u"BOURGOGNE" : (u"Bourgogne", 2),
u"BOU" : (u"BOU-Bourgogne", 2),
u"BRETAGNE" : (u"Bretagne", 2),
u"BRE" : (u"BRE-Bretagne", 2),
u"CENTRE" : (u"Centre - Val de Loire", 2),
u"CEN" : (u"CEN-Centre - Val de Loire", 2),
u"CHAMPAGNE" : (u"Champagne-Ardennes", 2),
u"CHA" : (u"CHA-Champagne-Ardennes", 2),
u"CORSE" : (u"Corse", 2),
u"COR" : (u"COR-Corse", 2),
u"FRANCHE-COMTE" : (u"Franche-Comté", 2),
u"FCO" : (u"FCO-Franche-Comté", 2),
u"ILE DE FRANCE" : (u"Ile de France", 2),
u"IDF" : (u"IDF-Ile de France", 2),
u"LIMOUSIN" : (u"Limousin", 2),
u"LIM" : (u"LIM-Limousin", 2),
u"LORRAINE" : (u"Lorraine", 2),
u"LOR" : (u"LOR-Lorraine", 2),
u"LANGUEDOC" : (u"Languedoc-Roussillon", 2),
u"LRO" : (u"LRO-Languedoc-Roussillon", 2),
u"MIDI PYRENEE" : (u"Midi-Pyrénée", 2),
u"MPY" : (u"MPY-Midi-Pyrénée", 2),
u"HAUTE NORMANDIE": (u"Haute Normandie", 2),
u"NOH" : (u"NOH-Haute Normandie", 2),
u"BASSE NORMANDIE": (u"Basse Normandie", 2),
u"NOB" : (u"NOB-Basse Normandie", 2),
u"NORD PAS CALAIS": (u"Nord-Pas de Calais", 2),
u"NPC" : (u"NPC-Nord-Pas de Calais", 2),
u"PROVENCE" : (u"Provence-Alpes-Côte d'Azur", 2),
u"PCA" : (u"PCA-Provence-Alpes-Côte d'Azur", 2),
u"POITOU-CHARENTES": (u"Poitou-Charentes", 2),
u"PCH" : (u"PCH-Poitou-Charentes", 2),
u"PAYS DE LOIRE" : (u"Pays de Loire", 2),
u"PDL" : (u"PDL-Pays de Loire", 2),
u"PICARDIE" : (u"Picardie", 2),
u"PIC" : (u"PIC-Picardie", 2),
u"RHONE-ALPES" : (u"Rhône-Alpes", 2),
u"RAL" : (u"RAL-Rhône-Alpes", 2),
u"AOM" : (u"AOM-Autres Territoires d'Outre-Mer", 2),
u"COM" : (u"COM-Collectivité Territoriale d'Outre-Mer", 2),
u"DOM" : (u"DOM-Départements d'Outre-Mer", 2),
u"TOM" : (u"TOM-Territoires d'Outre-Mer", 2),
u"GUA" : (u"GUA-Guadeloupe", 2),
u"GUADELOUPE" : (u"Guadeloupe", 2),
u"MAR" : (u"MAR-Martinique", 2),
u"MARTINIQUE" : (u"Martinique", 2),
u"GUY" : (u"GUY-Guyane", 2),
u"GUYANE" : (u"Guyane", 2),
u"REU" : (u"REU-Réunion", 2),
u"REUNION" : (u"Réunion", 2),
u"MIQ" : (u"MIQ-Saint-Pierre et Miquelon", 2),
u"MIQUELON" : (u"Saint-Pierre et Miquelon", 2),
u"MAY" : (u"MAY-Mayotte", 2),
u"MAYOTTE" : (u"Mayotte", 2),
u"(A)" : (u"Stockholms stad", 3),
u"(AB)" : (u"Stockholms stad/län", 3),
u"(B)" : (u"Stockholms län", 3),
u"(C)" : (u"Uppsala län", 3),
u"(D)" : (u"Södermanlands län", 3),
u"(E)" : (u"Östergötlands län", 3),
u"(F)" : (u"Jönköpings län", 3),
u"(G)" : (u"Kronobergs län", 3),
u"(H)" : (u"Kalmar län", 3),
u"(I)" : (u"Gotlands län", 3),
u"(K)" : (u"Blekinge län", 3),
u"(L)" : (u"Kristianstads län", 3),
u"(M)" : (u"Malmöhus län", 3),
u"(N)" : (u"Hallands län", 3),
u"(O)" : (u"Göteborgs- och Bohuslän", 3),
u"(P)" : (u"Älvsborgs län", 3),
u"(R)" : (u"Skaraborg län", 3),
u"(S)" : (u"Värmlands län", 3),
u"(T)" : (u"Örebro län", 3),
u"(U)" : (u"Västmanlands län", 3),
u"(W)" : (u"Kopparbergs län", 3),
u"(X)" : (u"Gävleborgs län", 3),
u"(Y)" : (u"Västernorrlands län", 3),
u"(AC)" : (u"Västerbottens län", 3),
u"(BD)" : (u"Norrbottens län", 3),
}
COLS = [
(_('Place title'), 1),
(_('City'), 2),
(_('State'), 3),
(_('ZIP/Postal Code'), 4),
(_('Country'), 5)
]
#-------------------------------------------------------------------------
#
# ExtractCity
#
#-------------------------------------------------------------------------
class ExtractCity(tool.BatchTool, ManagedWindow):
"""
Extracts city, state, and zip code information from a place description
if the title is empty and the description falls into the category of:
New York, NY 10000
Sorry for those not in the US or Canada. I doubt this will work for any
other locales.
Works for Sweden if the description is like
Stockholm (A)
where the letter A is the abbreviation letter for laen.
Works for France if the description is like
Paris, IDF 75000, FRA
or Paris, ILE DE FRANCE 75000, FRA
"""
def __init__(self, dbstate, uistate, options_class, name, callback=None):
self.label = _('Extract Place data')
ManagedWindow.__init__(self, uistate, [], self.__class__)
self.set_window(Gtk.Window(), Gtk.Label(), '')
tool.BatchTool.__init__(self, dbstate, options_class, name)
if not self.fail:
uistate.set_busy_cursor(True)
self.run(dbstate.db)
uistate.set_busy_cursor(False)
def run(self, db):
"""
Performs the actual extraction of information
"""
self.progress = ProgressMeter(_('Checking Place Titles'), '')
self.progress.set_pass(_('Looking for place fields'),
self.db.get_number_of_places())
self.name_list = []
for place in db.iter_places():
descr = place.get_title()
loc = place.get_main_location()
self.progress.step()
if loc.get_street() == loc.get_city() == \
loc.get_state() == loc.get_postal_code() == "":
match = CITY_STATE_ZIP.match(descr.strip())
if match:
data = match.groups()
city = data[0]
state = data[2]
postal = data[5]
val = " ".join(state.strip().split()).upper()
if state:
new_state = STATE_MAP.get(val.upper())
if new_state:
self.name_list.append(
(place.handle, (city, new_state[0], postal,
COUNTRY[new_state[1]])))
continue
# Check if there is a left parenthesis in the string; might be a Swedish laen.
match = CITY_LAEN.match(descr.strip().replace(","," "))
if match:
data = match.groups()
city = data[0]
state = '(' + data[1] + ')'
postal = None
val = " ".join(state.strip().split()).upper()
if state:
new_state = STATE_MAP.get(val.upper())
if new_state:
self.name_list.append(
(place.handle, (city, new_state[0], postal,
COUNTRY[new_state[1]])))
continue
match = CITY_STATE.match(descr.strip())
if match:
data = match.groups()
city = data[0]
state = data[1]
postal = None
if state:
m0 = STATE_ZIP.match(state)
if m0:
(state, postal) = m0.groups()
val = " ".join(state.strip().split()).upper()
if state:
new_state = STATE_MAP.get(val.upper())
if new_state:
self.name_list.append(
(place.handle, (city, new_state[0], postal,
COUNTRY[new_state[1]])))
continue
val = " ".join(descr.strip().split()).upper()
new_state = STATE_MAP.get(val)
if new_state:
self.name_list.append(
(place.handle, (None, new_state[0], None,
COUNTRY[new_state[1]])))
self.progress.close()
if self.name_list:
self.display()
else:
self.close()
from gramps.gui.dialog import OkDialog
OkDialog(_('No modifications made'),
_("No place information could be extracted."))
def display(self):
self.top = Glade("changenames.glade")
window = self.top.toplevel
self.top.connect_signals({
"destroy_passed_object" : self.close,
"on_ok_clicked" : self.on_ok_clicked,
"on_help_clicked" : self.on_help_clicked,
"on_delete_event" : self.close,
})
self.list = self.top.get_object("list")
self.set_window(window, self.top.get_object('title'), self.label)
lbl = self.top.get_object('info')
lbl.set_line_wrap(True)
lbl.set_text(
_('Below is a list of Places with the possible data that can '
'be extracted from the place title. Select the places you '
'wish Gramps to convert.'))
self.model = Gtk.ListStore(GObject.TYPE_BOOLEAN, GObject.TYPE_STRING,
GObject.TYPE_STRING, GObject.TYPE_STRING,
GObject.TYPE_STRING, GObject.TYPE_STRING,
GObject.TYPE_STRING)
r = Gtk.CellRendererToggle()
r.connect('toggled', self.toggled)
c = Gtk.TreeViewColumn(_('Select'), r, active=0)
self.list.append_column(c)
for (title, col) in COLS:
render = Gtk.CellRendererText()
if col > 1:
render.set_property('editable', True)
render.connect('edited', self.__change_name, col)
self.list.append_column(
Gtk.TreeViewColumn(title, render, text=col))
self.list.set_model(self.model)
self.iter_list = []
self.progress.set_pass(_('Building display'), len(self.name_list))
for (id, data) in self.name_list:
place = self.db.get_place_from_handle(id)
descr = place.get_title()
handle = self.model.append()
self.model.set_value(handle, 0, True)
self.model.set_value(handle, 1, descr)
if data[0]:
self.model.set_value(handle, 2, data[0])
if data[1]:
self.model.set_value(handle, 3, data[1])
if data[2]:
self.model.set_value(handle, 4, data[2])
if data[3]:
self.model.set_value(handle, 5, data[3])
self.model.set_value(handle, 6, id)
self.iter_list.append(handle)
self.progress.step()
self.progress.close()
self.show()
def __change_name(self, text, path, new_text, col):
self.model[path][col] = new_text
return
def toggled(self, cell, path_string):
path = tuple(map(int, path_string.split(':')))
row = self.model[path]
row[0] = not row[0]
def build_menu_names(self, obj):
return (self.label, None)
def on_help_clicked(self, obj):
"""Display the relevant portion of GRAMPS manual"""
display_help()
def on_ok_clicked(self, obj):
with DbTxn(_("Extract Place data"), self.db, batch=True) as self.trans:
self.db.disable_signals()
changelist = [node for node in self.iter_list
if self.model.get_value(node, 0)]
for change in changelist:
row = self.model[change]
place = self.db.get_place_from_handle(row[6])
(city, state, postal, country) = (row[2], row[3], row[4], row[5])
if city:
place.get_main_location().set_city(city)
if state:
place.get_main_location().set_state(state)
if postal:
place.get_main_location().set_postal_code(postal)
if country:
place.get_main_location().set_country(country)
self.db.commit_place(place, self.trans)
self.db.enable_signals()
self.db.request_rebuild()
self.close()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class ExtractCityOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, person_id=None):
tool.ToolOptions.__init__(self, name, person_id)
|
gpl-2.0
| 1,880,201,087,278,255,900
| 40.255962
| 88
| 0.432139
| false
| 2.881092
| false
| false
| false
|
hugovincent/pirate-swd
|
SWDCommon.py
|
1
|
4282
|
import time
import sys  # needed for the sys.exit() call in DebugPort.__init__ below
class DebugPort:
def __init__ (self, swd):
self.swd = swd
# read the IDCODE
# Hugo: according to ARM DDI 0316D we should have 0x2B.. not 0x1B.., but
# 0x1B.. is what upstream used, so leave it in here...
if self.idcode() not in [0x1BA01477, 0x2BA01477]:
print "warning: unexpected idcode"
# power up the debug and system power domains
self.swd.writeSWD(False, 1, 0x54000000)
if (self.status() >> 24) != 0xF4:
print "error powering up system"
sys.exit(1)
# get the SELECT register to a known state
self.select(0,0)
self.curAP = 0
self.curBank = 0
def idcode (self):
return self.swd.readSWD(False, 0)
def abort (self, orunerr, wdataerr, stickyerr, stickycmp, dap):
value = 0x00000000
value = value | (0x10 if orunerr else 0x00)
value = value | (0x08 if wdataerr else 0x00)
value = value | (0x04 if stickyerr else 0x00)
value = value | (0x02 if stickycmp else 0x00)
value = value | (0x01 if dap else 0x00)
self.swd.writeSWD(False, 0, value)
def status (self):
return self.swd.readSWD(False, 1)
def control (self, trnCount = 0, trnMode = 0, maskLane = 0, orunDetect = 0):
value = 0x54000000
value = value | ((trnCount & 0xFFF) << 12)
value = value | ((maskLane & 0x00F) << 8)
value = value | ((trnMode & 0x003) << 2)
value = value | (0x1 if orunDetect else 0x0)
self.swd.writeSWD(False, 1, value)
def select (self, apsel, apbank):
value = 0x00000000
value = value | ((apsel & 0xFF) << 24)
value = value | ((apbank & 0x0F) << 4)
self.swd.writeSWD(False, 2, value)
def readRB (self):
return self.swd.readSWD(False, 3)
def readAP (self, apsel, address):
adrBank = (address >> 4) & 0xF
adrReg = (address >> 2) & 0x3
if apsel != self.curAP or adrBank != self.curBank:
self.select(apsel, adrBank)
self.curAP = apsel
self.curBank = adrBank
return self.swd.readSWD(True, adrReg)
def writeAP (self, apsel, address, data, ignore = False):
adrBank = (address >> 4) & 0xF
adrReg = (address >> 2) & 0x3
if apsel != self.curAP or adrBank != self.curBank:
self.select(apsel, adrBank)
self.curAP = apsel
self.curBank = adrBank
self.swd.writeSWD(True, adrReg, data, ignore)
class MEM_AP:
def __init__ (self, dp, apsel):
self.dp = dp
self.apsel = apsel
self.csw(1,2) # 32-bit auto-incrementing addressing
def csw (self, addrInc, size):
self.dp.readAP(self.apsel, 0x00)
csw = self.dp.readRB() & 0xFFFFFF00
self.dp.writeAP(self.apsel, 0x00, csw + (addrInc << 4) + size)
def idcode (self):
self.dp.readAP(self.apsel, 0xFC)
return self.dp.readRB()
def readWord (self, adr):
self.dp.writeAP(self.apsel, 0x04, adr)
self.dp.readAP(self.apsel, 0x0C)
return self.dp.readRB()
def writeWord (self, adr, data):
self.dp.writeAP(self.apsel, 0x04, adr)
self.dp.writeAP(self.apsel, 0x0C, data)
return self.dp.readRB()
def readBlock (self, adr, count):
self.dp.writeAP(self.apsel, 0x04, adr)
vals = [self.dp.readAP(self.apsel, 0x0C) for off in range(count)]
vals.append(self.dp.readRB())
return vals[1:]
def writeBlock (self, adr, data):
self.dp.writeAP(self.apsel, 0x04, adr)
for val in data:
self.dp.writeAP(self.apsel, 0x0C, val)
def writeBlockNonInc (self, adr, data):
self.csw(0, 2) # 32-bit non-incrementing addressing
self.dp.writeAP(self.apsel, 0x04, adr)
for val in data:
self.dp.writeAP(self.apsel, 0x0C, val)
self.csw(1, 2) # 32-bit auto-incrementing addressing
def writeHalfs (self, adr, data):
self.csw(2, 1) # 16-bit packed-incrementing addressing
self.dp.writeAP(self.apsel, 0x04, adr)
for val in data:
time.sleep(0.001)
self.dp.writeAP(self.apsel, 0x0C, val, ignore = True)
self.csw(1, 2) # 32-bit auto-incrementing addressing
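# --- Hedged usage sketch (editor addition, not part of the upstream module) ---
# Assumes a Bus Pirate style front end exposing readSWD()/writeSWD(); the
# import path, class name and serial device below are illustrative only.
if __name__ == "__main__":
    from PirateSWD import PirateSWD  # hypothetical wire-level driver
    swd = PirateSWD("/dev/ttyUSB0")  # hypothetical serial device
    dp = DebugPort(swd)              # checks IDCODE, powers up, resets SELECT
    mem = MEM_AP(dp, 0)              # AP 0, 32-bit auto-incrementing accesses
    print "DP IDCODE:  0x%08X" % dp.idcode()
    print "MEM-AP IDR: 0x%08X" % mem.idcode()
    print "RAM word:   0x%08X" % mem.readWord(0x20000000)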
|
bsd-3-clause
| 1,786,803,907,590,919,400
| 34.683333
| 81
| 0.578001
| false
| 2.988137
| false
| false
| false
|
oscurart/BlenderAddons
|
oscurart_delta_to_global.py
|
1
|
1400
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# AUTHOR: Eugenio Pignataro (Oscurart) www.oscurart.com.ar
# USAGE: Select objects and run. This script merges delta transformations back into the objects' base (global) transformations (the inverse of applying transforms to deltas).
import bpy
for ob in bpy.context.selected_objects:
mat = ob.matrix_world
ob.location = mat.to_translation()
ob.delta_location = (0,0,0)
if ob.rotation_mode == "QUATERNION":
ob.rotation_quaternion = mat.to_quaternion()
ob.delta_rotation_quaternion = (1,0,0,0)
else:
ob.rotation_euler = mat.to_euler()
ob.delta_rotation_euler = (0,0,0)
ob.scale = mat.to_scale()
ob.delta_scale = (1,1,1)
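# Editor illustration (hedged, hypothetical numbers): for an unparented object
# with location (2, 0, 0) and delta_location (1, 0, 0), the loop above leaves
# the visible world transform unchanged; it only re-expresses it in the base
# channels, so location becomes (3, 0, 0) and delta_location (0, 0, 0).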
|
gpl-2.0
| 6,044,060,806,490,240,000
| 35.842105
| 83
| 0.692143
| false
| 3.5
| false
| false
| false
|
dorotapalicova/GoldDigger
|
gold_digger/data_providers/grandtrunk.py
|
1
|
2755
|
# -*- coding: utf-8 -*-
from datetime import datetime, date
from collections import defaultdict
from ._provider import Provider
class GrandTrunk(Provider):
"""
Service offers day exchange rates based on Federal Reserve and European Central Bank.
It is currently free for use in low-volume and non-commercial settings.
"""
BASE_URL = "http://currencies.apps.grandtrunk.net"
BASE_CURRENCY = "USD"
name = "grandtrunk"
def get_by_date(self, date_of_exchange, currency):
date_str = date_of_exchange.strftime(format="%Y-%m-%d")
self.logger.debug("Requesting GrandTrunk for %s (%s)", currency, date_str, extra={"currency": currency, "date": date_str})
response = self._get("{url}/getrate/{date}/{from_currency}/{to}".format(
url=self.BASE_URL, date=date_str, from_currency=self.BASE_CURRENCY, to=currency))
if response:
return self._to_decimal(response.text.strip(), currency)
def get_all_by_date(self, date_of_exchange, currencies):
day_rates = {}
for currency in currencies:
response = self._get("{url}/getrate/{date}/{from_currency}/{to}".format(
url=self.BASE_URL, date=date_of_exchange, from_currency=self.BASE_CURRENCY, to=currency))
if response:
decimal_value = self._to_decimal(response.text.strip(), currency)
if decimal_value:
day_rates[currency] = decimal_value
return day_rates
def get_historical(self, origin_date, currencies):
day_rates = defaultdict(dict)
origin_date_string = origin_date.strftime(format="%Y-%m-%d")
for currency in currencies:
response = self._get("{url}/getrange/{from_date}/{to_date}/{from_currency}/{to}".format(
url=self.BASE_URL, from_date=origin_date_string, to_date=date.today(), from_currency=self.BASE_CURRENCY, to=currency
))
records = response.text.strip().split("\n") if response else []
for record in records:
record = record.rstrip()
if record:
try:
date_string, exchange_rate_string = record.split(" ")
day = datetime.strptime(date_string, "%Y-%m-%d")
except ValueError as e:
self.logger.error("%s - Parsing of rate&date on record '%s' failed: %s" % (self, record, e))
continue
decimal_value = self._to_decimal(exchange_rate_string, currency)
if decimal_value:
day_rates[day][currency] = decimal_value
return day_rates
def __str__(self):
return self.name
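# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Shows the request URL shape and the "<date> <rate>" record parsing that
# get_historical() relies on; the sample record and currency are fabricated,
# and decimal.Decimal stands in for the provider's _to_decimal() helper.
if __name__ == "__main__":
    from decimal import Decimal
    url = "{url}/getrate/{date}/{from_currency}/{to}".format(
        url=GrandTrunk.BASE_URL, date="2016-01-04",
        from_currency=GrandTrunk.BASE_CURRENCY, to="CZK")
    print(url)  # -> http://currencies.apps.grandtrunk.net/getrate/2016-01-04/USD/CZK
    sample_record = "2016-01-04 24.287"
    date_string, exchange_rate_string = sample_record.split(" ")
    day = datetime.strptime(date_string, "%Y-%m-%d")
    print(day.date(), Decimal(exchange_rate_string))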
|
apache-2.0
| 8,640,422,715,685,752,000
| 45.694915
| 132
| 0.587659
| false
| 3.986975
| false
| false
| false
|
caktus/aws-web-stacks
|
stack/load_balancer.py
|
1
|
4262
|
from troposphere import GetAtt, If, Join, Output, Ref
from troposphere import elasticloadbalancing as elb
from . import USE_ECS, USE_GOVCLOUD
from .security_groups import load_balancer_security_group
from .template import template
from .utils import ParameterWithDefaults as Parameter
from .vpc import public_subnet_a, public_subnet_b
# Web worker
if USE_ECS:
web_worker_port = Ref(template.add_parameter(
Parameter(
"WebWorkerPort",
Description="Web worker container exposed port",
Type="Number",
Default="8000",
),
group="Load Balancer",
label="Web Worker Port",
))
else:
# default to port 80 for EC2 and Elastic Beanstalk options
web_worker_port = Ref(template.add_parameter(
Parameter(
"WebWorkerPort",
Description="Default web worker exposed port (non-HTTPS)",
Type="Number",
Default="80",
),
group="Load Balancer",
label="Web Worker Port",
))
web_worker_protocol = Ref(template.add_parameter(
Parameter(
"WebWorkerProtocol",
Description="Web worker instance protocol",
Type="String",
Default="HTTP",
AllowedValues=["HTTP", "HTTPS"],
),
group="Load Balancer",
label="Web Worker Protocol",
))
# Web worker health check
web_worker_health_check_protocol = Ref(template.add_parameter(
Parameter(
"WebWorkerHealthCheckProtocol",
Description="Web worker health check protocol",
Type="String",
Default="TCP",
AllowedValues=["TCP", "HTTP", "HTTPS"],
),
group="Load Balancer",
label="Health Check: Protocol",
))
web_worker_health_check_port = Ref(template.add_parameter(
Parameter(
"WebWorkerHealthCheckPort",
Description="Web worker health check port",
Type="Number",
Default="80",
),
group="Load Balancer",
label="Health Check: Port",
))
web_worker_health_check = Ref(template.add_parameter(
Parameter(
"WebWorkerHealthCheck",
Description="Web worker health check URL path, e.g., \"/health-check\"; "
"required unless WebWorkerHealthCheckProtocol is TCP",
Type="String",
Default="",
),
group="Load Balancer",
label="Health Check: URL",
))
# Web load balancer
listeners = [
elb.Listener(
LoadBalancerPort=80,
InstanceProtocol=web_worker_protocol,
InstancePort=web_worker_port,
Protocol='HTTP',
)
]
if USE_GOVCLOUD:
# configure the default HTTPS listener to pass TCP traffic directly,
# since GovCloud doesn't support the Certificate Manager (this can be
# modified to enable SSL termination at the load balancer via the AWS
# console, if needed)
listeners.append(elb.Listener(
LoadBalancerPort=443,
InstanceProtocol='TCP',
InstancePort=443,
Protocol='TCP',
))
else:
from .certificates import application as application_certificate
from .certificates import cert_condition
listeners.append(If(cert_condition, elb.Listener(
LoadBalancerPort=443,
InstanceProtocol=web_worker_protocol,
InstancePort=web_worker_port,
Protocol='HTTPS',
SSLCertificateId=application_certificate,
), Ref("AWS::NoValue")))
load_balancer = elb.LoadBalancer(
'LoadBalancer',
template=template,
Subnets=[
Ref(public_subnet_a),
Ref(public_subnet_b),
],
SecurityGroups=[Ref(load_balancer_security_group)],
Listeners=listeners,
HealthCheck=elb.HealthCheck(
Target=Join("", [
web_worker_health_check_protocol,
":",
web_worker_health_check_port,
web_worker_health_check,
]),
HealthyThreshold="2",
UnhealthyThreshold="2",
Interval="100",
Timeout="10",
),
CrossZone=True,
)
template.add_output(Output(
"LoadBalancerDNSName",
Description="Loadbalancer DNS",
Value=GetAtt(load_balancer, "DNSName")
))
template.add_output(Output(
"LoadBalancerHostedZoneID",
Description="Loadbalancer hosted zone",
Value=GetAtt(load_balancer, "CanonicalHostedZoneNameID")
))
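# --- Editor note (hedged illustration, not part of the original stack code) ---
# With the parameter defaults above (health check protocol "TCP", port "80",
# empty URL path), the Join() used for HealthCheck.Target resolves at deploy
# time to the string "TCP:80"; choosing protocol "HTTP" and a path such as
# "/health-check" would yield "HTTP:80/health-check" instead.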
|
mit
| 8,519,122,697,732,304,000
| 27.039474
| 81
| 0.635852
| false
| 4.1139
| false
| false
| false
|
technige/py2neo
|
test/integration/test_types.py
|
1
|
4086
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neotime import Date, Time, DateTime, Duration
from packaging.version import Version
from pytest import skip
from py2neo.data import Node
from py2neo.data.spatial import CartesianPoint, WGS84Point
def test_null(graph):
i = None
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_true(graph):
i = True
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_false(graph):
i = False
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_int(graph):
for i in range(-128, 128):
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_float(graph):
for i in range(-128, 128):
f = float(i) + 0.5
o = graph.evaluate("RETURN $x", x=f)
assert o == f
def test_string(graph):
i = u"hello, world"
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_bytes(graph):
i = bytearray([65, 66, 67])
o = graph.evaluate("RETURN $x", x=i)
# The values are coerced to bytearray before comparison
# as HTTP does not support byte parameters, instead
# coercing such values to lists of integers.
assert bytearray(o) == bytearray(i)
def test_list(graph):
i = [65, 66, 67]
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_dict(graph):
i = {"one": 1, "two": 2}
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_node(graph):
i = Node("Person", name="Alice")
o = graph.evaluate("CREATE (a:Person {name: 'Alice'}) RETURN a")
assert o.labels == i.labels
assert dict(o) == dict(i)
def test_relationship(graph):
o = graph.evaluate("CREATE ()-[r:KNOWS {since: 1999}]->() RETURN r")
assert type(o).__name__ == "KNOWS"
assert dict(o) == {"since": 1999}
def test_path(graph):
o = graph.evaluate("CREATE p=(:Person {name: 'Alice'})-[:KNOWS]->(:Person {name: 'Bob'}) RETURN p")
assert len(o) == 1
assert o.start_node.labels == {"Person"}
assert dict(o.start_node) == {"name": "Alice"}
assert type(o.relationships[0]).__name__ == "KNOWS"
assert o.end_node.labels == {"Person"}
assert dict(o.end_node) == {"name": "Bob"}
def skip_if_no_temporal_support(graph):
connector = graph.service.connector
if graph.service.kernel_version < Version("3.4"):
skip("Temporal type tests are only valid for Neo4j 3.4+")
if connector.profile.protocol != "bolt":
skip("Temporal type tests are only valid for Bolt connectors")
def test_date(graph):
skip_if_no_temporal_support(graph)
i = Date(2014, 8, 6)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_time(graph):
skip_if_no_temporal_support(graph)
i = Time(12, 34, 56.789)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_date_time(graph):
skip_if_no_temporal_support(graph)
i = DateTime(2014, 8, 6, 12, 34, 56.789)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_duration(graph):
skip_if_no_temporal_support(graph)
i = Duration(months=1, days=2, seconds=3)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_cartesian_point(graph):
skip_if_no_temporal_support(graph)
i = CartesianPoint((12.34, 56.78))
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_wgs84_point(graph):
skip_if_no_temporal_support(graph)
i = WGS84Point((12.34, 56.78))
o = graph.evaluate("RETURN $x", x=i)
assert o == i
|
apache-2.0
| 5,860,425,260,002,688,000
| 25.36129
| 103
| 0.631424
| false
| 3.157651
| true
| false
| false
|
hirokihamasaki/irma
|
probe/modules/antivirus/clamav/clam.py
|
1
|
3588
|
#
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
from ..base import Antivirus
log = logging.getLogger(__name__)
class Clam(Antivirus):
_name = "Clam AntiVirus Scanner (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super(Clam, self).__init__(*args, **kwargs)
# scan tool variables
self._scan_args = (
"--infected " # only print infected files
"--fdpass " # avoid file access problem as clamdameon
# is runned by clamav user
"--no-summary " # disable summary at the end of scanning
"--stdout " # do not write to stderr
)
self._scan_patterns = [
re.compile(r'(?P<file>.*): (?P<name>[^\s]+) FOUND', re.IGNORECASE)
]
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
result = None
if self.scan_path:
cmd = self.build_cmd(self.scan_path, '--version')
retcode, stdout, stderr = self.run_cmd(cmd)
if not retcode:
matches = re.search(r'(?P<version>\d+(\.\d+)+)',
stdout,
re.IGNORECASE)
if matches:
result = matches.group('version').strip()
return result
def get_database(self):
"""return list of files in the database"""
# NOTE: we can use clamconf to get database location, but it is not
# always installed by default. Instead, hardcode some common paths and
# locate files using predefined patterns
search_paths = [
'/var/lib/clamav', # default location in debian
]
database_patterns = [
'main.cvd',
'daily.c[lv]d', # *.cld on debian, *.cvd on the clamav website
'bytecode.c[lv]d', # *.cld on debian, *.cvd on the clamav website
'safebrowsing.c[lv]d', # *.cld on debian, *.cvd on the clamav website
'*.hdb', # clamav hash database
'*.mdb', # clamav MD5, PE-section based
'*.ndb', # clamav extended signature format
'*.ldb', # clamav logical signatures
]
results = []
for pattern in database_patterns:
result = self.locate(pattern, search_paths, syspath=False)
results.extend(result)
return results if results else None
def get_scan_path(self):
"""return the full path of the scan tool"""
paths = self.locate("clamdscan")
return paths[0] if paths else None
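# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Demonstrates how the _scan_patterns regex above parses a clamdscan result
# line; the sample output line is fabricated for illustration.
if __name__ == "__main__":
    pattern = re.compile(r'(?P<file>.*): (?P<name>[^\s]+) FOUND', re.IGNORECASE)
    sample = "/tmp/eicar.com: Eicar-Test-Signature FOUND"
    match = pattern.match(sample)
    if match:
        print("file={file} name={name}".format(**match.groupdict()))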
|
apache-2.0
| 4,104,439,727,466,220,000
| 37.170213
| 78
| 0.516165
| false
| 4.349091
| false
| false
| false
|
mccormickmichael/laurel
|
scaffold/cf/net.py
|
1
|
1726
|
#!/usr/bin/python
# Common functions and builders for VPC Templates
import troposphere as tp
import troposphere.ec2 as ec2
CIDR_ANY = '0.0.0.0/0'
CIDR_NONE = '0.0.0.0/32'
HTTP = 80
HTTPS = 443
SSH = 22
EPHEMERAL = (32767, 65536)
NAT = (1024, 65535)
ANY_PORT = (0, 65535)
TCP = '6'
UDP = '17'
ICMP = '1'
ANY_PROTOCOL = '-1'
def sg_rule(cidr, ports, protocol):
from_port, to_port = _asduo(ports)
return ec2.SecurityGroupRule(CidrIp=cidr,
FromPort=from_port,
ToPort=to_port,
IpProtocol=protocol)
def nacl_ingress(name, nacl, number, ports, protocol, cidr=CIDR_ANY, action='allow'):
return _nacl_rule(name, nacl, number, ports, protocol, False, cidr, action)
def nacl_egress(name, nacl, number, ports, protocol, cidr=CIDR_ANY, action='allow'):
return _nacl_rule(name, nacl, number, ports, protocol, True, cidr, action)
def _nacl_rule(name, nacl, number, ports, protocol, egress, cidr, action):
from_port, to_port = _asduo(ports)
return ec2.NetworkAclEntry(name,
NetworkAclId=_asref(nacl),
RuleNumber=number,
Protocol=protocol,
PortRange=ec2.PortRange(From=from_port, To=to_port),
Egress=egress,
RuleAction=action,
CidrBlock=cidr)
def _asduo(d):
return d if type(d) in [list, tuple] else (d, d)
def _asref(o):
return o if isinstance(o, tp.Ref) else tp.Ref(o)
def az_name(region, az):
if az.startswith(region):
return az
return region + az.lower()
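# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Builds an SSH security-group rule and a matching NACL ingress entry with the
# helpers above; the 'PublicNacl' logical name is a hypothetical resource that
# would be defined elsewhere in the template.
if __name__ == "__main__":
    ssh_sg_rule = sg_rule(CIDR_ANY, SSH, TCP)  # 0.0.0.0/0, port 22, tcp
    ssh_nacl_in = nacl_ingress('AllowSSHIn', 'PublicNacl', 100, SSH, TCP)
    print(ssh_sg_rule.to_dict())
    print(ssh_nacl_in.to_dict())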
|
unlicense
| -5,010,251,801,299,284,000
| 25.96875
| 85
| 0.562572
| false
| 3.332046
| false
| false
| false
|
warriorframework/warriorframework
|
warrior/Framework/ClassUtils/rest_utils_class.py
|
1
|
26837
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""API for operations related to REST Interfaces
Packages used = Requests (documentation available at http://docs.python-requests.org/) """
import re
import time
import os
import os.path
import json as JSON
from xml.dom.minidom import parseString
from Framework.Utils.testcase_Utils import pNote
import Framework.Utils as Utils
from Framework.ClassUtils.json_utils_class import JsonUtils
from Framework.Utils.print_Utils import print_error
from Framework.Utils import string_Utils
from Framework.Utils import data_Utils
class WRest(object):
"""WRest class has methods required to interact
with REST interfaces"""
def __init__(self):
"""constructor for WRest """
self.req = None
self.import_requests()
self.json_utils = JsonUtils()
def import_requests(self):
"""Import the requests module """
try:
import requests
except ImportError:
pNote("Requests module is not installed"\
"Please install requests module to"\
"perform any activities related to REST interfaces", "error")
else:
self.req = requests
def post(self, url, expected_response=None, data=None, auth=None, **kwargs):
""" performs a http post method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http post", "info")
try:
response = self.req.post(url, data=data, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'post')
return status, response
def get(self, url, expected_response=None, params=None, auth=None, **kwargs):
"""performs a http get method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http get", "info")
try:
response = self.req.get(url, params=params, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'get')
return status, response
def put(self, url, expected_response=None, data=None, auth=None, **kwargs):
""" performs a http put method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http put", "info")
try:
response = self.req.put(url, data=data, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'put')
return status, response
def patch(self, url, expected_response=None, data=None, auth=None, **kwargs):
""" performs a http patch method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http patch", "info")
try:
response = self.req.patch(url, data=data, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'patch')
return status, response
def delete(self, url, expected_response=None, auth=None, **kwargs):
""" performs a http delete method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http delete", "info")
try:
response = self.req.delete(url, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'delete')
return status, response
def options(self, url, expected_response=None, auth=None, **kwargs):
""" performs a http options method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http options", "info")
try:
response = self.req.options(url, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'options')
return status, response
def head(self, url, expected_response=None, auth=None, **kwargs):
""" performs a http head method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http head", "info")
try:
response = self.req.head(url, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'head')
return status, response
def cmp_response(self, response, expected_api_response,
expected_response_type, output_file,
generate_output_diff_file=True):
"""
Performs the comparison between api response
and expected_api_response
arguments:
1.response: API response obtained from the data repository
2.expected_api_response : the expected response, supplied by the user,
against which the actual response is compared.
3.expected_response_type: The type of the expected response.
It can be xml or json or text.
4.output_file: The file in which the difference will be written
if the responses are not equal.
5.generate_output_diff_file: If the responses do not match,
an output file containing the difference is generated by default;
if set to False, no file is generated.
returns:
Returns True if the response matches with
the expected response else False.
"""
if response is not None and expected_api_response is not None:
if expected_response_type in response.headers['Content-Type']:
extracted_response = response.content
extension = Utils.rest_Utils.get_extension_from_path(expected_api_response)
if 'xml' in response.headers['Content-Type']:
try:
f = open(expected_api_response, 'r')
except IOError as exception:
if ".xml" == extension:
pNote("File does not exist in the"
" provided file path", "error")
return False
status, sorted_file1, sorted_file2, output_file = \
Utils.xml_Utils.compare_xml(extracted_response, expected_api_response,
output_file, sorted_json=False)
elif 'json' in response.headers['Content-Type']:
try:
expected_api_response = JSON.load(open(expected_api_response, 'r'))
for key, value in expected_api_response.items():
# replacing the environment/repo variable with value in the verify json
dict_key_value = {key: value}
env_out = data_Utils.sub_from_env_var(dict_key_value)
details_dict = data_Utils.sub_from_data_repo(dict_key_value)
expected_api_response[key] = env_out[key]
expected_api_response[key] = details_dict[key]
except IOError as exception:
if ".json" == extension:
pNote("File does not exist in the"
" provided file path", "error")
return False
expected_api_response = JSON.loads(expected_api_response)
extracted_response = JSON.loads(extracted_response)
status = self.json_utils.write_json_diff_to_file(
extracted_response, expected_api_response, output_file)
elif 'text' in response.headers['Content-Type']:
try:
f = open(expected_api_response, 'r')
expected_api_response = f.read()
f.close()
except IOError as exception:
if ".txt" == extension:
pNote("File does not exist in the"
" provided file path", "error")
return False
status = Utils.string_Utils.text_compare(
extracted_response, expected_api_response, output_file)
if not status:
if not generate_output_diff_file:
os.remove(output_file)
else:
pNote("api_response and expected_api_response do not match", "error")
pNote("The difference between the responses is saved here:{0}".format(output_file), "info")
return status
else:
type_of_response = Utils.rest_Utils.\
get_type_of_api_response(response)
pNote("Expected response type is {0}".
format(expected_response_type), "info")
pNote("API response type is {0}".
format(type_of_response), "info")
pNote("api_response and expected_api_response"
" types do not match", "error")
return False
else:
return False
def cmp_content_response(self, datafile, system_name, response,
expected_api_response, expected_response_type,
comparison_mode):
"""
Performs the comparison between api response
and expected_api_response
arguments:
1. datafile: Datafile of the test case
2. system_name: Name of the system from the datafile
Pattern: String Pattern
Multiple Values: No
Max Numbers of Values Accepted: 1
Characters Accepted: All Characters
Other Restrictions: Should be valid system name
from the datafile
eg: http_system_1
3. response: API response getting from the data repository
4. expected_api_response : expected response which needs
to be compared given by the user.
5. expected_response_type: The type of the expected response.
It can be xml or json or text.
6. comparison_mode:
This is the mode in which you wish to compare.
The supported comparison modes are
file, string, regex=expression, jsonpath=path, xpath=path.
If comparison_mode is file or string, the whole
response is compared.
To check only part of the expected response: if there is
only one value check, pass it in either the data file
or the test case file; if there is more than one value
check, pass them in the data file in the comparison_mode
and expected_api_response tags under the system.
For an xml response you need to give xpath=path;
for a string response you can pass regex=expressions
and leave expected_api_response empty.
Ex for passing values in data file if it is json response
<comparison_mode>
<response_path>jsonpath=1.2.3</response_path>
<response_path>jsonpath=1.2</response_path>
</comparison_mode>
<expected_api_response>
<response_value>4</response_value>
<response_value>5</response_value>
</expected_api_response>
returns:
Returns True if the response matches with
the expected response else False.
"""
if expected_response_type in response.headers['Content-Type']:
extracted_response = response.content
if comparison_mode:
path_list = [comparison_mode]
responses_list = [expected_api_response]
else:
path_list, responses_list = Utils.xml_Utils.\
list_path_responses_datafile(datafile, system_name)
if path_list:
if "xml" in response.headers['Content-Type']:
status = Utils.xml_Utils.compare_xml_using_xpath(extracted_response,
path_list, responses_list)
elif "json" in response.headers['Content-Type']:
status = self.json_utils.compare_json_using_jsonpath(extracted_response,
path_list, responses_list)
else:
status = Utils.string_Utils.compare_string_using_regex(extracted_response,
path_list)
else:
print_error("Please provide the values for comparison_mode and "
"expected_api_response")
status = False
else:
type_of_response = Utils.rest_Utils.\
get_type_of_api_response(response)
pNote("Expected response type is {0}".
format(expected_response_type), "info")
pNote("API response type is {0}".
format(type_of_response), "info")
pNote("api_response and expected_api_response"
" types do not match", "error")
status = False
return status
@classmethod
def report_response_status(cls, status, expected_response, action):
"""Reports the response status of http
actions with a print message to the user"""
result = False
if expected_response is None or expected_response is False or \
expected_response == [] or expected_response == "":
pattern = re.compile('^2[0-9][0-9]$')
if pattern.match(str(status)) is not None:
pNote("http {0} successful".format(action), "info")
result = True
elif isinstance(expected_response, list):
for i in range(0, len(expected_response)):
if str(status) == expected_response[i]:
pNote("http {0} successful".format(action), "info")
result = True
elif str(status) == expected_response:
pNote("http {0} successful".format(action), "info")
result = True
if not result:
pNote("http {0} failed".format(action), "error")
return result
def catch_expection_return_error(self, exception_name, url):
""" Function for catching expections thrown by REST operations
"""
if exception_name.__class__.__name__ == self.req.exceptions.ConnectionError.__name__:
pNote("Max retries exceeded with URL {0}. Failed to establish a new connection.".
format(url), "error")
status = False
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.InvalidURL.__name__:
pNote("Could not process the request. {0} is somehow invalid.".format(url), "error")
status = "ERROR"
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.URLRequired.__name__:
pNote("Could not process the request. A valid URL is required to make a request.".
format(url), "error")
status = "ERROR"
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.MissingSchema.__name__:
pNote("Could not process the request. The URL schema (e.g. http or https) is missing.".
format(url), "error")
status = "ERROR"
response = None
elif exception_name.__class__.__name__ == ValueError.__name__:
pNote("Could not process the request. May be the value provided for timeout is "
"invalid or the schema is invalid.", "error")
status = "ERROR"
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.ConnectTimeout.__name__:
pNote("The request timed out while trying to connect to the remote server.", "error")
status = False
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.ReadTimeout.__name__:
pNote("The server did not send any data in the allotted amount of time.", "error")
status = False
response = None
else:
pNote("An Error Occurred: {0}".format(exception_name), "error")
status = False
response = None
return status, response
def check_connection(self, url, auth=None, **kwargs):
"""Internally uses the http options to check connection status.
i.e.
- If the connection is successful, returns True
- if any ConnectionError is detected, returns False."""
try:
status = False
api_response = self.req.options(url, auth=auth, **kwargs)
if not str(api_response).startswith('2') or \
str(api_response).startswith('1'):
pNote("Connection was successful, but there was"\
"problem accessing the resource: {0}".format(url), "info")
status = False
except self.req.ConnectionError:
pNote("Connection to url is down: {0}".format(url), "debug")
except self.req.HTTPError:
pNote("Problem accessing resource: {0}".format(url), "debug")
else:
pNote("Connection to resource successfull: {0}".format(url), "debug")
status = True
return status
def update_output_dict(self, system_name, api_response, request_id, status, i):
"""
Updates the output dictionary with the response object, its text, status
code, headers and extracted content, both under generic keys and under
per-request keys (when a request_id and a request index are given).
"""
output_dict = {}
pNote("Total number of requests in this step: {0}".format(i))
pNote("This is request number: {0}".format(i))
pNote("status: {0}".format(status), "debug")
pNote("api_response: {0}".format(api_response), "debug")
output_dict["{0}_api_response".format(system_name)] = api_response
output_dict["{0}_api_response_object".format(system_name)] = api_response
if api_response is not None:
text = api_response.text
status_code = api_response.status_code
headers = api_response.headers
output_response = self.get_output_response(api_response)
history = api_response.history
else:
text = None
status_code = None
headers = None
output_response = None
history = None
output_dict["{0}_status".format(system_name)] = status_code
pNote("api_response_history: {0}".format(history), "debug")
if request_id is not None:
output_dict["{0}_{1}_api_response_object_{2}".format(system_name, request_id, i)] = api_response
output_dict["{0}_{1}_api_response_text_{2}".format(system_name, request_id, i)] = text
output_dict["{0}_{1}_api_response_status_{2}".format(system_name, request_id, i)] = status_code
output_dict["{0}_{1}_api_response_headers_{2}".format(system_name, request_id, i)] = headers
output_dict["{0}_{1}_api_response_content_{2}".format(system_name, request_id, i)] = output_response
output_dict["{0}_{1}_api_response_object".format(system_name, request_id)] = api_response
output_dict["{0}_{1}_api_response_text".format(system_name, request_id)] = text
output_dict["{0}_{1}_api_response_status".format(system_name, request_id)] = status_code
output_dict["{0}_{1}_api_response_headers".format(system_name, request_id)] = headers
output_dict["{0}_{1}_api_response_content".format(system_name, request_id)] = output_response
else:
output_dict["{0}_api_response_object_{1}".format(system_name, i)] = api_response
output_dict["{0}_api_response_text_{1}".format(system_name, i)] = text
output_dict["{0}_api_response_status_{1}".format(system_name, i)] = status_code
output_dict["{0}_api_response_headers_{1}".format(system_name, i)] = headers
output_dict["{0}_api_response_content_{1}".format(system_name, i)] = output_response
output_dict["{0}_api_response_object".format(system_name)] = api_response
output_dict["{0}_api_response_text".format(system_name)] = text
output_dict["{0}_api_response_status".format(system_name)] = status_code
output_dict["{0}_api_response_headers".format(system_name)] = headers
output_dict["{0}_api_response_content".format(system_name)] = output_response
return output_dict
@staticmethod
def get_output_response(api_response):
"""
This method converts the given api_response into text / xml / json form
Params:
api_response : api_response
Returns:
output_response in the form of text/xml/json
"""
if api_response is not None:
try:
output_response = parseString("".join(api_response.text))
except:
try:
JSON.loads(api_response.text)
except:
output_response = api_response.text.encode('ascii', 'ignore')
pNote("api_response Text: \n {0}".format(output_response))
else:
output_response = api_response.json()
pNote("api_response (JSON format): \n {0}".
format(JSON.dumps(output_response, indent=4)))
else:
pNote("api_response (XML format): \n {0}".
format(output_response.toprettyxml(newl='\n')))
else:
output_response = None
return output_response
def try_until_resource_status(self, url, auth=None, status="up", trials=5, **kwargs):
""" Tries to connect to the resource until resource
reaches the specified status. Tries for the number mentioned in the
trials parameter (default=5)
waits for a time of 30 seconds between trials
"""
final_status = False
if status.upper() == "UP":
expected_result = True
elif status.upper() == "DOWN":
expected_result = False
i = 1
while i <= trials:
pNote("Trial: {0}".format(i), "info")
result = self.check_connection(url, auth, **kwargs)
if result == expected_result:
final_status = True
break
i += 1
time.sleep(10)
return final_status
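# --- Hedged usage sketch (editor addition, not part of the original class) ---
# Shows the expected_response convention described in the docstrings above: a
# single status code string or a list of acceptable codes. The URL is a
# placeholder, and running this needs the warrior Framework package and the
# requests module on the path.
if __name__ == "__main__":
    rest = WRest()
    status, response = rest.get("http://example.com/api/resource",
                                expected_response=["200", "301", "302"])
    print("status={0} http_code={1}".format(
        status, response.status_code if response is not None else None))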
|
apache-2.0
| 2,335,298,934,855,537,700
| 47.096774
| 115
| 0.571077
| false
| 4.552502
| false
| false
| false
|
pelikanchik/edx-platform
|
cms/djangoapps/contentstore/views/item.py
|
1
|
18095
|
# -*- coding: utf-8 -*-
"""Views for items (modules)."""
import json
import logging
from uuid import uuid4
from functools import partial
from static_replace import replace_static_urls
from xmodule_modifiers import wrap_xblock
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError
from util.json_request import expect_json, JsonResponse
from util.string_utils import str_to_bool
from ..transcripts_utils import manage_video_subtitles_save
from ..utils import get_modulestore
from .access import has_access
from .helpers import _xmodule_recurse
from xmodule.x_module import XModuleDescriptor
from django.views.decorators.http import require_http_methods
from xmodule.modulestore.locator import BlockUsageLocator
from student.models import CourseEnrollment
from django.http import HttpResponseBadRequest
from xblock.fields import Scope
from preview import handler_prefix, get_preview_html
from edxmako.shortcuts import render_to_response, render_to_string
from models.settings.course_grading import CourseGradingModel
__all__ = ['orphan_handler', 'xblock_handler']
log = logging.getLogger(__name__)
# cdodge: these are categories which should not be parented, they are detached from the hierarchy
DETACHED_CATEGORIES = ['about', 'static_tab', 'course_info']
CREATE_IF_NOT_FOUND = ['course_info']
# pylint: disable=unused-argument
@require_http_methods(("DELETE", "GET", "PUT", "POST"))
@login_required
@expect_json
def xblock_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
The restful handler for xblock requests.
DELETE
json: delete this xblock instance from the course. Supports query parameters "recurse" to delete
all children and "all_versions" to delete from all (mongo) versions.
GET
json: returns representation of the xblock (locator id, data, and metadata).
if ?fields=graderType, it returns the graderType for the unit instead of the above.
html: returns HTML for rendering the xblock (which includes both the "preview" view and the "editor" view)
PUT or POST
json: if xblock locator is specified, update the xblock instance. The json payload can contain
these fields, all optional:
:data: the new value for the data.
:children: the locator ids of children for this xblock.
:metadata: new values for the metadata fields. Any whose values are None will be deleted not set
to None! Absent ones will be left alone.
:nullout: which metadata fields to set to None
:graderType: change how this unit is graded
:publish: can be one of three values, 'make_public, 'make_private', or 'create_draft'
The JSON representation of the updated xblock (minus children) is returned.
if xblock locator is not specified, create a new xblock instance. The json payload can contain
these fields:
:parent_locator: parent for new xblock, required
:category: type of xblock, required
:display_name: name for new xblock, optional
:boilerplate: template name for populating fields, optional
The locator (and old-style id) for the created xblock (minus children) is returned.
"""
if package_id is not None:
locator = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
if not has_access(request.user, locator):
raise PermissionDenied()
old_location = loc_mapper().translate_locator_to_location(locator)
if request.method == 'GET':
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
fields = request.REQUEST.get('fields', '').split(',')
if 'graderType' in fields:
# right now can't combine output of this w/ output of _get_module_info, but worthy goal
return JsonResponse(CourseGradingModel.get_section_grader_type(locator))
# TODO: pass fields to _get_module_info and only return those
rsp = _get_module_info(locator)
return JsonResponse(rsp)
else:
component = modulestore().get_item(old_location)
# Wrap the generated fragment in the xmodule_editor div so that the javascript
# can bind to it correctly
component.runtime.wrappers.append(partial(wrap_xblock, handler_prefix))
try:
content = component.render('studio_view').content
# catch exceptions indiscriminately, since after this point they escape the
# dungeon and surface as uneditable, unsaveable, and undeletable
# component-goblins.
except Exception as exc: # pylint: disable=W0703
log.debug("Unable to render studio_view for %r", component, exc_info=True)
content = render_to_string('html_error.html', {'message': str(exc)})
mod_class = component.__class__.__name__
current_module_class = 'other'
if "CapaDescriptor" in mod_class:
current_module_class = 'problem'
if "VideoDescriptor" in mod_class:
current_module_class = 'video'
return render_to_response('component.html', {
'preview': get_preview_html(request, component),
'module_class': current_module_class,
'editor': content
})
elif request.method == 'DELETE':
delete_children = str_to_bool(request.REQUEST.get('recurse', 'False'))
delete_all_versions = str_to_bool(request.REQUEST.get('all_versions', 'False'))
return _delete_item_at_location(old_location, delete_children, delete_all_versions)
else: # Since we have a package_id, we are updating an existing xblock.
return _save_item(
request,
locator,
old_location,
data=request.json.get('data'),
children=request.json.get('children'),
metadata=request.json.get('metadata'),
nullout=request.json.get('nullout'),
grader_type=request.json.get('graderType'),
publish=request.json.get('publish'),
)
elif request.method in ('PUT', 'POST'):
return _create_item(request)
else:
return HttpResponseBadRequest(
"Only instance creation is supported without a package_id.",
content_type="text/plain"
)
def _save_item(request, usage_loc, item_location, data=None, children=None, metadata=None, nullout=None,
grader_type=None, publish=None):
"""
Saves an xblock with its fields. Has special processing for grader_type, publish, nullout, and Nones in metadata.
nullout means to truly set the field to None, whereas Nones in metadata mean to unset them (so they revert
to their defaults).
The item_location is still the old-style location, whereas usage_loc is a BlockUsageLocator.
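Illustration (hypothetical field name): passing metadata={'due': None} unsets 'due'
so it reverts to its default, whereas nullout=['due'] explicitly stores None for 'due'.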
"""
store = get_modulestore(item_location)
try:
existing_item = store.get_item(item_location)
except ItemNotFoundError:
if item_location.category in CREATE_IF_NOT_FOUND:
# New module at this location, for pages that are not pre-created.
# Used for course info handouts.
store.create_and_save_xmodule(item_location)
existing_item = store.get_item(item_location)
else:
raise
except InvalidLocationError:
log.error("Can't find item by location.")
return JsonResponse({"error": "Can't find item by location: " + str(item_location)}, 404)
if publish:
if publish == 'make_private':
_xmodule_recurse(existing_item, lambda i: modulestore().unpublish(i.location))
elif publish == 'create_draft':
# This clones the existing item location to a draft location (the draft is
# implicit, because modulestore is a Draft modulestore)
modulestore().convert_to_draft(item_location)
if data:
store.update_item(item_location, data)
else:
data = existing_item.get_explicitly_set_fields_by_scope(Scope.content)
if children is not None:
children_ids = [
loc_mapper().translate_locator_to_location(BlockUsageLocator(child_locator)).url()
for child_locator
in children
]
store.update_children(item_location, children_ids)
# cdodge: also commit any metadata which might have been passed along
if nullout is not None or metadata is not None:
# the postback is not the complete metadata, as there's system metadata which is
# not presented to the end-user for editing. So let's use the original (existing_item) and
# 'apply' the submitted metadata, so we don't end up deleting system metadata.
if nullout is not None:
for metadata_key in nullout:
setattr(existing_item, metadata_key, None)
# update existing metadata with submitted metadata (which can be partial)
# IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata that means 'remove it'. If
# the intent is to make it None, use the nullout field
if metadata is not None:
for metadata_key, value in metadata.items():
if metadata_key == "locator_term":
temp_key = "direct_term"
json_array = json.loads(value)
for x in json_array:
old_loc = str(loc_mapper().translate_locator_to_location(x["direct_element_id"]))
i = old_loc.rfind("/")
short_name = old_loc[i+1:]
x["direct_element_id"] = short_name
for every_edge in x["disjunctions"]:
for every_cond in every_edge["conjunctions"]:
old_loc = str(loc_mapper().translate_locator_to_location(every_cond["source_element_id"]))
i = old_loc.rfind("/")
short_name = old_loc[i+1:]
every_cond["source_element_id"] = short_name
temp_value = json.dumps(json_array)
else:
temp_key = metadata_key
temp_value = value
field = existing_item.fields[temp_key]
if temp_value is None:
field.delete_from(existing_item)
else:
try:
temp_value = field.from_json(temp_value)
except ValueError:
return JsonResponse({"error": "Invalid data"}, 400)
field.write_to(existing_item, temp_value)
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
existing_item.save()
# commit to datastore
store.update_metadata(item_location, own_metadata(existing_item))
if existing_item.category == 'video':
manage_video_subtitles_save(existing_item, existing_item)
result = {
'id': unicode(usage_loc),
'data': data,
'metadata': own_metadata(existing_item)
}
if grader_type is not None:
result.update(CourseGradingModel.update_section_grader_type(existing_item, grader_type))
# Make public after updating the xblock, in case the caller asked
# for both an update and a publish.
if publish and publish == 'make_public':
_xmodule_recurse(
existing_item,
lambda i: modulestore().publish(i.location, request.user.id)
)
# Note that children aren't being returned until we have a use case.
return JsonResponse(result)
@login_required
@expect_json
def _create_item(request):
"""View for create items."""
parent_locator = BlockUsageLocator(request.json['parent_locator'])
parent_location = loc_mapper().translate_locator_to_location(parent_locator)
try:
category = request.json['category']
except KeyError:
category = 'problem'
display_name = request.json.get('display_name')
if not has_access(request.user, parent_location):
raise PermissionDenied()
parent = get_modulestore(category).get_item(parent_location)
dest_location = parent_location.replace(category=category, name=uuid4().hex)
# get the metadata, display_name, and definition from the request
metadata = {}
data = None
template_id = request.json.get('boilerplate')
if template_id is not None:
clz = XModuleDescriptor.load_class(category)
if clz is not None:
template = clz.get_template(template_id)
if template is not None:
metadata = template.get('metadata', {})
data = template.get('data')
if display_name is not None:
metadata['display_name'] = display_name
get_modulestore(category).create_and_save_xmodule(
dest_location,
definition_data=data,
metadata=metadata,
system=parent.system,
)
if category not in DETACHED_CATEGORIES:
get_modulestore(parent.location).update_children(parent_location, parent.children + [dest_location.url()])
course_location = loc_mapper().translate_locator_to_location(parent_locator, get_course=True)
locator = loc_mapper().translate_location(course_location.course_id, dest_location, False, True)
return JsonResponse({"locator": unicode(locator)})
def _delete_item_at_location(item_location, delete_children=False, delete_all_versions=False):
"""
Deletes the item with the given Location.
It is assumed that course permissions have already been checked.
"""
store = get_modulestore(item_location)
item = store.get_item(item_location)
if delete_children:
_xmodule_recurse(item, lambda i: store.delete_item(i.location, delete_all_versions))
else:
store.delete_item(item.location, delete_all_versions)
# cdodge: we need to remove our parent's pointer to us so that it is no longer dangling
if delete_all_versions:
parent_locs = modulestore('direct').get_parent_locations(item_location, None)
for parent_loc in parent_locs:
parent = modulestore('direct').get_item(parent_loc)
item_url = item_location.url()
if item_url in parent.children:
children = parent.children
children.remove(item_url)
parent.children = children
modulestore('direct').update_children(parent.location, parent.children)
return JsonResponse()
# pylint: disable=W0613
@login_required
@require_http_methods(("GET", "DELETE"))
def orphan_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
View for handling orphan-related requests. GET gets all of the current orphans.
DELETE removes all orphans (requires is_staff access).
An orphan is a block whose category is not in the DETACHED_CATEGORIES list, is not the root, and is not reachable
from the root via children.
:param request:
:param package_id: Locator syntax package_id
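Illustrative GET response (the locations shown are hypothetical, not from any real course):
    ["i4x://Org/Course/html/abc123", "i4x://Org/Course/vertical/def456"]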
"""
location = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
# DHM: when split becomes back-end, move or conditionalize this conversion
old_location = loc_mapper().translate_locator_to_location(location)
if request.method == 'GET':
if has_access(request.user, old_location):
return JsonResponse(modulestore().get_orphans(old_location, DETACHED_CATEGORIES, 'draft'))
else:
raise PermissionDenied()
if request.method == 'DELETE':
if request.user.is_staff:
items = modulestore().get_orphans(old_location, DETACHED_CATEGORIES, 'draft')
for item in items:
modulestore('draft').delete_item(item, True)
return JsonResponse({'deleted': items})
else:
raise PermissionDenied()
def _get_module_info(usage_loc, rewrite_static_links=True):
"""
Returns the metadata, data, and id representation of a leaf module.
:param usage_loc: A BlockUsageLocator
"""
old_location = loc_mapper().translate_locator_to_location(usage_loc)
store = get_modulestore(old_location)
try:
module = store.get_item(old_location)
except ItemNotFoundError:
if old_location.category in CREATE_IF_NOT_FOUND:
# Create a new one for certain categories only. Used for course info handouts.
store.create_and_save_xmodule(old_location)
module = store.get_item(old_location)
else:
raise
data = module.data
if rewrite_static_links:
# we pass a partially bogus course_id as we don't have the RUN information passed yet
# through the CMS. Also the contentstore is also not RUN-aware at this point in time.
data = replace_static_urls(
module.data,
None,
course_id=module.location.org + '/' + module.location.course + '/BOGUS_RUN_REPLACE_WHEN_AVAILABLE'
)
# Note that children aren't being returned until we have a use case.
return {
'id': unicode(usage_loc),
'data': data,
'metadata': own_metadata(module)
}
|
agpl-3.0
| -2,731,989,271,063,219,000
| 42.186158
| 122
| 0.6336
| false
| 4.345581
| false
| false
| false
|
amureki/lunch-with-channels
|
places/migrations/0001_initial.py
|
1
|
1222
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-31 20:24
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
import stdimage.models
import stdimage.utils
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('name', models.CharField(max_length=255, verbose_name='Name')),
('image', stdimage.models.StdImageField(blank=True, null=True, upload_to=stdimage.utils.UploadToUUID(path='places'), verbose_name='Image')),
('address', models.CharField(max_length=255, verbose_name='Address')),
],
options={
'ordering': ('-created',),
},
),
]
|
mit
| -6,530,141,570,635,881,000
| 36.030303
| 156
| 0.615385
| false
| 4.199313
| false
| false
| false
|
smartboyathome/Wonderland-Engine
|
install_check_to_white_rabbit.py
|
1
|
1971
|
#!/usr/bin/env python2
"""
Copyright (c) 2012 Alexander Abbott
This file is part of the Cheshire Cyber Defense Scoring Engine (henceforth
referred to as Cheshire).
Cheshire is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Cheshire is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
more details.
You should have received a copy of the GNU Affero General Public License
along with Cheshire. If not, see <http://www.gnu.org/licenses/>.
"""
import WhiteRabbit, os, sys, argparse, shutil, py_compile
class CannotOpenFile(Exception):
def __init__(self, directory):
self.directory = directory
def __repr__(self):
return "Cannot write a file at '{}'.".format(self.directory)
if __name__ == '__main__':
exit_code = 0
parser = argparse.ArgumentParser(description="A tool to help install checks into White Rabbit.")
parser.add_argument('python_file', nargs='*')
args = parser.parse_args()
check_dir = os.path.join(os.path.split(WhiteRabbit.__file__)[0], 'checks')
if not os.path.exists(check_dir) or not os.access(check_dir, os.W_OK):
raise CannotOpenFile(check_dir)
for f in args.python_file:
abspath = os.path.abspath(f)
if not os.path.exists(abspath) or not os.access(abspath, os.R_OK):
print "Could not read a file at '{}'.".format(abspath)
exit_code = 1
continue
path, name = os.path.split(abspath)
new_path = os.path.join(check_dir, name)
shutil.copy(abspath, new_path)
py_compile.compile(new_path)
sys.exit(exit_code)
|
agpl-3.0
| 2,859,696,470,728,600,000
| 39.244898
| 100
| 0.674277
| false
| 3.754286
| false
| false
| false
|
Andrwe/py3status
|
py3status/constants.py
|
1
|
7015
|
# This file contains various useful constants for py3status
GENERAL_DEFAULTS = {
"color_bad": "#FF0000",
"color_degraded": "#FFFF00",
"color_good": "#00FF00",
"color_separator": "#333333",
"colors": False,
"interval": 5,
"output_format": "i3bar",
}
MAX_NESTING_LEVELS = 4
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
TZTIME_FORMAT = "%Y-%m-%d %H:%M:%S %Z"
TIME_MODULES = ["time", "tztime"]
I3S_INSTANCE_MODULES = [
"battery",
"cpu_temperature",
"disk",
"ethernet",
"memory",
"path_exists",
"run_watch",
"tztime",
"volume",
"wireless",
]
I3S_SINGLE_NAMES = ["cpu_usage", "ddate", "ipv6", "load", "time"]
I3S_ALLOWED_COLORS = ["color_bad", "color_good", "color_degraded"]
# i3status modules that allow colors to be passed.
# general section also allows colors so is included.
I3S_COLOR_MODULES = ["general", "battery", "cpu_temperature", "disk", "load"]
I3S_MODULE_NAMES = I3S_SINGLE_NAMES + I3S_INSTANCE_MODULES
CONFIG_FILE_SPECIAL_SECTIONS = ["general", "py3status"]
ERROR_CONFIG = """
general {colors = true interval = 60}
order += "static_string py3status"
order += "tztime local"
order += "group error"
static_string py3status {format = "py3status"}
tztime local {format = "%c"}
group error{
button_next = 1
button_prev = 0
fixed_width = False
format = "{output}"
static_string error_min {format = "CONFIG ERROR" color = "#FF0000"}
static_string error {format = "$error" color = "#FF0000"}
}
"""
COLOR_NAMES_EXCLUDED = ["good", "bad", "degraded", "separator", "threshold", "None"]
COLOR_NAMES = {
"aliceblue": "#F0F8FF",
"antiquewhite": "#FAEBD7",
"aqua": "#00FFFF",
"aquamarine": "#7FFFD4",
"azure": "#F0FFFF",
"beige": "#F5F5DC",
"bisque": "#FFE4C4",
"black": "#000000",
"blanchedalmond": "#FFEBCD",
"blue": "#0000FF",
"blueviolet": "#8A2BE2",
"brown": "#A52A2A",
"burlywood": "#DEB887",
"cadetblue": "#5F9EA0",
"chartreuse": "#7FFF00",
"chocolate": "#D2691E",
"coral": "#FF7F50",
"cornflowerblue": "#6495ED",
"cornsilk": "#FFF8DC",
"crimson": "#DC143C",
"cyan": "#00FFFF",
"darkblue": "#00008B",
"darkcyan": "#008B8B",
"darkgoldenrod": "#B8860B",
"darkgray": "#A9A9A9",
"darkgrey": "#A9A9A9",
"darkgreen": "#006400",
"darkkhaki": "#BDB76B",
"darkmagenta": "#8B008B",
"darkolivegreen": "#556B2F",
"darkorange": "#FF8C00",
"darkorchid": "#9932CC",
"darkred": "#8B0000",
"darksalmon": "#E9967A",
"darkseagreen": "#8FBC8F",
"darkslateblue": "#483D8B",
"darkslategray": "#2F4F4F",
"darkslategrey": "#2F4F4F",
"darkturquoise": "#00CED1",
"darkviolet": "#9400D3",
"deeppink": "#FF1493",
"deepskyblue": "#00BFFF",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1E90FF",
"firebrick": "#B22222",
"floralwhite": "#FFFAF0",
"forestgreen": "#228B22",
"fuchsia": "#FF00FF",
"gainsboro": "#DCDCDC",
"ghostwhite": "#F8F8FF",
"gold": "#FFD700",
"goldenrod": "#DAA520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#ADFF2F",
"honeydew": "#F0FFF0",
"hotpink": "#FF69B4",
"indianred": "#CD5C5C",
"indigo": "#4B0082",
"ivory": "#FFFFF0",
"khaki": "#F0E68C",
"lavender": "#E6E6FA",
"lavenderblush": "#FFF0F5",
"lawngreen": "#7CFC00",
"lemonchiffon": "#FFFACD",
"lightblue": "#ADD8E6",
"lightcoral": "#F08080",
"lightcyan": "#E0FFFF",
"lightgoldenrodyellow": "#FAFAD2",
"lightgray": "#D3D3D3",
"lightgrey": "#D3D3D3",
"lightgreen": "#90EE90",
"lightpink": "#FFB6C1",
"lightsalmon": "#FFA07A",
"lightseagreen": "#20B2AA",
"lightskyblue": "#87CEFA",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#B0C4DE",
"lightyellow": "#FFFFE0",
"lime": "#00FF00",
"limegreen": "#32CD32",
"linen": "#FAF0E6",
"magenta": "#FF00FF",
"maroon": "#800000",
"mediumaquamarine": "#66CDAA",
"mediumblue": "#0000CD",
"mediumorchid": "#BA55D3",
"mediumpurple": "#9370DB",
"mediumseagreen": "#3CB371",
"mediumslateblue": "#7B68EE",
"mediumspringgreen": "#00FA9A",
"mediumturquoise": "#48D1CC",
"mediumvioletred": "#C71585",
"midnightblue": "#191970",
"mintcream": "#F5FFFA",
"mistyrose": "#FFE4E1",
"moccasin": "#FFE4B5",
"navajowhite": "#FFDEAD",
"navy": "#000080",
"oldlace": "#FDF5E6",
"olive": "#808000",
"olivedrab": "#6B8E23",
"orange": "#FFA500",
"orangered": "#FF4500",
"orchid": "#DA70D6",
"palegoldenrod": "#EEE8AA",
"palegreen": "#98FB98",
"paleturquoise": "#AFEEEE",
"palevioletred": "#DB7093",
"papayawhip": "#FFEFD5",
"peachpuff": "#FFDAB9",
"peru": "#CD853F",
"pink": "#FFC0CB",
"plum": "#DDA0DD",
"powderblue": "#B0E0E6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#FF0000",
"rosybrown": "#BC8F8F",
"royalblue": "#4169E1",
"saddlebrown": "#8B4513",
"salmon": "#FA8072",
"sandybrown": "#F4A460",
"seagreen": "#2E8B57",
"seashell": "#FFF5EE",
"sienna": "#A0522D",
"silver": "#C0C0C0",
"skyblue": "#87CEEB",
"slateblue": "#6A5ACD",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#FFFAFA",
"springgreen": "#00FF7F",
"steelblue": "#4682B4",
"tan": "#D2B48C",
"teal": "#008080",
"thistle": "#D8BFD8",
"tomato": "#FF6347",
"turquoise": "#40E0D0",
"violet": "#EE82EE",
"wheat": "#F5DEB3",
"white": "#FFFFFF",
"whitesmoke": "#F5F5F5",
"yellow": "#FFFF00",
"yellowgreen": "#9ACD32",
}
ON_TRIGGER_ACTIONS = ["refresh", "refresh_and_freeze"]
POSITIONS = ["left", "center", "right"]
RETIRED_MODULES = {
"nvidia_temp": {
"new": ["nvidia_smi"],
"msg": "Module {old} has been replaced with a module {new}.",
},
"scratchpad_async": {
"new": ["scratchpad"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"scratchpad_counter": {
"new": ["scratchpad"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"window_title": {
"new": ["window"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"window_title_async": {
"new": ["window"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"weather_yahoo": {
"new": ["weather_owm"],
"msg": "Module {old} is no longer available due to retired Yahoo Weather APIs and new Oath requirements. You can try a different module {new}.",
},
"xkb_layouts": {
"new": ["xkb_input"],
"msg": "Module {old} has been replaced with a module {new} to support sway too.",
},
}
MARKUP_LANGUAGES = ["pango", "none"]
|
bsd-3-clause
| -4,436,011,993,799,610,400
| 26.727273
| 152
| 0.556522
| false
| 2.704318
| false
| false
| false
|
ikargis/horizon_fod
|
horizon/templatetags/sizeformat.py
|
1
|
2802
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Template tags for displaying sizes
"""
from django import template
from django.utils import formats
from django.utils import translation
register = template.Library()
def int_format(value):
return int(value)
def float_format(value):
return formats.number_format(round(value, 1), 1)
def filesizeformat(bytes, filesize_number_format):
try:
bytes = float(bytes)
except (TypeError, ValueError, UnicodeDecodeError):
return translation.ungettext_lazy("%(size)d byte",
"%(size)d bytes", 0) % {'size': 0}
if bytes < 1024:
return translation.ungettext_lazy("%(size)d",
"%(size)d", bytes) % {'size': bytes}
if bytes < 1024 * 1024:
return translation.ugettext_lazy("%s KB") % \
filesize_number_format(bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return translation.ugettext_lazy("%s MB") % \
filesize_number_format(bytes / (1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024:
return translation.ugettext_lazy("%s GB") % \
filesize_number_format(bytes / (1024 * 1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
return translation.ugettext_lazy("%s TB") % \
filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
return translation.ugettext_lazy("%s PB") % \
filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
@register.filter(name='mbformat')
def mbformat(mb):
if not mb:
return 0
return filesizeformat(mb * 1024 * 1024, int_format).replace(' ', '')
@register.filter(name='mb_float_format')
def mb_float_format(mb):
"""Takes a size value in mb, and prints returns the data in a
saner unit.
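Illustrative example (exact formatting can vary with the active locale):
mb_float_format(2048) would render roughly as "2.0 GB".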
"""
if not mb:
return 0
return filesizeformat(mb * 1024 * 1024, float_format)
@register.filter(name='diskgbformat')
def diskgbformat(gb):
return filesizeformat(gb * 1024 * 1024 * 1024,
float_format).replace(' ', '')
|
apache-2.0
| -9,080,420,568,618,808,000
| 31.206897
| 78
| 0.654176
| false
| 3.859504
| false
| false
| false
|
zerotired/kotori
|
kotori/vendor/hydro2motion/database/mongo.py
|
2
|
4486
|
# -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
import txmongo
from autobahn.twisted.wamp import ApplicationRunner, ApplicationSession
from twisted.internet.defer import inlineCallbacks
from twisted.internet.interfaces import ILoggingContext
from twisted.python import log
from zope.interface.declarations import implementer
@implementer(ILoggingContext)
class MongoDatabaseService(ApplicationSession):
"""An application component for logging telemetry data to MongoDB databases"""
#@inlineCallbacks
#def __init__(self, config):
# ApplicationSession.__init__(self, config)
def logPrefix(self):
"""
Return a prefix matching the class name, to identify log messages
related to this protocol instance.
"""
return self.__class__.__name__
#@inlineCallbacks
def onJoin(self, details):
print("Realm joined (WAMP session started).")
# subscribe to telemetry data channel
self.subscribe(self.receive, u'de.elmyra.kotori.telemetry.data')
self.startDatabase()
#self.leave()
#@inlineCallbacks
def startDatabase(self):
#self.mongo = yield txmongo.MongoConnection(host='127.0.0.0', port=27017)
self.mongo = yield txmongo.MongoConnection()
def onLeave(self, details):
print("Realm left (WAMP session ended).")
ApplicationSession.onLeave(self, details)
def onDisconnect(self):
print("Transport disconnected.")
#reactor.stop()
#@inlineCallbacks
def receive(self, data):
#print "RECEIVE:", data
# decode wire data
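# Assumed wire format (inferred from the field indices below, not an authoritative
# spec): one semicolon-separated string of 33 fields, starting with MSG_ID and
# ending with lat and lng.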
payload = data.split(';')
try:
MSG_ID = int(payload[0])
V_FC = int(payload[1])
V_CAP = int(payload[2])
A_ENG = int(payload[3])
A_CAP = int(payload[4])
T_O2_In = int(payload[5])
T_O2_Out = int(payload[6])
T_FC_H2O_Out = int(payload[7])
Water_In = int(payload[8])
Water_Out = int(payload[9])
Master_SW = bool(payload[10])
CAP_Down_SW = bool(payload[11])
Drive_SW = bool(payload[12])
FC_state = bool(payload[13])
Mosfet_state = bool(payload[14])
Safety_state = bool(payload[15])
Air_Pump_load = float(payload[16])
Mosfet_load = int(payload[17])
Water_Pump_load = int(payload[18])
Fan_load = int(payload[19])
Acc_X = int(payload[20])
Acc_Y = int(payload[21])
Acc_Z = int(payload[22])
AUX = float(payload[23])
GPS_X = int(payload[24])
GPS_Y = int(payload[25])
GPS_Z = int(payload[26])
GPS_Speed = int(payload[27])
V_Safety = int(payload[28])
H2_Level = int(payload[29])
O2_calc = float(payload[30])
lat = float(payload[31])
lng = float(payload[32])
# store data to database
if self.mongo:
telemetry = self.mongo.kotori.telemetry
yield telemetry.insert(dict(MSG_ID = MSG_ID, V_FC = V_FC, V_CAP = V_CAP, A_ENG = A_ENG, A_CAP = A_CAP, T_O2_In = T_O2_In, T_O2_Out = T_O2_Out, T_FC_H2O_Out = T_FC_H2O_Out, Water_In = Water_In, Water_Out = Water_Out, Master_SW = Master_SW, CAP_Down_SW = CAP_Down_SW, Drive_SW = Drive_SW, FC_state = FC_state, Mosfet_state = Mosfet_state, Safety_state = Safety_state, Air_Pump_load = Air_Pump_load, Mosfet_load = Mosfet_load, Water_Pump_load = Water_Pump_load, Fan_load = Fan_load, Acc_X = Acc_X, Acc_Y = Acc_Y, Acc_Z = Acc_Z, AUX = AUX, GPS_X = GPS_X, GPS_Y = GPS_Y, GPS_Z = GPS_Z, GPS_Speed = GPS_Speed, V_Safety = V_Safety, H2_Level = H2_Level, O2_calc = O2_calc, lat = lat, lng = lng))
except ValueError:
print('Could not decode data: {}'.format(data))
def boot_mongo_database(websocket_uri, debug=False, trace=False):
print 'INFO: Starting mongo database service, connecting to broker', websocket_uri
runner = ApplicationRunner(websocket_uri, u'kotori-realm', debug=trace, debug_wamp=debug, debug_app=debug)
runner.run(MongoDatabaseService, start_reactor=False)
|
agpl-3.0
| 2,517,970,087,876,669,000
| 42.134615
| 703
| 0.569996
| false
| 3.453426
| false
| false
| false
|
enriquepablo/terms
|
terms/core/tests.py
|
1
|
2966
|
# Copyright (c) 2007-2012 by Enrique Pérez Arnaud <enriquepablo@gmail.com>
#
# This file is part of the terms project.
# https://github.com/enriquepablo/terms
#
# The terms project is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The terms project is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with any part of the terms project.
# If not, see <http://www.gnu.org/licenses/>.
import sys
import os
from configparser import ConfigParser
import nose
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from terms.core.terms import Base
from terms.core.network import Network
from terms.core.compiler import KnowledgeBase
CONFIG = '''
[test]
dbms = postgresql://terms:terms@localhost
dbname = test
#dbms = sqlite://
#dbname = :memory:
time = normal
import =
instant_duration = 0
'''
def test_terms(): # test generator
# read contents of tests/
# feed each test to run_terms
d = os.path.dirname(sys.modules['terms.core'].__file__)
d = os.path.join(d, 'tests')
files = os.listdir(d)
config = ConfigParser()
config.read_string(CONFIG)
config = config['test']
for f in files:
if f.endswith('.test'):
address = '%s/%s' % (config['dbms'], config['dbname'])
engine = create_engine(address)
Session = sessionmaker(bind=engine)
session = Session()
Base.metadata.create_all(engine)
Network.initialize(session)
kb = KnowledgeBase(session, config,
lex_optimize=False,
yacc_optimize=False,
yacc_debug=True)
yield run_terms, kb, os.path.join(d, f)
kb.session.close()
Base.metadata.drop_all(engine)
def run_terms(kb, fname):
# open file, read lines
# tell assertions
# compare return of questions with provided output
with open(fname) as f:
resp = kb.no_response
for sen in f:
sen = sen.rstrip()
if resp is not kb.no_response:
sen = sen.strip('.')
pmsg = 'returned "%s" is not "%s" at line %d for query: %s'
msg = pmsg % (resp, sen,
kb.parser.lex.lexer.lineno,
kb.parser.lex.lexer.lexdata)
nose.tools.assert_equals(sen, resp, msg=msg)
resp = kb.no_response
elif sen and not sen.startswith('#'):
resp = kb.process_line(sen)
|
gpl-3.0
| -4,042,667,240,511,713,300
| 32.693182
| 75
| 0.619224
| false
| 3.937583
| true
| false
| false
|
pmacosta/putil
|
sbin/compare_image_dirs.py
|
1
|
3856
|
#!/usr/bin/env python
# compare_image_dirs.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111
# Standard library imports
from __future__ import print_function
import argparse
import glob
import os
import sys
# PyPI imports
import numpy
import scipy
import scipy.misc
# Putil imports
import sbin.functions
###
# Functions
###
def compare_images(fname1, fname2, no_print=True, imgtol=1e-3):
""" Compare two images by calculating Manhattan and Zero norms """
# Source: http://stackoverflow.com/questions/189943/
# how-can-i-quantify-difference-between-two-images
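# Intuition: the Manhattan norm sums the absolute per-pixel differences, while the
# zero norm counts how many pixels differ at all; both must stay below imgtol for
# the images to be treated as equal.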
for item in (fname1, fname2):
if not os.path.exists(item):
return False
img1 = scipy.misc.imread(fname1).astype(float)
img2 = scipy.misc.imread(fname2).astype(float)
if img1.size != img2.size:
m_norm, z_norm = 2*[2*imgtol]
else:
# Element-wise for Scipy arrays
diff = img1-img2
# Manhattan norm
m_norm = scipy.sum(numpy.abs(diff))
# Zero norm
z_norm = scipy.linalg.norm(diff.ravel(), 0)
result = bool((m_norm < imgtol) and (z_norm < imgtol))
if not no_print:
print(
'Image 1: {0}, Image 2: {1} -> ({2}, {3}) [{4}]'.format(
fname1, fname2, m_norm, z_norm, result
)
)
return result
def main(no_print, dir1, dir2):
""" Compare two images """
# pylint: disable=R0912
for item in [dir1, dir2]:
if not os.path.exists(item):
raise IOError('Directory {0} could not be found'.format(item))
dir1_images = set(
[
os.path.basename(item)
for item in glob.glob(os.path.join(dir1, '*.png'))
]
)
dir2_images = set(
[
os.path.basename(item)
for item in glob.glob(os.path.join(dir2, '*.png'))
]
)
yes_list = []
no_list = []
dir1_list = sorted(list(dir1_images-dir2_images))
dir2_list = sorted(list(dir2_images-dir1_images))
global_result = bool((not dir1_list) and (not dir2_list))
for image in sorted(list(dir1_images & dir2_images)):
result = compare_images(
os.path.join(dir1, image), os.path.join(dir2, image)
)
if (not result) and (not no_print):
no_list.append(image)
global_result = False
elif not no_print:
yes_list.append(image)
print('Files only in {0}'.format(dir1))
if dir1_list:
for item in dir1_list:
print(' {0}'.format(item))
else:
print(' None')
print('Files only in {0}'.format(dir2))
if dir2_list:
for item in dir2_list:
print(' {0}'.format(item))
else:
print(' None')
print('Matching files')
if yes_list:
for item in yes_list:
print(' {0}'.format(item))
else:
print(' None')
print('Mismatched files')
if no_list:
for item in no_list:
print(' {0}'.format(item))
else:
print(' None')
if global_result and (not no_print):
print(sbin.functions.pcolor('Directories ARE equal', 'green'))
elif (not global_result) and (not no_print):
print(sbin.functions.pcolor('Directories ARE NOT equal', 'red'))
sys.exit(1 if not global_result else 0)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Compare image directories'
)
PARSER.add_argument(
'-q', '--quiet',
help='suppress messages',
action="store_true",
default=False
)
PARSER.add_argument('dir1', help='First directory to compare', nargs=1)
PARSER.add_argument('dir2', help='Second directory to compare', nargs=1)
ARGS = PARSER.parse_args()
main(ARGS.quiet, ARGS.dir1[0], ARGS.dir2[0])
|
mit
| 1,650,487,592,812,506,000
| 29.125
| 76
| 0.582469
| false
| 3.373578
| false
| false
| false
|
kmshi/miroguide
|
channelguide/subscriptions/views.py
|
1
|
1962
|
from django.conf import settings
from django.core.urlresolvers import resolve, Resolver404
from django.views.decorators.cache import never_cache
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from channelguide import util
from channelguide.channels.models import Channel
from channelguide.channels.views import channel as channel_view
from channelguide.guide.views.firsttime import index as firsttime_index
from channelguide.subscriptions.models import Subscription
@never_cache
def subscribe_hit(request, id):
"""Used by our ajax call handleSubscriptionLink. It will get a security
error if we redirect it to a URL outside the channelguide, so we don't do
that
"""
ids = [id] + [int(k) for k in request.GET]
for id in ids:
channel = get_object_or_404(Channel, pk=id)
referer = request.META.get('HTTP_REFERER', '')
ignore_for_recommendations = False
if referer.startswith(settings.BASE_URL_FULL):
referer = util.chop_prefix(referer, settings.BASE_URL_FULL)
if not referer.startswith("/"):
referer = '/' + referer # make sure it starts with a slash
try:
resolved = resolve(referer)
except Resolver404:
pass
else:
if resolved is not None:
func, args, kwargs = resolved
if func == channel_view and args[0] != id:
ignore_for_recommendations = True
elif func == firsttime_index:
ignore_for_recommendations = True
ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
if ip == '127.0.0.1':
ip = request.META.get('HTTP_X_FORWARDED_FOR', '0.0.0.0')
Subscription.objects.add(
channel, ip,
ignore_for_recommendations=ignore_for_recommendations)
return HttpResponse("Hit successfull")
|
agpl-3.0
| -7,790,385,285,968,047,000
| 40.744681
| 77
| 0.634557
| false
| 4.183369
| false
| false
| false
|
vincent-noel/libSigNetSim
|
libsignetsim/model/math/DAE.py
|
1
|
2339
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file defines the DAE class, which wraps an algebraic constraint of the model (an expression required to equal zero).
"""
from __future__ import print_function
from libsignetsim.model.math.MathFormula import MathFormula
from libsignetsim.model.math.sympy_shortcuts import SympySymbol, SympyEqual, SympyInteger
from sympy import solve, srepr, pretty
class DAE(object):
""" DAE class """
def __init__ (self, model):
""" Constructor of ode class """
self.__model = model
self.__definition = None
def new(self, definition):
self.__definition = definition
def getDefinition(self):
return self.__definition
def getFormula(self, rawFormula=True, developped=False):
if developped:
t_definition = self.__definition.getDeveloppedInternalMathFormula(rawFormula=rawFormula)
else:
t_definition = self.__definition.getInternalMathFormula(rawFormula=rawFormula)
return SympyEqual(
t_definition,
SympyInteger(0)
)
def __str__(self):
return "%s = 0" % str(self.__definition.getDeveloppedInternalMathFormula())
def pprint(self):
print(
pretty(
SympyEqual(
self.__definition.getDeveloppedInternalMathFormula(),
SympyInteger(0)
)
)
)
def solve(self):
to_solve = []
for var in self.__definition.getDeveloppedInternalMathFormula().atoms(SympySymbol):
variable = self.__model.listOfVariables.getBySymbol(var)
if variable is not None and variable.isAlgebraic():
to_solve.append(var)
return (to_solve[0], solve(self.__definition.getDeveloppedInternalMathFormula(), to_solve))
def isValid(self):
return self.__definition.getInternalMathFormula() != MathFormula.ZERO
|
gpl-3.0
| 506,651,916,005,281,900
| 25.885057
| 93
| 0.730654
| false
| 3.434655
| false
| false
| false
|
Wireless-Innovation-Forum/Spectrum-Access-System
|
src/harness/testcases/WINNF_FT_S_PPR_testcase.py
|
1
|
30790
|
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import logging
from concurrent.futures import ThreadPoolExecutor
import common_strings
from full_activity_dump_helper import getFullActivityDumpSasTestHarness, getFullActivityDumpSasUut
import sas
import sas_testcase
from sas_test_harness import SasTestHarnessServer, generateCbsdRecords, \
generatePpaRecords, generateCbsdReferenceId
import test_harness_objects
from util import winnforum_testcase, writeConfig, loadConfig, configurable_testcase, \
getRandomLatLongInPolygon, makePpaAndPalRecordsConsistent, \
addCbsdIdsToRequests, getCertFilename, getCertificateFingerprint, \
getFqdnLocalhost, getUnusedPort, json_load
from testcases.WINNF_FT_S_MCP_testcase import McpXprCommonTestcase
from reference_models.pre_iap_filtering import pre_iap_filtering
class PpaProtectionTestcase(McpXprCommonTestcase):
def setUp(self):
self._sas, self._sas_admin = sas.GetTestingSas()
self._sas_admin.Reset()
def tearDown(self):
self.ShutdownServers()
def generate_PPR_1_default_config(self, filename):
""" Generates the WinnForum configuration for PPR.1. """
# Load PPA record
ppa_record = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_0.json'))
pal_record = json_load(
os.path.join('testcases', 'testdata', 'pal_record_0.json'))
pal_low_frequency = 3550000000
pal_high_frequency = 3560000000
ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(
ppa_record,
[pal_record],
pal_low_frequency,
pal_high_frequency,
'test_user_1'
)
# Load devices info
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
# Moving device_1 to a location within 40 KMs of PPA zone
device_1['installationParam']['latitude'] = 38.8203
device_1['installationParam']['longitude'] = -97.2741
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
# Moving device_2 to a location outside 40 KMs of PPA zone
device_2['installationParam']['latitude'] = 39.31476
device_2['installationParam']['longitude'] = -96.75139
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Moving device_3 to a location within PPA zone
device_3['installationParam']['latitude'], \
device_3['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
# Moving device_4 to a location within PPA zone
device_4['installationParam']['latitude'], \
device_4['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
# Load Grant requests
grant_request_1 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_2 = json_load(
os.path.join('testcases', 'testdata', 'grant_1.json'))
grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_3 = json_load(
os.path.join('testcases', 'testdata', 'grant_2.json'))
grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_4 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
# device_b and device_d are Category B
# Load Conditional Data
self.assertEqual(device_2['cbsdCategory'], 'B')
conditionals_device_2 = {
'cbsdCategory': device_2['cbsdCategory'],
'fccId': device_2['fccId'],
'cbsdSerialNumber': device_2['cbsdSerialNumber'],
'airInterface': device_2['airInterface'],
'installationParam': device_2['installationParam'],
'measCapability': device_2['measCapability']
}
self.assertEqual(device_4['cbsdCategory'], 'B')
conditionals_device_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam'],
'measCapability': device_4['measCapability']
}
# Remove conditionals from registration
del device_2['cbsdCategory']
del device_2['airInterface']
del device_2['installationParam']
del device_2['measCapability']
del device_4['cbsdCategory']
del device_4['airInterface']
del device_4['installationParam']
del device_4['measCapability']
# Registration and grant records
cbsd_records_domain_proxy_0 = {
'registrationRequests': [device_1, device_2],
'grantRequests': [grant_request_1, grant_request_2],
'conditionalRegistrationData': [conditionals_device_2]
}
cbsd_records_domain_proxy_1 = {
'registrationRequests': [device_3],
'grantRequests': [grant_request_3],
'conditionalRegistrationData': []
}
# Protected entity record
protected_entities = {
'palRecords': pal_records_1,
'ppaRecords': [ppa_record_1]
}
iteration_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
cbsd_records_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_4,
'grantRequest': grant_request_4,
'conditionalRegistrationData': conditionals_device_4,
'clientCert': getCertFilename('device_d.cert'),
'clientKey': getCertFilename('device_d.key')
}],
'protectedEntities': protected_entities,
'dpaActivationList': [],
'dpaDeactivationList': [],
'sasTestHarnessData': []
}
# Create the actual config.
config = {
'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
'initialCbsdRecords': [],
'iterationData': [iteration_config],
'sasTestHarnessConfigs': [],
'domainProxyConfigs': [{
'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')
}, {
'cert': getCertFilename('domain_proxy_1.cert'),
'key': getCertFilename('domain_proxy_1.key')
}]
}
writeConfig(filename, config)
@configurable_testcase(generate_PPR_1_default_config)
def test_WINNF_FT_S_PPR_1(self, config_filename):
"""Single SAS PPA Protection
"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR1')
def generate_PPR_2_default_config(self, filename):
""" Generates the WinnForum configuration for PPR.2. """
# Load PPA record
ppa_record = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_0.json'))
pal_record = json_load(
os.path.join('testcases', 'testdata', 'pal_record_0.json'))
pal_low_frequency = 3550000000
pal_high_frequency = 3560000000
ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(
ppa_record,
[pal_record],
pal_low_frequency,
pal_high_frequency,
'test_user_1'
)
# Load devices info
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
# Moving device_1 to a location within 40 KMs of PPA zone
device_1['installationParam']['latitude'] = 38.8203
device_1['installationParam']['longitude'] = -97.2741
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
# Moving device_2 to a location outside 40 KMs of PPA zone
device_2['installationParam']['latitude'] = 39.31476
device_2['installationParam']['longitude'] = -96.75139
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Moving device_3 to a location within PPA zone
device_3['installationParam']['latitude'], \
device_3['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
# Moving device_4 to a location within PPA zone
device_4['installationParam']['latitude'], \
device_4['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
# Load Grant requests with overlapping frequency range for all devices
grant_request_1 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_2 = json_load(
os.path.join('testcases', 'testdata', 'grant_1.json'))
grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3570000000
grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3580000000
grant_request_3 = json_load(
os.path.join('testcases', 'testdata', 'grant_2.json'))
grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3590000000
grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3600000000
grant_request_4 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3610000000
grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3620000000
# device_b and device_d are Category B
# Load Conditional Data
self.assertEqual(device_2['cbsdCategory'], 'B')
conditionals_device_2 = {
'cbsdCategory': device_2['cbsdCategory'],
'fccId': device_2['fccId'],
'cbsdSerialNumber': device_2['cbsdSerialNumber'],
'airInterface': device_2['airInterface'],
'installationParam': device_2['installationParam'],
'measCapability': device_2['measCapability']
}
self.assertEqual(device_4['cbsdCategory'], 'B')
conditionals_device_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam'],
'measCapability': device_4['measCapability']
}
# Remove conditionals from registration
del device_2['cbsdCategory']
del device_2['airInterface']
del device_2['installationParam']
del device_2['measCapability']
del device_4['cbsdCategory']
del device_4['airInterface']
del device_4['installationParam']
del device_4['measCapability']
# Registration and grant records
cbsd_records_domain_proxy_0 = {
'registrationRequests': [device_1, device_2],
'grantRequests': [grant_request_1, grant_request_2],
'conditionalRegistrationData': [conditionals_device_2]
}
cbsd_records_domain_proxy_1 = {
'registrationRequests': [device_3],
'grantRequests': [grant_request_3],
'conditionalRegistrationData': []
}
# Protected entity record
protected_entities = {
'palRecords': pal_records_1,
'ppaRecords': [ppa_record_1]
}
# SAS Test Harnesses configurations,
# Following configurations are for two SAS test harnesses
sas_test_harness_device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
sas_test_harness_device_1['fccId'] = "test_fcc_id_e"
sas_test_harness_device_1['userId'] = "test_user_id_e"
sas_test_harness_device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
sas_test_harness_device_2['fccId'] = "test_fcc_id_f"
sas_test_harness_device_2['userId'] = "test_user_id_f"
sas_test_harness_device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
sas_test_harness_device_3['fccId'] = "test_fcc_id_g"
sas_test_harness_device_3['userId'] = "test_user_id_g"
# Generate Cbsd FAD Records for SAS Test Harness 0
cbsd_fad_records_sas_test_harness_0 = generateCbsdRecords(
[sas_test_harness_device_1],
[[grant_request_1]]
)
# Generate Cbsd FAD Records for SAS Test Harness 1
cbsd_fad_records_sas_test_harness_1 = generateCbsdRecords(
[sas_test_harness_device_2, sas_test_harness_device_3],
[[grant_request_2], [grant_request_3]]
)
# Generate SAS Test Harnesses dump records
dump_records_sas_test_harness_0 = {
'cbsdRecords': cbsd_fad_records_sas_test_harness_0
}
dump_records_sas_test_harness_1 = {
'cbsdRecords': cbsd_fad_records_sas_test_harness_1
}
# SAS Test Harnesses configuration
sas_test_harness_0_config = {
'sasTestHarnessName': 'SAS-TH-1',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas.cert'),
'serverKey': getCertFilename('sas.key'),
'caCert': getCertFilename('ca.cert')
}
sas_test_harness_1_config = {
'sasTestHarnessName': 'SAS-TH-2',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas_1.cert'),
'serverKey': getCertFilename('sas_1.key'),
'caCert': getCertFilename('ca.cert')
}
iteration_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
cbsd_records_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_4,
'grantRequest': grant_request_4,
'conditionalRegistrationData': conditionals_device_4,
'clientCert': getCertFilename('device_d.cert'),
'clientKey': getCertFilename('device_d.key')
}],
'protectedEntities': protected_entities,
'dpaActivationList': [],
'dpaDeactivationList': [],
'sasTestHarnessData': [dump_records_sas_test_harness_0,
dump_records_sas_test_harness_1]
}
# Create the actual config.
config = {
'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
'initialCbsdRecords': [],
'iterationData': [iteration_config],
'sasTestHarnessConfigs': [sas_test_harness_0_config,
sas_test_harness_1_config],
'domainProxyConfigs': [{
'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')
}, {
'cert': getCertFilename('domain_proxy_1.cert'),
'key': getCertFilename('domain_proxy_1.key')}
]
}
writeConfig(filename, config)
@configurable_testcase(generate_PPR_2_default_config)
def test_WINNF_FT_S_PPR_2(self, config_filename):
"""Multiple SAS PPA Protection
"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR2')
def generate_PPR_3_default_config(self, filename):
"""High-level description of the default config:
SAS UUT has devices B, D; all of which have a PAL grant.
SAS TH has devices A, C, E, all of which have a PAL grant.
SAS UUT has one PPA, with devices B and D on the cluster list.
SAS TH has one PPA, with devices A, C, and E on the cluster list.
The PPAs derive from different but adjacent PALs.
Both PPAs are on 3620-3630 MHz, as are all grants.
"""
# Load Devices
device_a = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
device_a['installationParam']['latitude'] = 38.842176
device_a['installationParam']['longitude'] = -97.092863
device_b = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
device_b['installationParam']['latitude'] = 38.845323113
device_b['installationParam']['longitude'] = -97.15514587
device_b['installationParam']['antennaBeamwidth'] = 0
device_b['installationParam']['antennaDowntilt'] = 0
device_c = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
device_c['installationParam']['latitude'] = 38.816782
device_c['installationParam']['longitude'] = -97.102965
device_d = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
device_d['installationParam']['latitude'] = 38.846125
device_d['installationParam']['longitude'] = -97.156184
device_d['installationParam']['antennaBeamwidth'] = 0
device_d['installationParam']['antennaDowntilt'] = 0
device_e = json_load(
os.path.join('testcases', 'testdata', 'device_e.json'))
device_e['installationParam']['latitude'] = 38.761748
device_e['installationParam']['longitude'] = -97.118459
# Pre-load conditionals and remove REG-conditional fields from registration
# requests.
conditional_keys = [
'cbsdCategory', 'fccId', 'cbsdSerialNumber', 'airInterface',
'installationParam', 'measCapability'
]
reg_conditional_keys = [
'cbsdCategory', 'airInterface', 'installationParam', 'measCapability'
]
conditionals_b = {key: device_b[key] for key in conditional_keys}
device_b = {
key: device_b[key]
for key in device_b
if key not in reg_conditional_keys
}
conditionals_d = {key: device_d[key] for key in conditional_keys}
device_d = {
key: device_d[key]
for key in device_d
if key not in reg_conditional_keys
}
# Load grant requests (default is 3620-3630).
grant_a = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_b = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_b['operationParam']['maxEirp'] = 30
grant_c = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_d = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_d['operationParam']['maxEirp'] = 30
grant_e = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
# CBSDs in SAS UUT.
domain_proxy = {
'registrationRequests': [device_b, device_d],
'grantRequests': [grant_b, grant_d],
'conditionalRegistrationData': [conditionals_b, conditionals_d],
'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')
}
# One PPA in SAS UUT.
pal_low_frequency = 3620000000
pal_high_frequency = 3630000000
pal_record_0 = json_load(
os.path.join('testcases', 'testdata', 'pal_record_0.json'))
ppa_record_0 = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_0.json'))
ppa_record_0['zone']['features'][0]['geometry']['coordinates'] = [[[
-97.155, 38.75
], [-97.155, 38.85], [-97.165, 38.85], [-97.165, 38.75], [-97.155, 38.75]]]
ppa_record_0, pal_records_0 = makePpaAndPalRecordsConsistent(
ppa_record_0, [pal_record_0], pal_low_frequency, pal_high_frequency,
'test_user_1')
device_b['userId'] = 'test_user_1'
device_d['userId'] = 'test_user_1'
ppa_cluster_list = [0, 1]
# One PPA in the peer SAS test harness.
pal_record_1 = json_load(
os.path.join('testcases', 'testdata', 'pal_record_1.json'))
pal_record_1['fipsCode'] = 20041084500
ppa_record_1 = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_1.json'))
ppa_record_1['zone']['features'][0]['geometry']['coordinates'] = [[[
-97.145, 38.85
], [-97.145, 38.75], [-97.05, 38.75], [-97.05, 38.85], [-97.145, 38.85]]]
ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(
ppa_record_1, [pal_record_1], pal_low_frequency, pal_high_frequency,
'test_user_2')
# Generate FAD records.
cbsd_records = [device_a, device_c, device_e]
# Create CBSD reference IDs.
cbsd_reference_id_a = generateCbsdReferenceId(device_a['fccId'],
device_a['cbsdSerialNumber'])
cbsd_reference_id_c = generateCbsdReferenceId(device_c['fccId'],
device_c['cbsdSerialNumber'])
cbsd_reference_id_e = generateCbsdReferenceId(device_e['fccId'],
device_e['cbsdSerialNumber'])
cbsd_reference_ids = [[
cbsd_reference_id_a, cbsd_reference_id_c, cbsd_reference_id_e
]]
grant_record_list = [[grant_a], [grant_c], [grant_e]]
cbsd_records = generateCbsdRecords(cbsd_records, grant_record_list)
for cbsd in cbsd_records:
for grant in cbsd['grants']:
grant['channelType'] = 'PAL'
# Create records.
sas_harness_dump_records = {
'cbsdRecords': cbsd_records,
'ppaRecords': generatePpaRecords([ppa_record_1], cbsd_reference_ids),
}
# SAS test harness configuration.
sas_harness_config = {
'sasTestHarnessName': 'SAS-TH-1',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas.cert'),
'serverKey': getCertFilename('sas.key'),
'caCert': 'certs/ca.cert'
}
config = {
'domainProxy':
domain_proxy, # Includes registration and grant requests.
'ppaRecord': ppa_record_0, # PPA in SAS UUT.
'ppaClusterList':
ppa_cluster_list, # Same format and semantics as SIQ.12.
'palRecords': [pal_records_0[0],
pal_records_1[0]], # PALs for both PPAs.
'sasTestHarnessDumpRecords':
sas_harness_dump_records, # CBSDs and one PPA.
'sasTestHarnessConfig':
sas_harness_config, # Just the config, no records.
}
writeConfig(filename, config)
@configurable_testcase(generate_PPR_3_default_config)
def test_WINNF_FT_S_PPR_3(self, config_filename):
config = loadConfig(config_filename)
# Light config checking.
self.assertValidConfig(
config, {
'domainProxy': dict,
'ppaRecord': dict,
'ppaClusterList': list,
'palRecords': list,
'sasTestHarnessDumpRecords': dict,
'sasTestHarnessConfig': dict
})
self.assertEqual(
len(config['sasTestHarnessDumpRecords']['ppaRecords']), 1,
'Only one PPA is supported.')
# Make sure ID formats are correct.
ppa = config['sasTestHarnessDumpRecords']['ppaRecords'][0]
self.assertGreater(
len(ppa['ppaInfo']['cbsdReferenceId']), 0,
'Must have at least one ID on the cluster list.')
for cbsd_ref_id in ppa['ppaInfo']['cbsdReferenceId']:
self.assertFalse(
cbsd_ref_id.startswith('cbsd/'),
'IDs in the cluster list should not start with "cbsd/".')
for cbsd in config['sasTestHarnessDumpRecords']['cbsdRecords']:
self.assertTrue(cbsd['id'].startswith('cbsd/'),
'IDs of individual CBSDs must start with "cbsd/".')
# Initialize test-wide variables, and state variables.
self.config = config
self.active_dpas = []
self.sas_test_harness_objects = []
self.domain_proxy_objects = []
self.protected_entity_records = {}
self.num_peer_sases = 1
self.cpas_executor = ThreadPoolExecutor(max_workers=1)
self.agg_interf_check_executor = ThreadPoolExecutor(max_workers=1)
self.sas_uut_fad = None
self.test_harness_fads = [] # List for consistency with MCP code.
self.all_dpa_checks_succeeded = True
# Notify SAS UUT that a peer SAS exists (and start the SAS server)
logging.info('Step 1: activate one SAS test harness and notify SAS UUT.')
test_harness = config['sasTestHarnessConfig']
logging.info('Creating SAS TH with config %s', test_harness)
# Initialize SAS Test Harness Server instance to dump FAD records
sas_test_harness_object = SasTestHarnessServer(
test_harness['sasTestHarnessName'], test_harness['hostName'],
test_harness['port'], test_harness['serverCert'],
test_harness['serverKey'], test_harness['caCert'])
self.InjectTestHarnessFccIds(
config['sasTestHarnessDumpRecords']['cbsdRecords'])
sas_test_harness_dump_records = [
config['sasTestHarnessDumpRecords']['cbsdRecords'],
config['sasTestHarnessDumpRecords']['ppaRecords']
]
sas_test_harness_object.writeFadRecords(sas_test_harness_dump_records)
# Start the server
sas_test_harness_object.start()
# Inform SAS UUT about SAS Test Harness.
certificate_hash = getCertificateFingerprint(test_harness['serverCert'])
self._sas_admin.InjectPeerSas({'certificateHash': certificate_hash,
'url': sas_test_harness_object.getBaseUrl()})
# Store required info in the test harness.
self.fad_cert = test_harness['serverCert']
self.fad_key = test_harness['serverKey']
self.sas_test_harness_objects.append(sas_test_harness_object)
# Extract PPA record from peer SAS and add to local protected entities.
peer_sas_ppa = config['sasTestHarnessDumpRecords']['ppaRecords'][0]
# The ID for each CBSD's record is of the format "cbsd/$REFERENCE_ID". The
# IDs on the cluster list are of the format "$REFERENCE_ID". Here we prepend
# "cbsd/" so that the values will be correctly matched in the zone purge
# reference model.
cluster_list = peer_sas_ppa['ppaInfo']['cbsdReferenceId']
for i in range(len(cluster_list)):
cluster_list[i] = 'cbsd/%s' % cluster_list[i]
self.protected_entity_records['ppaRecords'] = [peer_sas_ppa]
# Inject all PALs (used by SAS UUT PPA and peer SAS PPA)
logging.info('Step 2: inject PAL records.')
for index, pal_record in enumerate(config['palRecords']):
try:
logging.info('Injecting PAL record #%d', index)
self._sas_admin.InjectPalDatabaseRecord(pal_record)
except Exception:
logging.error(common_strings.CONFIG_ERROR_SUSPECTED)
raise
self.protected_entity_records['palRecords'] = config['palRecords']
# Register, inject PPA, and request grants.
logging.info('Steps 3 - 5: register, inject PPA, request grants.')
domain_proxy_config = config['domainProxy']
domain_proxy = test_harness_objects.DomainProxy(self,
domain_proxy_config['cert'],
domain_proxy_config['key'])
self.domain_proxy_objects.append(domain_proxy)
(sas_uut_ppa_record_with_cbsd_ids, sas_uut_ppa_record_with_reference_ids
) = domain_proxy.registerCbsdsAndRequestGrantsWithPpa(
domain_proxy_config['registrationRequests'],
domain_proxy_config['grantRequests'], config['ppaRecord'],
config['ppaClusterList'],
domain_proxy_config['conditionalRegistrationData'])
# Make sure SAS UUT's PPA is also checked for protection.
# At this point, we use the "with reference IDs" version because the pre-IAP
# filtering code compares against the CBSD reference ID in the FAD.
self.protected_entity_records['ppaRecords'].append(
sas_uut_ppa_record_with_reference_ids)
# FAD exchange.
logging.info('Step 6 + 7: FAD exchange.')
self.sas_uut_fad = getFullActivityDumpSasUut(self._sas, self._sas_admin,
self.fad_cert, self.fad_key)
self.test_harness_fads.append(
getFullActivityDumpSasTestHarness(
self.sas_test_harness_objects[0].getSasTestHarnessInterface()))
# Trigger CPAS in SAS UUT, and wait until completion.
logging.info('Step 8: trigger CPAS.')
self.cpas = self.cpas_executor.submit(
self.TriggerDailyActivitiesImmediatelyAndWaitUntilComplete)
logging.info('Step 9: execute IAP reference model.')
# Pre-IAP filtering.
pre_iap_filtering.preIapReferenceModel(self.protected_entity_records,
self.sas_uut_fad,
self.test_harness_fads)
# IAP reference model.
self.performIap()
logging.info('Waiting for CPAS to complete (started in step 8).')
self.cpas.result()
logging.info('CPAS started in step 8 complete.')
# Heartbeat, relinquish, grant, heartbeat
logging.info('Steps 10 - 13: heartbeat, relinquish, grant, heartbeat.')
domain_proxy.performHeartbeatAndUpdateGrants()
# Aggregate interference check
logging.info(
'Step 14 and CHECK: calculating and checking aggregate interference.')
# Before performing this check, we need to update the cluster list of SAS
# UUT's PPA to use the CBSD IDs -- rather than reference IDs -- since this
# is what the function getAuthorizedGrantsFromDomainProxies() expects. Note
# that we must keep the indexing the same since
# self.ppa_ap_iap_ref_values_list assumes consistent ordering of protected
# entities.
self.protected_entity_records['ppaRecords'][
1] = sas_uut_ppa_record_with_cbsd_ids
self.performIapAndDpaChecks()
|
apache-2.0
| 6,128,459,033,703,096,000
| 41.120383
| 98
| 0.64102
| false
| 3.636901
| true
| false
| false
|
h4/fuit-webdev
|
projects/logger/logger/settings.py
|
1
|
5341
|
# Django settings for logger project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
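# A filled-in sqlite3 example, kept as a comment (a sketch; the database file
# name 'logger.db' is illustrative and not part of this project):
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(PROJECT_ROOT, '../logger.db'),
#     }
# }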
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ocvuca%yvz(24m0kl%9v9fa08q1w+k*9qz_5wu2efclb04bb3o'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'logger.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'logger.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, '../templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'testhttp',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
mit
| 1,071,628,346,005,622,400
| 33.458065
| 101
| 0.681333
| false
| 3.742817
| false
| false
| false
|
daniestevez/gr-satellites
|
tools/clang_format.py
|
1
|
26734
|
#!/usr/bin/env python
# Copyright (C) 2015,2016 MongoDB Inc.
# Copyright (C) 2018 Free Software Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A script that provides:
1. Validates clang-format is the right version.
2. Has support for checking which files are to be checked.
3. Supports validating and updating a set of files to the right coding style.
"""
import queue
import difflib
import glob
import itertools
import os
import re
import subprocess
from subprocess import check_output, CalledProcessError
import sys
import threading
import time
from distutils import spawn
from argparse import ArgumentParser
from multiprocessing import cpu_count
# Get relative imports to work when
# the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(
os.path.dirname(
os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
##############################################################################
#
# Constants for clang-format
#
#
# Expected version of clang-format
CLANG_FORMAT_VERSION = "10.0.1"
CLANG_FORMAT_SHORT_VERSION = "10.0"
# Name of clang-format as a binary
CLANG_FORMAT_PROGNAME = "clang-format"
# only valid c/c++ implementations and headers
files_match = re.compile('\\.(h|cc|c)$')
##############################################################################
def callo(args):
"""Call a program, and capture its output
"""
return check_output(args).decode('utf-8')
class ClangFormat(object):
"""Class encapsulates finding a suitable copy of clang-format,
and linting/formatting an individual file
"""
def __init__(self, path):
self.path = None
clang_format_progname_ext = ""
if sys.platform == "win32":
clang_format_progname_ext += ".exe"
# Check the clang-format the user specified
if path is not None:
if os.path.isfile(path):
self.path = path
else:
print("WARNING: Could not find clang-format %s" % (path))
# Check the users' PATH environment variable now
if self.path is None:
            # Check for various versions, starting with binaries that have
            # version-specific suffixes, in the user's PATH
programs = [
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION,
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_SHORT_VERSION,
CLANG_FORMAT_PROGNAME,
]
if sys.platform == "win32":
for i in range(len(programs)):
programs[i] += '.exe'
for program in programs:
self.path = spawn.find_executable(program)
if self.path:
if not self._validate_version():
self.path = None
else:
break
# If Windows, try to grab it from Program Files
# Check both native Program Files and WOW64 version
if sys.platform == "win32":
programfiles = [
os.environ["ProgramFiles"],
os.environ["ProgramFiles(x86)"],
]
for programfile in programfiles:
win32bin = os.path.join(programfile,
"LLVM\\bin\\clang-format.exe")
if os.path.exists(win32bin):
self.path = win32bin
break
if self.path is None or not os.path.isfile(
self.path) or not self._validate_version():
print(
"ERROR:clang-format not found in $PATH, please install clang-format "
+ CLANG_FORMAT_VERSION)
raise NameError("No suitable clang-format found")
self.print_lock = threading.Lock()
def _validate_version(self):
"""Validate clang-format is the expected version
"""
cf_version = callo([self.path, "--version"])
if CLANG_FORMAT_VERSION in cf_version:
return True
print(
"WARNING: clang-format found in path, but incorrect version found at "
+ self.path + " with version: " + cf_version)
return False
def _lint(self, file_name, print_diff):
"""Check the specified file has the correct format
"""
with open(file_name, 'rb') as original_text:
original_file = original_text.read().decode("utf-8")
# Get formatted file as clang-format would format the file
formatted_file = callo([self.path, "--style=file", file_name])
if original_file != formatted_file:
if print_diff:
original_lines = original_file.splitlines()
formatted_lines = formatted_file.splitlines()
result = difflib.unified_diff(original_lines, formatted_lines)
# Take a lock to ensure diffs do not get mixed when printed to the screen
with self.print_lock:
print("ERROR: Found diff for " + file_name)
print("To fix formatting errors, run %s --style=file -i %s"
% (self.path, file_name))
for line in result:
print(line.rstrip())
return False
return True
def lint(self, file_name):
"""Check the specified file has the correct format
"""
return self._lint(file_name, print_diff=True)
def format(self, file_name):
"""Update the format of the specified file
"""
if self._lint(file_name, print_diff=False):
return True
# Update the file with clang-format
formatted = not subprocess.call(
[self.path, "--style=file", "-i", file_name])
# Version 3.8 generates files like foo.cpp~RF83372177.TMP when it formats foo.cpp
# on Windows, we must clean these up
if sys.platform == "win32":
glob_pattern = file_name + "*.TMP"
for fglob in glob.glob(glob_pattern):
os.unlink(fglob)
return formatted
def parallel_process(items, func):
"""Run a set of work items to completion
"""
try:
cpus = cpu_count()
except NotImplementedError:
cpus = 1
task_queue = queue.Queue()
# Use a list so that worker function will capture this variable
pp_event = threading.Event()
pp_result = [True]
def worker():
"""Worker thread to process work items in parallel
"""
while not pp_event.is_set():
try:
item = task_queue.get_nowait()
except queue.Empty:
# if the queue is empty, exit the worker thread
pp_event.set()
return
try:
ret = func(item)
finally:
# Tell the queue we finished with the item
task_queue.task_done()
# Return early if we fail, and signal we are done
if not ret:
# with pp_lock:
# pp_result[0] = False
print("{} failed on item {}".format(func, item))
# pp_event.set()
return
# Enqueue all the work we want to process
for item in items:
task_queue.put(item)
# Process all the work
threads = []
for cpu in range(cpus):
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
threads.append(thread)
# Wait for the threads to finish
# Loop with a timeout so that we can process Ctrl-C interrupts
# Note: On Python 2.6 wait always returns None so we check is_set also,
# This works because we only set the event once, and never reset it
while not pp_event.wait(1) and not pp_event.is_set():
time.sleep(1)
for thread in threads:
thread.join()
return pp_result[0]
def get_base_dir():
"""Get the base directory for mongo repo.
This script assumes that it is running in buildscripts/, and uses
that to find the base directory.
"""
try:
return subprocess.check_output(
['git', 'rev-parse', '--show-toplevel']).rstrip().decode('utf-8')
except CalledProcessError:
# We are not in a valid git directory. Use the script path instead.
return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_repos():
"""Get a list of Repos to check clang-format for
"""
base_dir = get_base_dir()
# Get a list of modules
# GNU Radio is a single-git repo
# paths = [os.path.join(base_dir, MODULE_DIR, m) for m in gnuradio_modules]
paths = [base_dir]
return [Repo(p) for p in paths]
class Repo(object):
"""Class encapsulates all knowledge about a git repository, and its metadata
to run clang-format.
"""
def __init__(self, path):
self.path = path
self.root = self._get_root()
def _callgito(self, args):
"""Call git for this repository, and return the captured output
"""
# These two flags are the equivalent of -C in newer versions of Git
# but we use these to support versions pre 1.8.5 but it depends on the command
# and what the current directory is
return callo([
'git', '--git-dir', os.path.join(self.path, ".git"), '--work-tree',
self.path
] + args)
def _callgit(self, args):
"""Call git for this repository without capturing output
This is designed to be used when git returns non-zero exit codes.
"""
# These two flags are the equivalent of -C in newer versions of Git
# but we use these to support versions pre 1.8.5 but it depends on the command
# and what the current directory is
return subprocess.call([
'git', '--git-dir', os.path.join(self.path, ".git"), '--work-tree',
self.path
] + args)
def _get_local_dir(self, path):
"""Get a directory path relative to the git root directory
"""
if os.path.isabs(path):
return os.path.relpath(path, self.root)
return path
def get_candidates(self, candidates):
"""Get the set of candidate files to check by querying the repository
Returns the full path to the file for clang-format to consume.
"""
if candidates is not None and len(candidates) > 0:
candidates = [self._get_local_dir(f) for f in candidates]
valid_files = list(
set(candidates).intersection(self.get_candidate_files()))
else:
valid_files = list(self.get_candidate_files())
# Get the full file name here
valid_files = [
os.path.normpath(os.path.join(self.root, f)) for f in valid_files
]
return valid_files
def get_root(self):
"""Get the root directory for this repository
"""
return self.root
def _get_root(self):
"""Gets the root directory for this repository from git
"""
gito = self._callgito(['rev-parse', '--show-toplevel'])
return gito.rstrip()
def _git_ls_files(self, cmd):
"""Run git-ls-files and filter the list of files to a valid candidate list
"""
gito = self._callgito(cmd)
# This allows us to pick all the interesting files
# in the mongo and mongo-enterprise repos
file_list = [
line.rstrip()
for line in gito.splitlines()
# TODO: exclude directories if needed
# We don't want to lint volk
if not "volk" in line
]
file_list = [a for a in file_list if files_match.search(a)]
return file_list
def get_candidate_files(self):
"""Query git to get a list of all files in the repo to consider for analysis
"""
return self._git_ls_files(["ls-files", "--cached"])
def get_working_tree_candidate_files(self):
"""Query git to get a list of all files in the working tree to consider for analysis
"""
return self._git_ls_files(["ls-files", "--cached", "--others"])
def get_working_tree_candidates(self):
"""Get the set of candidate files to check by querying the repository
Returns the full path to the file for clang-format to consume.
"""
valid_files = list(self.get_working_tree_candidate_files())
# Get the full file name here
valid_files = [
os.path.normpath(os.path.join(self.root, f)) for f in valid_files
]
return valid_files
def is_detached(self):
"""Is the current working tree in a detached HEAD state?
"""
# symbolic-ref returns 1 if the repo is in a detached HEAD state
return self._callgit(["symbolic-ref", "--quiet", "HEAD"])
def is_ancestor(self, parent, child):
"""Is the specified parent hash an ancestor of child hash?
"""
# merge base returns 0 if parent is an ancestor of child
return not self._callgit(
["merge-base", "--is-ancestor", parent, child])
def is_commit(self, sha1):
"""Is the specified hash a valid git commit?
"""
# cat-file -e returns 0 if it is a valid hash
return not self._callgit(["cat-file", "-e", "%s^{commit}" % sha1])
def is_working_tree_dirty(self):
"""Does the current working tree have changes?
"""
# diff returns 1 if the working tree has local changes
return self._callgit(["diff", "--quiet"])
def does_branch_exist(self, branch):
"""Does the branch exist?
"""
# rev-parse returns 0 if the branch exists
return not self._callgit(["rev-parse", "--verify", branch])
def get_merge_base(self, commit):
"""Get the merge base between 'commit' and HEAD
"""
return self._callgito(["merge-base", "HEAD", commit]).rstrip()
def get_branch_name(self):
"""Get the current branch name, short form
This returns "master", not "refs/head/master"
Will not work if the current branch is detached
"""
branch = self.rev_parse(["--abbrev-ref", "HEAD"])
if branch == "HEAD":
raise ValueError("Branch is currently detached")
return branch
def add(self, command):
"""git add wrapper
"""
return self._callgito(["add"] + command)
def checkout(self, command):
"""git checkout wrapper
"""
return self._callgito(["checkout"] + command)
def commit(self, command):
"""git commit wrapper
"""
return self._callgito(["commit"] + command)
def diff(self, command):
"""git diff wrapper
"""
return self._callgito(["diff"] + command)
def log(self, command):
"""git log wrapper
"""
return self._callgito(["log"] + command)
def rev_parse(self, command):
"""git rev-parse wrapper
"""
return self._callgito(["rev-parse"] + command).rstrip()
def rm(self, command):
"""git rm wrapper
"""
return self._callgito(["rm"] + command)
def show(self, command):
"""git show wrapper
"""
return self._callgito(["show"] + command)
def get_list_from_lines(lines):
""""Convert a string containing a series of lines into a list of strings
"""
return [line.rstrip() for line in lines.splitlines()]
def get_files_to_check_working_tree():
"""Get a list of files to check form the working tree.
This will pick up files not managed by git.
"""
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable(
[r.get_working_tree_candidates() for r in repos]))
return valid_files
def get_files_to_check():
"""Get a list of files that need to be checked
based on which files are managed by git.
"""
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable([r.get_candidates(None) for r in repos]))
return valid_files
def get_files_to_check_from_patch(patches):
"""
Take a patch file generated by git diff,
and scan the patch for a list of files to check.
"""
    candidates = []
    # Get a list of candidate_files
    check = re.compile(
        r"^diff --git a\/([a-z\/\.\-_0-9]+) b\/[a-z\/\.\-_0-9]+")
for patch in patches:
if patch == "-":
infile = sys.stdin
else:
            infile = open(patch, "r")  # text mode, so lines match the str regex above
candidates.extend([
check.match(line).group(1) for line in infile.readlines()
if check.match(line)
])
infile.close()
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable(
[r.get_candidates(candidates) for r in repos]))
return valid_files
def _lint_files(clang_format, files):
"""Lint a list of files with clang-format
"""
try:
clang_format = ClangFormat(clang_format)
except NameError as e:
print(e)
return False
lint_clean = parallel_process([os.path.abspath(f) for f in files],
clang_format.lint)
if not lint_clean:
print("ERROR: Code Style does not match coding style")
sys.exit(1)
def lint(args):
"""Lint files command entry point
"""
if args.patch and args.all:
print("Only specify patch or all, but not both!")
return False
if args.patch:
        files = get_files_to_check_from_patch([args.patch])  # helper iterates over a list of patch files
elif args.all:
files = get_files_to_check_working_tree()
else:
files = get_files_to_check()
if files:
_lint_files(args.clang_format, files)
return True
def _format_files(clang_format, files):
"""Format a list of files with clang-format
"""
try:
clang_format = ClangFormat(clang_format)
except NameError as e:
print(e)
return (False)
format_clean = parallel_process([os.path.abspath(f) for f in files],
clang_format.format)
if not format_clean:
print("ERROR: failed to format files")
sys.exit(1)
def _reformat_branch(clang_format, commit_prior_to_reformat,
commit_after_reformat):
"""Reformat a branch made before a clang-format run
"""
try:
clang_format = ClangFormat(clang_format)
except NameError as e:
print(e)
return False
if os.getcwd() != get_base_dir():
raise ValueError("reformat-branch must be run from the repo root")
repo = Repo(get_base_dir())
# Validate that user passes valid commits
if not repo.is_commit(commit_prior_to_reformat):
raise ValueError(
"Commit Prior to Reformat '%s' is not a valid commit in this repo"
% commit_prior_to_reformat)
if not repo.is_commit(commit_after_reformat):
raise ValueError(
"Commit After Reformat '%s' is not a valid commit in this repo" %
commit_after_reformat)
if not repo.is_ancestor(commit_prior_to_reformat, commit_after_reformat):
raise ValueError((
"Commit Prior to Reformat '%s' is not a valid ancestor of Commit After"
+ " Reformat '%s' in this repo") % (commit_prior_to_reformat,
commit_after_reformat))
# Validate the user is on a local branch that has the right merge base
if repo.is_detached():
raise ValueError(
"You must not run this script in a detached HEAD state")
# Validate the user has no pending changes
if repo.is_working_tree_dirty():
raise ValueError(
"Your working tree has pending changes. You must have a clean working tree before proceeding."
)
merge_base = repo.get_merge_base(commit_prior_to_reformat)
if not merge_base == commit_prior_to_reformat:
raise ValueError(
"Please rebase to '%s' and resolve all conflicts before running this script"
% (commit_prior_to_reformat))
# We assume the target branch is master, it could be a different branch if needed for testing
merge_base = repo.get_merge_base("master")
if not merge_base == commit_prior_to_reformat:
raise ValueError(
"This branch appears to already have advanced too far through the merge process"
)
# Everything looks good so lets start going through all the commits
branch_name = repo.get_branch_name()
new_branch = "%s-reformatted" % branch_name
if repo.does_branch_exist(new_branch):
raise ValueError(
"The branch '%s' already exists. Please delete the branch '%s', or rename the current branch."
% (new_branch, new_branch))
commits = get_list_from_lines(
repo.log([
"--reverse", "--pretty=format:%H", "%s..HEAD" %
commit_prior_to_reformat
]))
previous_commit_base = commit_after_reformat
# Go through all the commits the user made on the local branch and migrate to a new branch
# that is based on post_reformat commits instead
for commit_hash in commits:
repo.checkout(["--quiet", commit_hash])
deleted_files = []
# Format each of the files by checking out just a single commit from the user's branch
commit_files = get_list_from_lines(repo.diff(["HEAD~", "--name-only"]))
for commit_file in commit_files:
# Format each file needed if it was not deleted
if not os.path.exists(commit_file):
print(
"Skipping file '%s' since it has been deleted in commit '%s'"
% (commit_file, commit_hash))
deleted_files.append(commit_file)
continue
if files_match.search(commit_file):
clang_format.format(commit_file)
else:
print(
"Skipping file '%s' since it is not a file clang_format should format"
% commit_file)
# Check if anything needed reformatting, and if so amend the commit
if not repo.is_working_tree_dirty():
print("Commit %s needed no reformatting" % commit_hash)
else:
repo.commit(["--all", "--amend", "--no-edit"])
# Rebase our new commit on top the post-reformat commit
previous_commit = repo.rev_parse(["HEAD"])
# Checkout the new branch with the reformatted commits
# Note: we will not name as a branch until we are done with all commits on the local branch
repo.checkout(["--quiet", previous_commit_base])
# Copy each file from the reformatted commit on top of the post reformat
diff_files = get_list_from_lines(
repo.diff([
"%s~..%s" % (previous_commit, previous_commit), "--name-only"
]))
for diff_file in diff_files:
# If the file was deleted in the commit we are reformatting, we need to delete it again
if diff_file in deleted_files:
repo.rm([diff_file])
continue
if "volk" in diff_file:
continue
# The file has been added or modified, continue as normal
file_contents = repo.show(["%s:%s" % (previous_commit, diff_file)])
root_dir = os.path.dirname(diff_file)
if root_dir and not os.path.exists(root_dir):
os.makedirs(root_dir)
with open(diff_file, "w+") as new_file:
new_file.write(file_contents)
repo.add([diff_file])
# Create a new commit onto clang-formatted branch
repo.commit(["--reuse-message=%s" % previous_commit])
previous_commit_base = repo.rev_parse(["HEAD"])
# Create a new branch to mark the hashes we have been using
repo.checkout(["-b", new_branch])
print("reformat-branch is done running.\n")
print(
"A copy of your branch has been made named '%s', and formatted with clang-format.\n"
% new_branch)
print("The original branch has been left unchanged.")
print("The next step is to rebase the new branch on 'master'.")
def format_func(args):
"""Format files command entry point
"""
if args.all and args.branch is not None:
print("Only specify branch or all, but not both!")
return False
if not args.branch:
if args.all:
files = get_files_to_check_working_tree()
else:
files = get_files_to_check()
_format_files(args.clang_format, files)
else:
_reformat_branch(args.clang_format, *args.branch)
def parse_args():
"""
Parse commandline arguments
"""
parser = ArgumentParser()
parser.add_argument(
"-c",
"--clang-format",
default="clang-format",
help="clang-format binary")
subparsers = parser.add_subparsers(help="clang-format action", dest="action")
subparsers.required = True
lint_parser = subparsers.add_parser(
"lint", help="Lint-only (no modifications)")
lint_parser.add_argument("-a", "--all", action="store_true")
lint_parser.add_argument("-p", "--patch", help="patch to check")
lint_parser.set_defaults(func=lint)
format_parser = subparsers.add_parser(
"format", help="Format files in place")
format_parser.add_argument(
"-b",
"--branch",
nargs=2,
default=None,
help="specify the commit hash before the format and after the format has been done"
)
format_parser.add_argument("-a", "--all", action="store_true")
format_parser.set_defaults(func=format_func)
return parser.parse_args()
def main():
"""Main entry point
"""
args = parse_args()
if hasattr(args, "func"):
args.func(args)
if __name__ == "__main__":
main()
|
gpl-3.0
| -4,323,851,137,709,461,500
| 31.404848
| 106
| 0.584312
| false
| 4.172624
| false
| false
| false
|
ruthbrenk/DrugPred2.0
|
cl_startdock.py
|
1
|
1537
|
#!/usr/bin/python
#run as array job
#if files are bzipped -> copy to local disk and unzip there
import os,sys
db_dir = sys.argv[1]
zipped = sys.argv[2] #are files bzipped?
if zipped == 'True':
zipped = True
else:
zipped = False
files = os.listdir(db_dir)
file = '#$ -S /bin/tcsh\n#$ -cwd\n#$ -V\n'
path = os.getcwd()
counter = 1
for i in files: #set up all sub dirs with correct input files
 if i[-2:] == 'db' or i[-2:] == 'z2':
sub_dir = 'acd_' + str(counter)
if not os.path.exists(sub_dir):
os.system('mkdir ' + sub_dir)
os.chdir(path + '/' + sub_dir)
os.system('cp ../INDOCK .')
command = 'ln -s ' + db_dir + i + ' db_file'
print command
os.system(command)
counter = counter + 1
os.chdir('..')
#create file to submit array job
start_file = open('start_dock.bin', 'w')
start_file.write(file)
start_file.write('cd acd_$SGE_TASK_ID\n')
if zipped: #files must be unzipped; to save disk space, do this on the temporary cluster disk, $TMPDIR
start_file.write('ls -larth *\n') #save name of db file that should be docked
start_file.write('cp db_file $TMPDIR/db_file.db.bz2\n')
start_file.write('bunzip2 $TMPDIR/db_file.db.bz2\n')
start_file.write('unlink db_file\n')
start_file.write('ln -s $TMPDIR/db_file.db db_file\n')
start_file.write('/software/dockenv/bin/Linux/dock_vol.test\n')
if zipped:
start_file.write('unlink db_file\n')
start_file.write('rm -f *.1')
start_file.close()
os.system('chmod 755 start_dock.bin')
os.system('qsub -q 64bit-pri.q,64bit.q -t 1-' + str(counter-1) + ' start_dock.bin')
|
gpl-2.0
| 5,979,030,986,141,396,000
| 25.050847
| 100
| 0.659727
| false
| 2.515548
| false
| false
| false
|
overfl0/Bulletproof-Arma-Launcher
|
src/view/messagebox.py
|
1
|
1628
|
# Bulletproof Arma Launcher
# Copyright (C) 2016 Lukasz Taczuk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import unicode_literals
import sys
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from utils import browser
from view.chainedpopup import ChainedPopup
default_title = """Message"""
def open_hyperlink(obj, ref):
browser.open_hyperlink(ref)
class MessageBox(ChainedPopup):
def __init__(self, text, title=default_title, markup=False, on_dismiss=None,
hide_button=False, auto_dismiss=True):
bl = BoxLayout(orientation='vertical')
la = Label(text=text, size_hint_y=0.8, markup=markup)
la.bind(on_ref_press=open_hyperlink)
button = Button(text="Ok", size_hint_y=0.2)
button.bind(on_release=self.dismiss)
bl.add_widget(la)
if not hide_button:
bl.add_widget(button)
super(MessageBox, self).__init__(
title=title, content=bl, size_hint=(None, None), size=(600, 500),
auto_dismiss=auto_dismiss)
# Bind an optional handler when the user closes the message
if on_dismiss:
self.bind(on_dismiss=on_dismiss)
|
gpl-3.0
| -136,472,453,782,485,700
| 32.22449
| 80
| 0.693489
| false
| 3.733945
| false
| false
| false
|
DISMGryphons/GryphonCTF2017-Challenges
|
challenges/programming/AutoEncryptSys/generate/make.py
|
1
|
1166
|
from Crypto.Cipher import AES
import base64
import random
k="../distrib/"
def randomword(length):
return ''.join(random.choice("QWERTYUIOPASDFGHJKLZXCVBNM1234567890__________") for i in range(length))
def randomword1():
return ''.join(random.choice("QWERTYUIOPLKJHGFDSAZXCVBNM") for i in range(4))
def filename():
return ''.join(random.choice("asdfghjklzxcvbnmqwertyuiopQWERTYUIOPASDFGHJKLZXCVBNM") for i in range(16))
def encrypt(msg_text,secret_key):
msg_text = msg_text.rjust(32)
cipher = AES.new(secret_key,AES.MODE_ECB) # never use ECB in strong systems obviously
encoded = base64.b64encode(cipher.encrypt(msg_text))
return encoded.decode("utf-8")
# ...
def decrypt(msg_text,secret_key):
cipher = AES.new(secret_key,AES.MODE_ECB) # never use ECB in strong systems obviously
decoded = cipher.decrypt(base64.b64decode(msg_text))
return decoded
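# Round-trip sketch, mirroring how the loop below calls these helpers (the key,
# like the filenames used below, is a 16-character string; values illustrative):
#   token = encrypt("GCTF{example}", "ABCDEFGHIJKLMNOP")
#   decrypt(token, "ABCDEFGHIJKLMNOP")   # 32 chars, left-padded with spaces,
#                                        # containing GCTF{example}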
for i in range(1002):
zz=filename()
f=open(k+zz,"w")
D=randomword1()
while D=="GCTF":
D=randomword1()
j=D+"{"+randomword(random.randint(17,25))+"}"
if i==459:
j="GCTF{wh4ts_1n_th3_f1l355}"
print (encrypt(j,zz))
print(zz)
print()
print(encrypt(j,zz),file=f)
|
gpl-3.0
| 4,262,148,654,617,338,000
| 28.15
| 107
| 0.702401
| false
| 2.692841
| false
| false
| false
|
amcgregor/WebCore-Tutorial
|
web/app/wiki/root.py
|
1
|
2314
|
# Python's standard date + time object.
from datetime import datetime
# HTTP status code exception for "302 Found" redirection.
from webob.exc import HTTPFound
# MongoDB exceptions that may be raised when manipulating data.
from pymongo.errors import DuplicateKeyError
# Get a reference to our Article resource class and data model.
from .article import Article
from .model import WikiArticle as D # Shortened due to repeated use.
class Wiki:
"""Basic multi-article editable wiki."""
__dispatch__ = 'resource' # The Wiki is a collection of pages, so use resource dispatch.
__resource__ = Article # Declare the type of resource we contain.
__collection__ = 'articles' # The default collection name to use when bound.
__home__ = 'Home' # The default homepage users are directed to if requesting the root.
def __init__(self, context, collection=None, record=None):
"""Executed when the root of the site (or children) are accessed, on each request."""
self._ctx = context # Store the "request context" for later use.
self.__collection__ = context.db[self.__collection__] # Get a reference to the collection we use.
def __getitem__(self, name):
"""Load data for the Article with the given name."""
# Attempt to locate a document by that name.
data = self.__collection__.find_one(D.name == name)
if not data: # If no record was found, populate some default data.
data = D(name) # Creation and modification times are constructed for us.
else:
data = D.from_mongo(data) # Otherwise, wrap in our model object.
return data
def get(self):
"""Called to handle direct requests to the web root itself."""
# Redirect users to the default home page.
return HTTPFound(location=str(self._ctx.path.current / self.__home__))
def post(self, name, content):
"""Save a new article to the database."""
try:
# Insert an article with the given name and content.
result = self.__collection__.insert_one(D(name, content))
except DuplicateKeyError:
return {
'ok': False,
'reason': 'duplicate',
'message': "An article with that name already exists.",
'name': name,
}
# All is well, so we inform the client.
return {
'ok': True,
'acknowledged': result.acknowledged,
'name': result.inserted_id
}
|
mit
| -5,330,814,030,077,170,000
| 32.536232
| 100
| 0.689283
| false
| 3.714286
| false
| false
| false
|
timesqueezer/mdfork
|
mooddiary/bundles.py
|
1
|
1999
|
from flask.ext.assets import Bundle
js = Bundle(
'bower_components/jquery/dist/jquery.js',
'bower_components/angular/angular.js',
'bower_components/angular-animate/angular-animate.js',
'bower_components/angular-cookies/angular-cookies.js',
'bower_components/angular-sanitize/angular-sanitize.js',
'bower_components/angular-localization/angular-localization.js',
'bower_components/angular-ui-router/release/angular-ui-router.js',
'bower_components/angular-grecaptcha/grecaptcha.js',
'bower_components/underscore/underscore.js',
'bower_components/angular-strap/dist/angular-strap.js',
'bower_components/angular-strap/dist/angular-strap.tpl.js',
'bower_components/Chart.js/Chart.js',
'bower_components/angular-chart.js/dist/angular-chart.js',
'bower_components/bootstrap/js/alert.js',
'bower_components/bootstrap/js/modal.js',
'bower_components/bootstrap/js/dropdown.js',
'bower_components/bootstrap/js/collapse.js',
'bower_components/angular-restmod/dist/angular-restmod-bundle.js',
'bower_components/angular-restmod/dist/plugins/dirty.js',
'bower_components/ngInfiniteScroll/build/ng-infinite-scroll.js',
'bower_components/ngSmoothScroll/lib/angular-smooth-scroll.js',
'bower_components/moment/moment.js',
'bower_components/Chart.Scatter/Chart.Scatter.js',
'angular-locale_de-de.js',
'bower_components/spectrum/spectrum.js',
'bower_components/angular-spectrum-colorpicker/dist/angular-spectrum-colorpicker.js',
'js/utils.js',
'js/diary.js',
'js/app.js',
output='gen/app.js',
filters='rjsmin'
)
css = Bundle(
'css/styles.less',
'bower_components/angular-chart.js/dist/angular-chart.css',
'bower_components/bca-flag-sprite/css/flags.css',
'bower_components/fontawesome/css/font-awesome.min.css',
'bower_components/flag-icon-css/css/flag-icon.css',
'bower_components/spectrum/spectrum.css',
output='gen/styles.css',
filters='less,cssmin'
)
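# These bundles are normally registered on a Flask-Assets Environment elsewhere
# in the application; a sketch (the registration names are illustrative):
#
#     from flask.ext.assets import Environment
#     assets = Environment(app)
#     assets.register('js_all', js)
#     assets.register('css_all', css)
#
# Templates can then emit the compiled files with
#     {% assets "css_all" %}<link rel="stylesheet" href="{{ ASSET_URL }}">{% endassets %}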
|
mit
| 5,983,798,227,055,680,000
| 38.196078
| 89
| 0.728364
| false
| 3.065951
| false
| true
| false
|
newmediamedicine/indivo_server_1_0
|
indivo/views/reports/immunization.py
|
1
|
2097
|
"""
.. module:: views.reports.immunization
:synopsis: Indivo view implementations for the immunization report.
.. moduleauthor:: Daniel Haas <daniel.haas@post.harvard.edu>
.. moduleauthor:: Ben Adida <ben@adida.net>
"""
from django.http import HttpResponseBadRequest, HttpResponse
from indivo.lib.view_decorators import marsloader, DEFAULT_ORDERBY
from indivo.lib.query import FactQuery, DATE, STRING, NUMBER
from indivo.models import Immunization
IMMUNIZATION_FILTERS = {
'vaccine_type' : ('vaccine_type', STRING),
'date_administered': ('date_administered', DATE),
DEFAULT_ORDERBY : ('created_at', DATE)
}
IMMUNIZATION_TEMPLATE = 'reports/immunization.xml'
def immunization_list(*args, **kwargs):
""" List the immunization data for a given record.
For 1:1 mapping of URLs to views. Just calls
:py:meth:`~indivo.views.reports.immunization._immunization_list`.
"""
return _immunization_list(*args, **kwargs)
def carenet_immunization_list(*args, **kwargs):
""" List the immunization data for a given carenet.
For 1:1 mapping of URLs to views. Just calls
:py:meth:`~indivo.views.reports.immunization._immunization_list`.
"""
return _immunization_list(*args, **kwargs)
@marsloader(query_api_support=True)
def _immunization_list(request, group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record=None, carenet=None):
""" List the immunization objects matching the passed query parameters.
See :doc:`/query-api` for a listing of valid parameters.
Will return :http:statuscode:`200` with a list of immunizations on success,
:http:statuscode:`400` if any invalid query parameters were passed.
"""
q = FactQuery(Immunization, IMMUNIZATION_FILTERS,
group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record, carenet)
try:
return q.render(IMMUNIZATION_TEMPLATE)
except ValueError as e:
return HttpResponseBadRequest(str(e))
|
gpl-3.0
| 8,928,572,799,692,341,000
| 31.261538
| 77
| 0.692895
| false
| 3.489185
| false
| false
| false
|
zhanxw/bench
|
scripts/monitor.py
|
1
|
10275
|
#!/usr/bin/env python
import sys, os
from itertools import chain
# d is a pandas.DataFrame
def printTable(d, sep = ' ', outFile = sys.stderr):
cols = d.columns
col_widths = list(max(len(str(elem)) for elem in chain(d[col], [col])) for col in cols)
print >> sys.stderr, ' '.join('{value:>{width}}'.format(value=str(name), width=width) for name,width in zip(cols, col_widths))
for row in d.iterrows():
print >> sys.stderr, ' '.join('{value:>{width}}'.format(value=str(name), width=width) for name,width in zip(row[1], col_widths))
def calculateMean(timePoint, value):
#print "tp = ", timePoint
#print "value = ", value
if len(value) == 1:
return value
value = value[0:-1]
weight = timePoint[1:] - timePoint[0:-1]
totalSpan = timePoint[-1] - timePoint[0]
if any((i < 0 for i in weight)):
print >> sys.stderr, "Timepoint is not monotonelly increasing!"
return 0.
if totalSpan == 0.0:
return value[0]
avg = np.sum(weight * value) / totalSpan
return avg
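# Worked example (illustrative): timePoint = np.array([0., 1., 3.]) and
# value = np.array([100., 200., 300.]) give (100*1 + 200*2) / (3 - 0) = 166.67;
# the last sample only closes the interval and carries no weight of its own.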
def draw(dRaw, dg, outFile):
try:
import matplotlib
import matplotlib.pyplot as plt
except:
print >> sys.stderr, "Cannot import matplotlib, skipping generating graphs"
return
plt.rc('legend', fontsize=6)
plt.rc('ytick', labelsize = 'small')
fig = plt.figure(figsize = (15, 15))
ax = fig.add_subplot(3, 3, 1)
## increase space between subplots
fig.subplots_adjust(wspace = .5, hspace = .5)
getProgName = lambda x: x.split()[0].split('/')[-1]
dg['prog'] = dg.apply(lambda x: getProgName(x['cmd']) + '(%d)' % x['pid'] ,axis = 1)
dg.index = dg['prog']
dg[['utime']].plot(kind = 'barh', title = "User Time (s)", ax = ax)
plt.ylabel('')
# plt.yticks(rotation = 45) # this does not produce nice looking graphs
ax = fig.add_subplot(3, 3, 2)
dg[['stime']].plot(kind = 'barh', title = "System Time (s)", ax = ax)
plt.ylabel('')
ax = fig.add_subplot(3, 3, 3)
dg[['rtime']].plot(kind = 'barh', title = "Real Time (s)", ax = ax)
plt.ylabel('')
def scaleUnit(x):
if (x.max() > 1024 ** 3).all():
return ( 1024 ** 3, "Gb")
if (x.max() > 1024 ** 2).all():
return ( 1024 ** 2, "Mb")
if (x.max() > 1024).all():
return ( 1024, "Kb")
return ( 1, "B")
rssScale = scaleUnit(dRaw[['rss']])
dg[['maxRss']] = dg[['maxRss']] / rssScale[0]
ax = fig.add_subplot(3, 3, 4)
dg[['maxRss']].plot(kind = 'barh', title = "Max RSS (" + rssScale[1]+")", ax = ax)
plt.ylabel('')
dg[['avgRss']] = dg[['avgRss']] / rssScale[0]
ax = fig.add_subplot(3, 3, 5)
dg[['avgRss']].plot(kind = 'barh', title = "Avg RSS (" + rssScale[1]+")", ax = ax)
plt.ylabel('')
vmsScale = scaleUnit(dRaw[['vms']])
dg[['maxVms']] = dg[['maxVms']] / vmsScale[0]
ax = fig.add_subplot(3, 3, 6)
dg[['maxVms']].plot(kind = 'barh', title = "Max VMS (" + vmsScale[1]+")", ax = ax)
plt.ylabel('')
dg[['avgVms']] = dg[['avgVms']] / vmsScale[0]
ax = fig.add_subplot(3, 3, 7)
dg[['avgVms']].plot(kind = 'barh', title = "Avg VMS (" + vmsScale[1]+")", ax = ax)
plt.ylabel('')
def calculateYLimit(x, coef = 1.5):
a, b = x.min(), x.max()
c = (a + b) / 2
d = c - a
if d == 0.0:
return (a - 1, b + 1)
        return (c - d * coef, c + d * coef)
dRaw['prog'] = dRaw.apply(lambda x: getProgName(x['cmd']) + '(%d)' % x['pid'] ,axis = 1)
dRaw['rss'] = dRaw['rss'] / rssScale[0]
ax = fig.add_subplot(3, 3, 8)
for k, v in dRaw.groupby('prog'):
plt.plot(v['rtime'], v['rss'], label = k, marker = '.')
plt.ylim(calculateYLimit(dRaw['rss']))
plt.title("RSS (%s) vs. Real Time (s)" % rssScale[1])
plt.legend()
#plt.legend(bbox_to_anchor=(1.05, 1), loc = 2)
dRaw[['vms']] = dRaw[['vms']] / vmsScale[0]
ax = fig.add_subplot(3, 3, 9)
for k, v in dRaw.groupby('prog'):
plt.plot(v['rtime'], v['vms'], label = k, marker = '.')
plt.ylim(calculateYLimit(dRaw['vms']))
plt.title("VMS (%s) vs. Real Time (s)" % vmsScale[1])
plt.legend()
#plt.legend(bbox_to_anchor=(1.05, 1), loc = 2)
fig.savefig(outFile)
def usage():
print("Usage: ")
print("%s [-i interval] [-o outputFile] [-s] [-t] [-g] [-q] commands" % sys.argv[0] )
print(" -i interval: sampling interval")
print(" -o outputFile: benchmark summary printed to 'outputFile' (default: stderr)")
print(" -t: output trace of benchmarking metrics (default: stderr; use -o to change)")
print(" -g: output a PNG graph showing cpu and memory usage (need matplotlib)")
print(" -q: quiet mode, do not output anything to the console")
print
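# Example invocation (illustrative): sample every 0.5 s and write the summary to
# bench.csv, the raw trace to bench.trace.csv and a usage graph to bench.png:
#   ./monitor.py -i 0.5 -o bench -g sleep 3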
if __name__ == '__main__':
try:
import getopt
optlist, args = getopt.getopt(sys.argv[1:], 'i:o:hstgq')
optlist = dict(optlist)
interval = float(optlist.get('-i', 0.1))
        ## to avoid recording too many snapshots, scale up the value of interval
if '-i' in optlist:
intervalScaling = None
else:
intervalScaling = 2 * interval
if interval <= 0:
print >> sys.stderr, "Sampling interval should be larger than zero, but [ %s ] given" % optlist.get('-i')
sys.exit(1)
if '-o' in optlist:
outFile = optlist['-o']
else:
outFile = sys.stderr
# useShell = '-s' in optlist
outGraph = '-g' in optlist
outTrace = '-t' in optlist
trace = outGraph or outTrace
quietMode = '-q' in optlist
if '-h' in optlist:
usage()
sys.exit(0)
if len(args) == 0:
print >> sys.stderr, "No command(s) given. See helps below..."
usage()
sys.exit(0)
## print 'args = ', args
command = args
except:
usage()
raise
sys.exit(1)
import time
import psutil
import numpy as np
import pandas as pd
if outTrace:
print >> sys.stderr, '\t'.join(['pid', 'ppid', 'utime', 'stime', 'rtime', 'rss', 'vms', 'cwd', 'cmd'])
startTime = time.time()
mainProc = psutil.Popen(command, shell = False)
result = [] # time, pid, cwd, cmd, cpu_times, mem_info
# gather metrics while process/sub-process is still running.
activeSet = set() ##
activeSet.add(mainProc)
while activeSet:
## put all processes to the active queuee
newActiveSet = set()
mainProc.poll() ## need to call poll() so is_running() can work
for p in activeSet:
if p in newActiveSet: continue
try:
children = p.children()
for c in children:
if c.is_running():
newActiveSet.add(c)
except psutil.NoSuchProcess:
continue
activeSet |= newActiveSet
## examine each active proc
## remove inactive proc
toRemoveSet = set()
for p in activeSet:
try:
val = [
time.time() - startTime,
p.pid,
p.ppid(),
p.cpu_times(),
p.memory_info(),
p.cwd(),
p.cmdline()
]
except (psutil.NoSuchProcess, psutil.AccessDenied):
val = [
time.time() - startTime,
None,
None,
None,
None,
None,
None
]
if outTrace:
if val[1] != None:
print >> sys.stderr, '\t'.join(map(str, [val[1], val[2], val[3].user, val[3].system,val[0], val[4].rss, val[4].vms, val[5], ' '.join(val[6])]))
else:
print >> sys.stderr, '\t'.join(map(str, [None, None, None, None, val[0], None, None, None,None]))
if val[1] != None:
result.append(val)
if not p.is_running():
toRemoveSet.add(p)
activeSet -= toRemoveSet
## automatically increase check interval to save memory
if intervalScaling and len(result) % 1000 == 0:
interval = intervalScaling
intervalScaling *= 2
## wait a bit
time.sleep(interval)
# Summarize results
df = pd.DataFrame.from_items([('pid', [i[1] for i in result]),
('ppid', [i[2] for i in result]),
('utime', [i[3].user for i in result]),
('stime', [i[3].system for i in result]),
('rtime', [i[0] for i in result]),
('rss', [i[4].rss for i in result]),
('vms', [i[4].vms for i in result]),
('cwd', [i[5] for i in result]),
('cmd', [' '.join(i[6]) for i in result])
])
if outFile != sys.stderr:
df.to_csv(outFile + ".trace.csv", index = False)
# Group by pid
def f(x):
tp = np.copy(x['rtime'])
x['utime'] = np.max(x['utime'])
x['stime'] = np.max(x['stime'])
x['rtime'] = np.max(x['rtime'])
x['maxRss'] = np.max(x['rss'])
x['maxVms'] = np.max(x['vms'])
x['avgRss'] = calculateMean(tp, x['rss'])
x['avgVms'] = calculateMean(tp, x['vms'])
return x
dOut = df.groupby('pid').apply(f)
dOut = dOut.drop_duplicates(subset = 'pid')
dOut = pd.concat([dOut.drop(['cwd','cmd'], axis = 1), dOut[['cwd','cmd']]], axis = 1)
# print df
# print dOut
if outFile != sys.stderr:
dOut.to_csv(outFile + '.csv', index = False)
elif not quietMode:
printTable(dOut)
if outGraph:
if outFile == sys.stderr:
draw(df, dOut, "bench.png")
else:
draw(df, dOut, outFile + ".png")
|
gpl-2.0
| -1,233,636,329,327,026,400
| 34.926573
| 163
| 0.493917
| false
| 3.439906
| false
| false
| false
|
LLNL/spack
|
var/spack/repos/builtin/packages/util-linux/package.py
|
1
|
2266
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class UtilLinux(AutotoolsPackage):
"""Util-linux is a suite of essential utilities for any Linux system."""
homepage = "https://github.com/karelzak/util-linux"
url = "https://www.kernel.org/pub/linux/utils/util-linux/v2.29/util-linux-2.29.2.tar.gz"
list_url = "https://www.kernel.org/pub/linux/utils/util-linux"
list_depth = 1
version('2.35.1', sha256='37ac05d82c6410d89bc05d43cee101fefc8fe6cf6090b3ce7a1409a6f35db606')
version('2.35', sha256='98acab129a8490265052e6c1e033ca96d68758a13bb7fcd232c06bf16cc96238')
version('2.34', sha256='b62c92e5e1629642113cd41cec1ee86d1ee7e36b8ffe8ec3ac89c11797e9ac25')
version('2.33', sha256='952fb0d3498e81bd67b3c48e283c80cb12c719bc2357ec5801e7d420991ad319')
version('2.29.2', sha256='29ccdf91d2c3245dc705f0ad3bf729ac41d8adcdbeff914e797c552ecb04a4c7')
version('2.29.1', sha256='a6a7adba65a368e6dad9582d9fbedee43126d990df51266eaee089a73c893653')
version('2.25', sha256='7e43273a9e2ab99b5a54ac914fddf5d08ba7ab9b114c550e9f03474672bd23a1')
depends_on('python@2.7:')
depends_on('pkgconfig')
depends_on('gettext', when='+libmount')
# Make it possible to disable util-linux's libuuid so that you may
# reliably depend_on(`libuuid`).
variant('libuuid', default=True, description='Build libuuid')
variant('libmount', default=False, description='Build libmount.so with gettext')
def url_for_version(self, version):
url = "https://www.kernel.org/pub/linux/utils/util-linux/v{0}/util-linux-{1}.tar.gz"
return url.format(version.up_to(2), version)
def setup_build_environment(self, env):
if '+libmount' in self.spec:
env.append_flags('LDFLAGS', '-L{0} -lintl'.format(
self.spec['gettext'].prefix.lib))
def configure_args(self):
config_args = [
'--disable-use-tty-group',
'--disable-makeinstall-chown',
'--without-systemd'
]
config_args.extend(self.enable_or_disable('libuuid'))
return config_args
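    # Example specs selecting the variants defined above (a sketch; plain
    # 'spack install util-linux' gives the defaults +libuuid ~libmount):
    #   spack install util-linux~libuuid+libmount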
|
lgpl-2.1
| 1,896,356,911,546,345,500
| 44.32
| 97
| 0.704766
| false
| 2.818408
| false
| false
| false
|
yeyanchao/calibre
|
setup/installer/osx/app/main.py
|
1
|
23748
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, shutil, plistlib, subprocess, glob, zipfile, tempfile, \
py_compile, stat, operator
abspath, join, basename = os.path.abspath, os.path.join, os.path.basename
from setup import __version__ as VERSION, __appname__ as APPNAME, basenames, \
modules as main_modules, Command, SRC, functions as main_functions
LICENSE = open('LICENSE', 'rb').read()
MAGICK_HOME='@executable_path/../Frameworks/ImageMagick'
ENV = dict(
FC_CONFIG_DIR='@executable_path/../Resources/fonts',
FC_CONFIG_FILE='@executable_path/../Resources/fonts/fonts.conf',
MAGICK_CONFIGURE_PATH=MAGICK_HOME+'/config',
MAGICK_CODER_MODULE_PATH=MAGICK_HOME+'/modules-Q16/coders',
MAGICK_CODER_FILTER_PATH=MAGICK_HOME+'/modules-Q16/filter',
QT_PLUGIN_PATH='@executable_path/../MacOS',
PYTHONIOENCODING='UTF-8',
)
SW = os.environ.get('SW', '/sw')
info = warn = None
class OSX32_Freeze(Command):
description = 'Freeze OSX calibre installation'
def add_options(self, parser):
parser.add_option('--test-launchers', default=False,
action='store_true',
help='Only build launchers')
def run(self, opts):
global info, warn
info, warn = self.info, self.warn
main(opts.test_launchers)
def compile_launcher_lib(contents_dir, gcc, base):
info('\tCompiling calibre_launcher.dylib')
fd = join(contents_dir, 'Frameworks')
dest = join(fd, 'calibre-launcher.dylib')
src = join(base, 'util.c')
cmd = [gcc] + '-Wall -arch i386 -arch x86_64 -dynamiclib -std=gnu99'.split() + [src] + \
['-I'+base] + \
['-I/sw/python/Python.framework/Versions/Current/Headers'] + \
'-current_version 1.0 -compatibility_version 1.0'.split() + \
'-fvisibility=hidden -o'.split() + [dest] + \
['-install_name',
'@executable_path/../Frameworks/'+os.path.basename(dest)] + \
['-F/sw/python', '-framework', 'Python', '-framework', 'CoreFoundation', '-headerpad_max_install_names']
info('\t'+' '.join(cmd))
sys.stdout.flush()
subprocess.check_call(cmd)
return dest
def compile_launchers(contents_dir, xprograms, pyver):
gcc = os.environ.get('CC', 'gcc')
base = os.path.dirname(__file__)
lib = compile_launcher_lib(contents_dir, gcc, base)
src = open(join(base, 'launcher.c'), 'rb').read()
env, env_vals = [], []
for key, val in ENV.items():
env.append('"%s"'% key)
env_vals.append('"%s"'% val)
env = ', '.join(env)+', '
env_vals = ', '.join(env_vals)+', '
src = src.replace('/*ENV_VARS*/', env)
src = src.replace('/*ENV_VAR_VALS*/', env_vals)
programs = [lib]
for program, x in xprograms.items():
module, func = x
info('\tCompiling', program)
out = join(contents_dir, 'MacOS', program)
programs.append(out)
psrc = src.replace('**PROGRAM**', program)
psrc = psrc.replace('**MODULE**', module)
psrc = psrc.replace('**FUNCTION**', func)
psrc = psrc.replace('**PYVER**', pyver)
fsrc = '/tmp/%s.c'%program
with open(fsrc, 'wb') as f:
f.write(psrc)
cmd = [gcc, '-Wall', '-arch', 'x86_64', '-arch', 'i386',
'-I'+base, fsrc, lib, '-o', out,
'-headerpad_max_install_names']
info('\t'+' '.join(cmd))
sys.stdout.flush()
subprocess.check_call(cmd)
return programs
def flipwritable(fn, mode=None):
"""
Flip the writability of a file and return the old mode. Returns None
if the file is already writable.
"""
if os.access(fn, os.W_OK):
return None
old_mode = os.stat(fn).st_mode
os.chmod(fn, stat.S_IWRITE | old_mode)
return old_mode
STRIPCMD = ['/usr/bin/strip', '-x', '-S', '-']
def strip_files(files, argv_max=(256 * 1024)):
"""
Strip a list of files
"""
tostrip = [(fn, flipwritable(fn)) for fn in files if os.path.exists(fn)]
while tostrip:
cmd = list(STRIPCMD)
flips = []
pathlen = reduce(operator.add, [len(s) + 1 for s in cmd])
while pathlen < argv_max:
if not tostrip:
break
added, flip = tostrip.pop()
pathlen += len(added) + 1
cmd.append(added)
flips.append((added, flip))
else:
cmd.pop()
tostrip.append(flips.pop())
os.spawnv(os.P_WAIT, cmd[0], cmd)
for args in flips:
flipwritable(*args)
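# Clarifying note on the batching above (descriptive only): paths are appended to
# the strip command until the estimated argv length reaches argv_max; the path that
# pushes it over the limit is popped off the command and returned to the queue for
# the next batch, and each batch is run through /usr/bin/strip via os.spawnv.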
def flush(func):
def ff(*args, **kwargs):
sys.stdout.flush()
sys.stderr.flush()
ret = func(*args, **kwargs)
sys.stdout.flush()
sys.stderr.flush()
return ret
return ff
class Py2App(object):
FID = '@executable_path/../Frameworks'
def __init__(self, build_dir, test_launchers=False):
self.build_dir = build_dir
self.contents_dir = join(self.build_dir, 'Contents')
self.resources_dir = join(self.contents_dir, 'Resources')
self.frameworks_dir = join(self.contents_dir, 'Frameworks')
self.version_info = '.'.join(map(str, sys.version_info[:2]))
self.site_packages = join(self.resources_dir, 'Python', 'site-packages')
self.to_strip = []
self.warnings = []
self.run(test_launchers)
def warn(self, *args):
warn(*args)
def run(self, test_launchers):
ret = 0
if not test_launchers:
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
self.create_skeleton()
self.create_plist()
self.add_python_framework()
self.add_site_packages()
self.add_stdlib()
self.add_qt_frameworks()
self.add_calibre_plugins()
self.add_podofo()
self.add_poppler()
self.add_libjpeg()
self.add_libpng()
self.add_fontconfig()
self.add_imagemagick()
self.add_misc_libraries()
self.add_resources()
self.compile_py_modules()
self.create_console_app()
self.copy_site()
self.create_exe()
if not test_launchers:
self.strip_files()
ret = self.makedmg(self.build_dir, APPNAME+'-'+VERSION)
return ret
@flush
def add_resources(self):
shutil.copytree('resources', os.path.join(self.resources_dir,
'resources'))
@flush
def strip_files(self):
info('\nStripping files...')
strip_files(self.to_strip)
@flush
def create_exe(self):
info('\nCreating launchers')
programs = {}
progs = []
for x in ('console', 'gui'):
progs += list(zip(basenames[x], main_modules[x], main_functions[x]))
for program, module, func in progs:
programs[program] = (module, func)
programs = compile_launchers(self.contents_dir, programs,
self.version_info)
for out in programs:
self.fix_dependencies_in_lib(out)
@flush
def set_id(self, path_to_lib, new_id):
old_mode = flipwritable(path_to_lib)
subprocess.check_call(['install_name_tool', '-id', new_id, path_to_lib])
if old_mode is not None:
flipwritable(path_to_lib, old_mode)
@flush
def get_dependencies(self, path_to_lib):
raw = subprocess.Popen(['otool', '-L', path_to_lib],
stdout=subprocess.PIPE).stdout.read()
for line in raw.splitlines():
if 'compatibility' not in line or line.strip().endswith(':'):
continue
idx = line.find('(')
path = line[:idx].strip()
yield path
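# For reference, the `otool -L` output parsed above looks roughly like:
#   /path/to/libfoo.dylib:
#       /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1226.10.1)
# Header lines (ending in ':') and lines without 'compatibility' are skipped, and
# everything before the '(' is yielded as the dependency path.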
@flush
def get_local_dependencies(self, path_to_lib):
for x in self.get_dependencies(path_to_lib):
for y in (SW+'/lib/', '/usr/local/lib/', SW+'/qt/lib/',
'/opt/local/lib/',
SW+'/python/Python.framework/', SW+'/freetype/lib/'):
if x.startswith(y):
if y == SW+'/python/Python.framework/':
y = SW+'/python/'
yield x, x[len(y):]
break
@flush
def change_dep(self, old_dep, new_dep, path_to_lib):
info('\tResolving dependency %s to'%old_dep, new_dep)
subprocess.check_call(['install_name_tool', '-change', old_dep, new_dep,
path_to_lib])
@flush
def fix_dependencies_in_lib(self, path_to_lib):
info('\nFixing dependencies in', path_to_lib)
self.to_strip.append(path_to_lib)
old_mode = flipwritable(path_to_lib)
for dep, bname in self.get_local_dependencies(path_to_lib):
ndep = self.FID+'/'+bname
self.change_dep(dep, ndep, path_to_lib)
if list(self.get_local_dependencies(path_to_lib)):
raise Exception('Failed to resolve deps in: '+path_to_lib)
if old_mode is not None:
flipwritable(path_to_lib, old_mode)
@flush
def add_python_framework(self):
info('\nAdding Python framework')
src = join('/sw/python', 'Python.framework')
x = join(self.frameworks_dir, 'Python.framework')
curr = os.path.realpath(join(src, 'Versions', 'Current'))
currd = join(x, 'Versions', basename(curr))
rd = join(currd, 'Resources')
os.makedirs(rd)
shutil.copy2(join(curr, 'Resources', 'Info.plist'), rd)
shutil.copy2(join(curr, 'Python'), currd)
self.set_id(join(currd, 'Python'),
self.FID+'/Python.framework/Versions/%s/Python'%basename(curr))
@flush
def add_qt_frameworks(self):
info('\nAdding Qt Framework')
for f in ('QtCore', 'QtGui', 'QtXml', 'QtNetwork', 'QtSvg', 'QtWebKit',
'QtXmlPatterns'):
self.add_qt_framework(f)
for d in glob.glob(join(SW, 'qt', 'plugins', '*')):
shutil.copytree(d, join(self.contents_dir, 'MacOS', basename(d)))
for l in glob.glob(join(self.contents_dir, 'MacOS', '*/*.dylib')):
self.fix_dependencies_in_lib(l)
x = os.path.relpath(l, join(self.contents_dir, 'MacOS'))
self.set_id(l, '@executable_path/'+x)
@flush
def add_qt_framework(self, f):
libname = f
f = f+'.framework'
src = join(SW, 'qt', 'lib', f)
ignore = shutil.ignore_patterns('Headers', '*.h', 'Headers/*')
dest = join(self.frameworks_dir, f)
shutil.copytree(src, dest, symlinks=True,
ignore=ignore)
lib = os.path.realpath(join(dest, libname))
rpath = os.path.relpath(lib, self.frameworks_dir)
self.set_id(lib, self.FID+'/'+rpath)
self.fix_dependencies_in_lib(lib)
@flush
def create_skeleton(self):
c = join(self.build_dir, 'Contents')
for x in ('Frameworks', 'MacOS', 'Resources'):
os.makedirs(join(c, x))
for x in ('library.icns', 'book.icns'):
shutil.copyfile(join('icons', x), join(self.resources_dir, x))
@flush
def add_calibre_plugins(self):
dest = join(self.frameworks_dir, 'plugins')
os.mkdir(dest)
for f in glob.glob('src/calibre/plugins/*.so'):
shutil.copy2(f, dest)
self.fix_dependencies_in_lib(join(dest, basename(f)))
@flush
def create_plist(self):
from calibre.ebooks import BOOK_EXTENSIONS
env = dict(**ENV)
env['CALIBRE_LAUNCHED_FROM_BUNDLE']='1';
docs = [{'CFBundleTypeName':'E-book',
'CFBundleTypeExtensions':list(BOOK_EXTENSIONS),
'CFBundleTypeRole':'Viewer',
}]
pl = dict(
CFBundleDevelopmentRegion='English',
CFBundleDisplayName=APPNAME,
CFBundleName=APPNAME,
CFBundleIdentifier='net.kovidgoyal.calibre',
CFBundleVersion=VERSION,
CFBundleShortVersionString=VERSION,
CFBundlePackageType='APPL',
CFBundleSignature='????',
CFBundleExecutable='calibre',
CFBundleDocumentTypes=docs,
LSMinimumSystemVersion='10.5.2',
LSRequiresNativeExecution=True,
NSAppleScriptEnabled=False,
NSHumanReadableCopyright='Copyright 2010, Kovid Goyal',
CFBundleGetInfoString=('calibre, an E-book management '
'application. Visit http://calibre-ebook.com for details.'),
CFBundleIconFile='library.icns',
LSMultipleInstancesProhibited=True,
NSHighResolutionCapable=True,
LSEnvironment=env
)
plistlib.writePlist(pl, join(self.contents_dir, 'Info.plist'))
@flush
def install_dylib(self, path, set_id=True):
shutil.copy2(path, self.frameworks_dir)
if set_id:
self.set_id(join(self.frameworks_dir, basename(path)),
self.FID+'/'+basename(path))
self.fix_dependencies_in_lib(join(self.frameworks_dir, basename(path)))
@flush
def add_podofo(self):
info('\nAdding PoDoFo')
pdf = join(SW, 'lib', 'libpodofo.0.9.1.dylib')
self.install_dylib(pdf)
@flush
def add_poppler(self):
info('\nAdding poppler')
for x in ('libpoppler.27.dylib',):
self.install_dylib(os.path.join(SW, 'lib', x))
for x in ('pdftohtml', 'pdftoppm', 'pdfinfo'):
self.install_dylib(os.path.join(SW, 'bin', x), False)
@flush
def add_libjpeg(self):
info('\nAdding libjpeg')
self.install_dylib(os.path.join(SW, 'lib', 'libjpeg.8.dylib'))
@flush
def add_libpng(self):
info('\nAdding libpng')
self.install_dylib(os.path.join(SW, 'lib', 'libpng12.0.dylib'))
self.install_dylib(os.path.join(SW, 'lib', 'libpng.3.dylib'))
@flush
def add_fontconfig(self):
info('\nAdding fontconfig')
for x in ('fontconfig.1', 'freetype.6', 'expat.1'):
src = os.path.join(SW, 'lib', 'lib'+x+'.dylib')
self.install_dylib(src)
dst = os.path.join(self.resources_dir, 'fonts')
if os.path.exists(dst):
shutil.rmtree(dst)
src = os.path.join(SW, 'etc', 'fonts')
shutil.copytree(src, dst, symlinks=False)
fc = os.path.join(dst, 'fonts.conf')
raw = open(fc, 'rb').read()
raw = raw.replace('<dir>/usr/share/fonts</dir>', '''\
<dir>/Library/Fonts</dir>
<dir>/Network/Library/Fonts</dir>
<dir>/System/Library/Fonts</dir>
<dir>/usr/X11R6/lib/X11/fonts</dir>
<dir>/usr/share/fonts</dir>
<dir>/var/root/Library/Fonts</dir>
<dir>/usr/share/fonts</dir>
''')
open(fc, 'wb').write(raw)
@flush
def add_imagemagick(self):
info('\nAdding ImageMagick')
for x in ('Wand', 'Core'):
self.install_dylib(os.path.join(SW, 'lib', 'libMagick%s.5.dylib'%x))
idir = glob.glob(os.path.join(SW, 'lib', 'ImageMagick-*'))[-1]
dest = os.path.join(self.frameworks_dir, 'ImageMagick')
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(idir, dest, True)
for x in os.walk(dest):
for f in x[-1]:
if f.endswith('.so'):
f = join(x[0], f)
self.fix_dependencies_in_lib(f)
@flush
def add_misc_libraries(self):
for x in ('usb-1.0.0', 'mtp.9', 'unrar', 'readline.6.1',
'wmflite-0.2.7', 'chm.0', 'sqlite3.0'):
info('\nAdding', x)
x = 'lib%s.dylib'%x
shutil.copy2(join(SW, 'lib', x), self.frameworks_dir)
dest = join(self.frameworks_dir, x)
self.set_id(dest, self.FID+'/'+x)
if 'mtp' in x:
self.fix_dependencies_in_lib(dest)
@flush
def add_site_packages(self):
info('\nAdding site-packages')
os.makedirs(self.site_packages)
paths = reversed(map(abspath, [x for x in sys.path if x.startswith('/')]))
upaths = []
for x in paths:
if x not in upaths and (x.endswith('.egg') or
x.endswith('/site-packages')):
upaths.append(x)
upaths.append(os.path.expanduser('~/build/calibre/src'))
for x in upaths:
info('\t', x)
tdir = None
try:
if not os.path.isdir(x):
try:
zf = zipfile.ZipFile(x)
except:
self.warn(x, 'is neither a directory nor a zipfile')
continue
tdir = tempfile.mkdtemp()
zf.extractall(tdir)
x = tdir
self.add_modules_from_dir(x)
self.add_packages_from_dir(x)
finally:
if tdir is not None:
shutil.rmtree(tdir)
shutil.rmtree(os.path.join(self.site_packages, 'calibre', 'plugins'))
self.remove_bytecode(join(self.resources_dir, 'Python', 'site-packages'))
@flush
def add_modules_from_dir(self, src):
for x in glob.glob(join(src, '*.py'))+glob.glob(join(src, '*.so')):
shutil.copy2(x, self.site_packages)
if x.endswith('.so'):
self.fix_dependencies_in_lib(x)
@flush
def add_packages_from_dir(self, src):
for x in os.listdir(src):
x = join(src, x)
if os.path.isdir(x) and os.path.exists(join(x, '__init__.py')):
if self.filter_package(basename(x)):
continue
self.add_package_dir(x)
@flush
def add_package_dir(self, x, dest=None):
def ignore(root, files):
ans = []
for y in files:
ext = os.path.splitext(y)[1]
if ext not in ('', '.py', '.so') or \
(not ext and not os.path.isdir(join(root, y))):
ans.append(y)
return ans
if dest is None:
dest = self.site_packages
dest = join(dest, basename(x))
shutil.copytree(x, dest, symlinks=True, ignore=ignore)
self.postprocess_package(x, dest)
for x in os.walk(dest):
for f in x[-1]:
if f.endswith('.so'):
f = join(x[0], f)
self.fix_dependencies_in_lib(f)
@flush
def filter_package(self, name):
return name in ('Cython', 'modulegraph', 'macholib', 'py2app',
'bdist_mpkg', 'altgraph')
@flush
def postprocess_package(self, src_path, dest_path):
pass
@flush
def add_stdlib(self):
info('\nAdding python stdlib')
src = '/sw/python/Python.framework/Versions/Current/lib/python'
src += self.version_info
dest = join(self.resources_dir, 'Python', 'lib', 'python')
dest += self.version_info
os.makedirs(dest)
for x in os.listdir(src):
if x in ('site-packages', 'config', 'test', 'lib2to3', 'lib-tk',
'lib-old', 'idlelib', 'plat-mac', 'plat-darwin', 'site.py'):
continue
x = join(src, x)
if os.path.isdir(x):
self.add_package_dir(x, dest)
elif os.path.splitext(x)[1] in ('.so', '.py'):
shutil.copy2(x, dest)
dest2 = join(dest, basename(x))
if dest2.endswith('.so'):
self.fix_dependencies_in_lib(dest2)
self.remove_bytecode(join(self.resources_dir, 'Python', 'lib'))
confdir = join(self.resources_dir, 'Python',
'lib/python%s/config'%self.version_info)
os.makedirs(confdir)
shutil.copy2(join(src, 'config/Makefile'), confdir)
incdir = join(self.resources_dir, 'Python',
'include/python'+self.version_info)
os.makedirs(incdir)
shutil.copy2(join(src.replace('/lib/', '/include/'), 'pyconfig.h'),
incdir)
@flush
def remove_bytecode(self, dest):
for x in os.walk(dest):
root = x[0]
for f in x[-1]:
if os.path.splitext(f)[1] in ('.pyc', '.pyo'):
os.remove(join(root, f))
@flush
def compile_py_modules(self):
info( '\nCompiling Python modules')
base = join(self.resources_dir, 'Python')
for x in os.walk(base):
root = x[0]
for f in x[-1]:
if f.endswith('.py'):
y = join(root, f)
rel = os.path.relpath(y, base)
try:
py_compile.compile(y, dfile=rel, doraise=True)
os.remove(y)
except:
self.warn('WARNING: Failed to byte-compile', y)
@flush
def create_console_app(self):
info( '\nCreating console.app')
cc_dir = os.path.join(self.contents_dir, 'console.app', 'Contents')
os.makedirs(cc_dir)
for x in os.listdir(self.contents_dir):
if x == 'console.app':
continue
if x == 'Info.plist':
plist = plistlib.readPlist(join(self.contents_dir, x))
plist['LSUIElement'] = '1'
plist.pop('CFBundleDocumentTypes')
plistlib.writePlist(plist, join(cc_dir, x))
else:
os.symlink(join('../..', x),
join(cc_dir, x))
@flush
def copy_site(self):
base = os.path.dirname(__file__)
shutil.copy2(join(base, 'site.py'), join(self.resources_dir, 'Python',
'lib', 'python'+self.version_info))
@flush
def makedmg(self, d, volname,
destdir='dist',
internet_enable=True,
format='UDBZ'):
''' Copy a directory d into a dmg named volname '''
info('\nCreating dmg')
sys.stdout.flush()
if not os.path.exists(destdir):
os.makedirs(destdir)
dmg = os.path.join(destdir, volname+'.dmg')
if os.path.exists(dmg):
os.unlink(dmg)
tdir = tempfile.mkdtemp()
appdir = os.path.join(tdir, os.path.basename(d))
shutil.copytree(d, appdir, symlinks=True)
subprocess.check_call(['/Users/kovid/sign.sh', appdir])
os.symlink('/Applications', os.path.join(tdir, 'Applications'))
subprocess.check_call(['/usr/bin/hdiutil', 'create', '-srcfolder', tdir,
'-volname', volname, '-format', format, dmg])
shutil.rmtree(tdir)
if internet_enable:
subprocess.check_call(['/usr/bin/hdiutil', 'internet-enable', '-yes', dmg])
size = os.stat(dmg).st_size/(1024*1024.)
info('\nInstaller size: %.2fMB\n'%size)
return dmg
def test_exe():
build_dir = abspath(join('build', APPNAME+'.app'))
py2app = Py2App(build_dir)
py2app.create_exe()
return 0
def main(test=False):
if 'test_exe' in sys.argv:
return test_exe()
build_dir = abspath(join(os.path.dirname(SRC), 'build', APPNAME+'.app'))
Py2App(build_dir, test_launchers=test)
return 0
if __name__ == '__main__':
sys.exit(main())
|
gpl-3.0
| 4,389,795,545,550,242,300
| 35.423313
| 116
| 0.542909
| false
| 3.539723
| true
| false
| false
|
Tecktron/quickmailer
|
quickmail.py
|
1
|
3408
|
import argparse
import os
import re
import sys
if __name__ == "__main__":
if sys.version_info < (3, 0):
print("This script requires version 3+ of python. Please try running it with command 'python3' instead")
exit(8)
parser = argparse.ArgumentParser(
description="Quick Mailer"
)
parser.add_argument("-m", "--message", dest="msg", type=str, required=True,
help="The plain text message or filename of a message to send")
parser.add_argument("-t", "--to", dest="to", nargs="+", metavar="email@domain.com", type=str,
help="Email address to receive the message", required=True)
parser.add_argument("-f", "--from", dest="sender", type=str, required=False,
help="The From email; if not provided, the default from the settings will be used. NOTE: a specific "
"address may be required by your SMTP server")
parser.add_argument("-s", "--subject", dest="subject", required=True, type=str, help="The subject line")
parser.add_argument("-w", "--html", dest="html", action="store_true", required=False,
help="If using a file for -m and the file is HTML, set this flag to send an HTML email")
parser.add_argument("-a", "--attach", dest="attach", metavar="/path/to/file.txt", nargs="*", required=False,
help="files to attach (use full path)", default=[])
args = parser.parse_args()
# Here we inject the settings and load django
if not os.environ.get("DJANGO_SETTINGS_MODULE", False):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "base.settings")
try:
import django
from django.conf import settings
except ImportError:
django = None
settings = None
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
django.setup()
# don't import Django things until after setup or errors abound
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.utils.html import strip_tags
msg = ""
is_file = False
if os.path.isfile(args.msg) is False:
msg = "{}".format(args.msg)
else:
try:
msg = open(args.msg).read()
except OSError as e:
print("Could not read msg file, exception said: {}".format(e))
exit(4)
sender = args.sender
if not sender:
sender = settings.DEFAULT_FROM_EMAIL
if args.html:
# quick and dirty, create a plain text version.
# replace breaks and paragraphs with newlines
plain = re.sub(r"<br\s*/?>", "\n", msg)
plain = re.sub("</p>", "\n\n", plain)
# strip the rest of the tags.
plain = strip_tags(plain)
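# Illustrative example of the conversion above: an HTML body such as
# "<p>Hello<br>world</p>" becomes "Hello\nworld\n\n" once the breaks and
# paragraph ends are replaced and the remaining tags are stripped.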
email = EmailMultiAlternatives(args.subject, plain, sender, args.to)
email.attach_alternative(msg, "text/html")
else:
email = EmailMessage(args.subject, msg, sender, args.to)
if len(args.attach):
for attachment in args.attach:
if os.path.isfile(attachment):
email.attach_file(attachment)
sent = email.send()
if sent:
print("Email sent successfully")
else:
print("There was an issue sending the message")
|
mit
| -8,947,849,462,155,596,000
| 39.094118
| 120
| 0.60446
| false
| 4.191882
| false
| false
| false
|
siquick/mostplayed
|
mp/ss_playlist.py
|
1
|
2275
|
import requests
import json
import hashlib # used to generate the key for the insert
import base64
def req_auth():
# request authorization
auth_code = base64.b64encode(b'2b9b835a9d2d45eab79778233e9142e4:6783d4b5790a4f5aaa94b863c30fc215').decode('ascii')
headers = {'Authorization': 'Basic ' + auth_code}
auth_url = 'https://accounts.spotify.com/api/token'
body = {'grant_type': 'client_credentials'}
r = requests.post(auth_url, data=body, headers=headers)
r_json = json.loads(r.text)
return r_json['access_token']
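# Illustrative use of the token returned above (assuming the standard Spotify
# client-credentials flow): later Web API calls pass it as a Bearer header, e.g.
# requests.get('https://api.spotify.com/v1/tracks/<track_id>',
#              headers={'Authorization': 'Bearer ' + req_auth()})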
# gets a list of the good records that are on Spotify
def get_records():
query = db_select('''SELECT x.spotify_url,x.date,x.id,x.all_artists,x.title,sum(num) as total FROM
(SELECT releases.*,COUNT(listens.release_id) * 5 as num
FROM soundshe.releases_all releases
INNER JOIN soundshe.listens
ON listens.release_id=releases.id
#WHERE year(releases.date)='2017'
GROUP BY releases.id
UNION ALL
SELECT releases.*,COUNT(ce.release_id) * 10 as num
FROM soundshe.releases_all releases
INNER JOIN soundshe.charts_extended ce
ON ce.release_id=releases.id
#WHERE year(releases.date)='2017'
WHERE ce.url!='Ghost'
GROUP BY releases.id
UNION ALL
SELECT releases.*,COUNT(buys.release_id) * 15 as num
FROM soundshe.releases_all releases
INNER JOIN soundshe.buys
ON buys.release_id=releases.id
#WHERE year(releases.date)='2017'
GROUP BY releases.id
) as x
WHERE x.spotify_url!=''
AND datediff(now(),x.date) < 30
AND x.genre IN ('House','Techno','Disco','Bass')
GROUP by x.id
ORDER BY total DESC
LIMIT 0,10''', ())
get_data = query.fetchall()
for row in get_data:
print(row[0], row[3], row[4])
# add_done = add_tracks(access_token,num_tracks,time_range,user_id,owner_id,playlist_id,now)
access_token = get_access_token(code)
print(access_token)
# x = get_records()
# print(x)
|
gpl-3.0
| 4,837,883,844,478,587,000
| 35.693548
| 102
| 0.57978
| false
| 3.645833
| false
| false
| false
|
sonofmun/DissProject
|
Chapter_3/graph_cs_corps.py
|
1
|
1217
|
__author__ = 'matt'
"""
Bar chart demo with pairs of bars grouped for easy comparison.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#corps = [('NT', (0, 1, 2, 3, 4, 5)), ('LXX', (0, 1, 2, 3, 4, 5)), ('Josephus', (0, 1, 2, 3, 4, 5)), ('Philo', (0, 1, 2, 3, 4, 5)), ('Plutarch', (0, 1, 2, 3, 4, 5)), ('Perseus', (0, 1, 2, 3, 4, 5))]
corps = pd.DataFrame(np.random.random(size=(6, 6)), index=['NT', 'LXX', 'Josephus', 'Philo', 'Plutarch', 'Perseus'], columns=['NT', 'LXX', 'Josephus', 'Philo', 'Plutarch', 'Perseus'])
fig, ax = plt.subplots()
index = np.arange(len(corps))*1.2
bar_width = 0.15
opacity = 0.4
#error_config = {'ecolor': '0.3'}
mult = 0
for corp in corps:
rects = plt.bar(index + bar_width * mult, corps.loc[corp], bar_width, color='.9', label=corp)
rects.remove()
for i, rect in enumerate(rects):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., height / 2, corp, size='small', rotation='vertical', ha='center', va='bottom')
mult += 1
plt.xlabel('Group')
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.xticks(index + 3 * bar_width, [x for x in corps])
plt.savefig('cs_corps_test.png', dpi=500)
|
gpl-3.0
| -7,344,222,544,633,724,000
| 32.833333
| 198
| 0.598192
| false
| 2.562105
| false
| false
| false
|
joelvbernier/hexrd-sandbox
|
multipanel_ff/spot_montage.py
|
1
|
4998
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 19 15:29:27 2017
@author: bernier2
"""
import argparse
import numpy as np
import h5py
from matplotlib import pyplot as plt
"""
# UNCOMMENT IF YOU HAVE A SANE LATEX ENV AND WANT NICE FIG LABELS
#
# Options
params = {'text.usetex': True,
'font.size': 14,
'font.family': 'mathrm',
'text.latex.unicode': True,
'pgf.texsystem': 'pdflatex'
}
plt.rcParams.update(params)
"""
plt.ion()
def montage(X, colormap=plt.cm.inferno, show_borders=True,
title=None, xlabel=None, ylabel=None,
threshold=None, filename=None):
m, n, count = np.shape(X)
img_data = np.log(X - np.min(X) + 1)
if threshold is None:
threshold = 0.
else:
threshold = np.log(threshold - np.min(X) + 1)
mm = int(np.ceil(np.sqrt(count)))
nn = mm
M = np.zeros((mm * m, nn * n))
# colormap
colormap.set_under('b')
fig, ax = plt.subplots()
image_id = 0
for j in range(mm):
sliceM = j * m
ax.plot()
for k in range(nn):
if image_id >= count:
img = np.nan*np.ones((m, n))
else:
img = img_data[:, :, image_id]
sliceN = k * n
M[sliceM:sliceM + m, sliceN:sliceN + n] = img
image_id += 1
# M = np.sqrt(M + np.min(M))
im = ax.imshow(M, cmap=colormap, vmin=threshold, interpolation='nearest')
if show_borders:
xs = np.vstack(
[np.vstack([[n*i, n*i] for i in range(nn+1)]),
np.tile([0, nn*n], (mm+1, 1))]
)
ys = np.vstack(
[np.tile([0, mm*m], (nn+1, 1)),
np.vstack([[m*i, m*i] for i in range(mm+1)])]
)
for xp, yp in zip(xs, ys):
ax.plot(xp, yp, 'c:')
if xlabel is None:
ax.set_xlabel(r'$2\theta$', fontsize=14)
else:
ax.set_xlabel(xlabel, fontsize=14)
if ylabel is None:
ax.set_ylabel(r'$\eta$', fontsize=14)
else:
ax.set_ylabel(ylabel, fontsize=14)
ax.axis('normal')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
cbar_ax = fig.add_axes([0.875, 0.155, 0.025, 0.725])
cbar = fig.colorbar(im, cax=cbar_ax)
cbar.set_label(r"$\ln(intensity)$", labelpad=5)
ax.set_xticks([])
ax.set_yticks([])
if title is not None:
ax.set_title(title, fontsize=18)
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=300)
return M
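# Minimal usage sketch for montage() with synthetic data (illustrative only):
# stack = np.random.poisson(5., size=(64, 64, 12)).astype(float)
# montage(stack, title='synthetic spots', threshold=2.)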
def plot_gvec_from_hdf5(fname, gvec_id, threshold=0.):
"""
"""
f = h5py.File(fname, 'r')
for det_key, panel_data in f['reflection_data'].iteritems():
for spot_id, spot_data in panel_data.iteritems():
attrs = spot_data.attrs
if attrs['hkl_id'] == gvec_id:
# grab some data
tth_crd = np.degrees(spot_data['tth_crd'])
eta_crd = np.degrees(spot_data['eta_crd'])
intensities = np.transpose(
np.array(spot_data['intensities']),
(1, 2, 0)
)
# make labels
figname = r'Spot %d, ' % attrs['peak_id'] \
+ r"detector '%s', " % det_key \
+ r'({:^3} {:^3} {:^3})'.format(*attrs['hkl'])
xlabel = r'$2\theta\in(%.3f, %.3f)$' \
% (tth_crd[0], tth_crd[-1])
ylabel = r'$\eta\in(%.3f, %.3f)$' \
% (eta_crd[0], eta_crd[-1])
# make montage
montage(intensities, title=figname,
xlabel=xlabel, ylabel=ylabel,
threshold=threshold)
pass
pass
pass
f.close()
return
# =============================================================================
# %% CMD LINE HOOK
# =============================================================================
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Montage of spot data for a specifed G-vector family")
parser.add_argument('hdf5_archive',
help="hdf5 archive filename",
type=str)
parser.add_argument('gvec_id',
help="unique G-vector ID from PlaneData",
type=int)
parser.add_argument('-t', '--threshold',
help="intensity threshold",
type=float, default=0.)
args = parser.parse_args()
h5file = args.hdf5_archive
hklid = args.gvec_id
threshold = args.threshold
plot_gvec_from_hdf5(h5file, hklid, threshold=threshold)
|
gpl-3.0
| 2,192,930,690,850,654,200
| 29.055901
| 79
| 0.473589
| false
| 3.393075
| false
| false
| false
|
WoLpH/EventGhost
|
eg/Core.py
|
1
|
12315
|
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
"""
.. attribute:: globals
:class:`eg.Bunch` instance, that holds all global variables used by
PythonCommand actions. PythonScripts (and all other code) can access
these globals through :obj:`eg.globals`.
.. attribute:: event
Instance of the :class:`eg.EventGhostEvent` instance, that is currently
been processed.
.. autofunction:: eg.DummyFunc
"""
import asyncore
import locale
import os
import socket
import sys
import threading
import time
import wx
from os.path import exists, join
# Local imports
import eg
import Init
eg.APP_NAME = "EventGhost"
eg.CORE_PLUGIN_GUIDS = (
"{9D499A2C-72B6-40B0-8C8C-995831B10BB4}", # "EventGhost"
"{A21F443B-221D-44E4-8596-E1ED7100E0A4}", # "System"
"{E974D074-B0A3-4D0C-BBD1-992475DDD69D}", # "Window"
"{6B1751BF-F94E-4260-AB7E-64C0693FD959}", # "Mouse"
)
eg.ID_TEST = wx.NewId()
eg.mainDir = eg.Cli.mainDir
eg.imagesDir = join(eg.mainDir, "images")
eg.languagesDir = join(eg.mainDir, "languages")
eg.sitePackagesDir = join(
eg.mainDir,
"lib%d%d" % sys.version_info[:2],
"site-packages"
)
eg.revision = 2000 # Deprecated
eg.startupArguments = eg.Cli.args
eg.debugLevel = 0
eg.systemEncoding = locale.getdefaultlocale()[1]
eg.document = None
eg.result = None
eg.plugins = eg.Bunch()
eg.globals = eg.Bunch()
eg.globals.eg = eg
eg.event = None
eg.eventTable = {}
eg.eventString = ""
eg.notificationHandlers = {}
eg.programCounter = None
eg.programReturnStack = []
eg.indent = 0
eg.pluginList = []
eg.mainThread = threading.currentThread()
eg.stopExecutionFlag = False
eg.lastFoundWindows = []
eg.currentItem = None
eg.actionGroup = eg.Bunch()
eg.actionGroup.items = []
eg.folderPath = eg.FolderPath()
def _CommandEvent():
"""Generate new (CmdEvent, Binder) tuple
e.g. MooCmdEvent, EVT_MOO = EgCommandEvent()
"""
evttype = wx.NewEventType()
class _Event(wx.PyCommandEvent):
def __init__(self, id, **kw):
wx.PyCommandEvent.__init__(self, evttype, id)
self.__dict__.update(kw)
if not hasattr(self, "value"):
self.value = None
def GetValue(self):
return self.value
def SetValue(self, value):
self.value = value
return _Event, wx.PyEventBinder(evttype, 1)
eg.CommandEvent = _CommandEvent
eg.ValueChangedEvent, eg.EVT_VALUE_CHANGED = eg.CommandEvent()
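# Illustrative use of the event pair created above (not part of the original file):
# someWindow.Bind(eg.EVT_VALUE_CHANGED, OnValueChanged)
# wx.PostEvent(someWindow, eg.ValueChangedEvent(someWindow.GetId(), value=42))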
eg.pyCrustFrame = None
eg.dummyAsyncoreDispatcher = None
if eg.startupArguments.configDir is None:
eg.configDir = join(eg.folderPath.RoamingAppData, eg.APP_NAME)
else:
eg.configDir = eg.startupArguments.configDir
if not exists(eg.configDir):
try:
os.makedirs(eg.configDir)
except:
pass
if eg.startupArguments.isMain:
if exists(eg.configDir):
os.chdir(eg.configDir)
else:
os.chdir(eg.mainDir)
eg.localPluginDir = join(eg.folderPath.ProgramData, eg.APP_NAME, "plugins")
eg.corePluginDir = join(eg.mainDir, "plugins")
eg.pluginDirs = [eg.corePluginDir, eg.localPluginDir]
Init.InitPathsAndBuiltins()
from eg.WinApi.Dynamic import GetCurrentProcessId # NOQA
eg.processId = GetCurrentProcessId()
Init.InitPil()
class Exception(Exception):
def __unicode__(self):
try:
return "\n".join([unicode(arg) for arg in self.args])
except UnicodeDecodeError:
return "\n".join([str(arg).decode('mbcs') for arg in self.args])
class StopException(Exception):
pass
class HiddenAction:
pass
def Bind(notification, listener):
if notification not in eg.notificationHandlers:
notificationHandler = eg.NotificationHandler()
eg.notificationHandlers[notification] = notificationHandler
else:
notificationHandler = eg.notificationHandlers[notification]
notificationHandler.listeners.append(listener)
def CallWait(func, *args, **kwargs):
result = [None]
event = threading.Event()
def CallWaitWrapper():
try:
result[0] = func(*args, **kwargs)
finally:
event.set()
wx.CallAfter(CallWaitWrapper)
event.wait()
return result[0]
def DummyFunc(*dummyArgs, **dummyKwargs):
"""
Just a do-nothing-function, that accepts arbitrary arguments.
"""
pass
def Exit():
"""
Sometimes you want to quickly exit a PythonScript, because you don't
want to build deeply nested if-structures for example. eg.Exit() will
exit your PythonScript immediately.
(Note: This is actually a sys.exit() but will not exit EventGhost,
because the SystemExit exception is caught for a PythonScript.)
"""
sys.exit()
def HasActiveHandler(eventstring):
for eventHandler in eg.eventTable.get(eventstring, []):
obj = eventHandler
while obj:
if not obj.isEnabled:
break
obj = obj.parent
else:
return True
return False
def MessageBox(message, caption=eg.APP_NAME, style=wx.OK, parent=None):
if parent is None:
style |= wx.STAY_ON_TOP
dialog = eg.MessageDialog(parent, message, caption, style)
result = dialog.ShowModal()
dialog.Destroy()
return result
def Notify(notification, value=None):
if notification in eg.notificationHandlers:
for listener in eg.notificationHandlers[notification].listeners:
listener(value)
# pylint: disable-msg=W0613
def RegisterPlugin(
name = None,
description = None,
kind = "other",
author = "[unknown author]",
version = "[unknown version]",
icon = None,
canMultiLoad = False,
createMacrosOnAdd = False,
url = None,
help = None,
guid = None,
**kwargs
):
"""
Registers information about a plugin to EventGhost.
:param name: should be a short descriptive string with the name of the
plugin.
:param description: a short description of the plugin.
:param kind: gives a hint about the category the plugin belongs to. It
should be a string with a value out of ``"remote"`` (for remote
receiver plugins), ``"program"`` (for program control plugins),
``"external"`` (for plugins that control external hardware) or
``"other"`` (if none of the other categories match).
:param author: can be set to the name or a list of names of the
developer(s) of the plugin.
:param version: can be set to a version string.
:param icon: can be a base64 encoded image for the plugin. If
``icon == None``, an "icon.png" will be used if it exists
in the plugin folder.
:param canMultiLoad: set this to ``True``, if a configuration can have
more than one instance of this plugin.
:param createMacrosOnAdd: if set to ``True``, when adding the plugin,
EventGhost will ask the user, if he/she wants to add a folder with all
actions of this plugin to his/her configuration.
:param url: displays a clickable link in the plugin info dialog.
:param help: a longer description and/or additional information for the
plugin. Will be added to
'description'.
:param guid: will help EG to identify your plugin, so there are no name
clashes with other plugins that accidentally might have the same
name and will later ease the update of plugins.
:param \*\*kwargs: just to consume unknown parameters, to make the call
backward compatible.
"""
pass
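# Hypothetical example of what a plugin's __init__.py would pass to the call
# documented above (every name and the GUID below are made up for illustration):
# eg.RegisterPlugin(
#     name="My Remote",
#     author="Jane Doe",
#     version="1.0",
#     kind="remote",
#     guid="{00000000-0000-0000-0000-000000000000}",
#     description="Receives events from a hypothetical remote control.",
# )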
# pylint: enable-msg=W0613
def RestartAsyncore():
"""
Informs the asyncore loop of a new socket to handle.
"""
oldDispatcher = eg.dummyAsyncoreDispatcher
dispatcher = asyncore.dispatcher()
dispatcher.create_socket(socket.AF_INET, socket.SOCK_STREAM)
eg.dummyAsyncoreDispatcher = dispatcher
if oldDispatcher:
oldDispatcher.close()
if oldDispatcher is None:
# create a global asyncore loop thread
threading.Thread(target=asyncore.loop, name="AsyncoreThread").start()
def RunProgram():
eg.stopExecutionFlag = False
del eg.programReturnStack[:]
while eg.programCounter is not None:
programCounter = eg.programCounter
item, idx = programCounter
item.Execute()
if eg.programCounter == programCounter:
# program counter has not changed. Ask the parent for the next
# item.
if isinstance(item.parent, eg.MacroItem):
eg.programCounter = item.parent.GetNextChild(idx)
else:
eg.programCounter = None
while eg.programCounter is None and eg.programReturnStack:
# we have no next item in this level. So look in the return
# stack if any return has to be executed
eg.indent -= 2
item, idx = eg.programReturnStack.pop()
eg.programCounter = item.parent.GetNextChild(idx)
eg.indent = 0
def StopMacro(ignoreReturn=False):
"""
Instructs EventGhost to stop executing the current macro after the
current action (thus the PythonScript or PythonCommand) has finished.
"""
eg.programCounter = None
if ignoreReturn:
del eg.programReturnStack[:]
def Unbind(notification, listener):
eg.notificationHandlers[notification].listeners.remove(listener)
def Wait(secs, raiseException=True):
while secs > 0.0:
if eg.stopExecutionFlag:
if raiseException:
raise eg.StopException("Execution interrupted by the user.")
else:
return False
if secs > 0.1:
time.sleep(0.1)
else:
time.sleep(secs)
secs -= 0.1
return True
# now assign all the functions above to `eg`
eg.Bind = Bind
eg.CallWait = CallWait
eg.DummyFunc = DummyFunc
eg.Exception = Exception
eg.Exit = Exit
eg.HasActiveHandler = HasActiveHandler
eg.HiddenAction = HiddenAction
eg.MessageBox = MessageBox
eg.Notify = Notify
eg.RegisterPlugin = RegisterPlugin
eg.RestartAsyncore = RestartAsyncore
eg.RunProgram = RunProgram
eg.StopException = StopException
eg.StopMacro = StopMacro
eg.Unbind = Unbind
eg.Wait = Wait
eg.messageReceiver = eg.MainMessageReceiver()
eg.app = eg.App()
# we can't import the Icons module earlier, because wx.App must exist
import Icons # NOQA
eg.Icons = Icons
eg.log = eg.Log()
eg.Print = eg.log.Print
eg.PrintError = eg.log.PrintError
eg.PrintNotice = eg.log.PrintNotice
eg.PrintTraceback = eg.log.PrintTraceback
eg.PrintDebugNotice = eg.log.PrintDebugNotice
eg.PrintStack = eg.log.PrintStack
def TracebackHook(tType, tValue, traceback):
eg.log.PrintTraceback(excInfo=(tType, tValue, traceback))
sys.excepthook = TracebackHook
eg.colour = eg.Colour()
eg.config = eg.Config()
eg.debugLevel = int(eg.config.logDebug)
if eg.startupArguments.isMain and not eg.startupArguments.translate:
eg.text = eg.Text(eg.config.language)
else:
eg.text = eg.Text('en_EN')
eg.actionThread = eg.ActionThread()
eg.eventThread = eg.EventThread()
eg.pluginManager = eg.PluginManager()
eg.scheduler = eg.Scheduler()
eg.TriggerEvent = eg.eventThread.TriggerEvent
eg.TriggerEnduringEvent = eg.eventThread.TriggerEnduringEvent
from eg.WinApi.SendKeys import SendKeysParser # NOQA
eg.SendKeys = SendKeysParser()
setattr(eg, "PluginClass", eg.PluginBase)
setattr(eg, "ActionClass", eg.ActionBase)
eg.taskBarIcon = eg.TaskBarIcon(
eg.startupArguments.isMain and
eg.config.showTrayIcon and
not eg.startupArguments.translate and
not eg.startupArguments.install and
not eg.startupArguments.pluginFile
)
eg.SetProcessingState = eg.taskBarIcon.SetProcessingState
eg.Init = Init
eg.Init.Init()
|
gpl-2.0
| 5,448,933,114,497,041,000
| 29.939698
| 78
| 0.687835
| false
| 3.687931
| true
| false
| false
|
milo-minderbinder/jira
|
jira/client.py
|
1
|
110052
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
"""
This module implements a friendly (well, friendlier) interface between the raw JSON
responses from JIRA and the Resource/dict abstractions provided by this library. Users
will construct a JIRA object as described below. Full API documentation can be found
at: https://jira-python.readthedocs.org/en/latest/
"""
from functools import wraps
import imghdr
import mimetypes
import copy
import os
import re
import string
import tempfile
import logging
import json
import warnings
import pprint
import sys
import datetime
import calendar
import hashlib
from six.moves.urllib.parse import urlparse, urlencode
from requests.utils import get_netrc_auth
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from six import string_types, integer_types
# six.moves does not play well with pyinstaller, see https://github.com/pycontribs/jira/issues/38
# from six.moves import html_parser
if sys.version_info < (3, 0, 0):
import HTMLParser as html_parser
else:
import html.parser as html_parser
import requests
try:
from requests_toolbelt import MultipartEncoder
except:
pass
try:
from requests_jwt import JWTAuth
except ImportError:
pass
# JIRA specific resources
from .resources import Resource, Issue, Comment, Project, Attachment, Component, Dashboard, Filter, Votes, Watchers, \
Worklog, IssueLink, IssueLinkType, IssueType, Priority, Version, Role, Resolution, SecurityLevel, Status, User, \
CustomFieldOption, RemoteLink
# GreenHopper specific resources
from .resources import Board, Sprint
from .resilientsession import ResilientSession
from .version import __version__
from .utils import threaded_requests, json_loads, CaseInsensitiveDict
from .exceptions import JIRAError
try:
from random import SystemRandom
random = SystemRandom()
except ImportError:
import random
# warnings.simplefilter('default')
# encoding = sys.getdefaultencoding()
# if encoding != 'UTF8':
# warnings.warn("Python default encoding is '%s' instead of 'UTF8' which means that there is a big chance of having problems. Possible workaround http://stackoverflow.com/a/17628350/99834" % encoding)
def translate_resource_args(func):
"""
Decorator that converts Issue and Project resources to their keys when used as arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper
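# Illustrative effect of the decorator above: a method written as
#     @translate_resource_args
#     def some_method(self, issue): ...
# (a made-up name) can be called with either an Issue resource or its key string;
# positional Issue/Project arguments are replaced by their .key before the call.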
class ResultList(list):
def __init__(self, iterable=None, _total=None):
if iterable is not None:
list.__init__(self, iterable)
else:
list.__init__(self)
self.total = _total if _total is not None else len(self)
class QshGenerator:
def __init__(self, context_path):
self.context_path = context_path
def __call__(self, req):
parse_result = urlparse(req.url)
path = parse_result.path[len(self.context_path):] if len(self.context_path) > 1 else parse_result.path
query = '&'.join(sorted(parse_result.query.split("&")))
qsh = '%(method)s&%(path)s&%(query)s' % {'method': req.method.upper(), 'path': path, 'query': query}
return hashlib.sha256(qsh).hexdigest()
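# Worked example of the canonical string hashed above (illustrative): for a GET on
# https://example.com/jira/rest/api/2/search?jql=project%3DABC&maxResults=5 with a
# context_path of '/jira', qsh is the sha256 hex digest of
# 'GET&/rest/api/2/search&jql=project%3DABC&maxResults=5'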
class JIRA(object):
"""
User interface to JIRA.
Clients interact with JIRA by constructing an instance of this object and calling its methods. For addressable
resources in JIRA -- those with "self" links -- an appropriate subclass of :py:class:`Resource` will be returned
with customized ``update()`` and ``delete()`` methods, along with attribute access to fields. This means that calls
of the form ``issue.fields.summary`` will be resolved into the proper lookups to return the JSON value at that
mapping. Methods that do not return resources will return a dict constructed from the JSON response or a scalar
value; see each method's documentation for details on what that method returns.
"""
DEFAULT_OPTIONS = {
"server": "http://localhost:2990/jira",
"context_path": "/",
"rest_path": "api",
"rest_api_version": "2",
"verify": True,
"resilient": True,
"async": False,
"client_cert": None,
"check_update": True,
"headers": {
'X-Atlassian-Token': 'no-check',
'Cache-Control': 'no-cache',
# 'Accept': 'application/json;charset=UTF-8', # default for REST
'Content-Type': 'application/json', # ;charset=UTF-8',
# 'Accept': 'application/json', # default for REST
#'Pragma': 'no-cache',
#'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT'
}
}
checked_version = False
JIRA_BASE_URL = '{server}/rest/api/{rest_api_version}/{path}'
AGILE_BASE_URL = '{server}/rest/greenhopper/1.0/{path}'
def __init__(self, server=None, options=None, basic_auth=None, oauth=None, jwt=None,
validate=False, get_server_info=True, async=False, logging=True, max_retries=3):
"""
Construct a JIRA client instance.
Without any arguments, this client will connect anonymously to the JIRA instance
started by the Atlassian Plugin SDK from one of the 'atlas-run', ``atlas-debug``,
or ``atlas-run-standalone`` commands. By default, this instance runs at
``http://localhost:2990/jira``. The ``options`` argument can be used to set the JIRA instance to use.
Authentication is handled with the ``basic_auth`` argument. If authentication is supplied (and is
accepted by JIRA), the client will remember it for subsequent requests.
For quick command line access to a server, see the ``jirashell`` script included with this distribution.
The easiest way to instantiate is using j = JIRA("https://jira.atlasian.com")
:param options: Specify the server and properties this client will use. Use a dict with any
of the following properties:
* server -- the server address and context path to use. Defaults to ``http://localhost:2990/jira``.
* rest_path -- the root REST path to use. Defaults to ``api``, where the JIRA REST resources live.
* rest_api_version -- the version of the REST resources under rest_path to use. Defaults to ``2``.
* verify -- Verify SSL certs. Defaults to ``True``.
* client_cert -- a tuple of (cert,key) for the requests library for client side SSL
:param basic_auth: A tuple of username and password to use when establishing a session via HTTP BASIC
authentication.
:param oauth: A dict of properties for OAuth authentication. The following properties are required:
* access_token -- OAuth access token for the user
* access_token_secret -- OAuth access token secret to sign with the key
* consumer_key -- key of the OAuth application link defined in JIRA
* key_cert -- private key file to sign requests with (should be the pair of the public key supplied to
JIRA in the OAuth application link)
:param jwt: A dict of properties for JWT authentication supported by Atlassian Connect. The following
properties are required:
* secret -- shared secret as delivered during 'installed' lifecycle event
(see https://developer.atlassian.com/static/connect/docs/latest/modules/lifecycle.html for details)
* payload -- dict of fields to be inserted in the JWT payload, e.g. 'iss'
Example jwt structure: ``{'secret': SHARED_SECRET, 'payload': {'iss': PLUGIN_KEY}}``
:param validate: If true it will validate your credentials first. Remember that if you are accessing JIRA
as anonymous it will fail to instantiate.
:param get_server_info: If true it will fetch server version info first to determine if some API calls
are available.
:param async: To enable async requests for those actions where we implemented it, like issue update() or delete().
Obviously this means that you cannot rely on the return code when this is enabled.
"""
if options is None:
options = {}
if server and hasattr(server, 'keys'):
warnings.warn(
"Old API usage, use JIRA(url) or JIRA(options={'server': url}, when using dictionary always use named parameters.",
DeprecationWarning)
options = server
server = None
if server:
options['server'] = server
if async:
options['async'] = async
self.logging = logging
self._options = copy.copy(JIRA.DEFAULT_OPTIONS)
self._options.update(options)
self._rank = None
# Rip off trailing slash since all urls depend on that
if self._options['server'].endswith('/'):
self._options['server'] = self._options['server'][:-1]
context_path = urlparse(self._options['server']).path
if len(context_path) > 0:
self._options['context_path'] = context_path
self._try_magic()
if oauth:
self._create_oauth_session(oauth)
elif basic_auth:
self._create_http_basic_session(*basic_auth)
self._session.headers.update(self._options['headers'])
elif jwt:
self._create_jwt_session(jwt)
else:
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.headers.update(self._options['headers'])
self._session.max_retries = max_retries
if validate:
# This will raise an Exception if you are not allowed to login.
# It's better to fail faster than later.
self.session()
if get_server_info:
# We need version in order to know what API calls are available or not
si = self.server_info()
try:
self._version = tuple(si['versionNumbers'])
except Exception as e:
globals()['logging'].error("invalid server_info: %s", si)
raise e
else:
self._version = (0, 0, 0)
if self._options['check_update'] and not JIRA.checked_version:
self._check_update_()
JIRA.checked_version = True
# TODO: check if this works with non-admin accounts
self._fields = {}
for f in self.fields():
if 'clauseNames' in f:
for name in f['clauseNames']:
self._fields[name] = f['id']
def _check_update_(self):
# check if the current version of the library is outdated
try:
data = requests.get("http://pypi.python.org/pypi/jira/json", timeout=2.001).json()
released_version = data['info']['version']
if released_version > __version__:
warnings.warn("You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % (
__version__, released_version))
except requests.RequestException:
pass
except Exception as e:
logging.warning(e)
def __del__(self):
session = getattr(self, "_session", None)
if session is not None:
if sys.version_info < (3, 4, 0): # workaround for https://github.com/kennethreitz/requests/issues/2303
session.close()
def _check_for_html_error(self, content):
# TODO: Make it return errors when content is a webpage with errors
# JIRA has the bad habit of returning errors in pages with 200 and
# embedding the error in a huge webpage.
if '<!-- SecurityTokenMissing -->' in content:
logging.warning("Got SecurityTokenMissing")
raise JIRAError("SecurityTokenMissing: %s" % content)
return False
return True
# Information about this client
def client_info(self):
"""Get the server this client is connected to."""
return self._options['server']
# Universal resource loading
def find(self, resource_format, ids=None):
"""
Get a Resource object for any addressable resource on the server.
This method is a universal resource locator for any RESTful resource in JIRA. The
argument ``resource_format`` is a string of the form ``resource``, ``resource/{0}``,
``resource/{0}/sub``, ``resource/{0}/sub/{1}``, etc. The format placeholders will be
populated from the ``ids`` argument if present. The existing authentication session
will be used.
The return value is an untyped Resource object, which will not support specialized
:py:meth:`.Resource.update` or :py:meth:`.Resource.delete` behavior. Moreover, it will
not know to return an issue Resource if the client uses the resource issue path. For this
reason, it is intended to support resources that are not included in the standard
Atlassian REST API.
:param resource_format: the subpath to the resource string
:param ids: values to substitute in the ``resource_format`` string
:type ids: tuple or None
"""
resource = Resource(resource_format, self._options, self._session)
resource.find(ids)
return resource
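# Illustrative call (the resource path here is only an example):
# link = jira.find('issue/{0}/remotelink/{1}', ids=('ABC-123', '10000'))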
def async_do(self, size=10):
"""
This will execute all async jobs and wait for them to finish. By default it will run on 10 threads.
size: number of threads to run on.
:return:
"""
if hasattr(self._session, '_async_jobs'):
logging.info("Executing async %s jobs found in queue by using %s threads..." % (
len(self._session._async_jobs), size))
threaded_requests.map(self._session._async_jobs, size=size)
# Application properties
# non-resource
def application_properties(self, key=None):
"""
Return the mutable server application properties.
:param key: the single property to return a value for
"""
params = {}
if key is not None:
params['key'] = key
return self._get_json('application-properties', params=params)
def set_application_property(self, key, value):
"""
Set the application property.
:param key: key of the property to set
:param value: value to assign to the property
"""
url = self._options['server'] + \
'/rest/api/2/application-properties/' + key
payload = {
'id': key,
'value': value
}
r = self._session.put(
url, data=json.dumps(payload))
def applicationlinks(self, cached=True):
"""
List of application links
:return: json
"""
# if cached, return the last result
if cached and hasattr(self, '_applicationlinks'):
return self._applicationlinks
# url = self._options['server'] + '/rest/applinks/latest/applicationlink'
url = self._options['server'] + \
'/rest/applinks/latest/listApplicationlinks'
r = self._session.get(url)
o = json_loads(r)
if 'list' in o:
self._applicationlinks = o['list']
else:
self._applicationlinks = []
return self._applicationlinks
# Attachments
def attachment(self, id):
"""Get an attachment Resource from the server for the specified ID."""
return self._find_for_resource(Attachment, id)
# non-resource
def attachment_meta(self):
"""Get the attachment metadata."""
return self._get_json('attachment/meta')
@translate_resource_args
def add_attachment(self, issue, attachment, filename=None):
"""
Attach an attachment to an issue and returns a Resource for it.
The client will *not* attempt to open or validate the attachment; it expects a file-like object to be ready
for its use. The user is still responsible for tidying up (e.g., closing the file, killing the socket, etc.)
:param issue: the issue to attach the attachment to
:param attachment: file-like object to attach to the issue, also works if it is a string with the filename.
:param filename: optional name for the attached file. If omitted, the file object's ``name`` attribute
is used. If you acquired the file-like object by any method other than ``open()``, make sure
that a name is specified in one way or the other.
:rtype: an Attachment Resource
"""
if isinstance(attachment, string_types):
attachment = open(attachment, "rb")
if hasattr(attachment, 'read') and hasattr(attachment, 'mode') and attachment.mode != 'rb':
logging.warning(
"%s was not opened in 'rb' mode, attaching file may fail." % attachment.name)
# TODO: Support attaching multiple files at once?
url = self._get_url('issue/' + str(issue) + '/attachments')
fname = filename
if not fname:
fname = os.path.basename(attachment.name)
if 'MultipartEncoder' not in globals():
method = 'old'
r = self._session.post(
url,
files={
'file': (fname, attachment, 'application/octet-stream')},
headers=CaseInsensitiveDict({'content-type': None, 'X-Atlassian-Token': 'nocheck'}))
else:
method = 'MultipartEncoder'
def file_stream():
return MultipartEncoder(
fields={
'file': (fname, attachment, 'application/octet-stream')}
)
m = file_stream()
r = self._session.post(
url, data=m, headers=CaseInsensitiveDict({'content-type': m.content_type, 'X-Atlassian-Token': 'nocheck'}), retry_data=file_stream)
attachment = Attachment(self._options, self._session, json_loads(r)[0])
if attachment.size == 0:
raise JIRAError("Added empty attachment via %s method?!: r: %s\nattachment: %s" % (method, r, attachment))
return attachment
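# Illustrative usage of add_attachment (issue key and file name are made up):
# with open('/tmp/build.log', 'rb') as fh:
#     jira.add_attachment(issue='ABC-123', attachment=fh, filename='build.log')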
# Components
def component(self, id):
"""
Get a component Resource from the server.
:param id: ID of the component to get
"""
return self._find_for_resource(Component, id)
@translate_resource_args
def create_component(self, name, project, description=None, leadUserName=None, assigneeType=None,
isAssigneeTypeValid=False):
"""
Create a component inside a project and return a Resource for it.
:param name: name of the component
:param project: key of the project to create the component in
:param description: a description of the component
:param leadUserName: the username of the user responsible for this component
:param assigneeType: see the ComponentBean.AssigneeType class for valid values
:param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable
"""
data = {
'name': name,
'project': project,
'isAssigneeTypeValid': isAssigneeTypeValid
}
if description is not None:
data['description'] = description
if leadUserName is not None:
data['leadUserName'] = leadUserName
if assigneeType is not None:
data['assigneeType'] = assigneeType
url = self._get_url('component')
r = self._session.post(
url, data=json.dumps(data))
component = Component(self._options, self._session, raw=json_loads(r))
return component
def component_count_related_issues(self, id):
"""
Get the count of related issues for a component.
:type id: integer
:param id: ID of the component to use
"""
return self._get_json('component/' + id + '/relatedIssueCounts')['issueCount']
# Custom field options
def custom_field_option(self, id):
"""
Get a custom field option Resource from the server.
:param id: ID of the custom field to use
"""
return self._find_for_resource(CustomFieldOption, id)
# Dashboards
def dashboards(self, filter=None, startAt=0, maxResults=20):
"""
Return a ResultList of Dashboard resources and a ``total`` count.
:param filter: either "favourite" or "my", the type of dashboards to return
:param startAt: index of the first dashboard to return
:param maxResults: maximum number of dashboards to return. The total number of
results is always available in the ``total`` attribute of the returned ResultList.
"""
params = {}
if filter is not None:
params['filter'] = filter
params['startAt'] = startAt
params['maxResults'] = maxResults
r_json = self._get_json('dashboard', params=params)
dashboards = [Dashboard(self._options, self._session, raw_dash_json)
for raw_dash_json in r_json['dashboards']]
return ResultList(dashboards, r_json['total'])
def dashboard(self, id):
"""
Get a dashboard Resource from the server.
:param id: ID of the dashboard to get.
"""
return self._find_for_resource(Dashboard, id)
# Fields
# non-resource
def fields(self):
"""Return a list of all issue fields."""
return self._get_json('field')
# Filters
def filter(self, id):
"""
Get a filter Resource from the server.
:param id: ID of the filter to get.
"""
return self._find_for_resource(Filter, id)
def favourite_filters(self):
"""Get a list of filter Resources which are the favourites of the currently authenticated user."""
r_json = self._get_json('filter/favourite')
filters = [Filter(self._options, self._session, raw_filter_json)
for raw_filter_json in r_json]
return filters
def create_filter(self, name=None, description=None,
jql=None, favourite=None):
"""
Create a new filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if jql is not None:
data['jql'] = jql
if favourite is not None:
data['favourite'] = favourite
url = self._get_url('filter')
r = self._session.post(
url, data=json.dumps(data))
raw_filter_json = json_loads(r)
return Filter(self._options, self._session, raw=raw_filter_json)
def update_filter(self, filter_id,
name=None, description=None,
jql=None, favourite=None):
"""
Updates a filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
filter = self.filter(filter_id)
data = {}
data['name'] = name or filter.name
data['description'] = description or filter.description
data['jql'] = jql or filter.jql
data['favourite'] = favourite or filter.favourite
url = self._get_url('filter/%s' % filter_id)
r = self._session.put(url, headers={'content-type': 'application/json'},
data=json.dumps(data))
raw_filter_json = json_loads(r)
return Filter(self._options, self._session, raw=raw_filter_json)
# Groups
# non-resource
def groups(self, query=None, exclude=None, maxResults=9999):
"""
Return a list of groups matching the specified criteria.
Keyword arguments:
query -- filter groups by name with this string
exclude -- filter out groups by name with this string
maxResults -- maximum number of results to return. Defaults to 9999.
"""
params = {}
groups = []
if query is not None:
params['query'] = query
if exclude is not None:
params['exclude'] = exclude
if maxResults is not None:
params['maxResults'] = maxResults
for group in self._get_json('groups/picker', params=params)['groups']:
groups.append(group['name'])
return sorted(groups)
def group_members(self, group):
"""
Return a dict of users with their information, keyed by username. Requires JIRA 6.0 or later, otherwise raises NotImplementedError.
"""
if self._version < (6, 0, 0):
raise NotImplementedError(
"Group members is not implemented in JIRA before version 6.0, upgrade the instance, if possible.")
params = {'groupname': group, 'expand': "users"}
r = self._get_json('group', params=params)
size = r['users']['size']
end_index = r['users']['end-index']
while end_index < size - 1:
params = {'groupname': group, 'expand': "users[%s:%s]" % (
end_index + 1, end_index + 50)}
r2 = self._get_json('group', params=params)
for user in r2['users']['items']:
r['users']['items'].append(user)
end_index = r2['users']['end-index']
size = r['users']['size']
result = {}
for user in r['users']['items']:
result[user['name']] = {'fullname': user['displayName'], 'email': user['emailAddress'],
'active': user['active']}
return result
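# Illustrative usage sketch (hypothetical `jira` client and group name):
#   members = jira.group_members('jira-developers')
#   for username, info in members.items():
#       print(username, info['email'], info['active'])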
def add_group(self, groupname):
'''
Create a new group in JIRA.
:param groupname: The name of the group you wish to create.
:return: Boolean - True if successful.
'''
url = self._options['server'] + '/rest/api/latest/group'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['name'] = groupname
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def remove_group(self, groupname):
'''
Deletes a group from the JIRA instance.
:param groupname: The group to be deleted from the JIRA instance.
:return: Boolean. Returns True on success.
'''
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
url = self._options['server'] + '/rest/api/latest/group'
x = {'groupname': groupname}
self._session.delete(url, params=x)
return True
# Issues
def issue(self, id, fields=None, expand=None):
"""
Get an issue Resource from the server.
:param id: ID or key of the issue to get
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# this allows us to pass Issue objects to issue()
if type(id) == Issue:
return id
issue = Issue(self._options, self._session)
params = {}
if fields is not None:
params['fields'] = fields
if expand is not None:
params['expand'] = expand
issue.find(id, params=params)
return issue
def create_issue(self, fields=None, prefetch=True, **fieldargs):
"""
Create a new issue and return an issue Resource for it.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored.
By default, the client will immediately reload the issue Resource created by this method in order to return
a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument.
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in a new issue. This information is available through the 'createmeta' method. Further examples are
available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
:param prefetch: whether to reload the created issue Resource so that all of its data is present in the value\
returned from this method
"""
data = {}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
p = data['fields']['project']
if isinstance(p, string_types) or isinstance(p, integer_types):
data['fields']['project'] = {'id': self.project(p).id}
url = self._get_url('issue')
r = self._session.post(url, data=json.dumps(data))
raw_issue_json = json_loads(r)
if 'key' not in raw_issue_json:
raise JIRAError(r.status_code, request=r)
if prefetch:
return self.issue(raw_issue_json['key'])
else:
return Issue(self._options, self._session, raw=raw_issue_json)
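# Illustrative usage sketch (hypothetical `jira` client and project key):
#   issue = jira.create_issue(project='PROJ',
#                             summary='Crash on startup',
#                             description='Steps to reproduce: ...',
#                             issuetype={'name': 'Bug'})
#   print(issue.key)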
def createmeta(self, projectKeys=None, projectIds=[], issuetypeIds=None, issuetypeNames=None, expand=None):
"""
Gets the metadata required to create issues, optionally filtered by projects and issue types.
:param projectKeys: keys of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectIds.
:param projectIds: IDs of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectKeys.
:param issuetypeIds: IDs of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeNames.
:param issuetypeNames: Names of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeIds.
:param expand: extra information to fetch inside each resource.
"""
params = {}
if projectKeys is not None:
params['projectKeys'] = projectKeys
if projectIds is not None:
if isinstance(projectIds, string_types):
projectIds = projectIds.split(',')
params['projectIds'] = projectIds
if issuetypeIds is not None:
params['issuetypeIds'] = issuetypeIds
if issuetypeNames is not None:
params['issuetypeNames'] = issuetypeNames
if expand is not None:
params['expand'] = expand
return self._get_json('issue/createmeta', params)
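# Illustrative usage sketch (hypothetical `jira` client and project key; the
# expand value mirrors the REST documentation for field-level metadata):
#   meta = jira.createmeta(projectKeys='PROJ', issuetypeNames='Bug',
#                          expand='projects.issuetypes.fields')
#   fields = meta['projects'][0]['issuetypes'][0]['fields']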
# non-resource
@translate_resource_args
def assign_issue(self, issue, assignee):
"""
Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic.
:param issue: the issue to assign
:param assignee: the user to assign the issue to
"""
url = self._options['server'] + \
'/rest/api/2/issue/' + str(issue) + '/assignee'
payload = {'name': assignee}
r = self._session.put(
url, data=json.dumps(payload))
@translate_resource_args
def comments(self, issue):
"""
Get a list of comment Resources.
:param issue: the issue to get comments from
"""
r_json = self._get_json('issue/' + str(issue) + '/comment')
comments = [Comment(self._options, self._session, raw_comment_json)
for raw_comment_json in r_json['comments']]
return comments
@translate_resource_args
def comment(self, issue, comment):
"""
Get a comment Resource from the server for the specified ID.
:param issue: ID or key of the issue to get the comment from
:param comment: ID of the comment to get
"""
return self._find_for_resource(Comment, (issue, comment))
@translate_resource_args
def add_comment(self, issue, body, visibility=None):
"""
Add a comment from the current authenticated user on the specified issue and return a Resource for it.
The issue identifier and comment body are required.
:param issue: ID or key of the issue to add the comment to
:param body: Text of the comment to add
:param visibility: a dict containing two entries: "type" and "value". "type" is 'role' (or 'group' if the JIRA\
server has configured comment visibility for groups) and 'value' is the name of the role (or group) to which\
viewing of this comment will be restricted.
"""
data = {
'body': body
}
if visibility is not None:
data['visibility'] = visibility
url = self._get_url('issue/' + str(issue) + '/comment')
r = self._session.post(
url, data=json.dumps(data))
comment = Comment(self._options, self._session, raw=json_loads(r))
return comment
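# Illustrative usage sketch (hypothetical `jira` client and issue key):
#   comment = jira.add_comment('PROJ-123', 'Investigated, looks like a regression.',
#                              visibility={'type': 'role', 'value': 'Administrators'})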
# non-resource
@translate_resource_args
def editmeta(self, issue):
"""
Get the edit metadata for an issue.
:param issue: the issue to get metadata for
"""
return self._get_json('issue/' + str(issue) + '/editmeta')
@translate_resource_args
def remote_links(self, issue):
"""
Get a list of remote link Resources from an issue.
:param issue: the issue to get remote links from
"""
r_json = self._get_json('issue/' + str(issue) + '/remotelink')
remote_links = [RemoteLink(
self._options, self._session, raw_remotelink_json) for raw_remotelink_json in r_json]
return remote_links
@translate_resource_args
def remote_link(self, issue, id):
"""
Get a remote link Resource from the server.
:param issue: the issue holding the remote link
:param id: ID of the remote link
"""
return self._find_for_resource(RemoteLink, (issue, id))
# removed the @translate_resource_args because it prevents us from finding
# information for building a proper link
def add_remote_link(self, issue, destination, globalId=None, application=None, relationship=None):
"""
Add a remote link from an issue to an external application and returns a remote link Resource
for it. ``object`` should be a dict containing at least ``url`` to the linked external URL and
``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` and the keyword arguments ``globalId``, ``application``
and ``relationship``, see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param destination: the link details to add (see the above link for details)
:param globalId: unique ID for the link (see the above link for details)
:param application: application information for the link (see the above link for details)
:param relationship: relationship description for the link (see the above link for details)
"""
warnings.warn(
"broken: see https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551",
Warning)
try:
applicationlinks = self.applicationlinks()
except JIRAError as e:
applicationlinks = []
# In many (if not most) configurations, non-admin users are
# not allowed to list applicationlinks; if we aren't allowed,
# let's let people try to add remote links anyway, we just
# won't be able to be quite as helpful.
warnings.warn(
"Unable to gather applicationlinks; you will not be able "
"to add links to remote issues: (%s) %s" % (
e.status_code,
e.text
),
Warning
)
data = {}
if type(destination) == Issue:
data['object'] = {
'title': str(destination),
'url': destination.permalink()
}
for x in applicationlinks:
if x['application']['displayUrl'] == destination._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
if 'globalId' not in data:
raise NotImplementedError(
"Unable to identify the issue to link to.")
else:
if globalId is not None:
data['globalId'] = globalId
if application is not None:
data['application'] = application
data['object'] = destination
if relationship is not None:
data['relationship'] = relationship
# check if the link comes from one of the configured application links
for x in applicationlinks:
if x['application']['displayUrl'] == self._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
remote_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return remote_link
def add_simple_link(self, issue, object):
"""
Add a simple remote link from an issue to web resource. This avoids the admin access problems from add_remote_link by just using a simple object and presuming all fields are correct and not requiring more complex ``application`` data.
``object`` should be a dict containing at least ``url`` to the linked external URL
and ``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` , see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param object: the dictionary used to create remotelink data
"""
# pass the object dict through as-is; this avoids the applicationlinks
# permission problems that add_remote_link can run into
data = object
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
simple_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return simple_link
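# Illustrative usage sketch (hypothetical `jira` client, issue key and URL):
#   jira.add_simple_link('PROJ-123', {'url': 'https://example.com/build/42',
#                                     'title': 'CI build 42'})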
# non-resource
@translate_resource_args
def transitions(self, issue, id=None, expand=None):
"""
Get a list of the transitions available on the specified issue to the current user.
:param issue: ID or key of the issue to get the transitions from
:param id: if present, get only the transition matching this ID
:param expand: extra information to fetch inside each transition
"""
params = {}
if id is not None:
params['transitionId'] = id
if expand is not None:
params['expand'] = expand
return self._get_json('issue/' + str(issue) + '/transitions', params=params)['transitions']
def find_transitionid_by_name(self, issue, transition_name):
"""
Get a transitionid available on the specified issue to the current user.
Look at https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074 for json reference
:param issue: ID or key of the issue to get the transitions from
:param transition_name: name of the transition we are looking for
"""
transitions_json = self.transitions(issue)
id = None
for transition in transitions_json:
if transition["name"].lower() == transition_name.lower():
id = transition["id"]
break
return id
@translate_resource_args
def transition_issue(self, issue, transition, fields=None, comment=None, **fieldargs):
# TODO: Support update verbs (same as issue.update())
"""
Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when performing the transition.
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except (ValueError, TypeError):
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId
}
}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
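# Illustrative usage sketch (hypothetical `jira` client, issue key and workflow
# names; extra keyword arguments become issue fields set during the transition):
#   jira.transition_issue('PROJ-123', 'Resolve Issue',
#                         comment='Fixed in release 1.2',
#                         resolution={'name': 'Fixed'})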
@translate_resource_args
def votes(self, issue):
"""
Get a votes Resource from the server.
:param issue: ID or key of the issue to get the votes for
"""
return self._find_for_resource(Votes, issue)
@translate_resource_args
def add_vote(self, issue):
"""
Register a vote for the current authenticated user on an issue.
:param issue: ID or key of the issue to vote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
r = self._session.post(url)
@translate_resource_args
def remove_vote(self, issue):
"""
Remove the current authenticated user's vote from an issue.
:param issue: ID or key of the issue to unvote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
self._session.delete(url)
@translate_resource_args
def watchers(self, issue):
"""
Get a watchers Resource from the server for an issue.
:param issue: ID or key of the issue to get the watchers for
"""
return self._find_for_resource(Watchers, issue)
@translate_resource_args
def add_watcher(self, issue, watcher):
"""
Add a user to an issue's watchers list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to add to the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
self._session.post(
url, data=json.dumps(watcher))
@translate_resource_args
def remove_watcher(self, issue, watcher):
"""
Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
params = {'username': watcher}
result = self._session.delete(url, params=params)
return result
@translate_resource_args
def worklogs(self, issue):
"""
Get a list of worklog Resources from the server for an issue.
:param issue: ID or key of the issue to get worklogs from
"""
r_json = self._get_json('issue/' + str(issue) + '/worklog')
worklogs = [Worklog(self._options, self._session, raw_worklog_json)
for raw_worklog_json in r_json['worklogs']]
return worklogs
@translate_resource_args
def worklog(self, issue, id):
"""
Get a specific worklog Resource from the server.
:param issue: ID or key of the issue to get the worklog from
:param id: ID of the worklog to get
"""
return self._find_for_resource(Worklog, (issue, id))
@translate_resource_args
def add_worklog(self, issue, timeSpent=None, timeSpentSeconds=None, adjustEstimate=None,
newEstimate=None, reduceBy=None, comment=None, started=None, user=None):
"""
Add a new worklog entry on an issue and return a Resource for it.
:param issue: the issue to add the worklog to
:param timeSpent: a worklog entry with this amount of time spent, e.g. "2d"
:param timeSpentSeconds: a worklog entry with this amount of time spent, in seconds
:param adjustEstimate: (optional) allows the user to provide specific instructions to update the remaining\
time estimate of the issue. The value can either be ``new``, ``leave``, ``manual`` or ``auto`` (default).
:param newEstimate: the new value for the remaining estimate field. e.g. "2d"
:param reduceBy: the amount to reduce the remaining estimate by e.g. "2d"
:param started: Moment when the work is logged, if not specified will default to now
:param comment: optional worklog comment
:param user: optional username to record as the worklog author (JIRA may ignore this field)
"""
params = {}
if adjustEstimate is not None:
params['adjustEstimate'] = adjustEstimate
if newEstimate is not None:
params['newEstimate'] = newEstimate
if reduceBy is not None:
params['reduceBy'] = reduceBy
data = {}
if timeSpent is not None:
data['timeSpent'] = timeSpent
if timeSpentSeconds is not None:
data['timeSpentSeconds'] = timeSpentSeconds
if comment is not None:
data['comment'] = comment
elif user:
# JIRA may ignore the author field, so record the user in the comment as a fallback
data['comment'] = user
if started is not None:
# based on REST Browser it needs: "2014-06-03T08:21:01.273+0000"
data['started'] = started.strftime("%Y-%m-%dT%H:%M:%S.000%z")
if user is not None:
data['author'] = {"name": user,
'self': self.JIRA_BASE_URL + '/rest/api/2/user?username=' + user,
'displayName': user,
'active': False
}
data['updateAuthor'] = data['author']
# TODO: report bug to Atlassian: author and updateAuthor parameters are
# ignored.
url = self._get_url('issue/{0}/worklog'.format(issue))
r = self._session.post(url, params=params, data=json.dumps(data))
return Worklog(self._options, self._session, json_loads(r))
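# Illustrative usage sketch (hypothetical `jira` client and issue key):
#   jira.add_worklog('PROJ-123', timeSpent='2h',
#                    comment='Debugged the failing import',
#                    adjustEstimate='auto')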
# Issue links
@translate_resource_args
def create_issue_link(self, type, inwardIssue, outwardIssue, comment=None):
"""
Create a link between two issues.
:param type: the type of link to create
:param inwardIssue: the issue to link from
:param outwardIssue: the issue to link to
:param comment: a comment to add to the issues with the link. Should be a dict containing ``body``\
and ``visibility`` fields: ``body`` being the text of the comment and ``visibility`` being a dict containing\
two entries: ``type`` and ``value``. ``type`` is ``role`` (or ``group`` if the JIRA server has configured\
comment visibility for groups) and ``value`` is the name of the role (or group) to which viewing of this\
comment will be restricted.
"""
# let's see if we have the right issue link 'type' and fix it if needed
if not hasattr(self, '_cached_issue_link_types'):
self._cached_issue_link_types = self.issue_link_types()
if type not in self._cached_issue_link_types:
for lt in self._cached_issue_link_types:
if lt.outward == type:
# the caller passed the outward description, so use the link type's name
type = lt.name
break
elif lt.inward == type:
# the caller passed the inward description, so use the name and swap the issues
type = lt.name
inwardIssue, outwardIssue = outwardIssue, inwardIssue
break
data = {
'type': {
'name': type
},
'inwardIssue': {
'key': inwardIssue
},
'outwardIssue': {
'key': outwardIssue
},
'comment': comment
}
url = self._get_url('issueLink')
r = self._session.post(
url, data=json.dumps(data))
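# Illustrative usage sketch (hypothetical `jira` client, issue keys and link
# type name; the type may also be given as its inward/outward description):
#   jira.create_issue_link('Duplicate', 'PROJ-123', 'PROJ-124')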
def issue_link(self, id):
"""
Get an issue link Resource from the server.
:param id: ID of the issue link to get
"""
return self._find_for_resource(IssueLink, id)
# Issue link types
def issue_link_types(self):
"""Get a list of issue link type Resources from the server."""
r_json = self._get_json('issueLinkType')
link_types = [IssueLinkType(self._options, self._session, raw_link_json) for raw_link_json in
r_json['issueLinkTypes']]
return link_types
def issue_link_type(self, id):
"""
Get an issue link type Resource from the server.
:param id: ID of the issue link type to get
"""
return self._find_for_resource(IssueLinkType, id)
# Issue types
def issue_types(self):
"""Get a list of issue type Resources from the server."""
r_json = self._get_json('issuetype')
issue_types = [IssueType(
self._options, self._session, raw_type_json) for raw_type_json in r_json]
return issue_types
def issue_type(self, id):
"""
Get an issue type Resource from the server.
:param id: ID of the issue type to get
"""
return self._find_for_resource(IssueType, id)
# User permissions
# non-resource
def my_permissions(self, projectKey=None, projectId=None, issueKey=None, issueId=None):
"""
Get a dict of all available permissions on the server.
:param projectKey: limit returned permissions to the specified project
:param projectId: limit returned permissions to the specified project
:param issueKey: limit returned permissions to the specified issue
:param issueId: limit returned permissions to the specified issue
"""
params = {}
if projectKey is not None:
params['projectKey'] = projectKey
if projectId is not None:
params['projectId'] = projectId
if issueKey is not None:
params['issueKey'] = issueKey
if issueId is not None:
params['issueId'] = issueId
return self._get_json('mypermissions', params=params)
# Priorities
def priorities(self):
"""Get a list of priority Resources from the server."""
r_json = self._get_json('priority')
priorities = [Priority(
self._options, self._session, raw_priority_json) for raw_priority_json in r_json]
return priorities
def priority(self, id):
"""
Get a priority Resource from the server.
:param id: ID of the priority to get
"""
return self._find_for_resource(Priority, id)
# Projects
def projects(self):
"""Get a list of project Resources from the server visible to the current authenticated user."""
r_json = self._get_json('project')
projects = [Project(
self._options, self._session, raw_project_json) for raw_project_json in r_json]
return projects
def project(self, id):
"""
Get a project Resource from the server.
:param id: ID or key of the project to get
"""
return self._find_for_resource(Project, id)
# non-resource
@translate_resource_args
def project_avatars(self, project):
"""
Get a dict of all avatars for a project visible to the current authenticated user.
:param project: ID or key of the project to get avatars for
"""
return self._get_json('project/' + project + '/avatars')
@translate_resource_args
def create_temp_project_avatar(self, project, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a project avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on libmagic and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_project_avatar` to finish the avatar creation process. If\
you want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the 'auto_confirm'\
argument with a truthy value and :py:meth:`confirm_project_avatar` will be called for you before this method\
returns.
:param project: ID or key of the project to create the avatar in
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object holding the avatar
:param contentType: explicit specification for the avatar image's content-type
:param boolean auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_project_avatar` with the return value of this method.
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('project/' + project + '/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_project_avatar(project, cropping_properties)
else:
return cropping_properties
@translate_resource_args
def confirm_project_avatar(self, project, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_project_avatar`, use this method to confirm the avatar
for use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_project_avatar` should be used for this
argument.
:param project: ID or key of the project to confirm the avatar in
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_project_avatar`
"""
data = cropping_properties
url = self._get_url('project/' + project + '/avatar')
r = self._session.post(
url, data=json.dumps(data))
return json_loads(r)
@translate_resource_args
def set_project_avatar(self, project, avatar):
"""
Set a project's avatar.
:param project: ID or key of the project to set the avatar on
:param avatar: ID of the avatar to set
"""
self._set_avatar(
None, self._get_url('project/' + project + '/avatar'), avatar)
@translate_resource_args
def delete_project_avatar(self, project, avatar):
"""
Delete a project's avatar.
:param project: ID or key of the project to delete the avatar from
:param avatar: ID of the avatar to delete
"""
url = self._get_url('project/' + project + '/avatar/' + avatar)
r = self._session.delete(url)
@translate_resource_args
def project_components(self, project):
"""
Get a list of component Resources present on a project.
:param project: ID or key of the project to get components from
"""
r_json = self._get_json('project/' + project + '/components')
components = [Component(
self._options, self._session, raw_comp_json) for raw_comp_json in r_json]
return components
@translate_resource_args
def project_versions(self, project):
"""
Get a list of version Resources present on a project.
:param project: ID or key of the project to get versions from
"""
r_json = self._get_json('project/' + project + '/versions')
versions = [
Version(self._options, self._session, raw_ver_json) for raw_ver_json in r_json]
return versions
# non-resource
@translate_resource_args
def project_roles(self, project):
"""
Get a dict of role names to resource locations for a project.
:param project: ID or key of the project to get roles from
"""
return self._get_json('project/' + project + '/role')
@translate_resource_args
def project_role(self, project, id):
"""
Get a role Resource.
:param project: ID or key of the project to get the role from
:param id: ID of the role to get
"""
return self._find_for_resource(Role, (project, id))
# Resolutions
def resolutions(self):
"""Get a list of resolution Resources from the server."""
r_json = self._get_json('resolution')
resolutions = [Resolution(
self._options, self._session, raw_res_json) for raw_res_json in r_json]
return resolutions
def resolution(self, id):
"""
Get a resolution Resource from the server.
:param id: ID of the resolution to get
"""
return self._find_for_resource(Resolution, id)
# Search
def search_issues(self, jql_str, startAt=0, maxResults=50, validate_query=True, fields=None, expand=None,
json_result=None):
"""
Get a ResultList of issue Resources matching a JQL search string.
:param jql_str: the JQL search string to use
:param startAt: index of the first issue to return
:param maxResults: maximum number of issues to return. Total number of results
is available in the ``total`` attribute of the returned ResultList.
If maxResults evaluates as False, it will try to get all issues in batches of 50.
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
:param validate_query: whether the JQL query should be validated on the server
:param json_result: if truthy, return the raw JSON search response instead of a ResultList
"""
# TODO what to do about the expand, which isn't related to the issues?
infinite = False
maxi = 50
idx = 0
if fields is None:
fields = []
if isinstance(fields, ("".__class__, u"".__class__)):
fields = fields.split(",")
# translate JQL field names to their REST API names;
# most people know the JQL names, so this makes the API easier to use
untranslate = {}  # used to restore the friendly aliases when the results come back
if self._fields:
for i, field in enumerate(fields):
if field in self._fields:
untranslate[self._fields[field]] = fields[i]
fields[i] = self._fields[field]
# if maxResults evaluates to False, fetch all issues from the query in batches
if not maxResults:
maxResults = maxi
infinite = True
search_params = {
"jql": jql_str,
"startAt": startAt,
"maxResults": maxResults,
"validateQuery": validate_query,
"fields": fields,
"expand": expand
}
if json_result:
return self._get_json('search', params=search_params)
resource = self._get_json('search', params=search_params)
issues = [Issue(self._options, self._session, raw_issue_json)
for raw_issue_json in resource['issues']]
cnt = len(issues)
total = resource['total']
if infinite:
while cnt == maxi:
idx += maxi
search_params["startAt"] = idx
resource = self._get_json('search', params=search_params)
issue_batch = [Issue(self._options, self._session, raw_issue_json) for raw_issue_json in
resource['issues']]
issues.extend(issue_batch)
cnt = len(issue_batch)
if untranslate:
for i in issues:
for k, v in untranslate.items():
if k in i.raw['fields']:
i.raw['fields'][v] = i.raw['fields'][k]
return ResultList(issues, total)
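# Illustrative usage sketch (hypothetical `jira` client; the JQL string is an
# example only):
#   issues = jira.search_issues('assignee = currentUser() AND resolution = Unresolved',
#                               maxResults=20, fields='summary,status')
#   print(issues.total)
#   for issue in issues:
#       print(issue.key)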
# Security levels
def security_level(self, id):
"""
Get a security level Resource.
:param id: ID of the security level to get
"""
return self._find_for_resource(SecurityLevel, id)
# Server info
# non-resource
def server_info(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('serverInfo')
def myself(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('myself')
# Status
def statuses(self):
"""Get a list of status Resources from the server."""
r_json = self._get_json('status')
statuses = [Status(self._options, self._session, raw_stat_json)
for raw_stat_json in r_json]
return statuses
def status(self, id):
"""
Get a status Resource from the server.
:param id: ID of the status resource to get
"""
return self._find_for_resource(Status, id)
# Users
def user(self, id, expand=None):
"""
Get a user Resource from the server.
:param id: ID of the user to get
:param expand: extra information to fetch inside each resource
"""
user = User(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
user.find(id, params=params)
return user
def search_assignable_users_for_projects(self, username, projectKeys, startAt=0, maxResults=50):
"""
Get a list of user Resources that match the search string and can be assigned issues for projects.
:param username: a string to match usernames against
:param projectKeys: comma-separated list of project keys to check for issue assignment permissions
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'projectKeys': projectKeys,
'startAt': startAt,
'maxResults': maxResults
}
r_json = self._get_json(
'user/assignable/multiProjectSearch', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_assignable_users_for_issues(self, username, project=None, issueKey=None, expand=None, startAt=0,
maxResults=50):
"""
Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: a string to match usernames against
:param project: filter returned users by permission in this project (expected if a result will be used to \
create an issue)
:param issueKey: filter returned users by this issue (expected if a result will be used to edit this issue)
:param expand: extra information to fetch inside each resource
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'startAt': startAt,
'maxResults': maxResults,
}
if project is not None:
params['project'] = project
if issueKey is not None:
params['issueKey'] = issueKey
if expand is not None:
params['expand'] = expand
r_json = self._get_json('user/assignable/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# non-resource
def user_avatars(self, username):
"""
Get a dict of avatars for the specified user.
:param username: the username to get avatars for
"""
return self._get_json('user/avatars', params={'username': username})
def create_temp_user_avatar(self, user, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a user avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on ``libmagic`` and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_user_avatar` to finish the avatar creation process. If you
want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the ``auto_confirm``
argument with a truthy value and :py:meth:`confirm_user_avatar` will be called for you before this method
returns.
:param user: user to register the avatar for
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object containing the avatar
:param contentType: explicit specification for the avatar image's content-type
:param auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_user_avatar` with the return value of this method.
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'username': user,
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('user/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_user_avatar(user, cropping_properties)
else:
return cropping_properties
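# Illustrative usage sketch of the temporary-avatar flow (hypothetical `jira`
# client, username and file path; auto_confirm accepts JIRA's default cropping):
#   with open('avatar.png', 'rb') as img:
#       props = jira.create_temp_user_avatar('fred', 'avatar.png',
#                                            os.path.getsize('avatar.png'),
#                                            img, auto_confirm=True)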
def confirm_user_avatar(self, user, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_user_avatar`, use this method to confirm the avatar for
use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_user_avatar` should be used for this
argument.
:param user: the user to confirm the avatar for
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_user_avatar`
"""
data = cropping_properties
url = self._get_url('user/avatar')
r = self._session.post(url, params={'username': user},
data=json.dumps(data))
return json_loads(r)
def set_user_avatar(self, username, avatar):
"""
Set a user's avatar.
:param username: the user to set the avatar for
:param avatar: ID of the avatar to set
"""
self._set_avatar(
{'username': username}, self._get_url('user/avatar'), avatar)
def delete_user_avatar(self, username, avatar):
"""
Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
"""
params = {'username': username}
url = self._get_url('user/avatar/' + avatar)
r = self._session.delete(url, params=params)
def search_users(self, user, startAt=0, maxResults=50, includeActive=True, includeInactive=False):
"""
Get a list of user Resources that match the specified search string.
:param user: a string to match usernames, name or email against
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
'includeActive': includeActive,
'includeInactive': includeInactive
}
r_json = self._get_json('user/search', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_allowed_users_for_issue(self, user, issueKey=None, projectKey=None, startAt=0, maxResults=50):
"""
Get a list of user Resources that match a username string and have browse permission for the issue or
project.
:param user: a string to match usernames against
:param issueKey: find users with browse permission for this issue
:param projectKey: find users with browse permission for this project
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
}
if issueKey is not None:
params['issueKey'] = issueKey
if projectKey is not None:
params['projectKey'] = projectKey
r_json = self._get_json('user/viewissue/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# Versions
@translate_resource_args
def create_version(self, name, project, description=None, releaseDate=None, startDate=None, archived=False,
released=False):
"""
Create a version in a project and return a Resource for it.
:param name: name of the version to create
:param project: key of the project to create the version in
:param description: a description of the version
:param releaseDate: the release date assigned to the version
:param startDate: The start date for the version
"""
data = {
'name': name,
'project': project,
'archived': archived,
'released': released
}
if description is not None:
data['description'] = description
if releaseDate is not None:
data['releaseDate'] = releaseDate
if startDate is not None:
data['startDate'] = startDate
url = self._get_url('version')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
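# Illustrative usage sketch (hypothetical `jira` client and project key):
#   version = jira.create_version('1.2.0', 'PROJ',
#                                 description='First maintenance release',
#                                 releaseDate='2015-06-01')
#   print(version.id)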
def move_version(self, id, after=None, position=None):
"""
Move a version within a project's ordered version list and return a new version Resource for it. One,
but not both, of ``after`` and ``position`` must be specified.
:param id: ID of the version to move
:param after: the self attribute of a version to place the specified version after (that is, higher in the list)
:param position: the absolute position to move this version to: must be one of ``First``, ``Last``,\
``Earlier``, or ``Later``
"""
data = {}
if after is not None:
data['after'] = after
elif position is not None:
data['position'] = position
url = self._get_url('version/' + id + '/move')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
def version(self, id, expand=None):
"""
Get a version Resource.
:param id: ID of the version to get
:param expand: extra information to fetch inside each resource
"""
version = Version(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
version.find(id, params=params)
return version
def version_count_related_issues(self, id):
"""
Get a dict of the counts of issues fixed and affected by a version.
:param id: the version to count issues for
"""
r_json = self._get_json('version/' + id + '/relatedIssueCounts')
del r_json['self'] # this isn't really an addressable resource
return r_json
def version_count_unresolved_issues(self, id):
"""
Get the number of unresolved issues for a version.
:param id: ID of the version to count issues for
"""
return self._get_json('version/' + id + '/unresolvedIssueCount')['issuesUnresolvedCount']
# Session authentication
def session(self):
"""Get a dict of the current authenticated user's session information."""
url = '{server}/rest/auth/1/session'.format(**self._options)
if type(self._session.auth) is tuple:
authentication_data = {
'username': self._session.auth[0], 'password': self._session.auth[1]}
r = self._session.post(url, data=json.dumps(authentication_data))
else:
r = self._session.get(url)
user = User(self._options, self._session, json_loads(r))
return user
def kill_session(self):
"""Destroy the session of the current authenticated user."""
url = self._options['server'] + '/rest/auth/latest/session'
r = self._session.delete(url)
# Websudo
def kill_websudo(self):
"""Destroy the user's current WebSudo session."""
url = self._options['server'] + '/rest/auth/1/websudo'
r = self._session.delete(url)
# Utilities
def _create_http_basic_session(self, username, password):
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = (username, password)
self._session.cert = self._options['client_cert']
def _create_oauth_session(self, oauth):
verify = self._options['verify']
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import SIGNATURE_RSA
oauth = OAuth1(
oauth['consumer_key'],
rsa_key=oauth['key_cert'],
signature_method=SIGNATURE_RSA,
resource_owner_key=oauth['access_token'],
resource_owner_secret=oauth['access_token_secret']
)
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = oauth
@staticmethod
def _timestamp(dt=None):
t = datetime.datetime.utcnow()
if dt is not None:
t += dt
return calendar.timegm(t.timetuple())
def _create_jwt_session(self, jwt):
try:
jwt_auth = JWTAuth(jwt['secret'], alg='HS256')
except NameError as e:
globals()['logging'].error("JWT authentication requires requests_jwt")
raise e
jwt_auth.add_field("iat", lambda req: JIRA._timestamp())
jwt_auth.add_field("exp", lambda req: JIRA._timestamp(datetime.timedelta(minutes=3)))
jwt_auth.add_field("qsh", QshGenerator(self._options['context_path']))
for f in jwt['payload'].items():
jwt_auth.add_field(f[0], f[1])
self._session = ResilientSession()
self._session.verify = self._options['verify']
self._session.auth = jwt_auth
def _set_avatar(self, params, url, avatar):
data = {
'id': avatar
}
r = self._session.put(url, params=params, data=json.dumps(data))
def _get_url(self, path, base=JIRA_BASE_URL):
options = self._options
options.update({'path': path})
return base.format(**options)
def _get_json(self, path, params=None, base=JIRA_BASE_URL):
url = self._get_url(path, base)
r = self._session.get(url, params=params)
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json
def _find_for_resource(self, resource_cls, ids, expand=None):
resource = resource_cls(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
resource.find(id=ids, params=params)
return resource
def _try_magic(self):
try:
import magic
import weakref
except ImportError:
self._magic = None
else:
try:
_magic = magic.Magic(flags=magic.MAGIC_MIME_TYPE)
def cleanup(x):
_magic.close()
self._magic_weakref = weakref.ref(self, cleanup)
self._magic = _magic
except TypeError:
self._magic = None
except AttributeError:
self._magic = None
def _get_mime_type(self, buff):
if self._magic is not None:
return self._magic.id_buffer(buff)
else:
try:
return mimetypes.guess_type("f." + imghdr.what(0, buff))[0]
except (IOError, TypeError):
logging.warning("Couldn't detect content type of avatar image"
". Specify the 'contentType' parameter explicitly.")
return None
def email_user(self, user, body, title="JIRA Notification"):
"""
Send an email to a user through the Script Runner SendCustomEmail canned script (experimental).
:param user: username of the recipient
:param body: text used as the email body template
:param title: text used as the email subject
"""
url = self._options['server'] + \
'/secure/admin/groovy/CannedScriptRunner.jspa'
payload = {
'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
'cannedScriptArgs_FIELD_CONDITION': '',
'cannedScriptArgs_FIELD_EMAIL_TEMPLATE': body,
'cannedScriptArgs_FIELD_EMAIL_SUBJECT_TEMPLATE': title,
'cannedScriptArgs_FIELD_EMAIL_FORMAT': 'TEXT',
'cannedScriptArgs_FIELD_TO_ADDRESSES': self.user(user).emailAddress,
'cannedScriptArgs_FIELD_TO_USER_FIELDS': '',
'cannedScriptArgs_FIELD_INCLUDE_ATTACHMENTS': 'FIELD_INCLUDE_ATTACHMENTS_NONE',
'cannedScriptArgs_FIELD_FROM': '',
'cannedScriptArgs_FIELD_PREVIEW_ISSUE': '',
'id': '',
'Preview': 'Preview',
}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
open("/tmp/jira_email_user_%s.html" % user, "w").write(r.text)
def rename_user(self, old_user, new_user):
"""
Rename a JIRA user. On JIRA 6.0 and later this uses the REST API; older versions rely on the third party Script Runner plugin.
:param old_user: string with username login
:param new_user: string with username login
"""
if self._version >= (6, 0, 0):
url = self._options['server'] + '/rest/api/2/user'
payload = {
"name": new_user,
}
params = {
'username': old_user
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.put(url, params=params,
data=json.dumps(payload))
else:
# the old implementation needs the Script Runner plugin
merge = "true"
try:
self.user(new_user)
except Exception:
merge = "false"
url = self._options[
'server'] + '/secure/admin/groovy/CannedScriptRunner.jspa#result'
payload = {
"cannedScript": "com.onresolve.jira.groovy.canned.admin.RenameUser",
"cannedScriptArgs_FIELD_FROM_USER_ID": old_user,
"cannedScriptArgs_FIELD_TO_USER_ID": new_user,
"cannedScriptArgs_FIELD_MERGE": merge,
"id": "",
"RunCanned": "Run",
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 404:
logging.error(
"In order to be able to use rename_user() you need to install Script Runner plugin. See https://marketplace.atlassian.com/plugins/com.onresolve.jira.groovy.groovyrunner")
return False
if r.status_code != 200:
logging.error(r.status_code)
if re.compile("XSRF Security Token Missing").search(r.content):
logging.fatal(
"Reconfigure JIRA and disable XSRF in order to be able call this. See https://developer.atlassian.com/display/JIRADEV/Form+Token+Handling")
return False
open("/tmp/jira_rename_user_%s_to%s.html" %
(old_user, new_user), "w").write(r.content)
msg = r.status_code
m = re.search("<span class=\"errMsg\">(.*)<\/span>", r.content)
if m:
msg = m.group(1)
logging.error(msg)
return False
# <span class="errMsg">Target user ID must exist already for a merge</span>
p = re.compile("type=\"hidden\" name=\"cannedScriptArgs_Hidden_output\" value=\"(.*?)\"\/>",
re.MULTILINE | re.DOTALL)
m = p.search(r.content)
if m:
h = html_parser.HTMLParser()
msg = h.unescape(m.group(1))
logging.info(msg)
# let's check if the user still exists
try:
self.user(old_user)
except Exception:
logging.error("User %s no longer exists." % old_user)
return msg
logging.error(msg)
logging.error(
"User %s does still exists after rename, that's clearly a problem." % old_user)
return False
def delete_user(self, username):
url = self._options['server'] + \
'/rest/api/latest/user/?username=%s' % username
r = self._session.delete(url)
if 200 <= r.status_code <= 299:
return True
else:
logging.error(r.status_code)
return False
def reindex(self, force=False, background=True):
"""
Start JIRA re-indexing. Returns True if reindexing is in progress or not needed, or False otherwise.
If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it is needed.
:param force: reindex even if JIRA doesn't say it is needed, False by default.
:param background: reindex in the background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if not r.text.find("To perform the re-index now, please go to the") and force is False:
return True
if r.text.find('All issues are being re-indexed'):
logging.warning("JIRA re-indexing is already running.")
return True # still reindexing is considered still a success
if r.text.find('To perform the re-index now, please go to the') or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False
def backup(self, filename='backup.zip'):
"""
Trigger a JIRA export backup as zipped XML. A successful return does not mean that the backup process has finished.
"""
url = self._options['server'] + '/secure/admin/XmlBackup.jspa'
payload = {'filename': filename}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 200:
return True
else:
logging.warning(
'Got %s response from calling backup.' % r.status_code)
return r.status_code
def current_user(self):
if not hasattr(self, '_serverInfo') or 'username' not in self._serverInfo:
url = self._get_url('serverInfo')
r = self._session.get(url, headers=self._options['headers'])
r_json = json_loads(r)
if 'x-ausername' in r.headers:
r_json['username'] = r.headers['x-ausername']
else:
r_json['username'] = None
self._serverInfo = r_json
# del r_json['self'] # this isn't really an addressable resource
return self._serverInfo['username']
def delete_project(self, pid):
"""
Delete a project. The project can be given as an id, project key or project name. It will return False if it fails.
"""
found = False
try:
# a pid passed as an integer is assumed to already be the project id
if not str(int(pid)) == pid:
found = True
except Exception:
# otherwise resolve a project key or name to its id
r_json = self._get_json('project')
for project in r_json:
if project['key'] == pid or project['name'] == pid:
pid = project['id']
found = True
break
if not found:
logging.error("Unable to recognize project `%s`" % pid)
return False
uri = '/secure/admin/DeleteProject.jspa'
url = self._options['server'] + uri
payload = {'pid': pid, 'Delete': 'Delete', 'confirm': 'true'}
try:
r = self._gain_sudo_session(payload, uri)
if r.status_code != 200 or not self._check_for_html_error(r.text):
return False
except JIRAError:
raise JIRAError(0, "You must have global administrator rights to delete projects.")
r = self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
if r.status_code == 200:
return self._check_for_html_error(r.text)
else:
logging.warning(
'Got %s response from calling delete_project.' % r.status_code)
return r.status_code
def _gain_sudo_session(self, options, destination):
url = self._options['server'] + '/secure/admin/WebSudoAuthenticate.jspa'
if not self._session.auth:
self._session.auth = get_netrc_auth(url)
payload = {
'webSudoPassword': self._session.auth[1],
'webSudoDestination': destination,
'webSudoIsPost': 'true',
}
payload.update(options)
return self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
def create_project(self, key, name=None, assignee=None, type="Software"):
"""
Key is mandatory and has to match JIRA project key requirements, usually only 2-10 uppercase characters.
If name is not specified it will use the key value.
If assignee is not specified it will use current user.
The returned value evaluates to False if it fails, otherwise it is the raw JSON of the newly created project.
"""
if assignee is None:
assignee = self.current_user()
if name is None:
name = key
if key.upper() != key or not key.isalpha() or len(key) < 2 or len(key) > 10:
logging.error(
'key parameter is not all uppercase alphabetic of length between 2 and 10')
return False
url = self._options['server'] + \
'/rest/project-templates/1.0/templates'
r = self._session.get(url)
j = json_loads(r)
template_key = None
templates = []
for template in j['projectTemplates']:
templates.append(template['name'])
if template['name'] in ['JIRA Classic', 'JIRA Default Schemes']:
template_key = template['projectTemplateModuleCompleteKey']
break
if not template_key:
raise JIRAError(
"Unable to find a suitable project template to use. Found only: " + ', '.join(templates))
payload = {'name': name,
'key': key,
'keyEdited': 'false',
#'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',
#'permissionScheme': '',
'projectTemplateWebItemKey': template_key,
'projectTemplateModuleKey': template_key,
'lead': assignee,
#'assigneeType': '2',
}
if self._version[0] > 6:
# JIRA versions before 7 will throw an error if we specify type parameter
payload['type'] = type
headers = CaseInsensitiveDict(
{'Content-Type': 'application/x-www-form-urlencoded'})
r = self._session.post(url, data=payload, headers=headers)
if r.status_code == 200:
r_json = json_loads(r)
return r_json
f = tempfile.NamedTemporaryFile(
mode='w', suffix='.html', prefix='python-jira-error-create-project-', delete=False)
f.write(r.text)
if self.logging:
logging.error(
"Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % (
f.name, r.status_code))
return False
def add_user(self, username, email, directoryId=1, password=None,
fullname=None, notify=False, active=True):
'''
Creates a new JIRA user
:param username: the username of the new user
:type username: ``str``
:param email: email address of the new user
:type email: ``str``
:param directoryId: the directory ID the new user should be a part of
:type directoryId: ``int``
:param password: Optional, the password for the new user
:type password: ``str``
:param fullname: Optional, the full name of the new user
:type fullname: ``str``
:param notify: Whether or not to send a notification to the new user
:type notify: ``bool``
:param active: Whether or not to make the new user active upon creation
:type active: ``bool``
:return:
'''
if not fullname:
fullname = username
# TODO: default the directoryID to the first directory in jira instead
# of 1 which is the internal one.
url = self._options['server'] + '/rest/api/latest/user'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['displayName'] = fullname
x['emailAddress'] = email
x['name'] = username
if password:
x['password'] = password
if notify:
x['notification'] = 'True'
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def add_user_to_group(self, username, group):
'''
Adds a user to an existing group.
:param username: Username that will be added to specified group.
:param group: Group that the user will be added to.
:return: Boolean, True for success, false for failure.
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': group}
y = {'name': username}
payload = json.dumps(y)
self._session.post(url, params=x, data=payload)
return True
def remove_user_from_group(self, username, groupname):
'''
Removes a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
:return:
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True
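# Illustrative sketch combining the user/group helpers above (the username, email
# and group name are invented for the example):
#   jira.add_user('jdoe', 'jdoe@example.com', fullname='Jane Doe')
#   jira.add_user_to_group('jdoe', 'jira-developers')
#   jira.remove_user_from_group('jdoe', 'jira-developers')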
# Experimental
# Experimental support for iDalko Grid, expect API to change as it's using private APIs currently
# https://support.idalko.com/browse/IGRID-1017
def get_igrid(self, issueid, customfield, schemeid):
url = self._options['server'] + '/rest/idalko-igrid/1.0/datagrid/data'
if str(customfield).isdigit():
customfield = "customfield_%s" % customfield
params = {
#'_mode':'view',
'_issueId': issueid,
'_fieldId': customfield,
'_confSchemeId': schemeid,
#'validate':True,
#'_search':False,
#'rows':100,
#'page':1,
#'sidx':'DEFAULT',
#'sord':'asc',
}
r = self._session.get(
url, headers=self._options['headers'], params=params)
return json_loads(r)
# Jira Agile specific methods (GreenHopper)
"""
Define the functions that interact with GreenHopper.
"""
@translate_resource_args
def boards(self):
"""
Get a list of board GreenHopperResources.
"""
r_json = self._get_json(
'rapidviews/list', base=self.AGILE_BASE_URL)
boards = [Board(self._options, self._session, raw_boards_json)
for raw_boards_json in r_json['views']]
return boards
@translate_resource_args
def sprints(self, id, extended=False):
"""
Get a list of sprint GreenHopperResources.
:param id: the board to get sprints from
:param extended: fetch additional information like startDate, endDate, completeDate,
much slower because it requires an additional request for each sprint
:rtype: dict
>>> { "id": 893,
>>> "name": "iteration.5",
>>> "state": "FUTURE",
>>> "linkedPagesCount": 0,
>>> "startDate": "None",
>>> "endDate": "None",
>>> "completeDate": "None",
>>> "remoteLinks": []
>>> }
"""
r_json = self._get_json('sprintquery/%s?includeHistoricSprints=true&includeFutureSprints=true' % id,
base=self.AGILE_BASE_URL)
if extended:
sprints = []
for raw_sprints_json in r_json['sprints']:
r_json = self._get_json(
'sprint/%s/edit/model' % raw_sprints_json['id'], base=self.AGILE_BASE_URL)
sprints.append(
Sprint(self._options, self._session, r_json['sprint']))
else:
sprints = [Sprint(self._options, self._session, raw_sprints_json)
for raw_sprints_json in r_json['sprints']]
return sprints
def sprints_by_name(self, id, extended=False):
sprints = {}
for s in self.sprints(id, extended=extended):
if s.name not in sprints:
sprints[s.name] = s.raw
else:
raise (Exception(
"Fatal error, duplicate Sprint Name (%s) found on board %s." % (s.name, id)))
return sprints
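# Hypothetical walk over boards and their sprints using the methods above
# (attribute names follow the raw JSON shown in the sprints() docstring):
#   for board in jira.boards():
#       for sprint in jira.sprints(board.id):
#           print(board.name, sprint.name, sprint.state)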
def update_sprint(self, id, name=None, startDate=None, endDate=None, state=None):
payload = {}
if name:
payload['name'] = name
if startDate:
payload['startDate'] = startDate
if endDate:
payload['endDate'] = endDate
if state:
payload['state'] = state
url = self._get_url('sprint/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
return json_loads(r)
def completed_issues(self, board_id, sprint_id):
"""
Return the completed issues for ``board_id`` and ``sprint_id``.
:param board_id: the board to retrieve issues from
:param sprint_id: the sprint to retrieve issues from
"""
# TODO need a better way to provide all the info from the sprintreport
# incompletedIssues went to the backlog but were not completed
# issueKeysAddedDuringSprint used to mark some with a * ?
# puntedIssues are for scope change?
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['completedIssues']]
return issues
def completedIssuesEstimateSum(self, board_id, sprint_id):
"""
Return the total completed points this sprint.
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['completedIssuesEstimateSum']['value']
def incompleted_issues(self, board_id, sprint_id):
"""
Return the incomplete issues for ``board_id`` and ``sprint_id``.
"""
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['incompletedIssues']]
return issues
def sprint_info(self, board_id, sprint_id):
"""
Return the information about a sprint.
:param board_id: the board to retrieve issues from
:param sprint_id: the sprint to retrieve issues from
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['sprint']
# TODO: remove this as we do have Board.delete()
def delete_board(self, id):
"""
Deletes an agile board.
:param id:
:return:
"""
payload = {}
url = self._get_url(
'rapidview/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.delete(
url, data=json.dumps(payload))
def create_board(self, name, project_ids, preset="scrum"):
"""
Create a new board for the ``project_ids``.
:param name: name of the board
:param project_ids: the projects to create the board in
:param preset: what preset to use for this board
:type preset: 'kanban', 'scrum', 'diy'
"""
payload = {}
if isinstance(project_ids, string_types):
ids = []
for p in project_ids.split(','):
ids.append(self.project(p).id)
project_ids = ','.join(ids)
payload['name'] = name
if isinstance(project_ids, string_types):
project_ids = project_ids.split(',')
payload['projectIds'] = project_ids
payload['preset'] = preset
url = self._get_url(
'rapidview/create/presets', base=self.AGILE_BASE_URL)
r = self._session.post(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Board(self._options, self._session, raw=raw_issue_json)
def create_sprint(self, name, board_id, startDate=None, endDate=None):
"""
Create a new sprint for the ``board_id``.
:param name: name of the sprint
:param board_id: the board to add the sprint to
"""
url = self._get_url(
'sprint/%s' % board_id, base=self.AGILE_BASE_URL)
r = self._session.post(
url)
raw_issue_json = json_loads(r)
""" now r contains something like:
{
"id": 742,
"name": "Sprint 89",
"state": "FUTURE",
"linkedPagesCount": 0,
"startDate": "None",
"endDate": "None",
"completeDate": "None",
"remoteLinks": []
}"""
payload = {'name': name}
if startDate:
payload["startDate"] = startDate
if endDate:
payload["endDate"] = endDate
url = self._get_url(
'sprint/%s' % raw_issue_json['id'], base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Sprint(self._options, self._session, raw=raw_issue_json)
# TODO: broken, this API does not exist anymore and we need to use
# issue.update() to perform this operation
# Workaround based on https://answers.atlassian.com/questions/277651/jira-agile-rest-api-example
def add_issues_to_sprint(self, sprint_id, issue_keys):
"""
Add the issues in ``issue_keys`` to the ``sprint_id``. The sprint must
be started but not completed.
If a sprint was completed, you also have to edit the history of the
issue so that it was added to the sprint before it was completed,
preferably before it started. A completed sprint's issues also all have
a resolution set before the completion date.
If a sprint was not started, you have to edit the marker and copy the
rank of each issue too.
:param sprint_id: the sprint to add issues to
:param issue_keys: the issues to add to the sprint
"""
# Get the customFieldId for "Sprint"
sprint_field_name = "Sprint"
sprint_field_id = [f['schema']['customId'] for f in self.fields()
if f['name'] == sprint_field_name][0]
data = {}
data['idOrKeys'] = issue_keys
data['customFieldId'] = sprint_field_id
data['sprintId'] = sprint_id
data['addToBacklog'] = False
url = self._get_url('sprint/rank', base=self.AGILE_BASE_URL)
r = self._session.put(url, data=json.dumps(data))
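# Sketch of the workaround above (the sprint id and issue keys are placeholders):
#   jira.add_issues_to_sprint(123, ['DEMO-1', 'DEMO-2'])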
def add_issues_to_epic(self, epic_id, issue_keys, ignore_epics=True):
"""
Add the issues in ``issue_keys`` to the ``epic_id``.
:param epic_id: the epic to add issues to
:param issue_keys: the issues to add to the epic
:param ignore_epics: ignore any issues listed in ``issue_keys`` that are epics
"""
data = {}
data['issueKeys'] = issue_keys
data['ignoreEpics'] = ignore_epics
url = self._get_url('epics/%s/add' %
epic_id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
def rank(self, issue, next_issue):
"""
Rank an issue before another using the default Ranking field, the one named 'Rank'.
:param issue: issue key of the issue to be ranked before the second one.
:param next_issue: issue key of the second issue.
"""
# {"issueKeys":["ANERDS-102"],"rankBeforeKey":"ANERDS-94","rankAfterKey":"ANERDS-7","customFieldId":11431}
if not self._rank:
for field in self.fields():
if field['name'] == 'Rank':
if field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-lexo-rank":
self._rank = field['schema']['customId']
break
elif field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-global-rank":
# Obsolete since JIRA v6.3.13.1
self._rank = field['schema']['customId']
data = {
"issueKeys": [issue], "rankBeforeKey": next_issue, "customFieldId": self._rank}
url = self._get_url('rank', base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
class GreenHopper(JIRA):
def __init__(self, options=None, basic_auth=None, oauth=None, async=None):
warnings.warn(
"GreenHopper() class is deprecated, just use JIRA() instead.", DeprecationWarning)
JIRA.__init__(
self, options=options, basic_auth=basic_auth, oauth=oauth, async=async)
|
bsd-2-clause
| -7,322,391,625,200,059,000
| 38.108742
| 243
| 0.592792
| false
| 4.245178
| false
| false
| false
|
sbeparey/CloudBot
|
plugins/foods.py
|
1
|
14374
|
import codecs
import json
import os
import random
import asyncio
import re
from cloudbot import hook
from cloudbot.util import textgen
nick_re = re.compile("^[A-Za-z0-9_|.\-\]\[\{\}]*$", re.I)
cakes = ['Chocolate', 'Ice Cream', 'Angel', 'Boston Cream', 'Birthday', 'Bundt', 'Carrot', 'Coffee', 'Devils', 'Fruit',
'Gingerbread', 'Pound', 'Red Velvet', 'Stack', 'Welsh', 'Yokan']
cookies = ['Chocolate Chip', 'Oatmeal', 'Sugar', 'Oatmeal Raisin', 'Macadamia Nut', 'Jam Thumbprint', 'Mexican Wedding',
'Biscotti', 'Oatmeal Cranberry', 'Chocolate Fudge', 'Peanut Butter', 'Pumpkin', 'Lemon Bar',
'Chocolate Oatmeal Fudge', 'Toffee Peanut', 'Danish Sugar', 'Triple Chocolate', 'Oreo']
# <Luke> Hey guys, any good ideas for plugins?
# <User> I don't know, something that lists every potato known to man?
# <Luke> BRILLIANT
potatoes = ['AC Belmont', 'AC Blue Pride', 'AC Brador', 'AC Chaleur', 'AC Domino', 'AC Dubuc', 'AC Glacier Chip',
'AC Maple Gold', 'AC Novachip', 'AC Peregrine Red', 'AC Ptarmigan', 'AC Red Island', 'AC Saguenor',
'AC Stampede Russet', 'AC Sunbury', 'Abeille', 'Abnaki', 'Acadia', 'Acadia Russet', 'Accent',
'Adirondack Blue', 'Adirondack Red', 'Adora', 'Agria', 'All Blue', 'All Red', 'Alpha', 'Alta Russet',
'Alturas Russet', 'Amandine', 'Amisk', 'Andover', 'Anoka', 'Anson', 'Aquilon', 'Arran Consul', 'Asterix',
'Atlantic', 'Austrian Crescent', 'Avalanche', 'Banana', 'Bannock Russet', 'Batoche', 'BeRus',
'Belle De Fonteney', 'Belleisle', 'Bintje', 'Blossom', 'Blue Christie', 'Blue Mac', 'Brigus',
'Brise du Nord', 'Butte', 'Butterfinger', 'Caesar', 'CalWhite', 'CalRed', 'Caribe', 'Carlingford',
'Carlton', 'Carola', 'Cascade', 'Castile', 'Centennial Russet', 'Century Russet', 'Charlotte', 'Cherie',
'Cherokee', 'Cherry Red', 'Chieftain', 'Chipeta', 'Coastal Russet', 'Colorado Rose', 'Concurrent',
'Conestoga', 'Cowhorn', 'Crestone Russet', 'Crispin', 'Cupids', 'Daisy Gold', 'Dakota Pearl', 'Defender',
'Delikat', 'Denali', 'Desiree', 'Divina', 'Dundrod', 'Durango Red', 'Early Rose', 'Elba', 'Envol',
'Epicure', 'Eramosa', 'Estima', 'Eva', 'Fabula', 'Fambo', 'Fremont Russet', 'French Fingerling',
'Frontier Russet', 'Fundy', 'Garnet Chile', 'Gem Russet', 'GemStar Russet', 'Gemchip', 'German Butterball',
'Gigant', 'Goldrush', 'Granola', 'Green Mountain', 'Haida', 'Hertha', 'Hilite Russet', 'Huckleberry',
'Hunter', 'Huron', 'IdaRose', 'Innovator', 'Irish Cobbler', 'Island Sunshine', 'Ivory Crisp',
'Jacqueline Lee', 'Jemseg', 'Kanona', 'Katahdin', 'Kennebec', "Kerr's Pink", 'Keswick', 'Keuka Gold',
'Keystone Russet', 'King Edward VII', 'Kipfel', 'Klamath Russet', 'Krantz', 'LaRatte', 'Lady Rosetta',
'Latona', 'Lemhi Russet', 'Liberator', 'Lili', 'MaineChip', 'Marfona', 'Maris Bard', 'Maris Piper',
'Matilda', 'Mazama', 'McIntyre', 'Michigan Purple', 'Millenium Russet', 'Mirton Pearl', 'Modoc', 'Mondial',
'Monona', 'Morene', 'Morning Gold', 'Mouraska', 'Navan', 'Nicola', 'Nipigon', 'Niska', 'Nooksack',
'NorValley', 'Norchip', 'Nordonna', 'Norgold Russet', 'Norking Russet', 'Norland', 'Norwis', 'Obelix',
'Ozette', 'Peanut', 'Penta', 'Peribonka', 'Peruvian Purple', 'Pike', 'Pink Pearl', 'Prospect', 'Pungo',
'Purple Majesty', 'Purple Viking', 'Ranger Russet', 'Reba', 'Red Cloud', 'Red Gold', 'Red La Soda',
'Red Pontiac', 'Red Ruby', 'Red Thumb', 'Redsen', 'Rocket', 'Rose Finn Apple', 'Rose Gold', 'Roselys',
'Rote Erstling', 'Ruby Crescent', 'Russet Burbank', 'Russet Legend', 'Russet Norkotah', 'Russet Nugget',
'Russian Banana', 'Saginaw Gold', 'Sangre', 'Satina', 'Saxon', 'Sebago', 'Shepody', 'Sierra',
'Silverton Russet', 'Simcoe', 'Snowden', 'Spunta', "St. John's", 'Summit Russet', 'Sunrise', 'Superior',
'Symfonia', 'Tolaas', 'Trent', 'True Blue', 'Ulla', 'Umatilla Russet', 'Valisa', 'Van Gogh', 'Viking',
'Wallowa Russet', 'Warba', 'Western Russet', 'White Rose', 'Willamette', 'Winema', 'Yellow Finn',
'Yukon Gold']
def is_valid(target):
""" Checks if a string is a valid IRC nick. """
if nick_re.match(target):
return True
else:
return False
@hook.on_start()
def load_foods(bot):
"""
:type bot: cloudbot.bot.CloudBot
"""
global sandwich_data, taco_data, coffee_data, noodles_data, muffin_data, \
tea_data, keto_data, beer_data, cheese_data, pancake_data, chicken_data, \
icecream_data, brekkie_data, doobie_data
with codecs.open(os.path.join(bot.data_dir, "sandwich.json"), encoding="utf-8") as f:
sandwich_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "taco.json"), encoding="utf-8") as f:
taco_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "coffee.json"), encoding="utf-8") as f:
coffee_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "noodles.json"), encoding="utf-8") as f:
noodles_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "muffin.json"), encoding="utf-8") as f:
muffin_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "tea.json"), encoding="utf-8") as f:
tea_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "keto.json"), encoding="utf-8") as f:
keto_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "cheese.json"), encoding="utf-8") as f:
cheese_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "pancake.json"), encoding="utf-8") as f:
pancake_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "chicken.json"), encoding="utf-8") as f:
chicken_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "brekkie.json"), encoding="utf-8") as f:
brekkie_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "icecream.json"), encoding="utf-8") as f:
icecream_data = json.load(f)
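# The JSON files loaded above are expected to provide "templates" and "parts" keys,
# as consumed by textgen.TextGenerator below; a rough, invented example of the shape:
#   {
#       "templates": ["{verb} {user} a {flavor} sandwich!"],
#       "parts": {"verb": ["makes", "gives"], "flavor": ["tasty", "delicious"]}
#   }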
@asyncio.coroutine
@hook.command
def potato(text, action):
"""<user> - makes <user> a tasty little potato"""
user = text.strip()
if not is_valid(user):
return "I can't give a potato to that user."
potato_type = random.choice(potatoes)
size = random.choice(['small', 'little', 'mid-sized', 'medium-sized', 'large', 'gigantic'])
flavor = random.choice(['tasty', 'delectable', 'delicious', 'yummy', 'toothsome', 'scrumptious', 'luscious'])
method = random.choice(['bakes', 'fries', 'boils', 'roasts'])
side_dish = random.choice(['side salad', 'dollop of sour cream', 'piece of chicken', 'bowl of shredded bacon'])
action("{} a {} {} {} potato for {} and serves it with a small {}!".format(method, flavor, size, potato_type, user,
side_dish))
@asyncio.coroutine
@hook.command
def cake(text, action):
"""<user> - gives <user> an awesome cake"""
user = text.strip()
if not is_valid(user):
return "I can't give a cake to that user."
cake_type = random.choice(cakes)
size = random.choice(['small', 'little', 'mid-sized', 'medium-sized', 'large', 'gigantic'])
flavor = random.choice(['tasty', 'delectable', 'delicious', 'yummy', 'toothsome', 'scrumptious', 'luscious'])
method = random.choice(['makes', 'gives', 'gets', 'buys'])
side_dish = random.choice(['glass of chocolate milk', 'bowl of ice cream', 'jar of cookies',
'side of chocolate sauce'])
action("{} {} a {} {} {} cake and serves it with a small {}!".format(method, user, flavor, size, cake_type,
side_dish))
@asyncio.coroutine
@hook.command
def cookie(text, action):
"""<user> - gives <user> a cookie"""
user = text.strip()
if not is_valid(user):
return "I can't give a cookie to that user."
cookie_type = random.choice(cookies)
size = random.choice(['small', 'little', 'medium-sized', 'large', 'gigantic'])
flavor = random.choice(['tasty', 'delectable', 'delicious', 'yummy', 'toothsome', 'scrumptious', 'luscious'])
method = random.choice(['makes', 'gives', 'gets', 'buys'])
side_dish = random.choice(['glass of milk', 'bowl of ice cream', 'bowl of chocolate sauce'])
action("{} {} a {} {} {} cookie and serves it with a {}!".format(method, user, flavor, size, cookie_type,
side_dish))
@asyncio.coroutine
@hook.command
def sandwich(text, action):
"""<user> - give a tasty sandwich to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give a sandwich to that user."
generator = textgen.TextGenerator(sandwich_data["templates"], sandwich_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def taco(text, action):
"""<user> - give a taco to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give a taco to that user."
generator = textgen.TextGenerator(taco_data["templates"], taco_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def drink(text, action):
"""<user> - give a drink to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give drinks to that user."
r = random.randint(1,2)
if r == 1:
generator = textgen.TextGenerator(coffee_data["templates"], coffee_data["parts"],
variables={"user": user})
else:
generator = textgen.TextGenerator(tea_data["templates"], tea_data["parts"],
variables={"user": user})
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def coffee(text, action):
"""<user> - give coffee to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give coffee to that user."
generator = textgen.TextGenerator(coffee_data["templates"], coffee_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def noodles(text, action):
"""<user> - give noodles to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give noodles to that user."
generator = textgen.TextGenerator(noodles_data["templates"], noodles_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def muffin(text, action):
"""<user> - give muffin to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give muffin to that user."
generator = textgen.TextGenerator(muffin_data["templates"], muffin_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def tea(text, action):
"""<user> - give tea to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give tea to that user."
generator = textgen.TextGenerator(tea_data["templates"], tea_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def keto(text, action):
"""<user> - give keto food to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give food to that user."
generator = textgen.TextGenerator(keto_data["templates"], keto_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def cheese(text, action):
"""<user> - give cheese to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give cheese to that user."
generator = textgen.TextGenerator(cheese_data["templates"], cheese_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def pancake(text, action):
"""<user> - give pancakes to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give pancakes to that user."
generator = textgen.TextGenerator(pancake_data["templates"], pancake_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def chicken(text, action):
"""<user> - give pancakes to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give pancakes to that user."
generator = textgen.TextGenerator(chicken_data["templates"], chicken_data["parts"], variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def icecream(text, action):
"""<user> - give icecream to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give icecream to that user."
generator = textgen.TextGenerator(icecream_data["templates"], icecream_data["parts"], variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command("brekky", "brekkie")
def brekkie(text, action):
"""<user> - give brekkie to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give brekkie to that user."
generator = textgen.TextGenerator(brekkie_data["templates"], brekkie_data["parts"], variables={"user": user})
# act out the message
action(generator.generate_string())
|
gpl-3.0
| -8,647,342,310,235,884,000
| 39.379213
| 120
| 0.599416
| false
| 3.091848
| false
| false
| false
|
berlotto/bolao-futebol
|
bolao/settings.py
|
1
|
2001
|
"""
Django settings for bolao project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5zhqg$4=*=24(7u(mj^-hn-#eg!k21i75&j9kg)*xz4*8$(_)s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apostas',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bolao.urls'
WSGI_APPLICATION = 'bolao.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = False
USE_L10N = False
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
lgpl-3.0
| -7,687,704,100,187,002,000
| 23.108434
| 71
| 0.718141
| false
| 3.196486
| false
| false
| false
|
Onager/plaso
|
plaso/parsers/sqlite_plugins/chrome_cookies.py
|
1
|
8029
|
# -*- coding: utf-8 -*-
"""SQLite parser plugin for Google Chrome cookies database files."""
from dfdatetime import webkit_time as dfdatetime_webkit_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
# Register the cookie plugins.
from plaso.parsers import cookie_plugins # pylint: disable=unused-import
from plaso.parsers import sqlite
from plaso.parsers.cookie_plugins import manager as cookie_plugins_manager
from plaso.parsers.sqlite_plugins import interface
class ChromeCookieEventData(events.EventData):
"""Chrome Cookie event data.
Attributes:
cookie_name (str): name of the cookie.
host (str): hostname of host that set the cookie value.
httponly (bool): True if the cookie cannot be accessed through client
side script.
path (str): path where the cookie got set.
persistent (bool): True if the cookie is persistent.
secure (bool): True if the cookie should only be transmitted over a
secure channel.
url (str): URL or path where the cookie got set.
data (str): value of the cookie.
"""
DATA_TYPE = 'chrome:cookie:entry'
def __init__(self):
"""Initializes event data."""
super(ChromeCookieEventData, self).__init__(data_type=self.DATA_TYPE)
self.cookie_name = None
self.data = None
self.host = None
self.httponly = None
self.path = None
self.persistent = None
self.secure = None
self.url = None
class BaseChromeCookiePlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Google Chrome cookies database files."""
# Point to few sources for URL information.
URLS = [
'http://src.chromium.org/svn/trunk/src/net/cookies/',
('http://www.dfinews.com/articles/2012/02/'
'google-analytics-cookies-and-forensic-implications')]
# Google Analytics __utmz variable translation.
# Taken from:
# http://www.dfinews.com/sites/dfinews.com/files/u739/Tab2Cookies020312.jpg
GA_UTMZ_TRANSLATION = {
'utmcsr': 'Last source used to access.',
'utmccn': 'Ad campaign information.',
'utmcmd': 'Last type of visit.',
'utmctr': 'Keywords used to find site.',
'utmcct': 'Path to the page of referring link.'}
def __init__(self):
"""Initializes a plugin."""
super(BaseChromeCookiePlugin, self).__init__()
self._cookie_plugins = (
cookie_plugins_manager.CookiePluginsManager.GetPlugins())
def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a cookie row.
Args:
parser_mediator (ParserMediator): parser mediator.
query (str): query that created the row.
row (sqlite3.Row): row resulting from the query.
"""
query_hash = hash(query)
cookie_name = self._GetRowValue(query_hash, row, 'name')
cookie_data = self._GetRowValue(query_hash, row, 'value')
hostname = self._GetRowValue(query_hash, row, 'host_key')
if hostname.startswith('.'):
hostname = hostname[1:]
httponly = self._GetRowValue(query_hash, row, 'httponly')
path = self._GetRowValue(query_hash, row, 'path')
persistent = self._GetRowValue(query_hash, row, 'persistent')
secure = self._GetRowValue(query_hash, row, 'secure')
if secure:
scheme = 'https'
else:
scheme = 'http'
url = '{0:s}://{1:s}{2:s}'.format(scheme, hostname, path)
event_data = ChromeCookieEventData()
event_data.cookie_name = cookie_name
event_data.data = cookie_data
event_data.host = hostname
event_data.httponly = bool(httponly)
event_data.path = path
event_data.persistent = bool(persistent)
event_data.query = query
event_data.secure = bool(secure)
event_data.url = url
timestamp = self._GetRowValue(query_hash, row, 'creation_utc')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'last_access_utc')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'expires_utc')
if timestamp:
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
for plugin in self._cookie_plugins:
if cookie_name != plugin.COOKIE_NAME:
continue
try:
plugin.UpdateChainAndProcess(
parser_mediator, cookie_data=cookie_data, cookie_name=cookie_name,
url=url)
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning(
'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(
plugin.NAME, exception))
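# For illustration: a row with host_key='.example.com', path='/' and secure=1 yields
# url == 'https://example.com/' and produces up to three timeline events per cookie
# (creation, last access and, when expires_utc is non-zero, expiration).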
class Chrome17CookiePlugin(BaseChromeCookiePlugin):
"""SQLite parser plugin for Google Chrome 17 - 65 cookies database files."""
NAME = 'chrome_17_cookies'
DATA_FORMAT = 'Google Chrome 17 - 65 cookies SQLite database file'
REQUIRED_STRUCTURE = {
'cookies': frozenset([
'creation_utc', 'host_key', 'name', 'value', 'path', 'expires_utc',
'secure', 'httponly', 'last_access_utc', 'has_expires',
'persistent']),
'meta': frozenset([])}
QUERIES = [
(('SELECT creation_utc, host_key, name, value, path, expires_utc, '
'secure, httponly, last_access_utc, has_expires, persistent '
'FROM cookies'), 'ParseCookieRow')]
SCHEMAS = [{
'cookies': (
'CREATE TABLE cookies (creation_utc INTEGER NOT NULL UNIQUE PRIMARY '
'KEY, host_key TEXT NOT NULL, name TEXT NOT NULL, value TEXT NOT '
'NULL, path TEXT NOT NULL, expires_utc INTEGER NOT NULL, secure '
'INTEGER NOT NULL, httponly INTEGER NOT NULL, last_access_utc '
'INTEGER NOT NULL, has_expires INTEGER DEFAULT 1, persistent '
'INTEGER DEFAULT 1)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)')}]
class Chrome66CookiePlugin(BaseChromeCookiePlugin):
"""SQLite parser plugin for Google Chrome 66+ cookies database files."""
NAME = 'chrome_66_cookies'
DATA_FORMAT = 'Google Chrome 66 and later cookies SQLite database file'
REQUIRED_STRUCTURE = {
'cookies': frozenset([
'creation_utc', 'host_key', 'name', 'value', 'path', 'expires_utc',
'is_secure', 'is_httponly', 'last_access_utc', 'has_expires',
'is_persistent']),
'meta': frozenset([])}
QUERIES = [
(('SELECT creation_utc, host_key, name, value, path, expires_utc, '
'is_secure AS secure, is_httponly AS httponly, last_access_utc, '
'has_expires, is_persistent AS persistent '
'FROM cookies'), 'ParseCookieRow')]
SCHEMAS = [{
'cookies': (
'CREATE TABLE cookies (creation_utc INTEGER NOT NULL, host_key TEXT '
'NOT NULL, name TEXT NOT NULL, value TEXT NOT NULL, path TEXT NOT '
'NULL, expires_utc INTEGER NOT NULL, is_secure INTEGER NOT NULL, '
'is_httponly INTEGER NOT NULL, last_access_utc INTEGER NOT NULL, '
'has_expires INTEGER NOT NULL DEFAULT 1, is_persistent INTEGER NOT '
'NULL DEFAULT 1, priority INTEGER NOT NULL DEFAULT 1, '
'encrypted_value BLOB DEFAULT \'\', firstpartyonly INTEGER NOT NULL '
'DEFAULT 0, UNIQUE (host_key, name, path))'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)')}]
sqlite.SQLiteParser.RegisterPlugins([
Chrome17CookiePlugin, Chrome66CookiePlugin])
|
apache-2.0
| -3,767,058,547,770,239,000
| 36.872642
| 79
| 0.668078
| false
| 3.79084
| false
| false
| false
|
jonatascastro12/django-dashboard_view
|
django_select2_extension/fields.py
|
1
|
3839
|
from django.db.models.query_utils import Q
from django.forms.models import ModelChoiceIterator
from django_select2.fields import ChoiceMixin, AutoModelSelect2MultipleField, AutoModelSelect2Field
from django_select2_extension.widgets import AutoPhotoHeavySelect2MultipleWidget, AutoPhotoHeavySelect2Widget
class FilterableAdvancedModelChoiceIterator(ModelChoiceIterator):
"""
Extends ModelChoiceIterator to add the capability to apply additional
filter on the passed queryset and also return the obj instance.
"""
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj), obj)
def set_extra_filter(self, **filter_map):
"""
Applies additional filter on the queryset. This can be called multiple times.
:param filter_map: The ``**kwargs`` to pass to :py:meth:`django.db.models.query.QuerySet.filter`.
If this is not set then additional filter (if) applied before is removed.
"""
if not hasattr(self, '_original_queryset'):
import copy
self._original_queryset = copy.deepcopy(self.queryset)
if filter_map:
self.queryset = self._original_queryset.filter(**filter_map)
else:
self.queryset = self._original_queryset
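# Hypothetical sketch of the extra filter (field and lookup names are invented; note
# that the ``choices`` property returns a fresh iterator on every access, so keep a
# reference to the one you filter):
#   iterator = some_select2_field.choices
#   iterator.set_extra_filter(team__name='editors')  # narrow the underlying queryset
#   iterator.set_extra_filter()                       # no kwargs restores the original queryset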
class QuerysetAdvancedChoiceMixin(ChoiceMixin):
"""
Overrides ``choices``' getter to return instance of :py:class:`.FilterableAdvancedModelChoiceIterator`
instead.
"""
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return FilterableAdvancedModelChoiceIterator(self)
choices = property(_get_choices, ChoiceMixin._set_choices)
def __deepcopy__(self, memo):
result = super(QuerysetAdvancedChoiceMixin, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def prepare_qs_params(self, request, search_term, search_fields):
q = None
for field in search_fields:
kwargs = {}
search_term = search_term.strip()
if " " in search_term:
splitted_terms = search_term.split(" ")
for term in splitted_terms:
kwargs[field] = term
if q is None:
q = Q(**kwargs)
else:
q = q | Q(**kwargs)
else:
kwargs[field] = search_term
if q is None:
q = Q(**kwargs)
else:
q = q | Q(**kwargs)
return {'or': [q], 'and': {}}
class AutoPhotoModelSelect2Field(QuerysetAdvancedChoiceMixin, AutoModelSelect2Field):
widget = AutoPhotoHeavySelect2Widget
def extra_data_from_instance(self, obj):
return {'photo': obj.get_small_thumbnail()}
class AutoPhotoModelSelect2MultipleField(QuerysetAdvancedChoiceMixin, AutoModelSelect2MultipleField):
widget = AutoPhotoHeavySelect2MultipleWidget
def extra_data_from_instance(self, obj):
return {'photo': obj.get_small_thumbnail()}
|
gpl-2.0
| -1,333,794,018,231,232,500
| 41.197802
| 109
| 0.648085
| false
| 4.453596
| false
| false
| false
|
rh-lab-q/conflab
|
wsgi/openshift/confla/utils.py
|
1
|
1275
|
import os
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django.utils.deconstruct import deconstructible
@deconstructible
class ConfRenamePath(object):
def __init__(self, path):
self.path = path
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(instance.url_id, ext)
return os.path.join(self.path, filename)
@deconstructible
class UserRenamePath(object):
def __init__(self, path):
self.path = path
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(instance.username, ext)
return os.path.join(self.path, filename)
@deconstructible
class PaperRenamePath(object):
def __init__(self, path):
self.path = path
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(instance.user.username, ext)
return os.path.join(self.path, filename)
splash_rename_and_return_path = ConfRenamePath('splash/')
icon_rename_and_return_path = ConfRenamePath('icon/')
user_rename_and_return_path = UserRenamePath('avatars/')
paper_rename_and_return_path = PaperRenamePath('papers/')
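# The helpers above are intended as Django ``upload_to`` callables; a hypothetical
# model field using one of them could look like this (model and field names invented):
#   splash_image = models.ImageField(upload_to=splash_rename_and_return_path)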
|
gpl-3.0
| -7,650,763,214,364,231,000
| 28.651163
| 62
| 0.661176
| false
| 3.663793
| false
| false
| false
|
group-policy/rally
|
rally/plugins/openstack/scenarios/murano/utils.py
|
1
|
9884
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import uuid
import zipfile
from oslo_config import cfg
import yaml
from rally.common import fileutils
from rally.common import utils as common_utils
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import utils
CONF = cfg.CONF
MURANO_BENCHMARK_OPTS = [
cfg.IntOpt("murano_deploy_environment_timeout", default=1200,
deprecated_name="deploy_environment_timeout",
help="A timeout in seconds for an environment deploy"),
cfg.IntOpt("murano_deploy_environment_check_interval", default=5,
deprecated_name="deploy_environment_check_interval",
help="Deploy environment check interval in seconds"),
]
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(MURANO_BENCHMARK_OPTS, group=benchmark_group)
class MuranoScenario(scenario.OpenStackScenario):
"""Base class for Murano scenarios with basic atomic actions."""
@atomic.action_timer("murano.list_environments")
def _list_environments(self):
"""Return environments list."""
return self.clients("murano").environments.list()
@atomic.action_timer("murano.create_environment")
def _create_environment(self):
"""Create environment.
The environment name is generated automatically.
:returns: Environment instance
"""
env_name = self.generate_random_name()
return self.clients("murano").environments.create({"name": env_name})
@atomic.action_timer("murano.delete_environment")
def _delete_environment(self, environment):
"""Delete given environment.
Return when the environment is actually deleted.
:param environment: Environment instance
"""
self.clients("murano").environments.delete(environment.id)
@atomic.action_timer("murano.create_session")
def _create_session(self, environment_id):
"""Create session for environment with specific id
:param environment_id: Environment id
:returns: Session instance
"""
return self.clients("murano").sessions.configure(environment_id)
@atomic.optional_action_timer("murano.create_service")
def _create_service(self, environment, session, full_package_name,
image_name=None, flavor_name=None):
"""Create Murano service.
:param environment: Environment instance
:param session: Session instance
:param full_package_name: full name of the Murano package
:param image_name: Image name
:param flavor_name: Flavor name
:param atomic_action: True if this is atomic action. added and
handled by the optional_action_timer()
decorator
:returns: Service instance
"""
app_id = str(uuid.uuid4())
data = {"?": {"id": app_id,
"type": full_package_name},
"name": self.generate_random_name()}
return self.clients("murano").services.post(
environment_id=environment.id, path="/", data=data,
session_id=session.id)
@atomic.action_timer("murano.deploy_environment")
def _deploy_environment(self, environment, session):
"""Deploy environment.
:param environment: Environment instance
:param session: Session instance
"""
self.clients("murano").sessions.deploy(environment.id,
session.id)
config = CONF.benchmark
utils.wait_for(
environment, is_ready=utils.resource_is("READY"),
update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
timeout=config.murano_deploy_environment_timeout,
check_interval=config.murano_deploy_environment_check_interval
)
@atomic.action_timer("murano.list_packages")
def _list_packages(self, include_disabled=False):
"""Returns packages list.
:param include_disabled: if "True" then disabled packages will be
included in the result.
Default value is False.
:returns: list of imported packages
"""
return self.clients("murano").packages.list(
include_disabled=include_disabled)
@atomic.action_timer("murano.import_package")
def _import_package(self, package):
"""Import package to the Murano.
:param package: path to zip archive with Murano application
:returns: imported package
"""
package = self.clients("murano").packages.create(
{}, {"file": open(package)}
)
return package
@atomic.action_timer("murano.delete_package")
def _delete_package(self, package):
"""Delete specified package.
:param package: package that will be deleted
"""
self.clients("murano").packages.delete(package.id)
@atomic.action_timer("murano.update_package")
def _update_package(self, package, body, operation="replace"):
"""Update specified package.
:param package: package that will be updated
:param body: dict object that defines what package property will be
updated, e.g {"tags": ["tag"]} or {"enabled": "true"}
:param operation: string object that defines the way of how package
property will be updated, allowed operations are
"add", "replace" or "delete".
Default value is "replace".
:returns: updated package
"""
return self.clients("murano").packages.update(
package.id, body, operation)
@atomic.action_timer("murano.filter_applications")
def _filter_applications(self, filter_query):
"""Filter list of uploaded application by specified criteria.
:param filter_query: dict that contains filter criteria, it
will be passed as **kwargs to filter method
e.g. {"category": "Web"}
:returns: filtered list of packages
"""
return self.clients("murano").packages.filter(**filter_query)
def _zip_package(self, package_path):
"""Call _prepare_package method that returns path to zip archive."""
return MuranoPackageManager(self.task)._prepare_package(package_path)
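# A typical scenario flow composed from the helpers above might look like this
# (the package name is illustrative, not something this module ships):
#   environment = self._create_environment()
#   session = self._create_session(environment.id)
#   self._create_service(environment, session, 'io.murano.apps.apache.ApacheHttpServer')
#   self._deploy_environment(environment, session)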
class MuranoPackageManager(common_utils.RandomNameGeneratorMixin):
RESOURCE_NAME_FORMAT = "app.rally_XXXXXXXX_XXXXXXXX"
def __init__(self, task):
self.task = task
@staticmethod
def _read_from_file(filename):
with open(filename, "r") as f:
read_data = f.read()
return yaml.safe_load(read_data)
@staticmethod
def _write_to_file(data, filename):
with open(filename, "w") as f:
yaml.safe_dump(data, f)
def _change_app_fullname(self, app_dir):
"""Change application full name.
To avoid a name conflict error during package import (when a user
tries to import several packages into the same tenant) the
application name needs to be changed. To do this, replace the following parts
in manifest.yaml
from
...
FullName: app.name
...
Classes:
app.name: app_class.yaml
to:
...
FullName: <new_name>
...
Classes:
<new_name>: app_class.yaml
:param app_dir: path to directory with Murano application context
"""
new_fullname = self.generate_random_name()
manifest_file = os.path.join(app_dir, "manifest.yaml")
manifest = self._read_from_file(manifest_file)
class_file_name = manifest["Classes"][manifest["FullName"]]
# update manifest.yaml file
del manifest["Classes"][manifest["FullName"]]
manifest["FullName"] = new_fullname
manifest["Classes"][new_fullname] = class_file_name
self._write_to_file(manifest, manifest_file)
def _prepare_package(self, package_path):
"""Check whether the package path is path to zip archive or not.
If package_path is not a path to zip archive but path to Murano
application folder, then the method prepares a zip archive with the Murano
application. It copies directory with Murano app files to temporary
folder, changes manifest.yaml and class file (to avoid '409 Conflict'
errors in Murano) and prepares zip package.
:param package_path: path to zip archive or directory with package
components
:returns: path to zip archive with Murano application
"""
if not zipfile.is_zipfile(package_path):
tmp_dir = tempfile.mkdtemp()
pkg_dir = os.path.join(tmp_dir, "package/")
try:
shutil.copytree(package_path, pkg_dir)
self._change_app_fullname(pkg_dir)
package_path = fileutils.pack_dir(pkg_dir)
finally:
shutil.rmtree(tmp_dir)
return package_path
|
apache-2.0
| -1,880,708,867,045,607,000
| 35.072993
| 79
| 0.625152
| false
| 4.379265
| false
| false
| false
|
cgstudiomap/cgstudiomap
|
main/eggs/python_stdnum-1.2-py2.7.egg/stdnum/iban.py
|
1
|
3897
|
# iban.py - functions for handling International Bank Account Numbers (IBANs)
#
# Copyright (C) 2011, 2012, 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IBAN (International Bank Account Number).
The IBAN is used to identify bank accounts across national borders. The
first two letters are a country code. The next two digits are check digits
for the ISO 7064 Mod 97, 10 checksum. Each country uses its own format
for the remainder of the number.
Some countries may also use checksum algorithms within their number but
this is currently not checked by this module.
>>> validate('GR16 0110 1050 0000 1054 7023 795')
'GR1601101050000010547023795'
>>> validate('BE31435411161155')
'BE31435411161155'
>>> compact('GR16 0110 1050 0000 1054 7023 795')
'GR1601101050000010547023795'
>>> format('GR1601101050000010547023795')
'GR16 0110 1050 0000 1054 7023 795'
"""
import re
from stdnum import numdb
from stdnum.exceptions import *
from stdnum.iso7064 import mod_97_10
from stdnum.util import clean
# our open copy of the IBAN database
_ibandb = numdb.get('iban')
# the valid characters we have
_alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# regular expression to check IBAN structure
_struct_re = re.compile(r'([1-9][0-9]*)!([nac])')
def compact(number):
"""Convert the iban number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -').strip().upper()
def _to_base10(number):
"""Prepare the number to its base10 representation (also moving the
check digits to the end) so it can be checked with the ISO 7064
Mod 97, 10 algorithm."""
# TODO: find out whether this should be in the mod_97_10 module
return ''.join(str(_alphabet.index(x)) for x in number[4:] + number[:4])
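# Worked example using the number from the module docstring: for
# 'GR1601101050000010547023795' the digits '01101050000010547023795' are kept first,
# then 'G', 'R', '1', '6' are appended and mapped via _alphabet ('G' -> 16, 'R' -> 27),
# producing the base-10 string that mod_97_10 validates.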
def _struct_to_re(structure):
"""Convert an IBAN structure to a refular expression that can be used
to validate the number."""
def conv(match):
chars = {
'n': '[0-9]',
'a': '[A-Z]',
'c': '[A-Za-z0-9]',
}[match.group(2)]
return '%s{%s}' % (chars, match.group(1))
return re.compile('^%s$' % _struct_re.sub(conv, structure))
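# For example, the (invented) structure '4!n4!n12!c' would be converted by the code
# above into the pattern ^[0-9]{4}[0-9]{4}[A-Za-z0-9]{12}$.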
def validate(number):
"""Checks to see if the number provided is a valid IBAN."""
number = compact(number)
try:
test_number = _to_base10(number)
except Exception:
raise InvalidFormat()
# ensure that checksum is valid
mod_97_10.validate(test_number)
# look up the number
info = _ibandb.info(number)
# check if the bban part of number has the correct structure
bban = number[4:]
if not _struct_to_re(info[0][1].get('bban', '')).match(bban):
raise InvalidFormat()
# return the compact representation
return number
def is_valid(number):
"""Checks to see if the number provided is a valid IBAN."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number, separator=' '):
"""Reformat the passed number to the space-separated format."""
number = compact(number)
return separator.join(number[i:i + 4] for i in range(0, len(number), 4))
|
agpl-3.0
| -2,954,733,184,539,268,600
| 32.886957
| 77
| 0.699769
| false
| 3.642056
| false
| false
| false
|
mysociety/manchester-survey
|
survey/migrations/0003_auto__chg_field_user_email.py
|
1
|
1991
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'User.email'
db.alter_column(u'survey_user', 'email', self.gf('django.db.models.fields.TextField')(unique=True, null=True))
def backwards(self, orm):
# Changing field 'User.email'
db.alter_column(u'survey_user', 'email', self.gf('django.db.models.fields.TextField')(default='', unique=True))
models = {
u'survey.item': {
'Meta': {'object_name': 'Item'},
'batch': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.User']"}),
'value': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'whenstored': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'survey.secret': {
'Meta': {'object_name': 'Secret'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'secret': ('django.db.models.fields.TextField', [], {})
},
u'survey.user': {
'Meta': {'object_name': 'User'},
'code': ('django.db.models.fields.TextField', [], {'unique': 'True', 'db_index': 'True'}),
'email': ('django.db.models.fields.TextField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['survey']
|
agpl-3.0
| -6,821,724,920,916,634,000
| 44.272727
| 119
| 0.5555
| false
| 3.574506
| false
| false
| false
|
MediaMath/qasino
|
lib/data_manager.py
|
1
|
15026
|
# Copyright (C) 2014 MediaMath, Inc. <http://www.mediamath.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite_backend as sql_backend
import table_merger
import util
import qasino_table
import logging
import time
import re
import yaml
import sys
import thread
from twisted.internet import threads
from twisted.internet import task
from twisted.internet import reactor
class DataManager(object):
def __init__(self, use_dbfile, db_dir=None, signal_channel=None, archive_db_dir=None,
generation_duration_s=30):
self.saved_tables = {}
self.query_id = 0
self.views = {}
self.thread_id = thread.get_ident()
self.stats = {}
self.generation_duration_s = generation_duration_s
self.signal_channel = signal_channel
self.archive_db_dir = archive_db_dir
self.static_db_filepath = db_dir + '/qasino_table_store_static.db'
# Start with zero because we'll call rotate_dbs instantly below.
self.db_generation_number = 0
# use_dbfile can be:
# 'memory' -> use an in-memory db
# contains '%d' -> use it as a per-generation filename template
# no '%d' -> use the filename as-is (same db every generation)
self.one_db = False
self.db_name = use_dbfile
if use_dbfile == None:
self.db_name = "qasino_table_store_%d.db"
elif use_dbfile == "memory":
self.db_name = ":memory:"
self.one_db = True
elif use_dbfile.find('%d') == -1:
self.one_db = True
# Initialize some things
self.table_merger = table_merger.TableMerger(self)
# Add db_dir path
if db_dir != None and self.db_name != ":memory:":
self.db_name = db_dir + '/' + self.db_name
# Open the writer backend db.
db_file_name = self.db_name
if not self.one_db:
db_file_name = self.db_name % self.db_generation_number
self.sql_backend_reader = None
self.sql_backend_writer = sql_backend.SqlConnections(db_file_name,
self,
self.archive_db_dir,
self.thread_id,
self.static_db_filepath)
self.sql_backend_writer_static = sql_backend.SqlConnections(self.static_db_filepath,
self,
self.archive_db_dir,
self.thread_id,
None)
# Make the data manager db rotation run at fixed intervals.
# This will also immediately make the call which will make the
# writer we just opened the reader and to open a new writer.
self.rotate_task = task.LoopingCall(self.async_rotate_dbs)
self.rotate_task.start(self.generation_duration_s)
def read_views(self, filename):
# Reset views
self.views = {}
try:
fh = open(filename, "r")
except Exception as e:
logging.info("Failed to open views file '%s': %s", filename, e)
return
try:
view_conf_obj = yaml.load(fh)
except Exception as e:
logging.info("Failed to parse view conf yaml file '%s': %s", filename, e)
return
for view in view_conf_obj:
try:
viewname = view["viewname"]
view = view["view"]
self.views[viewname] = { 'view' : view, 'loaded' : False, 'error' : '' }
except Exception as e:
logging.info("Failure getting view '%s': %s", view["viewname"] if "viewname" in view else 'unknown', e)
def get_query_id(self):
self.query_id += 1
return self.query_id
def shutdown(self):
self.rotate_task = None
self.sql_backend_reader = None
self.sql_backend_writer = None
def async_validate_and_route_query(self, sql, query_id, use_write_db=False):
if use_write_db:
return self.sql_backend_writer.run_interaction(sql_backend.SqlConnections.WRITER_INTERACTION,
self.validate_and_route_query, sql, query_id, self.sql_backend_writer)
else:
return self.sql_backend_reader.run_interaction(sql_backend.SqlConnections.READER_INTERACTION,
self.validate_and_route_query, sql, query_id, self.sql_backend_reader)
def validate_and_route_query(self, txn, sql, query_id, sql_backend):
# So when dbs rotate we'll force a shutdown of the backend
# after a certain amount of time to avoid hung or long running
# things in this code path from holding dbs open. This
# may/will invalidate references we might have in here so wrap
# it all in a try catch...
try:
m = re.search(r"^\s*select\s+", sql, flags=re.IGNORECASE)
if m == None:
# Process a non-select statement.
return self.process_non_select(txn, sql, query_id, sql_backend)
# Process a select statement.
return sql_backend.do_select(txn, sql)
except Exception as e:
msg = "Exception in validate_and_route_query: {}".format(str(e))
logging.info(msg)
return { "retval" : 0, "error_message" : msg }
def process_non_select(self, txn, sql, query_id, sql_backend):
"""
Called for non-select statements like show tables and desc.
"""
# DESC?
m = re.search(r"^\s*desc\s+(\S+)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
(retval, error_message, table) = sql_backend.do_desc(txn, m.group(1))
result = { "retval" : retval }
if error_message:
result["error_message"] = error_message
if table:
result["data"] = table
return result
# DESC VIEW?
m = re.search(r"^\s*desc\s+view\s+(\S+)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT view FROM qasino_server_views WHERE viewname = '%s';" % m.group(1))
# SHOW tables?
m = re.search(r"^\s*show\s+tables\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_tables ORDER BY tablename;")
# SHOW tables with LIKE?
m = re.search(r"^\s*show\s+tables\s+like\s+('\S+')\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_tables WHERE tablename LIKE {} ORDER BY tablename;".format(m.group(1)) )
# SHOW connections?
m = re.search(r"^\s*show\s+connections\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_connections ORDER BY identity;")
# SHOW info?
m = re.search(r"^\s*show\s+info\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', generation_start_epoch, 'unixepoch') generation_start_datetime FROM qasino_server_info;")
# SHOW views?
m = re.search(r"^\s*show\s+views\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT viewname, loaded, errormsg FROM qasino_server_views ORDER BY viewname;")
# Exit?
m = re.search(r"^\s*(quit|logout|exit)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return { "retval" : 0, "error_message" : "Bye!" }
return { "retval" : 1, "error_message" : "ERROR: Unrecognized statement: %s" % sql }
def get_table_list(self):
return self.sql_backend_reader.tables
def insert_tables_table(self, txn, sql_backend_writer, sql_backend_writer_static):
table = qasino_table.QasinoTable("qasino_server_tables")
table.add_column("tablename", "varchar")
table.add_column("nr_rows", "int")
table.add_column("nr_updates", "int")
table.add_column("last_update_epoch", "int")
table.add_column("static", "int")
sql_backend_writer.add_tables_table_rows(table)
sql_backend_writer_static.add_tables_table_rows(table)
# the chicken or the egg - how do we add ourselves?
table.add_row( [ "qasino_server_tables",
table.get_nr_rows() + 1,
1,
time.time(),
0 ] )
return sql_backend_writer.add_table_data(txn, table, util.Identity.get_identity())
# This hack ensures all the internal tables are inserted
# using the same sql_backend_writer and makes sure that the
# "tables" table is called last (after all the other internal
# tables are added).
def insert_internal_tables(self, txn, sql_backend_writer, sql_backend_reader, db_generation_number, time, generation_duration_s, views):
sql_backend_writer.insert_info_table(txn, db_generation_number, time, generation_duration_s)
sql_backend_writer.insert_connections_table(txn)
if sql_backend_reader != None:
sql_backend_writer.insert_sql_stats_table(txn, sql_backend_reader)
sql_backend_writer.insert_update_stats_table(txn)
# this should be second last so views can be created of any tables above.
# this means though that you can not create views of any tables below.
sql_backend_writer.add_views(txn, views)
sql_backend_writer.insert_views_table(txn, views)
# this should be last to include all the above tables
self.insert_tables_table(txn, sql_backend_writer, self.sql_backend_writer_static)
def async_rotate_dbs(self):
"""
Kick off the rotate in a sqlconnection context because we have
some internal tables and views to add before we rotate dbs.
"""
self.sql_backend_writer.run_interaction(sql_backend.SqlConnections.WRITER_INTERACTION, self.rotate_dbs)
def rotate_dbs(self, txn):
"""
Make the db being written to be the reader db.
Open a new writer db for all new updates.
"""
logging.info("**** DataManager: Starting generation %d", self.db_generation_number)
# Before making the write db the read db,
# add various internal info tables and views.
self.insert_internal_tables(txn,
self.sql_backend_writer,
self.sql_backend_reader,
self.db_generation_number,
time.time(),
self.generation_duration_s,
self.views)
# Increment the generation number.
self.db_generation_number = int(time.time())
# Set the writer to a new db
save_sql_backend_writer = self.sql_backend_writer
# If specified put the generation number in the db name.
db_file_name = self.db_name
if not self.one_db:
db_file_name = self.db_name % self.db_generation_number
self.sql_backend_writer = sql_backend.SqlConnections(db_file_name,
self,
self.archive_db_dir,
self.thread_id,
self.static_db_filepath)
# Set the reader to what was the writer
# Note the reader will (should) be deconstructed here.
# Just in case something else is holding a ref to the reader
# (indefinitely!?) force a shutdown of this backend after a
# certain amount of time though.
if self.sql_backend_reader:
reactor.callLater(self.generation_duration_s * 3,
sql_backend.SqlConnections.shutdown,
self.sql_backend_reader.writer_dbpool,
self.sql_backend_reader.filename,
None)
reactor.callLater(self.generation_duration_s * 3,
sql_backend.SqlConnections.shutdown,
self.sql_backend_reader.reader_dbpool,
self.sql_backend_reader.filename,
self.sql_backend_reader.archive_db_dir)
self.sql_backend_reader = save_sql_backend_writer
# Load saved tables.
self.async_add_saved_tables()
# Lastly blast out the generation number.
if self.signal_channel != None:
self.signal_channel.send_generation_signal(self.db_generation_number, self.generation_duration_s)
def check_save_table(self, table, identity):
tablename = table.get_tablename()
key = tablename + identity
if table.get_property('persist'):
self.saved_tables[key] = { "table" : table, "tablename" : tablename, "identity" : identity }
else:
# Be sure to remove a table that is no longer persisting.
if key in self.saved_tables:
del self.saved_tables[key]
def async_add_saved_tables(self):
for key, table_data in self.saved_tables.iteritems():
logging.info("DataManager: Adding saved table '%s' from '%s'", table_data["tablename"], table_data["identity"])
self.sql_backend_writer.async_add_table_data(table_data["table"], table_data["identity"])
| apache-2.0 | 5,960,669,923,601,375,000 | 36.753769 | 235 | 0.55943 | false | 4.053412 | false | false | false |
| jjdmol/LOFAR | LCS/PyCommon/postgres.py | 1 | 11659 |
#!/usr/bin/python
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
# $Id$
'''
Module with nice postgres helper methods and classes.
'''
import logging
from threading import Thread, Lock
from Queue import Queue, Empty
import select
import psycopg2
import psycopg2.extras
import psycopg2.extensions
logger = logging.getLogger(__name__)
def makePostgresNotificationQueries(schema, table, action, view_for_row=None, view_selection_id=None):
action = action.upper()
if action not in ('INSERT', 'UPDATE', 'DELETE'):
raise ValueError('''trigger_type '%s' not in ('INSERT', 'UPDATE', 'DELETE')''' % action)
if view_for_row and action == 'DELETE':
raise ValueError('You cannot use a view for results on action DELETE')
if view_for_row:
change_name = '''{table}_{action}_with_{view_for_row}'''.format(schema=schema,
table=table,
action=action,
view_for_row=view_for_row)
function_name = '''NOTIFY_{change_name}'''.format(change_name=change_name)
function_sql = '''
CREATE OR REPLACE FUNCTION {schema}.{function_name}()
RETURNS TRIGGER AS $$
DECLARE
new_row_from_view {schema}.{view_for_row}%ROWTYPE;
BEGIN
select * into new_row_from_view from {schema}.{view_for_row} where {view_selection_id} = NEW.id LIMIT 1;
PERFORM pg_notify(CAST('{change_name}' AS text),
'{{"old":' || {old} || ',"new":' || row_to_json(new_row_from_view)::text || '}}');
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
'''.format(schema=schema,
function_name=function_name,
table=table,
action=action,
old='row_to_json(OLD)::text' if action == 'UPDATE' or action == 'DELETE' else '\'null\'',
view_for_row=view_for_row,
view_selection_id=view_selection_id if view_selection_id else 'id',
change_name=change_name.lower())
else:
change_name = '''{table}_{action}'''.format(table=table, action=action)
function_name = '''NOTIFY_{change_name}'''.format(change_name=change_name)
function_sql = '''
CREATE OR REPLACE FUNCTION {schema}.{function_name}()
RETURNS TRIGGER AS $$
BEGIN
PERFORM pg_notify(CAST('{change_name}' AS text),
'{{"old":' || {old} || ',"new":' || {new} || '}}');
RETURN {value};
END;
$$ LANGUAGE plpgsql;
'''.format(schema=schema,
function_name=function_name,
table=table,
action=action,
old='row_to_json(OLD)::text' if action == 'UPDATE' or action == 'DELETE' else '\'null\'',
new='row_to_json(NEW)::text' if action == 'UPDATE' or action == 'INSERT' else '\'null\'',
value='OLD' if action == 'DELETE' else 'NEW',
change_name=change_name.lower())
trigger_name = 'TRIGGER_NOTIFY_%s' % function_name
trigger_sql = '''
CREATE TRIGGER {trigger_name}
AFTER {action} ON {schema}.{table}
FOR EACH ROW
EXECUTE PROCEDURE {schema}.{function_name}();
'''.format(trigger_name=trigger_name,
function_name=function_name,
schema=schema,
table=table,
action=action)
drop_sql = '''
DROP TRIGGER IF EXISTS {trigger_name} ON {schema}.{table} CASCADE;
DROP FUNCTION IF EXISTS {schema}.{function_name}();
'''.format(trigger_name=trigger_name,
function_name=function_name,
schema=schema,
table=table)
sql = drop_sql + '\n' + function_sql + '\n' + trigger_sql
sql_lines = '\n'.join([s.strip() for s in sql.split('\n')]) + '\n'
return sql_lines
class PostgresListener(object):
''' This class lets you listen to postgres notifications
It executes callbacks when a notification occurs.
Make your own subclass with your callbacks and subscribe them to the appropriate channel.
Example:
class MyListener(PostgresListener):
def __init__(self, host, database, username, password):
super(MyListener, self).__init__(host=host, database=database, username=username, password=password)
self.subscribe('foo', self.foo)
self.subscribe('bar', self.bar)
def foo(self, payload = None):
print "Foo called with payload: ", payload
def bar(self, payload = None):
print "Bar called with payload: ", payload
with MyListener(...args...) as listener:
#either listen like below in a loop doing stuff...
while True:
#do stuff or wait,
#the listener calls the callbacks meanwhile in another thread
#... or listen like below blocking
#while the listener calls the callbacks meanwhile in this thread
listener.waitWhileListening()
'''
def __init__(self,
host='',
database='',
username='',
password=''):
'''Create a new PostgresListener'''
self.conn = psycopg2.connect(host=host,
user=username,
password=password,
database=database)
self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.cursor = self.conn.cursor()
self.__listening = False
self.__lock = Lock()
self.__callbacks = {}
self.__waiting = False
self.__queue = Queue()
def subscribe(self, notification, callback):
'''Subscribe to a certain postgres notification.
Call callback method in case such a notification is received.'''
logger.info("Subscribed %sto %s" % ('and listening ' if self.isListening() else '', notification))
with self.__lock:
self.cursor.execute("LISTEN %s;", (psycopg2.extensions.AsIs(notification),))
self.__callbacks[notification] = callback
def unsubscribe(self, notification):
'''Unsubscribe from a certain postgres notification.'''
logger.info("Unsubscribed from %s" % notification)
with self.__lock:
self.cursor.execute("UNLISTEN %s;", (psycopg2.extensions.AsIs(notification),))
if notification in self.__callbacks:
del self.__callbacks[notification]
def isListening(self):
'''Are we listening? Has the listener been started?'''
with self.__lock:
return self.__listening
def start(self):
'''Start listening. Does nothing if already listening.
When using the listener in a context start() and stop()
are called upon __enter__ and __exit__
This method returns immediately.
Listening and calling callbacks takes place on another thread.
If you want to block processing and call the callbacks on the main thread,
then call waitWhileListening() after start.
'''
if self.isListening():
return
logger.info("Started listening to %s" % ', '.join([str(x) for x in self.__callbacks.keys()]))
def eventLoop():
while self.isListening():
if select.select([self.conn],[],[],2) != ([],[],[]):
self.conn.poll()
while self.conn.notifies:
try:
notification = self.conn.notifies.pop(0)
logger.debug("Received notification on channel %s payload %s" % (notification.channel, notification.payload))
if self.isWaiting():
# put notification on Queue
# let waiting thread handle the callback
self.__queue.put((notification.channel, notification.payload))
else:
# call callback on this listener thread
self._callCallback(notification.channel, notification.payload)
except Exception as e:
logger.error(str(e))
self.__thread = Thread(target=eventLoop)
self.__thread.daemon = True
self.__listening = True
self.__thread.start()
def stop(self):
'''Stop listening. (Can be restarted)'''
with self.__lock:
if not self.__listening:
return
self.__listening = False
self.__thread.join()
self.__thread = None
logger.info("Stopped listening")
self.stopWaiting()
def __enter__(self):
'''starts the listener upon context enter'''
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''stops the listener upon context exit'''
self.stop()
def _callCallback(self, channel, payload = None):
'''call the appropriate callback based on channel'''
try:
callback = None
with self.__lock:
if channel in self.__callbacks:
callback = self.__callbacks[channel]
if callback:
if payload:
callback(payload)
else:
callback()
except Exception as e:
logger.error(str(e))
def isWaiting(self):
'''Are we waiting in the waitWhileListening() method?'''
with self.__lock:
return self.__waiting
def stopWaiting(self):
'''break from the blocking waitWhileListening() method'''
with self.__lock:
if self.__waiting:
self.__waiting = False
logger.info("Continuing from blocking waitWhileListening")
def waitWhileListening(self):
'''
Block the calling thread until interrupted or until stopWaiting
is called from another thread; meanwhile, handle the callbacks
on this thread.
'''
logger.info("Waiting while listening to %s" % ', '.join([str(x) for x in self.__callbacks.keys()]))
with self.__lock:
self.__waiting = True
while self.isWaiting():
try:
notification = self.__queue.get(True, 1)
channel = notification[0]
payload = notification[1]
self._callCallback(channel, payload)
except KeyboardInterrupt:
# break
break
except Empty:
pass
self.stopWaiting()
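# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original LOFAR module: a minimal example of
# how the helpers above fit together. The schema/table names and connection
# parameters are assumptions; the generated DDL would normally be executed on
# the database before listening.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # Print the trigger/function DDL for UPDATE notifications on myschema.mytable.
    print(makePostgresNotificationQueries('myschema', 'mytable', 'UPDATE'))

    # The notification channel is the lowercased '<table>_<action>' built above.
    class ExampleListener(PostgresListener):
        def __init__(self, **kwargs):
            super(ExampleListener, self).__init__(**kwargs)
            self.subscribe('mytable_update', self.onUpdate)

        def onUpdate(self, payload=None):
            logger.info('mytable_update payload: %s', payload)

    with ExampleListener(host='localhost', database='mydb',
                         username='user', password='secret') as listener:
        listener.waitWhileListening()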
| gpl-3.0 | 248,377,241,846,325,470 | 38.388514 | 137 | 0.560854 | false | 4.501544 | false | false | false |
| MegaShow/college-programming | Homework/Principles of Artificial Neural Networks/Week 8 Object Detection/datasets.py | 1 | 2942 |
import torch
from torch.utils.data import Dataset
import json
import os
from PIL import Image
from utils import transform
class PascalVOCDataset(Dataset):
"""
A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.
"""
def __init__(self, data_folder, split, keep_difficult=False):
"""
:param data_folder: folder where data files are stored
:param split: split, one of 'TRAIN' or 'TEST'
:param keep_difficult: keep or discard objects that are considered difficult to detect?
"""
self.split = split.upper()
assert self.split in {'TRAIN', 'TEST'}
self.data_folder = data_folder
self.keep_difficult = keep_difficult
# Read data files
with open(os.path.join(data_folder, self.split + '_images.json'), 'r') as j:
self.images = json.load(j)
with open(os.path.join(data_folder, self.split + '_objects.json'), 'r') as j:
self.objects = json.load(j)
assert len(self.images) == len(self.objects)
def __getitem__(self, i):
# Read image
image = Image.open(self.images[i], mode='r')
image = image.convert('RGB')
# Read objects in this image (bounding boxes, labels, difficulties)
objects = self.objects[i]
boxes = torch.FloatTensor(objects['boxes']) # (n_objects, 4)
labels = torch.LongTensor(objects['labels']) # (n_objects)
difficulties = torch.ByteTensor(objects['difficulties']) # (n_objects)
# Discard difficult objects, if desired
if not self.keep_difficult:
boxes = boxes[1 - difficulties]
labels = labels[1 - difficulties]
difficulties = difficulties[1 - difficulties]
# Apply transformations
image, boxes, labels, difficulties = transform(image, boxes, labels, difficulties, split=self.split)
return image, boxes, labels, difficulties
def __len__(self):
return len(self.images)
def collate_fn(self, batch):
"""
Since each image may have a different number of objects, we need a collate function (to be passed to the DataLoader).
This describes how to combine these tensors of different sizes. We use lists.
Note: this need not be defined in this Class, can be standalone.
:param batch: an iterable of N sets from __getitem__()
:return: a tensor of images, lists of varying-size tensors of bounding boxes, labels, and difficulties
"""
images = list()
boxes = list()
labels = list()
difficulties = list()
for b in batch:
images.append(b[0])
boxes.append(b[1])
labels.append(b[2])
difficulties.append(b[3])
images = torch.stack(images, dim=0)
return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each
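# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original file: typical use of this Dataset
# with a DataLoader. The data folder path, batch size and worker count are
# placeholders; collate_fn must be passed explicitly because images in a batch
# can contain different numbers of objects.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    train_dataset = PascalVOCDataset('./data', split='TRAIN', keep_difficult=False)
    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True,
                              collate_fn=train_dataset.collate_fn, num_workers=4)

    for images, boxes, labels, difficulties in train_loader:
        # images: (N, 3, 300, 300) tensor; boxes/labels/difficulties: lists of N tensors
        break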
| mit | -8,096,475,863,341,076,000 | 33.611765 | 125 | 0.616927 | false | 3.917443 | false | false | false |
| jeffmurphy/cif-db | src/DB/Exploder/Indexer.py | 1 | 10137 |
import syslog
from datetime import datetime
import time
import re
import sys
import threading
import happybase
import struct
import hashlib
import base64
sys.path.append('/usr/local/lib/cif-protocol/pb-python/gen-py')
import msg_pb2
import feed_pb2
import control_pb2
import RFC5070_IODEF_v1_pb2
import MAEC_v2_pb2
import cifsupport
from DB.Salt import Salt
from DB.PrimaryIndex import PrimaryIndex
from DB.Log import Log
class Indexer(object):
"""
"""
def __init__ (self, connectionPool, index_type, num_servers = 1, table_batch_size = 1000, debug = 0):
self.debug = debug
print "indexer connect"
self.pool = connectionPool
print "indexer load primary index map"
self.primary_index = PrimaryIndex(connectionPool, debug)
print "index init log"
self.log = Log(connectionPool)
self.num_servers = num_servers
self.packers = {}
for packer in self.primary_index.names():
try:
package='DB.PrimaryIndex.PackUnpack'
self.L("loading packer " + package + "." + packer)
__import__(package + "." + packer)
pkg = sys.modules[package + "." + packer]
self.packers[packer] = getattr(pkg, packer)
except ImportError as e:
self.L("warning: failed to load " + packer)
with self.pool.connection() as dbh:
t = dbh.tables()
self.table_name = "index_" + index_type
if not self.table_name in t:
self.L("index table %s doesnt exist, creating it" % (self.table_name))
dbh.create_table(self.table_name, {'b': {'COMPRESSION': 'SNAPPY'}})
table_batch_size = 5
self.table = dbh.table(self.table_name).batch(batch_size=table_batch_size)
self.co_table = dbh.table("cif_objs").batch(batch_size=table_batch_size)
self.reset()
self.md5 = hashlib.md5()
self.salt = Salt(self.num_servers, self.debug)
def L(self, msg):
caller = ".".join([str(__name__), sys._getframe(1).f_code.co_name])
if self.debug != None:
print caller + ": " + msg
else:
self.log.L(caller + ": " + msg)
def pack_rowkey_ipv4(self, salt, addr):
return struct.pack(">HB", self.salt.next(), self.TYPE_IPV4()) + self.packers['ipv4'].pack(addr)
def pack_rowkey_ipv6(self, salt, addr):
return struct.pack(">HB", self.salt.next(), self.TYPE_IPV6()) + self.packers['ipv6'].pack(addr)
def pack_rowkey_fqdn(self, salt, fqdn):
return struct.pack(">HB", self.salt.next(), self.TYPE_FQDN()) + self.packers['domain'].pack(fqdn)
def pack_rowkey_url(self, salt, url):
return struct.pack(">HB", self.salt.next(), self.TYPE_URL()) + self.packers['url'].pack(url)
def pack_rowkey_email(self, salt, email):
return struct.pack(">HB", self.salt.next(), self.TYPE_URL()) + self.packers['email'].pack(email)
def pack_rowkey_search(self, salt, search):
return struct.pack(">HB", self.salt.next(), self.TYPE_SEARCH()) + self.packers['search'].pack(search)
def pack_rowkey_malware(self, salt, malware_hash):
return struct.pack(">HB", self.salt.next(), self.TYPE_MALWARE()) + self.packers['malware'].pack(malware_hash)
def pack_rowkey_asn(self, salt, asn):
return struct.pack(">HB", self.salt.next(), self.TYPE_ASN()) + self.packers['asn'].pack(asn)
def reset(self):
self.empty = True
self.addr = None
self.rowkey = None
self.confidence = None
self.addr_type = None
self.iodef_rowkey = None
def commit(self):
"""
Commit the record to the index_* table
Update cif_objs(rowkey=self.iodef_rowkey) so that 'b:{self.table_name}_{self.rowkey}' = 1
Purger will remove the reference when this feed record is purged.
With HBase, you can put an additional cell value into a table/row
without having to merge. Existing cells won't be affected.
"""
try:
rowdict = {
'b:confidence': str(self.confidence),
'b:addr_type': str(self.addr_type),
'b:iodef_rowkey': str(self.iodef_rowkey)
}
self.table.put(self.rowkey, rowdict)
fmt = "%ds" % (len(self.table_name) + 4)
prk = struct.pack(fmt, "cf:" + str(self.table_name) + "_") + self.rowkey
self.co_table.put(self.iodef_rowkey, { prk: "1" })
except Exception as e:
self.L("failed to put record to %s table: " % self.table_name)
print e
self.reset()
def extract(self, iodef_rowkey, iodef):
"""
FIX: at the moment this is IODEF-specific; ideally we will be able to index other document types.
"""
self.reset()
self.iodef_rowkey = iodef_rowkey
self.md5.update(iodef.SerializeToString())
self.hash = self.md5.digest()
ii = iodef.Incident[0]
#print ii
self.confidence = ii.Assessment[0].Confidence.content
self.severity = ii.Assessment[0].Impact[0].severity
# for malware hashes, they appear at the top level for now
# iodef.incident[].additionaldata.meaning = "malware hash"
# iodef.incident[].additionaldata.content = "[the hash]"
if hasattr(ii, 'AdditionalData'):
for ed in ii.AdditionalData:
#print "ED ", ed
if ed.meaning == "malware hash":
self.L("\tIndexing for malware hash")
self.rowkey = self.pack_rowkey_malware(self.salt.next(), ed.content)
self.commit()
# addresses and networks are in the EventData[].Flow[].System[] tree
if len(ii.EventData) > 0 or hasattr(ii, 'EventData'):
for ed in ii.EventData:
for fl in ed.Flow:
for sy in fl.System:
for i in sy.Node.Address:
self.addr_type = i.category
if self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ipv4_addr or self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ipv4_net:
self.addr = i.content
self.rowkey = self.pack_rowkey_ipv4(self.salt.next(), self.addr)
self.L("Indexing for ipv4")
self.commit()
# ipv6 addresses and networks
elif self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ipv6_addr or self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ipv6_net:
self.addr = i.content
self.rowkey = self.pack_rowkey_ipv6(self.salt.next(), self.addr)
self.L("Indexing for ipv6")
self.commit()
elif self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_asn:
self.addr = i.content
self.rowkey = self.pack_rowkey_asn(self.salt.next(), self.addr)
self.L("Indexing for ASN")
self.commit()
elif self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ext_value:
if i.ext_category == "fqdn":
self.fqdn = i.content
self.rowkey = self.pack_rowkey_fqdn(self.salt.next(), self.fqdn)
self.L("Indexing for FQDDN")
self.commit()
elif i.ext_category == "url":
self.rowkey = self.pack_rowkey_url(self.salt.next(), i.content)
self.L("Indexing for URL")
self.commit()
else:
e = self.primary_index.enum(i.ext_category)
if len(e) > 0:
self.rowkey = struct.pack(">HB", self.salt.next(), e[0]) + self.packers[i.ext_category].pack(i.content)
self.commit()
else:
self.L("Unknown primary index given " + i.ext_category)
else:
print "unhandled category: ", i
def TYPE_IPV4(self):
return self.primary_index.enum('ipv4')
def TYPE_IPV6(self):
return self.primary_index.enum('ipv6')
def TYPE_FQDN(self):
return self.primary_index.enum('domain')
def TYPE_URL(self):
return self.primary_index.enum('url')
def TYPE_EMAIL(self):
return self.primary_index.enum('email')
def TYPE_SEARCH(self):
return self.primary_index.enum('search')
def TYPE_MALWARE(self):
return self.primary_index.enum('malware')
def TYPE_ASN(self):
return self.primary_index.enum('asn')
| bsd-3-clause | -6,272,114,305,446,935,000 | 39.710843 | 191 | 0.494722 | false | 4.122407 | false | false | false |
| Zen-CODE/kivy | kivy/core/clipboard/__init__.py | 1 | 3959 |
'''
Clipboard
=========
Core class for accessing the Clipboard. If we are not able to access the
system clipboard, a fake one will be used.
Usage example::
>>> from kivy.core.clipboard import Clipboard
>>> Clipboard.get_types()
['TIMESTAMP', 'TARGETS', 'MULTIPLE', 'SAVE_TARGETS', 'UTF8_STRING',
'COMPOUND_TEXT', 'TEXT', 'STRING', 'text/plain;charset=utf-8',
'text/plain']
>>> Clipboard.get('TEXT')
'Hello World'
>>> Clipboard.put('Great', 'UTF8_STRING')
>>> Clipboard.get_types()
['UTF8_STRING']
>>> Clipboard.get('UTF8_STRING')
'Great'
.. note:: The main implementation relies on Pygame and works well with
text/strings. Anything else might not work the same on all platforms.
'''
__all__ = ('ClipboardBase', 'Clipboard')
from kivy.core import core_select_lib
from kivy.utils import platform
class ClipboardBase(object):
def get(self, mimetype):
'''Get the current data in clipboard, using the mimetype if possible.
You should not use this method directly. Use :meth:`paste` instead.
'''
return None
def put(self, data, mimetype):
'''Put data on the clipboard, and attach a mimetype.
You should not use this method directly. Use :meth:`copy` instead.
'''
pass
def get_types(self):
'''Return a list of supported mimetypes
'''
return []
def _ensure_clipboard(self):
''' Ensure that the clipboard has been properly initialised.
'''
if hasattr(self, '_clip_mime_type'):
return
if platform == 'win':
self._clip_mime_type = 'text/plain;charset=utf-8'
# windows clipboard uses a utf-16 encoding
self._encoding = 'utf-16'
elif platform == 'linux':
self._clip_mime_type = 'UTF8_STRING'
self._encoding = 'utf-8'
else:
self._clip_mime_type = 'text/plain'
self._encoding = 'utf-8'
def copy(self, data=''):
''' Copy the value provided in argument `data` into the current clipboard.
If data is not of type string it will be converted to string.
.. versionadded:: 1.9.0
'''
if data:
self._copy(data)
def paste(self):
''' Get text from the system clipboard and return it as a usable string.
.. versionadded:: 1.9.0
'''
return self._paste()
def _copy(self, data):
# explicitly terminate strings with a null character
# so as to avoid putting spurious data after the end.
# MS windows issue.
self._ensure_clipboard()
data = data.encode(self._encoding) + b'\x00'
self.put(data, self._clip_mime_type)
def _paste(self):
self._ensure_clipboard()
_clip_types = Clipboard.get_types()
mime_type = self._clip_mime_type
if mime_type not in _clip_types:
mime_type = 'text/plain'
data = self.get(mime_type)
if data is not None:
# decode only if we don't have unicode
# we would still need to decode from utf-16 (windows)
# data is of type bytes in PY3
data = data.decode(self._encoding, 'ignore')
# remove null strings mostly a windows issue
data = data.replace(u'\x00', u'')
return data
return u''
# load clipboard implementation
_clipboards = []
_platform = platform
if _platform == 'android':
_clipboards.append(
('android', 'clipboard_android', 'ClipboardAndroid'))
elif _platform in ('macosx', 'linux', 'win'):
_clipboards.append(
('sdl2', 'clipboard_sdl2', 'ClipboardSDL2'))
_clipboards.append(
('pygame', 'clipboard_pygame', 'ClipboardPygame'))
_clipboards.append(
('dummy', 'clipboard_dummy', 'ClipboardDummy'))
Clipboard = core_select_lib('clipboard', _clipboards, True)
del _clipboards
del _platform
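# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original module: the 1.9.0 copy()/paste()
# convenience API wraps the lower-level get()/put() shown in the docstring.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    Clipboard.copy('Hello from the clipboard example')
    print(Clipboard.paste())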
| mit | 7,110,245,703,573,393,000 | 28.766917 | 79 | 0.591816 | false | 3.896654 | false | false | false |
| mscook/BanzaiDB | src/BanzaiDB/core.py | 1 | 12137 |
# Copyright 2013 Mitchell Stanton-Cook Licensed under the
# Educational Community License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import sys
import os
from Bio import SeqIO
from BanzaiDB import parsers
#def bring_CDS_to_front(line):
# """
#
# """
# for e in feat_list:
# if e[]
def nway_reportify(nway_any_file):
"""
Convert a nway.any to something similar to report.txt
This converts the nway.any which contains richer information (i.e. N
calls) into something similar to report.txt
TODO: Add a simple example of input vs output of this method.
ref_id, position, strains, ref_base, v_class, changes, evidence,
consequences
:param nway_any_file: full path as a string the the file nway.any file
:type nway_any_file: string
:returns: a list of tuples. Each list element refers to a variant
position while the tuple contains the states of each strain
"""
parsed = []
nway_any_file = os.path.expanduser(nway_any_file)
if not os.path.isfile(nway_any_file):
print "Please specify a valid Nesoni n-way (any) SNP comparison file"
sys.exit(1)
else:
with open(nway_any_file, 'r') as f:
strains = f.readline().strip().split()[5:-1]
num_strains = len(strains)/3
strains = strains[:num_strains]
for line in f:
uncalled = False
cur = line.split("\t")
ref_id, position, v_class, ref_base = cur[0], int(cur[1]), cur[2], cur[3]
changes = cur[4:num_strains+4]
if 'N' in changes:
uncalled = True
evidence = cur[num_strains+4:(2*(num_strains))+4]
consequences = cur[(2*(num_strains))+4:-1]
# Something is broken if not true -
assert len(strains) == len(changes) == len(evidence)
results = zip([ref_id]*num_strains, [position]*num_strains, strains,
[ref_base]*num_strains, [v_class]*num_strains,
changes, evidence, consequences,
[uncalled]*num_strains)
parsed.append(results)
return parsed
def extract_consequences(cons, ftype):
"""
Extracts out the data from a consequences line
NOTE: This was originally the core of nesoni_report_to_JSON. However,
because v_class is singular while substitution states can also be observed
in deletion states (and other similar mixtures), this method was refactored out.
:param cons: a consequences line
:param ftype: a feature type (substitution, insertion or deletion)
:type cons: string
:type ftype: string
:returns: a data list (containing a controlled set of results)
"""
# May need to add more of these below
misc_set = ['tRNA', 'gene', 'rRNA']
# Handle mixed features in the input reference. This needs to be more
# generic.
mixed = cons.split(',')
if len(mixed) == 2:
# CDS is second
if mixed[1][1:4] == 'CDS':
cons = str(mixed[1][1:-1])+", "+mixed[0]+"\n"
# Work with CDS
if cons.strip() != '' and cons.split(' ')[0] == 'CDS':
if ftype.find("substitution") != -1:
# 0 1 2 3 4 5 6 7
# class|sub_type|locus_tag|base|codon|region|old_aa|new_aa|
# 8 9
# protein|correlated
dat = ('substitution',) + parsers.parse_substitution(cons)
elif ftype.find("insertion") != -1:
dat = ('insertion', None) + parsers.parse_insertion(cons)
elif ftype.find("deletion") != -1:
dat = ('deletion', None) + parsers.parse_deletion(cons)
else:
raise Exception("Unsupported. Only SNPs & INDELS")
dat = list(dat)
dat[3] = int(dat[3])
dat[4] = int(dat[4])
elif cons.strip() != '' and cons.split(' ')[0] in misc_set:
if ftype.find("substitution") != -1:
dat = (('substitution',) +
parsers.parse_substitution_misc(cons))
elif ftype.find("insertion") != -1:
dat = (('insertion', None) +
parsers.parse_insertion_misc(cons))
elif ftype.find("deletion") != -1:
dat = (('deletion', None) +
parsers.parse_deletion_misc(cons))
else:
raise Exception("Unsupported. Only SNPs & INDELS")
dat = list(dat)
dat[3] = int(dat[3])
else:
dat = [ftype.split('-')[0]]+[None]*9
return dat
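# For reference (derived from the comments inside extract_consequences): the
# returned `dat` list has 10 positions --
# [class, sub_type, locus_tag, base, codon, region, old_aa, new_aa, protein, correlated]
# -- with None used for fields that do not apply to the feature type.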
def nesoni_report_to_JSON(reportified):
"""
Convert a nesoni nway.any file that has been reportified to JSON
See: tables.rst for info on what is stored in RethinkDB
:param reportified: the reportified nway.any file (been through
nway_reportify()). This is essentially a list of tuples
:returns: a list of JSON
"""
stats = {}
parsed_list = []
for position in reportified:
for elem in position:
skip = False
ref_id, pos, strain, old, ftype, new, evidence, cons, uncalled = elem
ref_id = '.'.join(ref_id.split('.')[:-1])
# Initialise the stats...
if strain not in stats:
stats[strain] = 0
if new == old:
# Have no change
#dat = ["conserved"]+[None]*9
skip = True
elif new == 'N':
# Have an uncalled base
#dat = ["uncalled"]+[None]*9
skip = True
# Check for mixtures...
elif ftype == "substitution" and new.find('-') != -1:
# Deletion hidden in substitution
ftype = 'deletion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "substitution" and len(new) > 1:
# Insertion hidden in substitution
ftype = 'insertion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "deletion" and new.find('-') == -1 and len(new) == 1:
# Substitution hidden in deletions
ftype = 'substitution'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "deletion" and new.find('-') == -1 and len(new) > 1:
# Insertion hidden in deletions
ftype = 'insertion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "insertion" and new.find('-') != -1:
# Deletion hidden in insertions
ftype = 'deletion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "insertion" and new.find('-') == -1 and len(new) == 1:
# Substitution hidden in insertions
ftype = 'substitution'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
# We have the same change state across all strains
else:
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
obs_count = parsers.parse_evidence(evidence)
# Some simple tests
the_classes = ['insertion', 'deletion', 'substitution']
if not skip:
assert dat[0] in the_classes
json = {"id": strain+'_'+ref_id+'_'+str(pos),
"StrainID": strain,
"Position": pos,
"LocusTag": dat[2],
"Class": dat[0],
"SubClass": dat[1],
"RefBase": old,
"ChangeBase": new,
"CDSBaseNum": dat[3],
"CDSAANum": dat[4],
"CDSRegion": dat[5],
"RefAA": dat[6],
"ChangeAA": dat[7],
"Product": dat[8],
"CorrelatedChange": dat[9],
"Evidence": obs_count,
"UncalledBlock": uncalled
}
parsed_list.append(json)
return parsed_list, stats
def reference_genome_features_to_JSON(genome_file):
"""
From genome reference (GBK format) convert CDS, gene & RNA features to JSON
The following 2 are really good resources:
* http://www.ncbi.nlm.nih.gov/books/NBK63592/
* http://www.ncbi.nlm.nih.gov/genbank/genomesubmit_annotation
.. note:: also see tables.rst for detailed description of the JSON
schema
.. warning:: do not think that this handles misc_features
:param genome_file: the fullpath as a string to the genbank file
:returns: a JSON representing the the reference and a list of JSON
containing information on the features
"""
misc_set = ['tRNA', 'rRNA', 'tmRNA', 'ncRNA']
with open(genome_file) as fin:
genome = SeqIO.read(fin, "genbank")
gd, gn, gid = genome.description, genome.name, genome.id
print "Adding %s into the RethinkDB instance" % (gd)
JSON_r = {'revision': int(gid.split('.')[-1]),
'reference_name': gd,
'id': gn}
parsed_list = []
for feat in genome.features:
start = int(feat.location.start.position)
JSON_f = {'sequence': str(feat.extract(genome.seq)),
'start': start,
'end': int(feat.location.end.position),
'strand': int(feat.strand),
'reference_id': gid,
'product': None,
'translation': None,
'locus_tag': None}
# Handle CDS, gene, tRNA & rRNA features
# Do CDS
if feat.type == 'CDS':
locus_tag = feat.qualifiers['locus_tag'][0]
JSON_f['id'] = gid+"_"+locus_tag+"_CDS"
JSON_f['locus_tag'] = locus_tag
if 'pseudo' not in feat.qualifiers:
JSON_f['translation'] = feat.qualifiers['translation'][0]
JSON_f['product'] = feat.qualifiers['product'][0]
else:
JSON_f['product'] = 'pseudo'
parsed_list.append(JSON_f)
# Do gene
elif feat.type == 'gene':
locus_tag = feat.qualifiers['locus_tag'][0]
JSON_f['id'] = gid+"_"+locus_tag+"_gene"
if 'pseudo' not in feat.qualifiers:
try:
JSON_f['product'] = feat.qualifiers['gene'][0]
except:
pass
else:
JSON_f['product'] = 'pseudo'
parsed_list.append(JSON_f)
# Do other (*RNA)
elif feat.type in misc_set:
try:
JSON_f['product'] = feat.qualifiers['product'][0]
except KeyError:
JSON_f['product'] = None
JSON_f['id'] = gid+"_"+str(JSON_f['start'])+"-"+str(JSON_f['end'])
parsed_list.append(JSON_f)
else:
print "Skipped feature at %i to %i " % (JSON_f['start'],
JSON_f['end'])
return JSON_r, parsed_list
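# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original module: minimal use of the GenBank
# feature extractor above. The file path is a placeholder.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    ref_json, feature_list = reference_genome_features_to_JSON('reference.gbk')
    print("%s: %d features parsed" % (ref_json['reference_name'], len(feature_list)))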
| apache-2.0 | -6,192,113,771,092,387,000 | 39.188742 | 89 | 0.517673 | false | 4.037591 | false | false | false |
| jaantollander/CrowdDynamics | crowddynamics/core/geometry.py | 1 | 4119 |
"""Functions for manipulating Shapely geometry objects
References:
- http://toblerity.org/shapely/manual.html
"""
from collections import Iterable
from functools import reduce
from itertools import chain
from typing import Callable
import numpy as np
import shapely.geometry as geometry
import skimage.draw
from shapely import speedups
from shapely.geometry import Polygon, LineString, Point
from shapely.geometry.base import BaseGeometry, BaseMultipartGeometry
from crowddynamics.core.structures import obstacle_type_linear
if speedups.available:
speedups.enable()
class GeomTypes(object):
POINT = 0.0
LINESTRING = 1.0
POLYGON_HOLE = 2.0
POLYGON_SHELL = 3.0
def _geom_to_array(geom: BaseGeometry):
if isinstance(geom, geometry.Point):
yield np.array([(np.nan, GeomTypes.POINT)])
yield np.asarray(geom.coords)
elif isinstance(geom, geometry.LineString):
yield np.array([(np.nan, GeomTypes.LINESTRING)])
yield np.asarray(geom.coords)
elif isinstance(geom, geometry.Polygon):
for interior in geom.interiors:
yield np.array([(np.nan, GeomTypes.POLYGON_HOLE)])
yield np.asarray(interior)
yield np.array([(np.nan, GeomTypes.POLYGON_SHELL)])
yield np.asarray(geom.exterior)
elif isinstance(geom, BaseMultipartGeometry):
return chain.from_iterable(map(geom_to_array, geom))
else:
raise TypeError
def geom_to_array(geom: BaseGeometry):
"""Breaking geometry object into continuous array where objects are
separated by array of elements (np.nan, FLAG)
Args:
geom:
Returns:
"""
return np.concatenate(list(_geom_to_array(geom)))
def geom_to_linesegment(geom: BaseGeometry):
"""Converts shapes to point pairs.
>>> ls = LineString([(1, 2), (3, 4)])
>>> list(geom_to_linesegment(ls))
[((1.0, 2.0), (3.0, 4.0))]
>>> poly = Polygon([(5, 6), (7, 8), (9, 10)])
>>> list(geom_to_linesegment(poly))
[((5.0, 6.0), (7.0, 8.0)),
((7.0, 8.0), (9.0, 10.0)),
((9.0, 10.0), (5.0, 6.0))]
>>> list(geom_to_linesegment(ls | poly))
[((1.0, 2.0), (3.0, 4.0)),
((5.0, 6.0), (7.0, 8.0)),
((7.0, 8.0), (9.0, 10.0)),
((9.0, 10.0), (5.0, 6.0))]
Args:
geom (BaseGeometry): BaseGeometry type.
Returns:
Iterable[LineSegment]: Iterable of linesegments
"""
if isinstance(geom, Point):
return iter(())
elif isinstance(geom, LineString):
return zip(geom.coords[:-1], geom.coords[1:])
elif isinstance(geom, Polygon):
return zip(geom.exterior.coords[:-1], geom.exterior.coords[1:])
elif isinstance(geom, BaseMultipartGeometry):
return chain.from_iterable(map(geom_to_linesegment, geom))
else:
raise TypeError('Argument is not subclass of {}'.format(BaseGeometry))
def geom_to_linear_obstacles(geom):
"""Converts shape(s) to array of linear obstacles."""
segments = [] if geom is None else list(geom_to_linesegment(geom))
return np.array(segments, dtype=obstacle_type_linear)
def draw_geom(geom: BaseGeometry,
grid,
indicer: Callable,
value):
"""Draw geom to grid"""
if isinstance(geom, Point):
pass
elif isinstance(geom, LineString):
for line in geom_to_linesegment(geom):
r0, c0, r1, c1 = indicer(line).flatten()
x, y = skimage.draw.line(r0, c0, r1, c1)
grid[y, x] = value
elif isinstance(geom, Polygon):
i = indicer(geom.exterior)
x, y = skimage.draw.polygon(i[:, 0], i[:, 1])
grid[y, x] = value
x, y = skimage.draw.polygon_perimeter(i[:, 0], i[:, 1])
grid[y, x] = value
for j in map(indicer, geom.interiors):
x, y = skimage.draw.polygon(j[:, 0], j[:, 1])
grid[y, x] = 0
elif isinstance(geom, BaseMultipartGeometry):
for geo in geom:
draw_geom(geo, grid, indicer, value)
else:
raise TypeError
def union(*geoms):
"""Union of geometries"""
return reduce(lambda x, y: x | y, geoms)
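# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original module: combining two shapes with
# union() and converting the result into linear obstacle segments.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    wall = LineString([(0, 0), (0, 5)])
    pillar = Polygon([(2, 2), (3, 2), (3, 3), (2, 3)])
    obstacles = geom_to_linear_obstacles(union(wall, pillar))
    print('{} obstacle segments'.format(len(obstacles)))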
| gpl-3.0 | 8,994,286,143,035,750,000 | 29.511111 | 78 | 0.61714 | false | 3.373464 | false | false | false |
| Riverscapes/pyBRAT | SupportingTools/BatchScripts/01_BatchInputPrep/07_MergePerennialCanals_Batch.py | 1 | 2133 |
#----------------------------------------------------------------------------
# Name: Merge Perennial & Canals (Batch)
#
# Purpose: Merges perennial network and canals/ditches shapefile
#
# Date: March 2019
# Author: Maggie Hallerud
#----------------------------------------------------------------------------
# user defined paths
# pf_path - project folder path for batch processing
pf_path = r'C:\Users\ETAL\Desktop\GYE_BRAT\wrk_Data'
# import required modules
import arcpy
import os
arcpy.CheckOutExtension('Spatial')
def main():
# set up arcpy environment
arcpy.env.workspace = 'in_memory'
arcpy.env.overwriteOutput = True
os.chdir(pf_path)
# list all folders in parent folder - note this is not recursive
dir_list = filter(lambda x: os.path.isdir(x), os.listdir('.'))
# remove folders in the list that start with '00_' since these aren't the HUC8 watersheds
for dir in dir_list[:]:
if dir.startswith('00_'):
dir_list.remove(dir)
# merge perennial and canals/ditches shapefiles and save as 'NHD_24k_Perennial_CanalsDitches.shp'
for dir in dir_list:
# specifying input perennial and canal shapefiles and output shapefile name
perennial_shp = os.path.join(pf_path, dir, 'NHD/NHD_24k_Perennial.shp')
canal_shp = os.path.join(pf_path, dir, 'NHD/NHDCanalsDitches.shp')
out_shp = os.path.join(pf_path, dir, 'NHD/NHD_24k_Perennial_CanalsDitches.shp')
# if canals exist then merge with perennial, otherwise just copy perennial
if os.path.exists(perennial_shp):
print "Merging perennial and canal shapefiles for " + dir
try:
if os.path.exists(canal_shp):
arcpy.Merge_management([perennial_shp, canal_shp], out_shp)
else:
arcpy.CopyFeatures_management(perennial_shp, out_shp)
# catch errors and move to the next huc8 folder
except Exception as err:
print "Error with " + dir + ". Exception thrown was: "
print err
if __name__ == "__main__":
main()
| gpl-3.0 | -4,074,758,029,799,556,000 | 34.55 | 102 | 0.593999 | false | 3.56689 | false | false | false |