repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = nullable) |
|---|---|---|---|---|
code-google-com/gpicsync | refs/heads/master | geoexif.py | 19 |
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
###############################################################################
# EXIF reader utility in relation to geolocalisation
# This script is released under the GPL license v2
#
# francois.schnell (http://francois.schnell.free.fr)
#
# Contributors, see: http://code.google.com/p/gpicsync/wiki/Contributions
#
# This script is released under the GPL license version 2
#
# This script uses the GPL exiftool.exe app; see:
# http://www.sno.phy.queensu.ca/%7Ephil/exiftool/
###############################################################################
import os,sys
class GeoExif(object):
"""
    A class to read and write a few EXIF tags in .jpg pictures, which can be
    useful for geolocalisation scripts.
"""
def __init__(self,picture):
self.picPath=picture
self.xmpOption=False
if self.xmpOption==True:
if os.path.basename(picture).find(".CRW")>0\
or os.path.basename(picture).find(".CR2")>0:
self.sidecarFile = os.path.splitext(picture)[0]+".xmp"
#print ">>>>> self.sidecarFile",self.sidecarFile
else:
self.sidecarFile = ''
if sys.platform == 'win32':
self.exifcmd = 'exiftool.exe'
else:
self.exifcmd = 'exiftool'
def readExifAll(self):
"""read all exif tags and return a string of the result"""
result=os.popen('%s -n "%s"' % (self.exifcmd, self.picPath)).read()
return result
def readDateTime(self):
"""
Read the time and date when the picture was taken if available
and return a list containing two strings [date,time]
like ['2007:02:12', '16:09:10']
"""
#result=os.popen('exiftool.exe -CreateDate "%s"' % self.picPath).read()
result=os.popen('%s -DateTimeOriginal "%s"' % (self.exifcmd, self.picPath)).read()
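        # The fixed slice offsets below assume exiftool's default fixed-width output,
        # e.g. "Date/Time Original              : 2007:02:12 16:09:10", where the
        # date occupies characters 34-43 and the time characters 45-52 of the line.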
timeDate= [result[34:44],result[45:53]]
return timeDate
def readDateTimeSize(self):
"""
Read the time / date when the picture was taken (if available)
plus the size of the picture
Returns a list containing strings[date,time,width,height]
like ['2007:02:12', '16:09:10','800','600']
"""
answer=os.popen('%s -DateTimeOriginal -ImageSize "%s"' % (self.exifcmd, self.picPath)).read()
#print "readDateTimeSize answer", answer
if "Date" in answer:
date=answer[34:44]
time=answer[45:53]
else:
date,time="nodate","notime"
#timeDate= [answer[34:44],answer[45:53]]
try:
size=answer.split("Image Size")[1].split(":")[1].strip().split("x")
width=size[0]
height=size[1]
except:
width=640
height=480
return [date,time,width,height]
def readLatitude(self):
"""read the latitute tag is available and return a float"""
result=os.popen('%s -n -GPSLatitude -GPSLatitudeRef "%s" ' % (self.exifcmd, self.picPath)).read().split("\n")
#print result
if len(result)>1:
latitude=float(result[0].split(":")[1])
#print "latitude= ",latitude
return latitude
else:
return "None"
def readLongitude(self):
"""read the longitude tag if available"""
result=os.popen('%s -n -GPSLongitude -GPSLongitudeRef "%s" ' % (self.exifcmd, self.picPath)).read().split("\n")
#print result
if len(result)>1:
longitude=float(result[0].split(":")[1])
#print "longitude= ",longitude
return longitude
else:
return "None"
def readLatLong(self):
"""read latitude AND longitude at the same time"""
result=os.popen('%s -n -GPSLatitude -GPSLatitudeRef \
-GPSLongitude -GPSLongitudeRef "%s" ' \
% (self.exifcmd, self.picPath)).read().split("\n")
print result
if len(result)>=4:
result[0]=result[0].split(":")[1].strip()
try:
latDecimal=result[0].split(".")[1][0:]
except:
latDecimal="0"
result[0]=result[0].split(".")[0]+"."+latDecimal
result[1]=result[1].split(":")[1].strip()
result[2]=result[2].split(":")[1].strip()
try:
longDecimal=result[2].split(".")[1][0:]
except:
longDecimal="0"
result[2]=result[2].split(".")[0]+"."+longDecimal
result[3]=result[3].split(":")[1].strip()
latlong= result[1]+result[0]+" "+result[3]+result[2]
else:
latlong=None
print latlong
return latlong
def writeLatitude(self,lat):
"""
        write the latitude value given as argument in the EXIF
        positive means northern latitudes
        negative means southern latitudes
lat can be a float or a string
"""
option=''
if self.xmpOption==True:
if(self.sidecarFile != ""):
option = option + " -o '"+self.sidecarFile+"'"
#os.popen('exiftool.exe -GPSAltitudeRef=0 -GPSAltitude=100 '+ self.picPath)
if float(lat) >= 0:
os.popen('%s -m -GPSLatitudeRef="N" %s "%s" '% (self.exifcmd, option, self.picPath))
else:
os.popen('%s -m -GPSLatitudeRef="S" %s "%s" '% (self.exifcmd, option, self.picPath))
os.popen('%s -m -GPSLatitude=%s "%s"'%(self.exifcmd, lat,self.picPath))
def writeLongitude(self,long):
"""
        write the longitude value given as argument in the EXIF
        positive means Eastern longitudes
        negative means Western longitudes
long can be a float or a string
"""
option=''
if self.xmpOption==True:
if(self.sidecarFile != ""):
option = option + " -o '"+self.sidecarFile+"'"
if float(long) >= 0:
os.popen('%s -m -GPSLongitudeRef="E" %s "%s"' % (self.exifcmd, option, self.picPath))
else:
os.popen('%s -m -GPSLongitudeRef="W" %s "%s"' % (self.exifcmd, option, self.picPath))
os.popen('%s -m -GPSLongitude=%s "%s" '% (self.exifcmd, long,self.picPath))
def writeLatLong(self,lat,long,latRef,longRef,backup,elevation="None"):
"""Write both latitudeRef/latitude and longitudeRef/longitude in EXIF"""
option='"-DateTimeOriginal>FileModifyDate"'
if self.xmpOption==True:
if(self.sidecarFile != ""):
option = option + " -o '"+self.sidecarFile+"'"
#print "option: ,option
if float(long)<0:long=str(abs(float(long)))
if float(lat)<0:lat=str(abs(float(lat)))
altRef=0 #"Above Sea Level"
if elevation!="None":
if float(elevation)<0:
altRef=1 #"Below Sea Level"
elevation=str(abs(float(elevation)))
#print ">>> altRef=",altRef
#print ">>> elevation ", elevation
if backup==True:
if elevation=="None":
os.popen('%s -n -m -GPSLongitude=%s -GPSLatitude=%s \
-GPSLongitudeRef=%s -GPSLatitudeRef=%s %s "%s" '\
%(self.exifcmd, long,lat,longRef,latRef,option,self.picPath))
else:
os.popen('%s -n -m -GPSLongitude=%s -GPSLatitude=%s -GPSLongitudeRef=%s \
-GPSLatitudeRef=%s -GPSAltitudeRef=%s -GPSAltitude=%s %s "%s" '\
%(self.exifcmd, long,lat,longRef,latRef,altRef,elevation,option,self.picPath))
else:
if elevation=="None":
os.popen('%s -m -overwrite_original -n -GPSLongitude=%s -GPSLatitude=%s \
-GPSLongitudeRef=%s -GPSLatitudeRef=%s %s "%s" '\
%(self.exifcmd, long,lat,longRef,latRef,option, self.picPath))
else:
os.popen('%s -m -overwrite_original -n -GPSLongitude=%s -GPSLatitude=%s \
-GPSLongitudeRef=%s -GPSLatitudeRef=%s -GPSAltitudeRef=%s -GPSAltitude=%s %s "%s"'\
%(self.exifcmd, long,lat,longRef,latRef,altRef,elevation,option,self.picPath))
if __name__=="__main__":
mypicture=GeoExif("test.jpg")
# exif=mypicture.readExifAll()
# mypicture.writeLongitude(7.222333)
# mypicture.writeLatitude(48.419973)
print mypicture.readDateTimeSize()
#mypicture.writeLatLong(7.222333,48.419973,"N","E",True)
#latitude=mypicture.readLatitude()
#longitude=mypicture.readLongitude()
#print "dateAndTime= ",dateAndTime
#print "latitude= ",latitude
#print "longitude= ",longitude
#print "EXIF= ", exif
#print mypicture.readLatLong()
|
kaiserroll14/301finalproject | refs/heads/master | main/pandas/tests/test_style.py | 9 |
import os
from nose import SkipTest
# this is a mess. Getting failures on a python 2.7 build whenever we try
# to import jinja, whether it's installed or not.
# so we're explicitly skipping that one *before* we try to import
# jinja. We still need to export the imports as globals,
# since importing Styler tries to import jinja2.
job_name = os.environ.get('JOB_NAME', None)
if job_name == '27_slow_nnet_LOCALE':
raise SkipTest("No jinja")
try:
from pandas.core.style import Styler
except ImportError:
raise SkipTest("No Jinja2")
import copy
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.util.testing import TestCase
import pandas.util.testing as tm
class TestStyler(TestCase):
def setUp(self):
np.random.seed(24)
self.s = DataFrame({'A': np.random.permutation(range(6))})
self.df = DataFrame({'A': [0, 1], 'B': np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo='bar'):
return pd.Series(['color: %s' % foo], index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = pd.DataFrame({'A': ['color: red', 'color: blue']})
self.dataframes = [
self.df,
pd.DataFrame({'f': [1., 2.], 'o': ['a', 'b'],
'c': pd.Categorical(['a', 'b'])})
]
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): ['color: red'],
(1, 0): ['color: blue']}
self.assertEqual(self.styler.ctx, expected)
def test_update_ctx_flatten_multi(self):
attrs = DataFrame({"A": ['color: red; foo: bar',
'color: blue; foo: baz']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
self.assertEqual(self.styler.ctx, expected)
    def test_update_ctx_flatten_multi_trailing_semi(self):
attrs = DataFrame({"A": ['color: red; foo: bar;',
'color: blue; foo: baz;']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
self.assertEqual(self.styler.ctx, expected)
def test_copy(self):
s2 = copy.copy(self.styler)
self.assertTrue(self.styler is not s2)
self.assertTrue(self.styler.ctx is s2.ctx) # shallow
self.assertTrue(self.styler._todo is s2._todo)
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
self.assertEqual(self.styler.ctx, s2.ctx)
self.assertEqual(self.styler._todo, s2._todo)
def test_deepcopy(self):
s2 = copy.deepcopy(self.styler)
self.assertTrue(self.styler is not s2)
self.assertTrue(self.styler.ctx is not s2.ctx)
self.assertTrue(self.styler._todo is not s2._todo)
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
self.assertNotEqual(self.styler.ctx, s2.ctx)
self.assertEqual(s2._todo, [])
self.assertNotEqual(self.styler._todo, s2._todo)
def test_clear(self):
s = self.df.style.highlight_max()._compute()
self.assertTrue(len(s.ctx) > 0)
self.assertTrue(len(s._todo) > 0)
s.clear()
self.assertTrue(len(s.ctx) == 0)
self.assertTrue(len(s._todo) == 0)
def test_render(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid='AB').apply(style).apply(style, axis=1)
s.render()
# it worked?
def test_render_double(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red; border: 1px",
"color: blue; border: 2px"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_set_properties(self):
df = pd.DataFrame({"A": [0, 1]})
result = df.style.set_properties(color='white',
size='10px')._compute().ctx
# order is deterministic
v = ["color: white", "size: 10px"]
expected = {(0, 0): v, (1, 0): v}
self.assertEqual(result.keys(), expected.keys())
for v1, v2 in zip(result.values(), expected.values()):
self.assertEqual(sorted(v1), sorted(v2))
def test_set_properties_subset(self):
df = pd.DataFrame({'A': [0, 1]})
result = df.style.set_properties(subset=pd.IndexSlice[0, 'A'],
color='white')._compute().ctx
expected = {(0, 0): ['color: white']}
self.assertEqual(result, expected)
def test_apply_axis(self):
df = pd.DataFrame({'A': [0, 0], 'B': [1, 1]})
f = lambda x: ['val: %s' % x.max() for v in x]
result = df.style.apply(f, axis=1)
self.assertEqual(len(result._todo), 1)
self.assertEqual(len(result.ctx), 0)
result._compute()
expected = {(0, 0): ['val: 1'], (0, 1): ['val: 1'],
(1, 0): ['val: 1'], (1, 1): ['val: 1']}
self.assertEqual(result.ctx, expected)
result = df.style.apply(f, axis=0)
expected = {(0, 0): ['val: 0'], (0, 1): ['val: 1'],
(1, 0): ['val: 0'], (1, 1): ['val: 1']}
result._compute()
self.assertEqual(result.ctx, expected)
result = df.style.apply(f) # default
result._compute()
self.assertEqual(result.ctx, expected)
def test_apply_subset(self):
axes = [0, 1]
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for ax in axes:
for slice_ in slices:
result = self.df.style.apply(self.h, axis=ax, subset=slice_,
foo='baz')._compute().ctx
expected = dict(((r, c), ['color: baz'])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index
and col in self.df.loc[slice_].columns)
self.assertEqual(result, expected)
def test_applymap_subset(self):
def f(x):
return 'foo: bar'
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.applymap(f, subset=slice_)._compute().ctx
expected = dict(((r, c), ['foo: bar'])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index
and col in self.df.loc[slice_].columns)
self.assertEqual(result, expected)
def test_empty(self):
df = pd.DataFrame({'A': [1, 0]})
s = df.style
s.ctx = {(0, 0): ['color: red'],
(1, 0): ['']}
result = s._translate()['cellstyle']
expected = [{'props': [['color', ' red']], 'selector': 'row0_col0'},
{'props': [['', '']], 'selector': 'row1_col0'}]
self.assertEqual(result, expected)
def test_bar(self):
df = pd.DataFrame({'A': [0, 1, 2]})
result = df.style.bar()._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 0.0%, transparent 0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 50.0%, transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 100.0%, transparent 0%)']
}
self.assertEqual(result, expected)
result = df.style.bar(color='red', width=50)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 0.0%, transparent 0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 25.0%, transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 50.0%, transparent 0%)']
}
self.assertEqual(result, expected)
df['C'] = ['a'] * len(df)
result = df.style.bar(color='red', width=50)._compute().ctx
self.assertEqual(result, expected)
df['C'] = df['C'].astype('category')
result = df.style.bar(color='red', width=50)._compute().ctx
self.assertEqual(result, expected)
def test_highlight_null(self, null_color='red'):
df = pd.DataFrame({'A': [0, np.nan]})
result = df.style.highlight_null()._compute().ctx
expected = {(0, 0): [''],
(1, 0): ['background-color: red']}
self.assertEqual(result, expected)
def test_nonunique_raises(self):
df = pd.DataFrame([[1, 2]], columns=['A', 'A'])
with tm.assertRaises(ValueError):
df.style
with tm.assertRaises(ValueError):
Styler(df)
def test_caption(self):
styler = Styler(self.df, caption='foo')
result = styler.render()
self.assertTrue(all(['caption' in result, 'foo' in result]))
styler = self.df.style
result = styler.set_caption('baz')
self.assertTrue(styler is result)
self.assertEqual(styler.caption, 'baz')
def test_uuid(self):
styler = Styler(self.df, uuid='abc123')
result = styler.render()
self.assertTrue('abc123' in result)
styler = self.df.style
result = styler.set_uuid('aaa')
self.assertTrue(result is styler)
self.assertEqual(result.uuid, 'aaa')
def test_table_styles(self):
style = [{'selector': 'th', 'props': [('foo', 'bar')]}]
styler = Styler(self.df, table_styles=style)
result = ' '.join(styler.render().split())
self.assertTrue('th { foo: bar; }' in result)
styler = self.df.style
result = styler.set_table_styles(style)
self.assertTrue(styler is result)
self.assertEqual(styler.table_styles, style)
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.render()
self.assertTrue('class="foo" data-bar' in result)
result = self.df.style.set_table_attributes(attributes).render()
self.assertTrue('class="foo" data-bar' in result)
def test_precision(self):
with pd.option_context('display.precision', 10):
s = Styler(self.df)
self.assertEqual(s.precision, 10)
s = Styler(self.df, precision=2)
self.assertEqual(s.precision, 2)
s2 = s.set_precision(4)
self.assertTrue(s is s2)
self.assertEqual(s.precision, 4)
def test_apply_none(self):
def f(x):
return pd.DataFrame(np.where(x == x.max(), 'color: red', ''),
index=x.index, columns=x.columns)
result = (pd.DataFrame([[1, 2], [3, 4]])
.style.apply(f, axis=None)._compute().ctx)
self.assertEqual(result[(1, 1)], ['color: red'])
def test_trim(self):
result = self.df.style.render() # trim=True
self.assertEqual(result.count('#'), 0)
result = self.df.style.highlight_max().render()
self.assertEqual(result.count('#'), len(self.df.columns))
def test_highlight_max(self):
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
# max(df) = min(-df)
for max_ in [True, False]:
if max_:
attr = 'highlight_max'
else:
df = -df
attr = 'highlight_min'
result = getattr(df.style, attr)()._compute().ctx
self.assertEqual(result[(1, 1)], ['background-color: yellow'])
result = getattr(df.style, attr)(color='green')._compute().ctx
self.assertEqual(result[(1, 1)], ['background-color: green'])
result = getattr(df.style, attr)(subset='A')._compute().ctx
self.assertEqual(result[(1, 0)], ['background-color: yellow'])
result = getattr(df.style, attr)(axis=0)._compute().ctx
expected = {(1, 0): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 1): [''], (0, 0): ['']}
self.assertEqual(result, expected)
result = getattr(df.style, attr)(axis=1)._compute().ctx
expected = {(0, 1): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 0): [''], (1, 0): ['']}
self.assertEqual(result, expected)
        # separate since we can't negate the strs
df['C'] = ['a', 'b']
result = df.style.highlight_max()._compute().ctx
expected = {(1, 1): ['background-color: yellow']}
result = df.style.highlight_min()._compute().ctx
expected = {(0, 0): ['background-color: yellow']}
def test_export(self):
f = lambda x: 'color: red' if x > 0 else 'color: blue'
        g = lambda x, y, z: 'color: %s' % y if x > 0 else 'color: %s' % z
style1 = self.styler
style1.applymap(f)\
.applymap(g, y='a', z='b')\
.highlight_max()
result = style1.export()
style2 = self.df.style
style2.use(result)
self.assertEqual(style1._todo, style2._todo)
style2.render()
@tm.mplskip
class TestStylerMatplotlibDep(TestCase):
def test_background_gradient(self):
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
for axis in [0, 1, 'index', 'columns']:
for cmap in [None, 'YlOrRd']:
result = df.style.background_gradient(cmap=cmap)._compute().ctx
self.assertTrue(all("#" in x[0] for x in result.values()))
self.assertEqual(result[(0, 0)], result[(0, 1)])
self.assertEqual(result[(1, 0)], result[(1, 1)])
result = (df.style.background_gradient(subset=pd.IndexSlice[1, 'A'])
._compute().ctx)
self.assertEqual(result[(1, 0)], ['background-color: #fff7fb'])
|
leiferikb/bitpop-private | refs/heads/master | chrome/test/functional/autofill.py | 65 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pickle
import re
import simplejson
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from webdriver_pages import settings
class AutofillTest(pyauto.PyUITest):
"""Tests that autofill UI works correctly. Also contains a manual test for
the crowdsourcing server."""
def setUp(self):
pyauto.PyUITest.setUp(self)
self._driver = self.NewWebDriver()
def AutofillCrowdsourcing(self):
"""Test able to send POST request of web form to Autofill server.
The Autofill server processes the data offline, so it can take a few days
for the result to be detectable. Manual verification is required.
"""
# HTML file needs to be run from a specific http:// url to be able to verify
# the results a few days later by visiting the same url.
url = 'http://www.corp.google.com/~dyu/autofill/crowdsourcing-test.html'
# Autofill server captures 2.5% of the data posted.
    # Looping 1000 times is a safe minimum to exceed the server's threshold of
    # noise.
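    # At a 2.5% capture rate, 1000 submissions should yield roughly 25 captured
    # uploads on average.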
for i in range(1000):
fname = 'David'
lname = 'Yu'
email = 'david.yu@gmail.com'
# Submit form to collect crowdsourcing data for Autofill.
self.NavigateToURL(url, 0, 0)
profile = {'fn': fname, 'ln': lname, 'em': email}
js = ''.join(['document.getElementById("%s").value = "%s";' %
(key, value) for key, value in profile.iteritems()])
js += 'document.getElementById("testform").submit();'
self.ExecuteJavascript(js)
def _SelectOptionXpath(self, value):
"""Returns an xpath query used to select an item from a dropdown list.
Args:
value: Option selected for the drop-down list field.
Returns:
The value of the xpath query.
"""
return '//option[@value="%s"]' % value
def testPostalCodeAndStateLabelsBasedOnCountry(self):
"""Verify postal code and state labels based on selected country."""
data_file = os.path.join(self.DataDir(), 'autofill', 'functional',
'state_zip_labels.txt')
test_data = simplejson.loads(open(data_file).read())
page = settings.AutofillEditAddressDialog.FromNavigation(self._driver)
# Initial check of State and ZIP labels.
self.assertEqual('State', page.GetStateLabel())
self.assertEqual('ZIP code', page.GetPostalCodeLabel())
for country_code in test_data:
page.Fill(country_code=country_code)
# Compare postal code labels.
actual_postal_label = page.GetPostalCodeLabel()
self.assertEqual(
test_data[country_code]['postalCodeLabel'],
actual_postal_label,
msg=('Postal code label "%s" does not match Country "%s"' %
(actual_postal_label, country_code)))
# Compare state labels.
actual_state_label = page.GetStateLabel()
self.assertEqual(
test_data[country_code]['stateLabel'],
actual_state_label,
msg=('State label "%s" does not match Country "%s"' %
(actual_state_label, country_code)))
def testNoDuplicatePhoneNumsInPrefs(self):
"""Test duplicate phone numbers entered in prefs are removed."""
page = settings.AutofillEditAddressDialog.FromNavigation(self._driver)
non_duplicates = ['111-1111', '222-2222']
duplicates = ['111-1111']
page.Fill(phones=non_duplicates + duplicates)
self.assertEqual(non_duplicates, page.GetPhones(),
msg='Duplicate phone number in prefs unexpectedly saved.')
def testDisplayLineItemForEntriesWithNoCCNum(self):
"""Verify Autofill creates a line item for CC entries with no CC number."""
self.NavigateToURL('chrome://settings-frame/autofillEditCreditCard')
self._driver.find_element_by_id('name-on-card').send_keys('Jane Doe')
query_month = self._SelectOptionXpath('12')
query_year = self._SelectOptionXpath('2014')
self._driver.find_element_by_id('expiration-month').find_element_by_xpath(
query_month).click()
self._driver.find_element_by_id('expiration-year').find_element_by_xpath(
query_year).click()
self._driver.find_element_by_id(
'autofill-edit-credit-card-apply-button').click()
# Refresh the page to ensure the UI is up-to-date.
self._driver.refresh()
list_entry = self._driver.find_element_by_class_name('autofill-list-item')
self.assertTrue(list_entry.is_displayed)
self.assertEqual('Jane Doe', list_entry.text,
msg='Saved CC line item not same as what was entered.')
def _GetElementList(self, container_elem, fields_to_select):
"""Returns all sub elements of specific characteristics.
Args:
container_elem: An element that contains other elements.
fields_to_select: A list of fields to select with strings that
help create an xpath string, which in turn identifies
the elements needed.
For example: ['input', 'button']
['div[@id]', 'button[@disabled]']
['*[class="example"]']
Returns:
List of all subelements found in the container element.
"""
self.assertTrue(fields_to_select, msg='No fields specified for selection.')
fields_to_select = ['.//' + field for field in fields_to_select]
xpath_arg = ' | '.join(fields_to_select)
field_elems = container_elem.find_elements_by_xpath(xpath_arg)
return field_elems
def _GetElementInfo(self, element):
"""Returns visual comprehensive info about an element.
    This function identifies the text of the corresponding label when tab
    ordering fails.
This info consists of:
The labels, buttons, ids, placeholder attribute values, or the element id.
Args:
element: The target element.
Returns:
A string that identifies the element in the page.
"""
element_info = ''
if element.tag_name == 'button':
element_info = element.text
element_info = (element_info or element.get_attribute('id') or
element.get_attribute('placeholder') or
element.get_attribute('class') or element.id)
return '%s: %s' % (element.tag_name, element_info)
def _LoadPageAndGetFieldList(self):
"""Navigate to autofillEditAddress page and finds the elements with focus.
These elements are of input, select, and button types.
Returns:
A list with all elements that can receive focus.
"""
url = 'chrome://settings-frame/autofillEditAddress'
self._driver.get(url)
container_elem = self._driver.find_element_by_id(
'autofill-edit-address-overlay')
# The container element contains input, select and button fields. Some of
# the buttons are disabled so they are ignored.
field_list = self._GetElementList(container_elem,
['input', 'select',
'button[not(@disabled)]'])
self.assertTrue(field_list, 'No fields found in "%s".' % url)
return field_list
def testTabOrderForEditAddress(self):
"""Verify the TAB ordering for Edit Address page is correct."""
tab_press = ActionChains(self._driver).send_keys(Keys.TAB)
field_list = self._LoadPageAndGetFieldList()
# Creates a dictionary where a field key returns the value of the next field
# in the field list. The last field of the field list is mapped to the first
# field of the field list.
field_nextfield_dict = dict(
zip(field_list, field_list[1:] + field_list[:1]))
# Wait until a field of |field_list| has received the focus.
self.WaitUntil(lambda:
self._driver.switch_to_active_element().id in
[f.id for f in field_list])
# The first field is expected to receive the focus.
self.assertEqual(self._driver.switch_to_active_element().id,
field_list[0].id,
msg='The first field did not receive tab focus.')
for field in field_list:
tab_press.perform()
# Wait until a field of |field_list|, other than the current field, has
# received the focus.
self.WaitUntil(lambda:
self._driver.switch_to_active_element().id != field.id and
self._driver.switch_to_active_element().id in
[f.id for f in field_list])
self.assertEqual(self._driver.switch_to_active_element().id,
field_nextfield_dict[field].id,
msg=('The TAB ordering is broken. Previous field: "%s"\n'
'Field expected to receive focus: "%s"\n'
'Field that received focus instead: "%s"')
% (self._GetElementInfo(field),
self._GetElementInfo(field_nextfield_dict[field]),
self._GetElementInfo(
self._driver.switch_to_active_element())))
if __name__ == '__main__':
pyauto_functional.Main()
|
rfhk/awo-custom | refs/heads/8.0 | abstract_report_xlsx/reports/stock_abstract_report_xlsx.py | 2 |
# -*- coding: utf-8 -*-
# Author: Julien Coux
# Copyright 2016 Camptocamp SA
# Copyright 2016 Rooms For (Hong Kong) Limited T/A OSCG
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from cStringIO import StringIO
import xlsxwriter
from xlsxwriter.utility import xl_col_to_name # OSCG
from openerp.addons.report_xlsx.report.report_xlsx import ReportXlsx
from io import BytesIO
import base64
class StockAbstractReportXslx(ReportXlsx):
def __init__(self, name, table, rml=False, parser=False, header=True,
store=False):
super(StockAbstractReportXslx, self).__init__(
name, table, rml, parser, header, store)
        # main sheet which will contain the report
self.sheet = None
# columns of the report
self.columns = None
        # row_pos must be incremented each time a line is written
self.row_pos = None
# Formats
self.format_right = None
self.format_right_bold_italic = None
self.format_bold = None
self.format_header_left = None
self.format_header_center = None
self.format_header_right = None
self.format_header_amount = None
self.format_amount = None
self.format_number = None # added by OSCG
self.format_percent = None # added by OSCG
self.format_percent_bold_italic = None
self.format_wrap = None # added by OSCG
self.format_emphasis = None # added by OSCG
def create_xlsx_report(self, ids, data, report):
""" Overrides method to add constant_memory option used for large files
"""
self.parser_instance = self.parser(
self.env.cr, self.env.uid, self.name2, self.env.context)
objs = self.getObjects(
self.env.cr, self.env.uid, ids, self.env.context)
self.parser_instance.set_context(objs, data, ids, 'xlsx')
file_data = StringIO()
workbook = xlsxwriter.Workbook(file_data, {'constant_memory': True})
self.generate_xlsx_report(workbook, data, objs)
workbook.close()
file_data.seek(0)
return (file_data.read(), 'xlsx')
def generate_xlsx_report(self, workbook, data, objects):
report = objects
self.row_pos = 0
self._define_formats(workbook)
report_name = self._get_report_name()
filters = self._get_report_filters(report)
self.columns = self._get_report_columns(report)
self.sheet = workbook.add_worksheet(report_name[:31])
self._set_column_width()
self._write_report_title(report_name)
self._write_filters(filters)
self._generate_report_content(workbook, report)
def _define_formats(self, workbook):
""" Add cell formats to current workbook.
Those formats can be used on all cell.
Available formats are :
* format_bold
* format_right
* format_right_bold_italic
* format_header_left
* format_header_center
* format_header_right
* format_header_amount
* format_amount
* format_number # added by OSCG
* format_percent # added by OSCG
* format_percent_bold_italic
* format_wrap # added by OSCG
* format_emphasis # added by OSCG
"""
self.format_bold = workbook.add_format({'bold': True})
self.format_right = workbook.add_format({'align': 'right'})
self.format_right_bold_italic = workbook.add_format(
{'align': 'right', 'bold': True, 'italic': True}
)
self.format_header_left = workbook.add_format(
{'bold': True,
'border': True,
'bg_color': '#FFFFCC'})
self.format_header_left.set_text_wrap() # added by OSCG
self.format_header_center = workbook.add_format(
{'bold': True,
'align': 'center',
'border': True,
'bg_color': '#FFFFCC'})
self.format_header_center.set_text_wrap() # added by OSCG
self.format_header_right = workbook.add_format(
{'bold': True,
'align': 'right',
'border': True,
'bg_color': '#FFFFCC'})
self.format_header_right.set_text_wrap() # added by OSCG
self.format_header_amount = workbook.add_format(
{'bold': True,
'border': True,
'bg_color': '#FFFFCC'})
self.format_header_amount.set_num_format('#,##0.00')
self.format_amount = workbook.add_format()
self.format_amount.set_num_format('#,##0.00')
self.format_number = workbook.add_format() # added by OSCG
self.format_number.set_num_format('#,##0') # added by OSCG
self.format_percent = workbook.add_format() # added by OSCG
self.format_percent.set_num_format('#,##0.00%') # added by OSCG
self.format_percent_bold_italic = workbook.add_format(
{'bold': True, 'italic': True}
)
self.format_percent_bold_italic.set_num_format('#,##0.00%')
self.format_wrap = workbook.add_format() # added by OSCG
self.format_wrap.set_text_wrap() # added by OSCG
self.format_emphasis = workbook.add_format({'bold': True}) # OSCG
self.format_emphasis.set_font_color('red') # OSCG
def _set_column_width(self):
"""Set width for all defined columns.
Columns are defined with `_get_report_columns` method.
"""
for position, column in self.columns.iteritems():
self.sheet.set_column(position, position, column['width'])
def _write_report_title(self, title):
"""Write report title on current line using all defined columns width.
Columns are defined with `_get_report_columns` method.
"""
self.sheet.merge_range(
self.row_pos, 0, self.row_pos, len(self.columns) - 1,
title, self.format_bold
)
self.row_pos += 3
def _write_filters(self, filters):
"""Write one line per filters on starting on current line.
Columns number for filter name is defined
with `_get_col_count_filter_name` method.
Columns number for filter value is define
with `_get_col_count_filter_value` method.
"""
col_name = 1
col_count_filter_name = self._get_col_count_filter_name()
col_count_filter_value = self._get_col_count_filter_value()
col_value = col_name + col_count_filter_name + 1
for title, value in filters:
self.sheet.merge_range(
self.row_pos, col_name,
self.row_pos, col_name + col_count_filter_name - 1,
title, self.format_header_left)
self.sheet.merge_range(
self.row_pos, col_value,
self.row_pos, col_value + col_count_filter_value - 1,
value)
self.row_pos += 1
self.row_pos += 2
def write_array_title(self, title):
"""Write array title on current line using all defined columns width.
Columns are defined with `_get_report_columns` method.
"""
self.sheet.merge_range(
self.row_pos, 0, self.row_pos, len(self.columns) - 1,
title, self.format_bold
)
self.row_pos += 1
# def write_array_header(self):
def write_array_header(self, adj_col=False): # OSCG
"""Write array header on current line using all defined columns name.
Columns are defined with `_get_report_columns` method.
"""
for col_pos, column in self.columns.iteritems():
if adj_col and col_pos in adj_col:
self.sheet.write(self.row_pos, col_pos, adj_col[col_pos],
self.format_header_center)
else:
self.sheet.write(self.row_pos, col_pos, column['header'],
self.format_header_center)
self.row_pos += 1
# def write_line(self, line_object):
def write_line(self, line_object, height=False): # OSCG
"""Write a line on current line using all defined columns field name.
Columns are defined with `_get_report_columns` method.
"""
for col_pos, column in self.columns.iteritems():
# >>> added by OSCG
if height:
self.sheet.set_row(self.row_pos, height)
# <<< added by OSCG
value = getattr(line_object, column['field'])
cell_type = column.get('type', 'string')
if cell_type == 'string':
self.sheet.write_string( # OSCG
self.row_pos, col_pos, value or '', self.format_wrap
)
elif cell_type == 'amount':
self.sheet.write_number(
self.row_pos, col_pos, float(value), self.format_amount
)
# >>> added by OSCG
elif cell_type == 'number':
self.sheet.write_number(
self.row_pos, col_pos, value, self.format_number
)
elif cell_type == 'image':
if line_object.image_small:
image = BytesIO(base64.b64decode(line_object.image_small))
self.sheet.insert_image(
self.row_pos, col_pos, 'image', {'image_data': image}
)
elif cell_type == 'percent':
self.sheet.write_number(
self.row_pos, col_pos, value, self.format_percent
)
# <<< added by OSCG
self.row_pos += 1
def _generate_report_content(self, workbook, report):
pass
def _apply_conditional_format(self, params):
for param in params:
# has to convert the column to 'A1:A999' notation
column = xl_col_to_name(param['col'])
column += '1:' + column + str(self.row_pos)
for val in param['vals']:
self.sheet.conditional_format(
column, {
'type': 'text',
'criteria': 'containing',
'value': val,
'format': self.format_emphasis
})
def _get_report_name(self):
"""
        Allows defining the report name.
        The report name will be used as the sheet name and as the report title.
:return: the report name
"""
raise NotImplementedError()
def _get_report_columns(self, report):
"""
        Allows defining the report columns
        which will be used to generate the report.
:return: the report columns as dict
:Example:
{
0: {'header': 'Simple column',
'field': 'field_name_on_my_object',
'width': 11},
1: {'header': 'Amount column',
'field': 'field_name_on_my_object',
'type': 'amount',
'width': 14},
}
"""
raise NotImplementedError()
def _get_report_filters(self, report):
"""
:return: the report filters as list
:Example:
[
['first_filter_name', 'first_filter_value'],
['second_filter_name', 'second_filter_value']
]
"""
raise NotImplementedError()
def _get_col_count_filter_name(self):
"""
        :return: the number of columns used for filter names.
"""
raise NotImplementedError()
def _get_col_count_filter_value(self):
"""
        :return: the number of columns used for filter values.
"""
raise NotImplementedError()
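
# A minimal sketch (not part of the original module) of how a concrete report
# could fill in the abstract hooks above; the fields used here ('name', 'qty',
# 'warehouse_id', 'line_ids') are hypothetical placeholders.
class StockExampleReportXslx(StockAbstractReportXslx):

    def _get_report_name(self):
        # Used as the sheet name (truncated to 31 characters) and as the title row.
        return 'Example Stock Report'

    def _get_report_columns(self, report):
        # Position -> header / field name / optional cell type and column width,
        # in the format documented by _get_report_columns() above.
        return {
            0: {'header': 'Product', 'field': 'name', 'width': 30},
            1: {'header': 'Quantity', 'field': 'qty', 'type': 'number', 'width': 12},
        }

    def _get_report_filters(self, report):
        return [['Warehouse', report.warehouse_id.name or 'All']]

    def _get_col_count_filter_name(self):
        return 2

    def _get_col_count_filter_value(self):
        return 3

    def _generate_report_content(self, workbook, report):
        self.write_array_header()
        for line in report.line_ids:
            self.write_line(line)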
|
micaiahparker/dicebot | refs/heads/master | bot/__main__.py | 1 |
from .bot import bot
for extension in bot.config.get_extensions():
try:
bot.load_extension(extension)
print('Added: {}'.format(extension))
except Exception as e:
exc = '{}: {}'.format(type(e).__name__, e)
print('Failed to load extension {}\n{}'.format(extension, exc))
bot.run(bot.config.get_key())
|
bibekluitel/ntu-dsi-dcn | refs/heads/master | src/tap-bridge/bindings/modulegen_customizations.py | 203 |
import os
def post_register_types(root_module):
enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',')
if 'TapBridge' not in enabled_features:
for clsname in ['TapBridge', 'TapBridgeHelper', 'TapBridgeFdReader']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
root_module.enums.remove(root_module['ns3::TapBridge::Mode'])
|
hip-odoo/odoo | refs/heads/10.0 | addons/base_action_rule/tests/__init__.py | 23 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import test_models
import test_base_action_rule
|
codrut3/tensorflow | refs/heads/master | tensorflow/examples/adding_an_op/zero_out_op_3.py | 190 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ZeroOut op Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
_zero_out_module = tf.load_op_library(
os.path.join(tf.resource_loader.get_data_files_path(),
'zero_out_op_kernel_3.so'))
zero_out = _zero_out_module.zero_out
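
# A usage sketch (not in the original file): the ZeroOut op from this tutorial
# copies the first element of its input and zeroes the rest, so the call below
# would be expected to print [[1 0] [0 0]]. It needs the compiled
# zero_out_op_kernel_3.so sitting next to this module.
#
#   with tf.Session(''):
#     print(zero_out([[1, 2], [3, 4]]).eval())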
|
jlegendary/nupic | refs/heads/master | examples/opf/experiments/multistep/hotgym/permutations.py | 8 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'consumption'
permutations = {
'modelParams': {
'inferenceType': PermuteChoices(['NontemporalMultiStep', 'TemporalMultiStep']),
'sensorParams': {
'encoders': {
'timestamp_dayOfWeek': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.dayOfWeek', radius=PermuteFloat(1.000000, 6.000000), w=21),
'timestamp_timeOfDay': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.timeOfDay', radius=PermuteFloat(0.500000, 12.000000), w=21),
'consumption': PermuteEncoder(fieldName='consumption', encoderClass='AdaptiveScalarEncoder', n=PermuteInt(28, 521), w=21, clipInput=True),
'timestamp_weekend': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.weekend', radius=PermuteChoices([1]), w=21),
},
},
'tpParams': {
'minThreshold': PermuteInt(9, 12),
'activationThreshold': PermuteInt(12, 16),
'pamLength': PermuteInt(1, 5),
},
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*consumption.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'prediction:aae:window=1000:field=consumption')
minimize = "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=1000:field=consumption"
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
  of the variables in the permutations dict. It should return True for a valid
  combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True
|
illicitonion/givabit | refs/heads/master | lib/sdks/google_appengine_1.7.1/google_appengine/google/appengine/api/rdbms_mysqldb.py | 5 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Relational database API stub that uses the MySQLdb DB-API library.
Also see the rdbms module.
"""
import logging
import os
_POTENTIAL_SOCKET_LOCATIONS = (
'/tmp/mysql.sock',
'/var/run/mysqld/mysqld.sock',
'/var/lib/mysql/mysql.sock',
'/var/run/mysql/mysql.sock',
'/var/mysql/mysql.sock',
)
_connect_kwargs = {}
def SetConnectKwargs(**kwargs):
"""Sets the keyword args (host, user, etc) to pass to MySQLdb.connect()."""
global _connect_kwargs
_connect_kwargs = dict(kwargs)
def FindUnixSocket():
"""Find the Unix socket for MySQL by scanning some known locations.
Returns:
If found, the path to the Unix socket, otherwise, None.
"""
for path in _POTENTIAL_SOCKET_LOCATIONS:
if os.path.exists(path):
return path
try:
import google
import MySQLdb
from MySQLdb import *
__import__('MySQLdb.constants', globals(), locals(), ['*'])
except ImportError:
logging.warning('The rdbms API is not available because the MySQLdb '
'library could not be loaded.')
def connect(instance=None, database=None):
raise NotImplementedError(
'Unable to find the MySQLdb library. Please see the SDK '
'documentation for installation instructions.')
else:
def connect(instance=None, database=None, **kwargs):
merged_kwargs = _connect_kwargs.copy()
if database:
merged_kwargs['db'] = database
merged_kwargs.update(kwargs)
if 'password' in merged_kwargs:
merged_kwargs['passwd'] = merged_kwargs.pop('password')
host = merged_kwargs.get('host')
if ((not host or host == 'localhost') and
not merged_kwargs.get('unix_socket')):
socket = FindUnixSocket()
if socket:
merged_kwargs['unix_socket'] = socket
else:
logging.warning(
'Unable to find MySQL socket file. Use --mysql_socket to '
'specify its location manually.')
logging.info('Connecting to MySQL with kwargs %r', merged_kwargs)
try:
return MySQLdb.connect(**merged_kwargs)
except MySQLdb.Error:
logging.critical(
'MySQL connection failed! Ensure that you have provided correct '
'values for the --mysql_* flags when running dev_appserver.py')
raise
def set_instance(instance):
logging.info('set_instance() is a noop in dev_appserver.')
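
# A usage sketch (not part of the original SDK module); the credentials and
# database name below are placeholders. SetConnectKwargs() stores extra keyword
# arguments for MySQLdb.connect(), and connect() merges them with the database
# name, maps 'password' to MySQLdb's 'passwd', and fills in a discovered Unix
# socket for localhost connections.
#
#   SetConnectKwargs(user='root', password='secret')
#   conn = connect(database='testdb')
#   cursor = conn.cursor()
#   cursor.execute('SELECT 1')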
|
andela-ooladayo/django | refs/heads/master | tests/gis_tests/geos_tests/__init__.py | 12133432 | |
ilpise/geonode | refs/heads/master | geonode/upload/__init__.py | 12133432 | |
wagnerand/olympia | refs/heads/master | src/olympia/activity/management/__init__.py | 12133432 | |
austinhyde/ansible-modules-core | refs/heads/devel | __init__.py | 12133432 | |
kho0810/flaskr | refs/heads/master | lib/wtforms/ext/appengine/__init__.py | 177 |
import warnings
warnings.warn(
'wtforms.ext.appengine is deprecated, and will be removed in WTForms 3.0. '
'The package has been split out into its own package, wtforms-appengine: '
'https://github.com/wtforms/wtforms-appengine ',
DeprecationWarning
)
|
gzuser01/zetacoin-bitcoin | refs/heads/gzuser01-patch-1 | qa/rpc-tests/mempool_reorg.py | 56 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, the coinbases of nodes[0]'s blocks
        # 101, 102, 103, and 104 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
# Create a block-height-locked transaction which will be invalid after reorg
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
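        # (The two raw-hex edits below rely on the serialized transaction layout:
        # the first "ffffffff" is the input's nSequence, which must be made
        # non-final for nLockTime to be enforced, and the final 4 bytes are
        # nLockTime, rewritten as little-endian <current height + 2>; the
        # two-character hex value plus "000000" padding assumes the regtest
        # height fits in one byte.)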
timelock_tx = timelock_tx.replace("ffffffff", "11111111", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
SevInf/IEDriver | refs/heads/master | py/test/selenium/webdriver/common/typing_tests.py | 19 |
# copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class TypingTests(unittest.TestCase):
def testShouldFireKeyPressEvents(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("a")
result = self.driver.find_element(by=By.ID, value="result")
self.assertTrue("press:" in result.text)
def testShouldFireKeyDownEvents(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("I")
result = self.driver.find_element(by=By.ID, value="result")
self.assertTrue("down" in result.text)
def testShouldFireKeyUpEvents(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("a")
result = self.driver.find_element(by=By.ID, value="result")
self.assertTrue("up:" in result.text)
def testShouldTypeLowerCaseLetters(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("abc def")
self.assertEqual(keyReporter.get_attribute("value"), "abc def")
def testShouldBeAbleToTypeCapitalLetters(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("ABC DEF")
self.assertEqual(keyReporter.get_attribute("value"), "ABC DEF")
def testShouldBeAbleToTypeQuoteMarks(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("\"")
self.assertEqual(keyReporter.get_attribute("value"), "\"")
def testShouldBeAbleToTypeTheAtCharacter(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("@")
self.assertEqual(keyReporter.get_attribute("value"), "@")
def testShouldBeAbleToMixUpperAndLowerCaseLetters(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("me@eXample.com")
self.assertEqual(keyReporter.get_attribute("value"), "me@eXample.com")
def testArrowKeysShouldNotBePrintable(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys(Keys.ARROW_LEFT)
self.assertEqual(keyReporter.get_attribute("value"), "")
def testShouldBeAbleToUseArrowKeys(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("Tet", Keys.ARROW_LEFT, "s")
self.assertEqual(keyReporter.get_attribute("value"), "Test")
def testWillSimulateAKeyUpWhenEnteringTextIntoInputElements(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyUp")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
self.assertEqual(result.text, "I like cheese")
def testWillSimulateAKeyDownWhenEnteringTextIntoInputElements(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyDown")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
self.assertEqual(result.text, "I like chees")
def testWillSimulateAKeyPressWhenEnteringTextIntoInputElements(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyPress")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
self.assertEqual(result.text, "I like chees")
def testWillSimulateAKeyUpWhenEnteringTextIntoTextAreas(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyUpArea")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
self.assertEqual(result.text, "I like cheese")
def testWillSimulateAKeyDownWhenEnteringTextIntoTextAreas(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyDownArea")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
self.assertEqual(result.text, "I like chees")
def testWillSimulateAKeyPressWhenEnteringTextIntoTextAreas(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyPressArea")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
self.assertEqual(result.text, "I like chees")
#@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
# reason = "untested user agents")
def testShouldReportKeyCodeOfArrowKeysUpDownEvents(self):
self._loadPage("javascriptPage")
result = self.driver.find_element(by=By.ID, value="result")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys(Keys.ARROW_DOWN)
self.assertTrue("down: 40" in result.text.strip())
self.assertTrue("up: 40" in result.text.strip())
element.send_keys(Keys.ARROW_UP)
self.assertTrue("down: 38" in result.text.strip())
self.assertTrue("up: 38" in result.text.strip())
element.send_keys(Keys.ARROW_LEFT)
self.assertTrue("down: 37" in result.text.strip())
self.assertTrue("up: 37" in result.text.strip())
element.send_keys(Keys.ARROW_RIGHT)
self.assertTrue("down: 39" in result.text.strip())
self.assertTrue("up: 39" in result.text.strip())
# And leave no rubbish/printable keys in the "keyReporter"
self.assertEqual(element.get_attribute("value"), "")
def testNumericNonShiftKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
numericLineCharsNonShifted = "`1234567890-=[]\\,.'/42"
element.send_keys(numericLineCharsNonShifted)
self.assertEqual(element.get_attribute("value"), numericLineCharsNonShifted)
#@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
#reason = "untested user agent")
def testNumericShiftKeys(self):
self._loadPage("javascriptPage")
result = self.driver.find_element(by=By.ID, value="result")
element = self.driver.find_element(by=By.ID, value="keyReporter")
numericShiftsEtc = "~!@#$%^&*()_+{}:i\"<>?|END~"
element.send_keys(numericShiftsEtc)
self.assertEqual(element.get_attribute("value"), numericShiftsEtc)
self.assertTrue(" up: 16" in result.text.strip())
def testLowerCaseAlphaKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
lowerAlphas = "abcdefghijklmnopqrstuvwxyz"
element.send_keys(lowerAlphas)
self.assertEqual(element.get_attribute("value"), lowerAlphas)
def testUppercaseAlphaKeys(self):
self._loadPage("javascriptPage")
result = self.driver.find_element(by=By.ID, value="result")
element = self.driver.find_element(by=By.ID, value="keyReporter")
upperAlphas = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
element.send_keys(upperAlphas)
self.assertEqual(element.get_attribute("value"), upperAlphas)
self.assertTrue(" up: 16" in result.text.strip())
def testAllPrintableKeys(self):
self._loadPage("javascriptPage")
result = self.driver.find_element(by=By.ID, value="result")
element = self.driver.find_element(by=By.ID, value="keyReporter")
allPrintable = "!\"#$%&'()*+,-./0123456789:<=>?@ ABCDEFGHIJKLMNOPQRSTUVWXYZ [\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
element.send_keys(allPrintable)
self.assertEqual(element.get_attribute("value"), allPrintable)
self.assertTrue(" up: 16" in result.text.strip())
def testArrowKeysAndPageUpAndDown(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("a" + Keys.LEFT + "b" + Keys.RIGHT +
Keys.UP + Keys.DOWN + Keys.PAGE_UP + Keys.PAGE_DOWN + "1")
self.assertEqual(element.get_attribute("value"), "ba1")
#def testHomeAndEndAndPageUpAndPageDownKeys(self):
# // FIXME: macs don't have HOME keys, would PGUP work?
# if (Platform.getCurrent().is(Platform.MAC)) {
# return
# }
# self._loadPage("javascriptPage")
# element = self.driver.find_element(by=By.ID, value="keyReporter")
# element.send_keys("abc" + Keys.HOME + "0" + Keys.LEFT + Keys.RIGHT +
# Keys.PAGE_UP + Keys.PAGE_DOWN + Keys.END + "1" + Keys.HOME +
# "0" + Keys.PAGE_UP + Keys.END + "111" + Keys.HOME + "00")
# self.assertThat(element.get_attribute("value"), is("0000abc1111"))
#@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
# reason = "untested user agents")
def testDeleteAndBackspaceKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcdefghi")
self.assertEqual(element.get_attribute("value"), "abcdefghi")
element.send_keys(Keys.LEFT, Keys.LEFT, Keys.DELETE)
self.assertEqual(element.get_attribute("value"), "abcdefgi")
element.send_keys(Keys.LEFT, Keys.LEFT, Keys.BACK_SPACE)
self.assertEqual(element.get_attribute("value"), "abcdfgi")
#@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE}, reason = "untested user agents")
def testSpecialSpaceKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcd" + Keys.SPACE + "fgh" + Keys.SPACE + "ij")
self.assertEqual(element.get_attribute("value"), "abcd fgh ij")
def testNumberpadAndFunctionKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcd" + Keys.MULTIPLY + Keys.SUBTRACT + Keys.ADD +
Keys.DECIMAL + Keys.SEPARATOR + Keys.NUMPAD0 + Keys.NUMPAD9 +
Keys.ADD + Keys.SEMICOLON + Keys.EQUALS + Keys.DIVIDE +
Keys.NUMPAD3 + "abcd")
self.assertEqual(element.get_attribute("value"), "abcd*-+.,09+;=/3abcd")
element.clear()
element.send_keys("FUNCTION" + Keys.F2 + "-KEYS" + Keys.F2)
element.send_keys("" + Keys.F2 + "-TOO" + Keys.F2)
self.assertEqual(element.get_attribute("value"), "FUNCTION-KEYS-TOO")
def testShiftSelectionDeletes(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcd efgh")
self.assertEqual(element.get_attribute("value"), "abcd efgh")
element.send_keys(Keys.SHIFT, Keys.LEFT, Keys.LEFT, Keys.LEFT)
element.send_keys(Keys.DELETE)
self.assertEqual(element.get_attribute("value"), "abcd e")
def testShouldTypeIntoInputElementsThatHaveNoTypeAttribute(self):
self._loadPage("formPage")
element = self.driver.find_element(by=By.ID, value="no-type")
element.send_keys("Should Say Cheese")
self.assertEqual(element.get_attribute("value"), "Should Say Cheese")
def testShouldTypeAnInteger(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys(1234)
self.assertEqual(element.get_attribute("value"), "1234")
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
pipsiscool/audacity
|
refs/heads/master
|
lib-src/lv2/lv2/waflib/Tools/dmd.py
|
316
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
from waflib.Tools import ar,d
from waflib.Configure import conf
@conf
def find_dmd(conf):
conf.find_program(['dmd','dmd2','ldc'],var='D')
out=conf.cmd_and_log([conf.env.D,'--help'])
if out.find("D Compiler v")==-1:
out=conf.cmd_and_log([conf.env.D,'-version'])
if out.find("based on DMD v1.")==-1:
conf.fatal("detected compiler is not dmd/ldc")
@conf
def common_flags_ldc(conf):
v=conf.env
v['DFLAGS']=['-d-version=Posix']
v['LINKFLAGS']=[]
v['DFLAGS_dshlib']=['-relocation-model=pic']
@conf
def common_flags_dmd(conf):
v=conf.env
v['D_SRC_F']=['-c']
v['D_TGT_F']='-of%s'
v['D_LINKER']=v['D']
v['DLNK_SRC_F']=''
v['DLNK_TGT_F']='-of%s'
v['DINC_ST']='-I%s'
v['DSHLIB_MARKER']=v['DSTLIB_MARKER']=''
v['DSTLIB_ST']=v['DSHLIB_ST']='-L-l%s'
v['DSTLIBPATH_ST']=v['DLIBPATH_ST']='-L-L%s'
v['LINKFLAGS_dprogram']=['-quiet']
v['DFLAGS_dshlib']=['-fPIC']
v['LINKFLAGS_dshlib']=['-L-shared']
v['DHEADER_ext']='.di'
v.DFLAGS_d_with_header=['-H','-Hf']
v['D_HDR_F']='%s'
def configure(conf):
conf.find_dmd()
if sys.platform=='win32':
out=conf.cmd_and_log([conf.env.D,'--help'])
if out.find("D Compiler v2.")>-1:
conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead')
conf.load('ar')
conf.load('d')
conf.common_flags_dmd()
conf.d_platform_flags()
if str(conf.env.D).find('ldc')>-1:
conf.common_flags_ldc()
|
dgrant/brickowl2rebrickable
|
refs/heads/master
|
rebrickable_colors.py
|
1
|
"""
Parses rebrickable's color data from http://rebrickable.com/colors
"""
import html.parser
def strarray(data):
"""
Parses a string that looks like '{foo, "bar"}' into an array of strings
:param data: a string that looks like '{foo, "bar"}' or '{foo, bar}', etc...
:return: an array of string objects
"""
data = data.strip('{}')
data = data.split(',')
data = [x.strip('"') for x in data]
return data
def intarray(data):
"""
Parses a string that looks like '{1, "2"}' into an array of ints
:param data: a string that looks like '{1, "2"}' or '{1, 2}', etc...
:return: an array of int objects
"""
return [int(x) for x in strarray(data)]
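# Illustrative note (added; not part of the upstream module): with the helpers
# above, inputs without spaces round-trip as the docstrings describe, e.g.:
#     strarray('{foo,"bar"}')  ->  ['foo', 'bar']
#     intarray('{1,"2"}')      ->  [1, 2]
# Values are not whitespace-stripped, so '{foo, "bar"}' would instead yield
# ['foo', ' "bar"'].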
class ColorTableParser(html.parser.HTMLParser): # pylint: disable=R0904
"""
Parses the color data from http://rebrickable.com/colors
"""
TYPES = {'ID': int,
'Name': str,
'RGB': str,
'Num Parts': int,
'Num Sets': int,
'From Year': int,
'To Year': int,
'LEGO Color': strarray,
'LDraw Color': intarray,
'BrickLink Color': intarray,
'Peeron Color': strarray}
def __init__(self):
html.parser.HTMLParser.__init__(self)
self._in = {'table': False, 'row': False, 'col': False, 'header': False}
self._row = 0
self._col = 0
self._header_data = {}
self._row_data = None
self.table_data = []
def handle_starttag(self, tag, attrs):
if not self._in['table'] and tag.lower() == 'table' and ('class', 'table') in attrs:
self._in['table'] = True
elif self._in['table']:
if not self._in['row'] and tag.lower() == 'tr':
self._in['row'] = True
self._row_data = {}
elif self._in['row'] and not self._in['col'] and tag.lower() == 'td':
self._in['col'] = True
def handle_endtag(self, tag):
if tag.lower() == 'table':
self._in['table'] = False
elif tag.lower() == 'td':
self._in['col'] = False
self._col += 1
elif tag.lower() == 'tr':
self._in['row'] = False
self._row += 1
self._col = 0
if not self._in['header']:
self.table_data.append(self._row_data)
else:
self._in['header'] = False
self._row_data = None
def handle_data(self, data):
if self._in['row'] and self._in['col']:
if data == 'ID':
self._in['header'] = True
self._row -= 1
if self._in['header']:
self._header_data[self._col] = data
else:
header_name = self._header_data[self._col]
self._row_data[header_name] = self.TYPES[header_name](data)
class ColorTable(object):
"""
A table of color ids, parsed from http://rebrickable.com/colors
"""
def __init__(self, data):
self._data = data
self._lego_color_to_id = {}
self._peeron_color_to_id = {}
self._color_name_to_id = {}
self.parse()
def parse(self):
"""
Parse the data in self._data into a few maps that can be used to find a Rebrickable id from another type of
id such as a LEGO color, Peeron color, or Rebrickable color name
:return: nothing
"""
for color in self._data:
if 'LEGO Color' in color:
for lego_color_name in color['LEGO Color']:
self._lego_color_to_id[lego_color_name.lower()] = color['ID']
if 'Peeron Color' in color:
for peeron_color_name in color['Peeron Color']:
self._peeron_color_to_id[peeron_color_name.lower()] = color['ID']
if 'Name' in color:
self._color_name_to_id[color['Name'].lower()] = color['ID']
def get_colorid_from_brickowl_name(self, brickowl_colorname):
"""
:param brickowl_colorname: a brickowl color name
:return: the Rebrickable color id for a brickowl color name
"""
brickowl_colorname = brickowl_colorname.lower()
if brickowl_colorname in self._lego_color_to_id:
return self._lego_color_to_id[brickowl_colorname]
if brickowl_colorname in self._peeron_color_to_id:
return self._peeron_color_to_id[brickowl_colorname]
if brickowl_colorname in self._color_name_to_id:
return self._color_name_to_id[brickowl_colorname]
if brickowl_colorname.find('transparent') == 0:
return self.get_colorid_from_brickowl_name(brickowl_colorname.replace('transparent ', 'trans-'))
if brickowl_colorname.find('gray') != -1:
return self.get_colorid_from_brickowl_name(brickowl_colorname.replace('gray', 'grey'))
print("!!! Name:", brickowl_colorname, "is unmatched")
return None
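# Hedged usage sketch (added for illustration; the variable names and the color
# name below are assumptions, not part of the upstream module). The parser and
# table are typically wired together like this:
#
#     parser = ColorTableParser()
#     parser.feed(html_text)            # html_text fetched from rebrickable.com/colors
#     table = ColorTable(parser.table_data)
#     color_id = table.get_colorid_from_brickowl_name('Transparent Red')
#
# The lookup tries the LEGO-name map, then the Peeron-name map, then the
# Rebrickable-name map, and finally retries after rewriting 'transparent ' to
# 'trans-' and 'gray' to 'grey'.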
|
saurabhbajaj207/CarpeDiem
|
refs/heads/master
|
venv/Lib/site-packages/pip/_vendor/html5lib/treebuilders/dom.py
|
435
|
from __future__ import absolute_import, division, unicode_literals
from collections import MutableMapping
from xml.dom import minidom, Node
import weakref
from . import base
from .. import constants
from ..constants import namespaces
from .._utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(MutableMapping):
def __init__(self, element):
self.element = element
def __iter__(self):
return iter(self.element.attributes.keys())
def __setitem__(self, name, value):
if isinstance(name, tuple):
raise NotImplementedError
else:
attr = self.element.ownerDocument.createAttribute(name)
attr.value = value
self.element.attributes[name] = attr
def __len__(self):
return len(self.element.attributes)
def items(self):
return list(self.element.attributes.items())
def values(self):
return list(self.element.attributes.values())
def __getitem__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.attributes[name].value
def __delitem__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
del self.element.attributes[name]
class NodeBuilder(base.Node):
def __init__(self, element):
base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self: hasattr(self.element, "namespaceURI") and
self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
if parent != self:
base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
# pylint:disable=protected-access
if Node.TEXT_NODE not in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
implementation = DomImplementation
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI is not None):
name = "%s %s" % (constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>" % (' ' * indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s" % (constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
|
tumbl3w33d/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/aws/batch.py
|
11
|
# Copyright (c) 2017 Ansible Project
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Batch modules.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, snake_dict_to_camel_dict
try:
from botocore.exceptions import ClientError
except ImportError:
pass # Handled by HAS_BOTO3
class AWSConnection(object):
"""
Create the connection object and client objects as required.
"""
def __init__(self, ansible_obj, resources, boto3=True):
self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)
self.resource_client = dict()
if not resources:
resources = ['batch']
resources.append('iam')
for resource in resources:
aws_connect_kwargs.update(dict(region=self.region,
endpoint=self.endpoint,
conn_type='client',
resource=resource
))
self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
# if region is not provided, then get default profile/session region
if not self.region:
self.region = self.resource_client['batch'].meta.region_name
# set account ID
try:
self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
except (ClientError, ValueError, KeyError, IndexError):
self.account_id = ''
def client(self, resource='batch'):
return self.resource_client[resource]
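# Hedged usage sketch (added; `module` stands for an AnsibleModule-like object
# carrying AWS credentials in its params -- it is not defined in this file):
#
#     aws = AWSConnection(module, ['batch'])
#     batch_client = aws.client('batch')   # boto3 client for AWS Batch
#     iam_client = aws.client('iam')       # 'iam' is always appended to resources
#     print(aws.region, aws.account_id)
#
# Each entry in `resources` gets its own boto3 client, keyed by resource name.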
def cc(key):
"""
Converts a snake_case Python key into its camelCase equivalent. For example, 'compute_environment_name' becomes
'computeEnvironmentName'.
:param key:
:return:
"""
components = key.split('_')
return components[0] + "".join([token.capitalize() for token in components[1:]])
def set_api_params(module, module_params):
"""
Filters the module parameters down to module_params and converts them to the camelCase names expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
return snake_dict_to_camel_dict(api_params)
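# Hedged illustration of the helpers above (added; the parameter names are
# hypothetical): cc() camel-cases a single snake_case key, while
# set_api_params() keeps only the requested, non-None module params and
# camel-cases all of their keys:
#
#     cc('compute_environment_name')
#     # -> 'computeEnvironmentName'
#     set_api_params(module, ('state', 'compute_environment_name'))
#     # -> {'state': ..., 'computeEnvironmentName': ...}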
|
BeiLuoShiMen/nupic
|
refs/heads/master
|
tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py
|
12
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests OPF descriptionTemplate.py-based experiment/sub-experiment pair"""
import os
import pprint
import sys
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.frameworks.opf.opfhelpers import (
loadExperimentDescriptionScriptFromDir,
getExperimentDescriptionInterfaceFromModule
)
from nupic.support.unittesthelpers.testcasebase import (
TestCaseBase as HelperTestCaseBase)
# Our __main__ entry block sets this to an instance of MyTestEnvironment()
g_myEnv = None
g_debug = False
class MyTestEnvironment(object):
def __init__(self):
examplesDir = resource_filename("nupic", os.path.join("..", "examples"))
_debugOut("examplesDir=<%s>" % (examplesDir,))
assert os.path.exists(examplesDir), \
"%s is not present in filesystem" % examplesDir
# This is where we find OPF binaries (e.g., run_opf_experiment.py, etc.)
# In the autobuild, it is a read-only directory
self.__opfBinDir = resource_filename("nupic", os.path.join("..", "scripts"))
assert os.path.exists(self.__opfBinDir), \
"%s is not present in filesystem" % self.__opfBinDir
_debugOut("self.__opfBinDir=<%s>" % self.__opfBinDir)
# Where this script is running from (our autotest counterpart may have
# copied it from its original location)
self.__testRunDir = os.path.abspath(os.path.dirname(__file__))
_debugOut("self.__testRunDir=<%s>" % self.__testRunDir)
# Parent directory of our private OPF experiments
self.__opfExperimentsParentDir = os.path.join(self.__testRunDir,
"experiments")
assert os.path.exists(self.__opfExperimentsParentDir), \
"%s is not present in filesystem" % self.__opfExperimentsParentDir
_debugOut("self.__opfExperimentsParentDir=<%s>"
% self.__opfExperimentsParentDir)
def getOpfRunExperimentPyPath(self):
return os.path.join(self.__opfBinDir, "run_opf_experiment.py")
def getOpfExperimentPath(self, experimentName):
"""
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiment.
Returns: absolute path to the experiment directory
"""
path = os.path.join(self.__opfExperimentsParentDir, experimentName)
assert os.path.isdir(path), \
"Experiment path %s doesn't exist or is not a directory" % (path,)
return path
class MyTestCaseBase(HelperTestCaseBase):
def setUp(self):
""" Method called to prepare the test fixture. This is called immediately
before calling the test method; any exception raised by this method will be
considered an error rather than a test failure. The default implementation
does nothing.
"""
global g_myEnv
if not g_myEnv:
# Setup environment
g_myEnv = MyTestEnvironment()
def tearDown(self):
""" Method called immediately after the test method has been called and the
result recorded. This is called even if the test method raised an exception,
so the implementation in subclasses may need to be particularly careful
about checking internal state. Any exception raised by this method will be
considered an error rather than a test failure. This method will only be
called if the setUp() succeeds, regardless of the outcome of the test
method. The default implementation does nothing.
"""
# Reset our log items
self.resetExtraLogItems()
def shortDescription(self):
""" Override to force unittest framework to use test method names instead
of docstrings in the report.
"""
return None
def executePositiveOpfExperiment(self, experimentName, short=False):
""" Executes a positive OPF RunExperiment test as a subprocess and validates
its exit status.
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiment.
short: if True, attempt to run the experiment with --testMode
flag turned on, which causes all inference and training
iteration counts to be overridden with small counts.
Returns: result from _executeExternalCmdAndReapOutputs
"""
opfRunner = g_myEnv.getOpfRunExperimentPyPath()
opfExpDir = g_myEnv.getOpfExperimentPath(experimentName)
r = self.__executePositiveRunExperimentTest(runnerPath=opfRunner,
experimentDirPath=opfExpDir,
short=short)
return r
def __executePositiveRunExperimentTest(self,
runnerPath,
experimentDirPath,
customOptions=[],
short=False):
""" Executes a positive RunExperiment.py test and performs
basic validation
runnerPath: experiment runner (LPF or OPF RunExperiment.py path)
experimentDirPath: directory containing the description.py file of interest
short: if True, attempt to run the experiment with --testMode
flag turned on, which causes all inference and training
iteration counts to be overridden with small counts.
NOTE: if the (possibly aggregated) dataset has fewer
rows than the count overrides, then an LPF experiment
will fail.
Returns: result from _executeExternalCmdAndReapOutputs
"""
#----------------------------------------
# Set up args
command = [
"python",
runnerPath,
experimentDirPath,
]
command.extend(customOptions)
if short:
command.append("--testMode")
self.addExtraLogItem({'command':command})
#----------------------------------------
# Execute RunExperiment.py as subprocess and collect results
r = _executeExternalCmdAndReapOutputs(command)
self.addExtraLogItem({'result':r})
_debugOut(("_executeExternalCmdAndReapOutputs(%s)=%s") % (command, r))
#----------------------------------------
# Check subprocess exit status
self.assertEqual(r['exitStatus'], 0,
("Expected status = 0 from %s; got: %s") % \
(runnerPath, r['exitStatus'],))
self.resetExtraLogItems()
return r
class PositiveTests(MyTestCaseBase):
#========================
def test_sub_experiment_override(self):
expDir = g_myEnv.getOpfExperimentPath("gym")
module = loadExperimentDescriptionScriptFromDir(expDir)
expIface = getExperimentDescriptionInterfaceFromModule(module)
modelDesc = expIface.getModelDescription()
tpActivationThreshold = modelDesc['modelParams'] \
['tpParams']['activationThreshold']
expectedValue = 12
self.assertEqual(tpActivationThreshold, expectedValue,
"Expected tp activationThreshold=%s, but got %s" % (
expectedValue, tpActivationThreshold))
def test_run_sub_experiment(self):
self.executePositiveOpfExperiment(experimentName="gym", short=True)
################################################################################
# Support functions
################################################################################
def _executeExternalCmdAndReapOutputs(args):
"""
args: Args list as defined for the args parameter in subprocess.Popen()
Returns: result dictionary:
{
'exitStatus':<exit-status-of-external-command>,
'stdoutData':"string",
'stderrData':"string"
}
"""
import subprocess
_debugOut(("Starting...\n<%s>") % \
(args,))
p = subprocess.Popen(args,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_debugOut(("Process started for <%s>") % (args,))
(stdoutData, stderrData) = p.communicate()
_debugOut(("Process completed for <%s>: exit status=%s, " + \
"stdoutDataType=%s, stdoutData=<%s>, stderrData=<%s>") % \
(args, p.returncode, type(stdoutData), stdoutData, stderrData))
result = dict(
exitStatus = p.returncode,
stdoutData = stdoutData,
stderrData = stderrData,
)
_debugOut(("args: <%s>: result:\n%s") % \
(args, pprint.pformat(result, indent=4)))
return result
def _debugOut(msg):
if g_debug:
callerTraceback = whoisCallersCaller()
print "OPF TestDescriptionTemplate (f=%s;line=%s): %s" % \
(callerTraceback.function, callerTraceback.lineno, msg,)
sys.stdout.flush()
def whoisCallersCaller():
"""
Returns: Traceback namedtuple for our caller's caller
"""
import inspect
frameObj = inspect.stack()[2][0]
return inspect.getframeinfo(frameObj)
if __name__ == "__main__":
g_myEnv = MyTestEnvironment()
unittest.longMessage = True
unittest.main()
|
asimshankar/tensorflow
|
refs/heads/master
|
tensorflow/python/autograph/converters/continue_statements.py
|
5
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes continue statements by de-sugaring into a control boolean."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
class _Continue(object):
def __init__(self):
self.used = False
self.control_var_name = None
self.create_guard = False
self.guard_created = False
def __repr__(self):
return 'used: %s, var: %s' % (self.used, self.control_var_name)
class ContinueCanonicalizationTransformer(converter.Base):
"""Canonicalizes continue statements into additional conditionals."""
def visit_Continue(self, node):
self.state[_Continue].used = True
template = """
var_name = True
"""
return templates.replace(
template, var_name=self.state[_Continue].control_var_name)
def _postprocess_statement(self, node):
# Example of how the state machine below works:
#
# 1| stmt # State: Continue_.used = False
# | # Action: none
# 2| if cond:
# 3| continue # State: Continue_.used = True,
# | # Continue_.guard_created = False,
# | # Continue_.create_guard = False
# | # Action: Continue_.create_guard = True
# 4| stmt # State: Continue_.used = True,
# | # Continue_.guard_created = False,
# | # Continue_.create_guard = True
# | # Action: create `if not continue_used`,
# | # set Continue_.guard_created = True
# 5| stmt # State: Continue_.used = True,
# | # Continue_.guard_created = True
# | # Action: none (will be wrapped under previously
# | # created if node)
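# Added illustration (comment only, not part of the original file): the net
# effect of this transformer on a loop body is roughly the source-level rewrite
#
#     while cond:                  # original
#         if skip:
#             continue
#         work()
#
#     while cond:                  # canonicalized
#         continue_ = False
#         if skip:
#             continue_ = True
#         if not continue_:
#             work()
#
# where 'continue_' stands for the control variable generated by the namer.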
if self.state[_Continue].used:
if self.state[_Continue].guard_created:
return node, None
elif not self.state[_Continue].create_guard:
self.state[_Continue].create_guard = True
return node, None
else:
self.state[_Continue].guard_created = True
template = """
if not var_name:
original_node
"""
cond, = templates.replace(
template,
var_name=self.state[_Continue].control_var_name,
original_node=node)
return cond, cond.body
return node, None
def _visit_loop_body(self, node, nodes):
self.state[_Continue].enter()
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
continue_var = self.ctx.namer.new_symbol('continue_', scope.referenced)
self.state[_Continue].control_var_name = continue_var
nodes = self.visit_block(nodes, after_visit=self._postprocess_statement)
if self.state[_Continue].used:
template = """
var_name = False
"""
control_var_init = templates.replace(template, var_name=continue_var)
nodes = control_var_init + nodes
self.state[_Continue].exit()
return nodes
def visit_While(self, node):
node.test = self.visit(node.test)
node.body = self._visit_loop_body(node, node.body)
# A continue in the else clause applies to the containing scope.
node.orelse = self.visit_block(node.orelse)
return node
def visit_For(self, node):
node.target = self.generic_visit(node.target)
node.iter = self.generic_visit(node.iter)
node.body = self._visit_loop_body(node, node.body)
# A continue in the else clause applies to the containing scope.
node.orelse = self.visit_block(node.orelse)
return node
def transform(node, ctx):
transformer = ContinueCanonicalizationTransformer(ctx)
node = transformer.visit(node)
return node
|
azoft-dev-team/imagrium
|
refs/heads/win
|
src/__init__.py
|
12133432
| |
ehenneken/adsws
|
refs/heads/master
|
adsws/ext/session/backends/__init__.py
|
12133432
| |
xiangel/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/core/signing.py
|
110
|
"""
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialised object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
u'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
from __future__ import unicode_literals
import base64
import json
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_by_path
class BadSignature(Exception):
"""
Signature does not match
"""
pass
class SignatureExpired(BadSignature):
"""
Signature timestamp is older than required max_age
"""
pass
def b64_encode(s):
return base64.urlsafe_b64encode(s).strip(b'=')
def b64_decode(s):
pad = b'=' * (-len(s) % 4)
return base64.urlsafe_b64decode(s + pad)
def base64_hmac(salt, value, key):
return b64_encode(salted_hmac(salt, value, key).digest())
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
Signer = import_by_path(settings.SIGNING_BACKEND)
return Signer('django.http.cookies' + settings.SECRET_KEY, salt=salt)
class JSONSerializer(object):
"""
Simple wrapper around json to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':')).encode('latin-1')
def loads(self, data):
return json.loads(data.decode('latin-1'))
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
"""
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
"""
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
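# Added illustration (comment only; the salt is hypothetical and the actual
# token depends on SECRET_KEY): dumps()/loads() round-trip an object and
# enforce max_age through the embedded timestamp:
#
#     token = dumps({'user': 42}, salt='example.app')
#     loads(token, salt='example.app')              # -> {'user': 42}
#     loads(token, salt='example.app', max_age=60)  # SignatureExpired if older than 60s
#     loads(token + 'x', salt='example.app')        # raises BadSignature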
class Signer(object):
def __init__(self, key=None, sep=':', salt=None):
# Use of native strings in all versions of Python
self.sep = str(sep)
self.key = str(key or settings.SECRET_KEY)
self.salt = str(salt or
'%s.%s' % (self.__class__.__module__, self.__class__.__name__))
def signature(self, value):
signature = base64_hmac(self.salt + 'signer', value, self.key)
# Convert the signature from bytes to str only on Python 3
return force_str(signature)
def sign(self, value):
value = force_str(value)
return str('%s%s%s') % (value, self.sep, self.signature(value))
def unsign(self, signed_value):
signed_value = force_str(signed_value)
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig, self.signature(value)):
return force_text(value)
raise BadSignature('Signature "%s" does not match' % sig)
class TimestampSigner(Signer):
def timestamp(self):
return baseconv.base62.encode(int(time.time()))
def sign(self, value):
value = force_str(value)
value = str('%s%s%s') % (value, self.sep, self.timestamp())
return super(TimestampSigner, self).sign(value)
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super(TimestampSigner, self).unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value
|
phektus/Django-Google-AppEngine-OpenId-Auth
|
refs/heads/master
|
django/views/static.py
|
151
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import urllib
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseNotModified
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date, parse_http_date
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root' : '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(urllib.unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404("Directory indexes are not allowed here.")
if not os.path.exists(fullpath):
raise Http404('"%s" does not exist' % fullpath)
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
mimetype, encoding = mimetypes.guess_type(fullpath)
mimetype = mimetype or 'application/octet-stream'
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified(mimetype=mimetype)
response = HttpResponse(open(fullpath, 'rb').read(), mimetype=mimetype)
response["Last-Modified"] = http_date(statobj.st_mtime)
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>Index of {{ directory }}</title>
</head>
<body>
<h1>Index of {{ directory }}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
def directory_index(path, fullpath):
try:
t = loader.select_template(['static/directory_index.html',
'static/directory_index'])
except TemplateDoesNotExist:
t = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE, name='Default directory index template')
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory' : path + '/',
'file_list' : files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if mtime > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
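# Added illustration (comment only; the file name and size are hypothetical):
# was_modified_since() takes the raw If-Modified-Since header value, optionally
# with the non-standard length hint some browsers append, e.g.
#
#     was_modified_since('Sat, 29 Oct 1994 19:43:31 GMT; length=4096',
#                        mtime=os.path.getmtime(fullpath), size=4096)
#
# It returns True (i.e. "serve the file") whenever the header is missing or
# malformed, the size differs, or the file is newer than the header date.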
|
torbjoernk/pySDC
|
refs/heads/master
|
examples/fenics_advection_diffusion_1d/playground.py
|
2
|
from pySDC import CollocationClasses as collclass
from examples.fenics_advection_diffusion_1d.ProblemClass import fenics_adv_diff_1d
from pySDC.datatype_classes.fenics_mesh import fenics_mesh,rhs_fenics_mesh
from examples.fenics_advection_diffusion_1d.TransferClass import mesh_to_mesh_fenics
from pySDC.sweeper_classes.mass_matrix_imex import mass_matrix_imex
import pySDC.PFASST_blockwise as mp
# import pySDC.PFASST_stepwise as mp
from pySDC import Log
from pySDC.Stats import grep_stats, sort_stats
import dolfin as df
import numpy as np
if __name__ == "__main__":
# set global logger (remove this if you do not want the output at all)
logger = Log.setup_custom_logger('root')
num_procs = 8
# assert num_procs == 1,'turn on predictor!'
# This comes as read-in for the level class
lparams = {}
lparams['restol'] = 5E-09
sparams = {}
sparams['maxiter'] = 20
# This comes as read-in for the problem class
pparams = {}
pparams['nu'] = 0.05
pparams['mu'] = 1.0
pparams['k'] = 1
pparams['t0'] = 0.0 # ugly, but necessary to set up ProblemClass
# pparams['c_nvars'] = [(16,16)]
pparams['c_nvars'] = [128]
pparams['family'] = 'CG'
pparams['order'] = [1]
pparams['refinements'] = [1,0]
# This comes as read-in for the transfer operations
tparams = {}
tparams['finter'] = True
# Fill description dictionary for easy hierarchy creation
description = {}
description['problem_class'] = fenics_adv_diff_1d
description['problem_params'] = pparams
description['dtype_u'] = fenics_mesh
description['dtype_f'] = rhs_fenics_mesh
description['collocation_class'] = collclass.CollGaussLegendre
description['num_nodes'] = 3
description['sweeper_class'] = mass_matrix_imex
description['level_params'] = lparams
description['transfer_class'] = mesh_to_mesh_fenics
description['transfer_params'] = tparams
# quickly generate block of steps
MS = mp.generate_steps(num_procs,sparams,description)
# setup parameters "in time"
t0 = MS[0].levels[0].prob.t0
dt = 0.2
Tend = 1.6
# get initial values on finest level
P = MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend,stats = mp.run_pfasst(MS,u0=uinit,t0=t0,dt=dt,Tend=Tend)
# df.plot(uend.values,interactive=True)
# compute exact solution and compare
uex = P.u_exact(Tend)
print('(classical) error at time %s: %s' %(Tend,abs(uex-uend)/abs(uex)))
# df.plot(uex.values,key='u')
# df.plot(uend.values,key='u')
# df.plot(uex.values-uend.values)
# df.interactive()
# uex = df.Expression('sin(a*x[0]) * cos(t)',a=np.pi,t=Tend)
# print('(fenics-style) error at time %s: %s' %(Tend,df.errornorm(uex,uend.values)))
# extract_stats = grep_stats(stats,iter=-1,type='residual')
# sortedlist_stats = sort_stats(extract_stats,sortby='step')
# print(extract_stats,sortedlist_stats)
|
mikalv/android_kernel_samsung_degas3g
|
refs/heads/master
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
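# Added illustration (comment only; the opcode and field names come from the
# tables above, the concrete test line is hypothetical): a line such as
#
#     W: locked: 0: 0
#
# is split on ':' into cmd='w', opc='locked', tid='0', dat='0'. 'locked' maps
# to ["M", "eq", 4], so the main loop keeps re-reading thread 0's status file
# and analyse() checks whether digit 0 of the "M" (mutex state) value equals 4,
# waiting until it does.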
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
anielsen001/scipy
|
refs/heads/master
|
scipy/spatial/tests/test__plotutils.py
|
55
|
from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
has_matplotlib = True
except Exception:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
voronoi_plot_2d(obj, show_vertices=False)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
|
lwiecek/django
|
refs/heads/master
|
django/contrib/gis/views.py
|
684
|
from __future__ import unicode_literals
from django.http import Http404
from django.utils.translation import ugettext as _
def feed(request, url, feed_dict=None):
"""Provided for backwards compatibility."""
if not feed_dict:
raise Http404(_("No feeds are registered."))
slug = url.partition('/')[0]
try:
f = feed_dict[slug]
except KeyError:
raise Http404(_("Slug %r isn't registered.") % slug)
instance = f()
instance.feed_url = getattr(f, 'feed_url', None) or request.path
instance.title_template = f.title_template or ('feeds/%s_title.html' % slug)
instance.description_template = f.description_template or ('feeds/%s_description.html' % slug)
return instance(request)
|
htzy/bigfour
|
refs/heads/master
|
common/djangoapps/monkey_patch/tests/__init__.py
|
12133432
| |
scotu/django-formaggio
|
refs/heads/master
|
formaggio/migrations/__init__.py
|
12133432
| |
redhatrises/freeipa
|
refs/heads/master
|
ipaclient/remote_plugins/2_156/hbacsvcgroup.py
|
16
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
HBAC Service Groups
HBAC service groups can contain any number of individual services,
or "members". Every group must have a description.
EXAMPLES:
Add a new HBAC service group:
ipa hbacsvcgroup-add --desc="login services" login
Add members to an HBAC service group:
ipa hbacsvcgroup-add-member --hbacsvcs=sshd --hbacsvcs=login login
Display information about a named group:
ipa hbacsvcgroup-show login
Delete an HBAC service group:
ipa hbacsvcgroup-del login
""")
register = Registry()
@register()
class hbacsvcgroup(Object):
takes_params = (
parameters.Str(
'cn',
primary_key=True,
label=_(u'Service group name'),
),
parameters.Str(
'description',
required=False,
label=_(u'Description'),
doc=_(u'HBAC service group description'),
),
parameters.Str(
'member_hbacsvc',
required=False,
label=_(u'Member HBAC service'),
),
)
@register()
class hbacsvcgroup_add(Method):
__doc__ = _("Add a new HBAC service group.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Service group name'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'HBAC service group description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class hbacsvcgroup_add_member(Method):
__doc__ = _("Add members to an HBAC service group.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Service group name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'hbacsvc',
required=False,
multivalue=True,
cli_name='hbacsvcs',
label=_(u'member HBAC service'),
doc=_(u'HBAC services to add'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be added'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members added'),
),
)
@register()
class hbacsvcgroup_del(Method):
__doc__ = _("Delete an HBAC service group.")
takes_args = (
parameters.Str(
'cn',
multivalue=True,
cli_name='name',
label=_(u'Service group name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class hbacsvcgroup_find(Method):
__doc__ = _("Search for an HBAC service group.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'cn',
required=False,
cli_name='name',
label=_(u'Service group name'),
no_convert=True,
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'HBAC service group description'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds (0 is unlimited)'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned (0 is unlimited)'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("name")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class hbacsvcgroup_mod(Method):
__doc__ = _("Modify an HBAC service group.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Service group name'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'HBAC service group description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class hbacsvcgroup_remove_member(Method):
__doc__ = _("Remove members from an HBAC service group.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Service group name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'hbacsvc',
required=False,
multivalue=True,
cli_name='hbacsvcs',
label=_(u'member HBAC service'),
doc=_(u'HBAC services to remove'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be removed'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members removed'),
),
)
@register()
class hbacsvcgroup_show(Method):
__doc__ = _("Display information about an HBAC service group.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Service group name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
|
pedestre/Kernel-Apolo-JB-4.1.2
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# drawing traces and an overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
Shrews/PyGerrit
|
refs/heads/master
|
webapp/django/utils/cache.py
|
17
|
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import re
import time
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
from django.conf import settings
from django.core.cache import cache
from django.utils.encoding import smart_str, iri_to_uri
from django.utils.http import http_date
from django.utils.hashcompat import md5_constructor
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return t[0] + '=' + smart_str(t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict([dictitem(el) for el in cc])
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(cc['max-age'], kwargs['max_age'])
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join([dictvalue(el) for el in cc.items()])
response['Cache-Control'] = cc
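# Illustrative usage sketch (added for clarity; not part of the original
# module). Assuming a plain Django HttpResponse, each keyword becomes a
# Cache-Control directive: underscores turn into hyphens, a value of exactly
# True emits only the directive name, and other values are rendered as
# name=value:
#
#     response = HttpResponse()
#     patch_cache_control(response, max_age=3600)
#     # response['Cache-Control'] == 'max-age=3600'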
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
(or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict([_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control'])])
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if not response.has_header('ETag'):
response['ETag'] = '"%s"' % md5_constructor(response.content).hexdigest()
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
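# Illustrative usage sketch (added; not part of the original module).
# Existing Vary values are kept in their original order and new headers are
# merged case-insensitively:
#
#     response = HttpResponse()
#     response['Vary'] = 'Cookie'
#     patch_vary_headers(response, ['accept-language', 'cookie'])
#     # response['Vary'] == 'Cookie, accept-language'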
def _generate_cache_key(request, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = md5_constructor()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(value)
return 'views.decorators.cache.cache_page.%s.%s.%s' % (
key_prefix, iri_to_uri(request.path), ctx.hexdigest())
def get_cache_key(request, key_prefix=None):
"""
Returns a cache key based on the request path. It can be used in the
request phase because it pulls the list of headers to take into account
from the global path registry and uses those to build a cache key to check
against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, iri_to_uri(request.path))
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, iri_to_uri(request.path))
if response.has_header('Vary'):
headerlist = ['HTTP_'+header.upper().replace('-', '_')
for header in cc_delim_re.split(response['Vary'])]
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.path
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, [], key_prefix)
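# Illustrative flow (added for clarity; not part of the original module).
# A typical caller such as the cache middleware uses learn_cache_key() in
# the response phase to record which request headers matter for a path, and
# get_cache_key() in a later request phase to rebuild the same key:
#
#     page_key = learn_cache_key(request, response)   # response phase
#     ...
#     page_key = get_cache_key(request)               # later request phase
#     cached_response = page_key and cache.get(page_key)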
def _to_tuple(s):
t = s.split('=',1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
vivekmishra1991/scikit-learn
|
refs/heads/master
|
sklearn/feature_selection/tests/test_from_model.py
|
244
|
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
|
sramsay64/python-iview
|
refs/heads/master
|
cherrypy/_cptree.py
|
58
|
"""CherryPy Application and Tree objects."""
import os
import cherrypy
from cherrypy._cpcompat import ntou, py3k
from cherrypy import _cpconfig, _cplogging, _cprequest, _cpwsgi, tools
from cherrypy.lib import httputil
class Application(object):
"""A CherryPy Application.
Servers and gateways should not instantiate Request objects directly.
Instead, they should ask an Application object for a request object.
An instance of this class may also be used as a WSGI callable
(WSGI application object) for itself.
"""
root = None
"""The top-most container of page handlers for this app. Handlers should
be arranged in a hierarchy of attributes, matching the expected URI
hierarchy; the default dispatcher then searches this hierarchy for a
matching handler. When using a dispatcher other than the default,
this value may be None."""
config = {}
"""A dict of {path: pathconf} pairs, where 'pathconf' is itself a dict
of {key: value} pairs."""
namespaces = _cpconfig.NamespaceSet()
toolboxes = {'tools': cherrypy.tools}
log = None
"""A LogManager instance. See _cplogging."""
wsgiapp = None
"""A CPWSGIApp instance. See _cpwsgi."""
request_class = _cprequest.Request
response_class = _cprequest.Response
relative_urls = False
def __init__(self, root, script_name="", config=None):
self.log = _cplogging.LogManager(id(self), cherrypy.log.logger_root)
self.root = root
self.script_name = script_name
self.wsgiapp = _cpwsgi.CPWSGIApp(self)
self.namespaces = self.namespaces.copy()
self.namespaces["log"] = lambda k, v: setattr(self.log, k, v)
self.namespaces["wsgi"] = self.wsgiapp.namespace_handler
self.config = self.__class__.config.copy()
if config:
self.merge(config)
def __repr__(self):
return "%s.%s(%r, %r)" % (self.__module__, self.__class__.__name__,
self.root, self.script_name)
script_name_doc = """The URI "mount point" for this app. A mount point
is that portion of the URI which is constant for all URIs that are
serviced by this application; it does not include scheme, host, or proxy
("virtual host") portions of the URI.
For example, if script_name is "/my/cool/app", then the URL
"http://www.example.com/my/cool/app/page1" might be handled by a
"page1" method on the root object.
The value of script_name MUST NOT end in a slash. If the script_name
refers to the root of the URI, it MUST be an empty string (not "/").
If script_name is explicitly set to None, then the script_name will be
provided for each call from request.wsgi_environ['SCRIPT_NAME'].
"""
def _get_script_name(self):
if self._script_name is not None:
return self._script_name
# A `_script_name` with a value of None signals that the script name
# should be pulled from WSGI environ.
return cherrypy.serving.request.wsgi_environ['SCRIPT_NAME'].rstrip("/")
def _set_script_name(self, value):
if value:
value = value.rstrip("/")
self._script_name = value
script_name = property(fget=_get_script_name, fset=_set_script_name,
doc=script_name_doc)
def merge(self, config):
"""Merge the given config into self.config."""
_cpconfig.merge(self.config, config)
# Handle namespaces specified in config.
self.namespaces(self.config.get("/", {}))
def find_config(self, path, key, default=None):
"""Return the most-specific value for key along path, or default."""
trail = path or "/"
while trail:
nodeconf = self.config.get(trail, {})
if key in nodeconf:
return nodeconf[key]
lastslash = trail.rfind("/")
if lastslash == -1:
break
elif lastslash == 0 and trail != "/":
trail = "/"
else:
trail = trail[:lastslash]
return default
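# Example (added for illustration; not part of the original module): with
# app.config == {'/': {'tools.gzip.on': False},
#                '/static': {'tools.gzip.on': True}},
# app.find_config('/static/css/site.css', 'tools.gzip.on') returns True,
# because '/static' is the most specific config section along that path.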
def get_serving(self, local, remote, scheme, sproto):
"""Create and return a Request and Response object."""
req = self.request_class(local, remote, scheme, sproto)
req.app = self
for name, toolbox in self.toolboxes.items():
req.namespaces[name] = toolbox
resp = self.response_class()
cherrypy.serving.load(req, resp)
cherrypy.engine.publish('acquire_thread')
cherrypy.engine.publish('before_request')
return req, resp
def release_serving(self):
"""Release the current serving (request and response)."""
req = cherrypy.serving.request
cherrypy.engine.publish('after_request')
try:
req.close()
except:
cherrypy.log(traceback=True, severity=40)
cherrypy.serving.clear()
def __call__(self, environ, start_response):
return self.wsgiapp(environ, start_response)
class Tree(object):
"""A registry of CherryPy applications, mounted at diverse points.
An instance of this class may also be used as a WSGI callable
(WSGI application object), in which case it dispatches to all
mounted apps.
"""
apps = {}
"""
A dict of the form {script name: application}, where "script name"
is a string declaring the URI mount point (no trailing slash), and
"application" is an instance of cherrypy.Application (or an arbitrary
WSGI callable if you happen to be using a WSGI server)."""
def __init__(self):
self.apps = {}
def mount(self, root, script_name="", config=None):
"""Mount a new app from a root object, script_name, and config.
root
An instance of a "controller class" (a collection of page
handler methods) which represents the root of the application.
This may also be an Application instance, or None if using
a dispatcher other than the default.
script_name
A string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the
URL at which to mount the given root. For example, if root.index()
will handle requests to "http://www.example.com:8080/dept/app1/",
then the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the
root of the URI, it MUST be an empty string (not "/").
config
A file or dict containing application config.
"""
if script_name is None:
raise TypeError(
"The 'script_name' argument may not be None. Application "
"objects may, however, possess a script_name of None (in "
"order to inpect the WSGI environ for SCRIPT_NAME upon each "
"request). You cannot mount such Applications on this Tree; "
"you must pass them to a WSGI server interface directly.")
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip("/")
if isinstance(root, Application):
app = root
if script_name != "" and script_name != app.script_name:
raise ValueError(
"Cannot specify a different script name and pass an "
"Application instance to cherrypy.mount")
script_name = app.script_name
else:
app = Application(root, script_name)
# If mounted at "", add favicon.ico
if (script_name == "" and root is not None
and not hasattr(root, "favicon_ico")):
favicon = os.path.join(os.getcwd(), os.path.dirname(__file__),
"favicon.ico")
root.favicon_ico = tools.staticfile.handler(favicon)
if config:
app.merge(config)
self.apps[script_name] = app
return app
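# Illustrative usage sketch (added; not part of the original module). A
# minimal controller mounted at "/app" could look like:
#
#     class Root(object):
#         @cherrypy.expose
#         def index(self):
#             return "hello"
#
#     cherrypy.tree.mount(Root(), "/app")
#
# The script_name "/app" must not end in a slash; mounting at the URI root
# uses the empty string "".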
def graft(self, wsgi_callable, script_name=""):
"""Mount a wsgi callable at the given script_name."""
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip("/")
self.apps[script_name] = wsgi_callable
def script_name(self, path=None):
"""The script_name of the app at the given path, or None.
If path is None, cherrypy.request is used.
"""
if path is None:
try:
request = cherrypy.serving.request
path = httputil.urljoin(request.script_name,
request.path_info)
except AttributeError:
return None
while True:
if path in self.apps:
return path
if path == "":
return None
# Move one node up the tree and try again.
path = path[:path.rfind("/")]
def __call__(self, environ, start_response):
# If you're calling this, then you're probably setting SCRIPT_NAME
# to '' (some WSGI servers always set SCRIPT_NAME to '').
# Try to look up the app using the full path.
env1x = environ
if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
env1x = _cpwsgi.downgrade_wsgi_ux_to_1x(environ)
path = httputil.urljoin(env1x.get('SCRIPT_NAME', ''),
env1x.get('PATH_INFO', ''))
sn = self.script_name(path or "/")
if sn is None:
start_response('404 Not Found', [])
return []
app = self.apps[sn]
# Correct the SCRIPT_NAME and PATH_INFO environ entries.
environ = environ.copy()
if not py3k:
if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
# Python 2/WSGI u.0: all strings MUST be of type unicode
enc = environ[ntou('wsgi.url_encoding')]
environ[ntou('SCRIPT_NAME')] = sn.decode(enc)
environ[ntou('PATH_INFO')] = path[
len(sn.rstrip("/")):].decode(enc)
else:
# Python 2/WSGI 1.x: all strings MUST be of type str
environ['SCRIPT_NAME'] = sn
environ['PATH_INFO'] = path[len(sn.rstrip("/")):]
else:
if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
# Python 3/WSGI u.0: all strings MUST be full unicode
environ['SCRIPT_NAME'] = sn
environ['PATH_INFO'] = path[len(sn.rstrip("/")):]
else:
# Python 3/WSGI 1.x: all strings MUST be ISO-8859-1 str
environ['SCRIPT_NAME'] = sn.encode(
'utf-8').decode('ISO-8859-1')
environ['PATH_INFO'] = path[
len(sn.rstrip("/")):].encode('utf-8').decode('ISO-8859-1')
return app(environ, start_response)
|
mitsuhiko/sentry
|
refs/heads/master
|
src/sentry/api/serializers/models/grouptagvalue.py
|
1
|
from __future__ import absolute_import
import operator
from django.db.models import Q
from sentry.api.serializers import Serializer, register
from sentry.models import EventUser, GroupTagValue, TagKey, TagValue
def parse_user_tag(value):
lookup, value = value.split(':', 1)
if lookup == 'id':
lookup = 'ident'
elif lookup == 'ip':
lookup = 'ip_address'
elif lookup not in ('email', 'ip_address', 'username'):
raise ValueError('{} is not a valid user attribute'.format(lookup))
return {lookup: value}
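# Examples (added for illustration; not part of the original module):
#
#     parse_user_tag('id:123')               # -> {'ident': '123'}
#     parse_user_tag('ip:127.0.0.1')         # -> {'ip_address': '127.0.0.1'}
#     parse_user_tag('email:a@example.com')  # -> {'email': 'a@example.com'}
#     parse_user_tag('name:foo')             # raises ValueError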
@register(GroupTagValue)
class GroupTagValueSerializer(Serializer):
def get_attrs(self, item_list, user):
project = item_list[0].project
user_lookups = []
for item in item_list:
if item.key != 'sentry:user':
continue
if ':' not in item.value:
continue
try:
user_lookups.append(Q(**parse_user_tag(item.value)))
except ValueError:
continue
tag_labels = {}
if user_lookups:
tag_labels.update({
('sentry:user', euser.tag_value): euser.get_label()
for euser in EventUser.objects.filter(
reduce(operator.or_, user_lookups),
project=project,
)
})
other_lookups = [
Q(key=i.key, value=i.value)
for i in item_list
if i.key != 'sentry:user'
]
if other_lookups:
tag_labels.update({
(t.key, t.value): t.get_label()
for t in TagValue.objects.filter(
reduce(operator.or_, other_lookups),
project=project,
)
})
result = {}
for item in item_list:
try:
label = tag_labels[(item.key, item.value)]
except KeyError:
label = item.value
result[item] = {
'name': label,
}
return result
def serialize(self, obj, attrs, user):
return {
'id': str(obj.id),
'name': attrs['name'],
'key': TagKey.get_standardized_key(obj.key),
'value': obj.value,
'count': obj.times_seen,
'lastSeen': obj.last_seen,
'firstSeen': obj.first_seen,
}
|
WhireCrow/openwrt-mt7620
|
refs/heads/master
|
staging_dir/host/lib/python2.7/lib-tk/tkFileDialog.py
|
196
|
#
# Instant Python
# $Id: tkFileDialog.py 36560 2004-07-18 06:16:08Z tim_one $
#
# tk common file dialogues
#
# this module provides interfaces to the native file dialogues
# available in Tk 4.2 and newer, and the directory dialogue available
# in Tk 8.3 and newer.
#
# written by Fredrik Lundh, May 1997.
#
#
# options (all have default values):
#
# - defaultextension: added to filename if not explicitly given
#
# - filetypes: sequence of (label, pattern) tuples. the same label
# may occur with several patterns. use "*" as pattern to indicate
# all files.
#
# - initialdir: initial directory. preserved by dialog instance.
#
# - initialfile: initial file (ignored by the open dialog). preserved
# by dialog instance.
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
# - multiple: if true user may select more than one file
#
# options for the directory chooser:
#
# - initialdir, parent, title: see above
#
# - mustexist: if true, user must pick an existing directory
#
#
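# Illustrative usage sketch (added for clarity; not part of the original
# module). The options above map directly onto the convenience functions
# defined below, e.g.:
#
#     filename = askopenfilename(initialdir="/tmp",
#                                title="Pick a file",
#                                filetypes=[("Text files", "*.txt"),
#                                           ("all files", "*")])
#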
from tkCommonDialog import Dialog
class _Dialog(Dialog):
def _fixoptions(self):
try:
# make sure "filetypes" is a tuple
self.options["filetypes"] = tuple(self.options["filetypes"])
except KeyError:
pass
def _fixresult(self, widget, result):
if result:
# keep directory and filename until next time
import os
# convert Tcl path objects to strings
try:
result = result.string
except AttributeError:
# it already is a string
pass
path, file = os.path.split(result)
self.options["initialdir"] = path
self.options["initialfile"] = file
self.filename = result # compatibility
return result
#
# file dialogs
class Open(_Dialog):
"Ask for a filename to open"
command = "tk_getOpenFile"
def _fixresult(self, widget, result):
if isinstance(result, tuple):
# multiple results:
result = tuple([getattr(r, "string", r) for r in result])
if result:
import os
path, file = os.path.split(result[0])
self.options["initialdir"] = path
# don't set initialfile or filename, as we have multiple of these
return result
if not widget.tk.wantobjects() and "multiple" in self.options:
# Need to split result explicitly
return self._fixresult(widget, widget.tk.splitlist(result))
return _Dialog._fixresult(self, widget, result)
class SaveAs(_Dialog):
"Ask for a filename to save as"
command = "tk_getSaveFile"
# the directory dialog has its own _fix routines.
class Directory(Dialog):
"Ask for a directory"
command = "tk_chooseDirectory"
def _fixresult(self, widget, result):
if result:
# convert Tcl path objects to strings
try:
result = result.string
except AttributeError:
# it already is a string
pass
# keep directory until next time
self.options["initialdir"] = result
self.directory = result # compatibility
return result
#
# convenience stuff
def askopenfilename(**options):
"Ask for a filename to open"
return Open(**options).show()
def asksaveasfilename(**options):
"Ask for a filename to save as"
return SaveAs(**options).show()
def askopenfilenames(**options):
"""Ask for multiple filenames to open
Returns a list of filenames, or an empty list if the
cancel button is selected
"""
options["multiple"]=1
return Open(**options).show()
# FIXME: are the following perhaps a bit too convenient?
def askopenfile(mode = "r", **options):
"Ask for a filename to open, and returned the opened file"
filename = Open(**options).show()
if filename:
return open(filename, mode)
return None
def askopenfiles(mode = "r", **options):
"""Ask for multiple filenames and return the open file
objects
returns a list of open file objects or an empty list if
cancel selected
"""
files = askopenfilenames(**options)
if files:
ofiles=[]
for filename in files:
ofiles.append(open(filename, mode))
files=ofiles
return files
def asksaveasfile(mode = "w", **options):
"Ask for a filename to save as, and returned the opened file"
filename = SaveAs(**options).show()
if filename:
return open(filename, mode)
return None
def askdirectory (**options):
"Ask for a directory, and return the file name"
return Directory(**options).show()
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
# Since the file name may contain non-ASCII characters, we need
# to find an encoding that likely supports the file name, and
# displays correctly on the terminal.
# Start off with UTF-8
enc = "utf-8"
import sys
# See whether CODESET is defined
try:
import locale
locale.setlocale(locale.LC_ALL,'')
enc = locale.nl_langinfo(locale.CODESET)
except (ImportError, AttributeError):
pass
# dialog for opening files
openfilename=askopenfilename(filetypes=[("all files", "*")])
try:
fp=open(openfilename,"r")
fp.close()
except:
print "Could not open File: "
print sys.exc_info()[1]
print "open", openfilename.encode(enc)
# dialog for saving files
saveasfilename=asksaveasfilename()
print "saveas", saveasfilename.encode(enc)
|
adelina-t/neutron
|
refs/heads/master
|
neutron/services/metering/agents/__init__.py
|
12133432
| |
abhidrona/gn-osc-custom
|
refs/heads/master
|
tests/unit/__init__.py
|
12133432
| |
pawaranand/phrerp
|
refs/heads/develop
|
erpnext/contacts/doctype/party_type/__init__.py
|
12133432
| |
armirusco/djangae
|
refs/heads/master
|
djangae/db/backends/__init__.py
|
12133432
| |
xujun10110/pupy
|
refs/heads/master
|
pupy/packages/windows/all/pupwinutils/__init__.py
|
12133432
| |
MichaelNedzelsky/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/function/before/src/a.py
|
83
|
from lib1 import urlopen
def f(url):
'''Return the representation available at the URL.
'''
return urlopen(url).read()
def f_usage():
return f(14)
class C(object):
def g(self, x):
return x
class D(C):
def g(self, x, y):
return super(D, self).f(x) + y
class E(object):
def g(self):
return -1
|
wfwei/ReadWeibo
|
refs/heads/master
|
ReadWeibo/account/tests.py
|
6666
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
|
vmahuli/contrail-controller
|
refs/heads/master
|
src/analytics/ruleparser/echoaction.py
|
20
|
#! /usr/bin/env /usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
# this is called from c++ code and is expected to be passed
# the following variables
# paramlist - a list of strings considered as parameters
import sys
actionresult = "echoaction.py"
i = 0
while i < len(paramlist):
actionresult = actionresult + " " + paramlist[i]
i = i + 1
|
pankajp/pyside
|
refs/heads/master
|
tests/QtGui/qinputdialog_get_test.py
|
6
|
import unittest
from PySide import QtCore, QtGui
from helper import UsesQApplication, TimedQApplication
class TestInputDialog(TimedQApplication):
def testGetDouble(self):
self.assertEquals(QtGui.QInputDialog.getDouble(None, "title", "label"), (0.0, False))
def testGetInt(self):
self.assertEquals(QtGui.QInputDialog.getInt(None, "title", "label"), (0, False))
def testGetInteger(self):
self.assertEquals(QtGui.QInputDialog.getInteger(None, "title", "label"), (0, False))
def testGetItem(self):
(item, bool) = QtGui.QInputDialog.getItem(None, "title", "label", ["1", "2", "3"])
self.assertEquals(str(item), "1")
def testGetText(self):
(text, bool) = QtGui.QInputDialog.getText(None, "title", "label")
self.assertEquals(str(text),"")
if __name__ == '__main__':
unittest.main()
|
omni5cience/django-inlineformfield
|
refs/heads/master
|
.tox/py27/lib/python2.7/site-packages/django/contrib/gis/tests/gis_migrations/migrations/__init__.py
|
12133432
| |
suyashphadtare/sajil-final-erp
|
refs/heads/develop
|
erpnext/erpnext/accounts/doctype/payment_reconciliation/__init__.py
|
12133432
| |
czpython/aldryn-newsblog
|
refs/heads/master
|
aldryn_newsblog/south_migrations/__init__.py
|
12133432
| |
AOSPU/external_chromium_org
|
refs/heads/android-5.0/py3
|
tools/telemetry/third_party/__init__.py
|
12133432
| |
alfcrisci/httpie
|
refs/heads/master
|
httpie/output/formatters/__init__.py
|
12133432
| |
stackforge/monasca-common
|
refs/heads/master
|
monasca_common/rest/__init__.py
|
12133432
| |
jcurry/ZenPacks.ZenSystems.ApcPdu
|
refs/heads/master
|
ZenPacks/ZenSystems/ApcPdu/modeler/__init__.py
|
12133432
| |
initNirvana/Easyphotos
|
refs/heads/master
|
env/lib/python3.4/site-packages/IPython/kernel/tests/__init__.py
|
12133432
| |
youprofit/NewsBlur
|
refs/heads/master
|
vendor/zebra/__init__.py
|
12133432
| |
openstack/cinder
|
refs/heads/master
|
cinder/volume/drivers/prophetstor/__init__.py
|
12133432
| |
caot/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/es/formats.py
|
232
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \de F \de Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'j \de F \de Y \a \l\a\s H:i'
YEAR_MONTH_FORMAT = r'F \de Y'
MONTH_DAY_FORMAT = r'j \de F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
# '31/12/2009', '31/12/09'
'%d/%m/%Y', '%d/%m/%y'
)
TIME_INPUT_FORMATS = (
# '14:30:59', '14:30'
'%H:%M:%S', '%H:%M'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S',
'%d/%m/%Y %H:%M',
'%d/%m/%y %H:%M:%S',
'%d/%m/%y %H:%M',
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
mdrumond/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/lib/debug_graphs.py
|
65
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and methods for processing debugger-decorated graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.platform import tf_logging as logging
def parse_node_or_tensor_name(name):
"""Get the node name from a string that can be node or tensor name.
Args:
name: An input node name (e.g., "node_a") or tensor name (e.g.,
"node_a:0"), as a str.
Returns:
1) The node name, as a str. If the input name is a tensor name, i.e.,
contains a colon, the final colon and the following output slot
will be stripped.
2) If the input name is a tensor name, the output slot, as an int. If
the input name is not a tensor name, None.
"""
if ":" in name and not name.endswith(":"):
node_name = name[:name.rfind(":")]
output_slot = int(name[name.rfind(":") + 1:])
return node_name, output_slot
else:
return name, None
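# Examples (added for illustration; not part of the original module):
#
#     parse_node_or_tensor_name("node_a:0")  # -> ("node_a", 0)
#     parse_node_or_tensor_name("node_a")    # -> ("node_a", None)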
def get_node_name(element_name):
node_name, _ = parse_node_or_tensor_name(element_name)
return node_name
def get_output_slot(element_name):
"""Get the output slot number from the name of a graph element.
If element_name is a node name without output slot at the end, 0 will be
assumed.
Args:
element_name: (`str`) name of the graph element in question.
Returns:
(`int`) output slot number.
"""
_, output_slot = parse_node_or_tensor_name(element_name)
return output_slot if output_slot is not None else 0
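# Examples (added for illustration; not part of the original module):
#
#     get_output_slot("node_a:2")  # -> 2
#     get_output_slot("node_a")    # -> 0 (slot 0 is assumed)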
def is_copy_node(node_name):
"""Determine whether a node name is that of a debug Copy node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_options.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug Copy
node.
"""
return node_name.startswith("__copy_")
def is_debug_node(node_name):
"""Determine whether a node name is that of a debug node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_options.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug node.
"""
return node_name.startswith("__dbg_")
def parse_debug_node_name(node_name):
"""Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
4. Name of the debug op, as a str, e.g, "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name.
"""
prefix = "__dbg_"
name = node_name
if not name.startswith(prefix):
raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)
name = name[len(prefix):]
if name.count("_") < 2:
raise ValueError("Invalid debug node name: '%s'" % node_name)
debug_op = name[name.rindex("_") + 1:]
name = name[:name.rindex("_")]
debug_op_index = int(name[name.rindex("_") + 1:])
name = name[:name.rindex("_")]
if name.count(":") != 1:
raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
watched_node_name = name[:name.index(":")]
watched_output_slot = int(name[name.index(":") + 1:])
return watched_node_name, watched_output_slot, debug_op_index, debug_op
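# Example (added for illustration; not part of the original module): a debug
# node named "__dbg_node_a:0_0_DebugIdentity" parses to
#
#     ("node_a", 0, 0, "DebugIdentity")
#
# i.e. (watched node, watched output slot, debug op index, debug op name).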
class GraphTracingReachedDestination(Exception):
pass
class DFSGraphTracer(object):
"""Graph input tracer using depth-first search."""
def __init__(self,
input_lists,
skip_node_names=None,
destination_node_name=None):
"""Constructor of _DFSGraphTracer.
Args:
input_lists: A list of dicts. Each dict is an adjacency (input) map from
the recipient node name as the key and the list of input node names
as the value.
skip_node_names: Optional: a list of node names to skip tracing.
destination_node_name: Optional: destination node name. If not `None`, it
should be the name of a destination node, as a str, and the graph tracing
will raise GraphTracingReachedDestination as soon as the node has been
reached.
Raises:
GraphTracingReachedDestination: if stop_at_node_name is not None and
the specified node is reached.
"""
self._input_lists = input_lists
self._skip_node_names = skip_node_names
self._inputs = []
self._visited_nodes = []
self._depth_count = 0
self._depth_list = []
self._destination_node_name = destination_node_name
def trace(self, graph_element_name):
"""Trace inputs.
Args:
graph_element_name: Name of the node or an output tensor of the node, as a
str.
Raises:
GraphTracingReachedDestination: if destination_node_name of this tracer
object is not None and the specified node is reached.
"""
self._depth_count += 1
node_name = get_node_name(graph_element_name)
if node_name == self._destination_node_name:
raise GraphTracingReachedDestination()
if node_name in self._skip_node_names:
return
if node_name in self._visited_nodes:
return
self._visited_nodes.append(node_name)
for input_list in self._input_lists:
if node_name not in input_list:
continue
for inp in input_list[node_name]:
if get_node_name(inp) in self._visited_nodes:
continue
self._inputs.append(inp)
self._depth_list.append(self._depth_count)
self.trace(inp)
self._depth_count -= 1
def inputs(self):
return self._inputs
def depth_list(self):
return self._depth_list
def _infer_device_name(graph_def):
"""Infer device name from a partition GraphDef."""
device_name = None
for node in graph_def.node:
if node.device:
device_name = node.device
break
if device_name is None:
logging.warn(
"Failed to infer device name from partition GraphDef: none of the "
"nodes of the GraphDef has a non-empty device name.")
return device_name
class DebugGraph(object):
"""Represents a debugger-decorated graph."""
def __init__(self, debug_graph_def, device_name=None):
self._debug_graph_def = debug_graph_def
self._non_debug_graph_def = None
self._node_attributes = {}
self._node_inputs = {}
self._node_reversed_ref_inputs = {}
self._node_ctrl_inputs = {}
self._node_recipients = {}
self._node_ctrl_recipients = {}
self._node_devices = {}
self._node_op_types = {}
self._copy_send_nodes = []
self._ref_args = {}
self._device_name = device_name
if not self._device_name:
self._device_name = _infer_device_name(debug_graph_def)
for node in debug_graph_def.node:
self._process_debug_graph_node(node)
self._prune_non_control_edges_of_debug_ops()
self._prune_control_edges_of_debug_ops()
self._prune_nodes_from_input_and_recipient_maps(self._get_copy_nodes())
self._populate_recipient_maps()
def _process_debug_graph_node(self, node):
"""Process a node from the debug GraphDef.
Args:
node: (NodeDef) A partition-graph node to be processed.
Raises:
ValueError: If duplicate node names are encountered.
"""
if is_debug_node(node.name):
# This is a debug node. Parse the node name and retrieve the
# information about debug watches on tensors. But do not include
# the node in the graph.
return
if node.name in self._node_inputs:
raise ValueError("Duplicate node name on device %s: '%s'" %
(self._device_name, node.name))
self._node_attributes[node.name] = node.attr
self._node_inputs[node.name] = []
self._node_ctrl_inputs[node.name] = []
self._node_recipients[node.name] = []
self._node_ctrl_recipients[node.name] = []
if node.name not in self._node_devices:
self._node_devices[node.name] = set()
self._node_devices[node.name].add(
node.device if node.device else self._device_name)
self._node_op_types[node.name] = node.op
self._ref_args[node.name] = self._get_ref_args(node)
for inp in node.input:
if is_copy_node(inp) and (node.op == "_Send" or node.op == "_Retval"):
self._copy_send_nodes.append(node.name)
if inp.startswith("^"):
cinp = inp[1:]
self._node_ctrl_inputs[node.name].append(cinp)
else:
self._node_inputs[node.name].append(inp)
def _get_ref_args(self, node):
"""Determine whether an input of an op is ref-type.
Args:
node: A `NodeDef`.
Returns:
A list of the arg names (as strs) that are ref-type.
"""
op_def = op_def_registry.get_registered_ops().get(node.op)
ref_args = []
if op_def:
for i, output_arg in enumerate(op_def.output_arg):
if output_arg.is_ref:
arg_name = node.name if i == 0 else ("%s:%d" % (node.name, i))
ref_args.append(arg_name)
return ref_args
def _get_copy_nodes(self):
"""Find all Copy nodes in the loaded graph."""
copy_nodes = []
for node in self._node_inputs:
if is_copy_node(node):
copy_nodes.append(node)
return copy_nodes
def _prune_non_control_edges_of_debug_ops(self):
"""Prune (non-control) edges related to debug ops.
Prune the Copy ops and associated _Send ops inserted by the debugger out
from the non-control inputs and output recipients map. Replace the inputs
and recipients with original ones.
"""
for node in self._node_inputs:
inputs = self._node_inputs[node]
for i in xrange(len(inputs)):
inp = inputs[i]
if is_copy_node(inp):
# Find the input to the Copy node, which should be the original
# input to the node.
orig_inp = self._node_inputs[inp][0]
inputs[i] = orig_inp
def _prune_control_edges_of_debug_ops(self):
"""Prune control edges related to the debug ops."""
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
debug_op_inputs = []
for ctrl_inp in ctrl_inputs:
if is_debug_node(ctrl_inp):
debug_op_inputs.append(ctrl_inp)
for debug_op_inp in debug_op_inputs:
ctrl_inputs.remove(debug_op_inp)
def _populate_recipient_maps(self):
"""Populate the map from node name to recipient(s) of its output(s).
This method also populates the input map based on reversed ref edges.
"""
for node in self._node_inputs:
inputs = self._node_inputs[node]
for inp in inputs:
inp = get_node_name(inp)
if inp not in self._node_recipients:
self._node_recipients[inp] = []
self._node_recipients[inp].append(node)
if inp in self._ref_args:
if inp not in self._node_reversed_ref_inputs:
self._node_reversed_ref_inputs[inp] = []
self._node_reversed_ref_inputs[inp].append(node)
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
for ctrl_inp in ctrl_inputs:
if ctrl_inp in self._copy_send_nodes:
continue
if ctrl_inp not in self._node_ctrl_recipients:
self._node_ctrl_recipients[ctrl_inp] = []
self._node_ctrl_recipients[ctrl_inp].append(node)
def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):
"""Prune nodes out of input and recipient maps.
Args:
nodes_to_prune: (`list` of `str`) Names of the nodes to be pruned.
"""
for node in nodes_to_prune:
del self._node_inputs[node]
del self._node_ctrl_inputs[node]
del self._node_recipients[node]
del self._node_ctrl_recipients[node]
def _reconstruct_non_debug_graph_def(self):
"""Reconstruct non-debug GraphDef.
Non-debug GraphDef means the original GraphDef without the Copy* and Debug
nodes inserted by the debugger.
"""
if self._non_debug_graph_def:
return
self._non_debug_graph_def = graph_pb2.GraphDef()
for node in self._debug_graph_def.node:
if is_copy_node(node.name) or is_debug_node(node.name):
continue
new_node = self._non_debug_graph_def.node.add()
new_node.CopyFrom(node)
# Redo the list of inputs, because in _debug_graph_def, the list can
# consist of Copy* and Debug* nodes inserted by the debugger. Those will
# be replaced with the original inputs here.
del new_node.input[:]
for inp in self._node_inputs[node.name]:
new_node.input.append(inp)
for ctrl_inp in self._node_ctrl_inputs[node.name]:
new_node.input.append("^" + ctrl_inp)
@property
def device_name(self):
return self._device_name
@property
def debug_graph_def(self):
"""The debugger-decorated GraphDef."""
return self._debug_graph_def
@property
def non_debug_graph_def(self):
"""The GraphDef without the Copy* and Debug* nodes added by the debugger."""
self._reconstruct_non_debug_graph_def()
return self._non_debug_graph_def
@property
def node_devices(self):
return self._node_devices
@property
def node_op_types(self):
return self._node_op_types
@property
def node_attributes(self):
return self._node_attributes
@property
def node_inputs(self):
return self._node_inputs
@property
def node_ctrl_inputs(self):
return self._node_ctrl_inputs
@property
def node_reversed_ref_inputs(self):
return self._node_reversed_ref_inputs
@property
def node_recipients(self):
return self._node_recipients
@property
def node_ctrl_recipients(self):
return self._node_ctrl_recipients
def reconstruct_non_debug_graph_def(debug_graph_def):
"""Reconstruct original (non-debugger-decorated) partition GraphDef.
This method strips the input `tf.GraphDef` of the Copy* and Debug*-type nodes
inserted by the debugger.
The reconstructed partition graph is identical to the original (i.e.,
non-debugger-decorated) partition graph except in the following respects:
1) The exact names of the runtime-inserted internal nodes may differ.
These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops.
2) As a consequence of 1, the nodes that receive input directly from such
send- and recv-type ops will have different input names.
3) The parallel_iteration attribute of while-loop Enter ops is set to 1.
Args:
debug_graph_def: The debugger-decorated `tf.GraphDef`, with the
debugger-inserted Copy* and Debug* nodes.
Returns:
The reconstructed `tf.GraphDef` stripped of the debugger-inserted nodes.
"""
return DebugGraph(debug_graph_def).non_debug_graph_def
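# Illustrative usage sketch (added; not part of the original module). A
# debugger-decorated partition graph, e.g. one collected in
# RunMetadata.partition_graphs, can be stripped back to its original form
# with:
#
#     stripped = reconstruct_non_debug_graph_def(run_metadata.partition_graphs[0])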
|
ajkavanagh/charms.openstack
|
refs/heads/master
|
charms_openstack/os_release_data.py
|
1
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# OpenStackCharm() - base class to build OpenStack charms from, for the
# reactive framework.
# need/want absolute imports for the package imports to work properly
KNOWN_RELEASES = [
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
]
|
ict-felix/stack
|
refs/heads/master
|
vt_manager/src/python/vt_manager/settings/settingsLoader.py
|
2
|
"""
@author: msune, CarolinaFernandez
Ofelia VT manager settings loader
"""
# Import static settings
from vt_manager.settings.staticSettings import *
# Import user settings
from vt_manager.mySettings import *
# Load database info
DATABASES = {
'default': {
'ENGINE': "django.db.backends.%s" % DATABASE_ENGINE,
'NAME': DATABASE_NAME,
'USER': DATABASE_USER,
'PASSWORD': DATABASE_PASSWORD,
'HOST': DATABASE_HOST,
}
}
#Must be here
ADMINS = [
("expedient", ROOT_EMAIL),
]
MANAGERS = ADMINS
#from vt_manager.utils.ThemeManager import initialize
|
willemw12/get_iplayer_downloader
|
refs/heads/master
|
src/get_iplayer_downloader/get_iplayer.py
|
1
|
""" Perform get_iplayer commands. """
import ast
import logging
import os
from get_iplayer_downloader import search_cache, settings
from get_iplayer_downloader.tools import command, command_queue, config, string
logger = logging.getLogger(__name__)
####
RADIO_DOWNLOAD_PATH = settings.get_config().get("radio", "download-path")
TV_DOWNLOAD_PATH = settings.get_config().get("tv", "download-path")
# Labels for disabled filters
ALL_CATEGORIES_LABEL = "Categories"
ALL_CHANNELS_LABEL = "Channels"
SINCE_FOREVER_LABEL = "Since"
# Indices of a key-value pair
KEY_INDEX = 0
VALUE_INDEX = 1
####
_GET_IPLAYER_PROG = "get_iplayer"
#_SINCE_HOUR_MARGIN = 6
_SINCE_HOUR_MARGIN = string.str2int(settings.get_config().get(config.NOSECTION, "since-margin-hours", fallback=6))
_COMPACT_TOOLBAR = string.str2bool(settings.get_config().get(config.NOSECTION, "compact-tool-bar"))
_ALL_CATEGORIES_LABEL = ALL_CATEGORIES_LABEL if _COMPACT_TOOLBAR else ""
_ALL_CHANNELS_LABEL = ALL_CHANNELS_LABEL if _COMPACT_TOOLBAR else ""
_SINCE_FOREVER_LABEL = SINCE_FOREVER_LABEL if _COMPACT_TOOLBAR else ""
#_SINCE_FUTURE_LABEL = "Future"
####
# List of key-value pairs
#SINCE_LIST = [[0, _SINCE_FOREVER_LABEL], [1, _SINCE_FUTURE_LABEL],
SINCE_LIST = [[0, _SINCE_FOREVER_LABEL],
# [ 4, "4 hours"], [8, "8 hours"], [12, "12 hours"], [16, "16 hours"], [20, "20 hours"],
# [ 24 + _SINCE_HOUR_MARGIN, "1 day" ], [ 48 + _SINCE_HOUR_MARGIN, "2 days"],
[ 2, "2 hours"], [ 4, "4 hours"], [6, "6 hours"], [8, "8 hours"], [10, "10 hours"],
[ 12, "12 hours"], [14, "14 hours"], [16, "16 hours"], [18, "18 hours"], [20, "20 hours"],
[ 24 + _SINCE_HOUR_MARGIN, "1 day" ], [ 48 + _SINCE_HOUR_MARGIN, "2 days"],
[ 72 + _SINCE_HOUR_MARGIN, "3 days"], [ 96 + _SINCE_HOUR_MARGIN, "4 days"],
[120 + _SINCE_HOUR_MARGIN, "5 days"], [144 + _SINCE_HOUR_MARGIN, "6 days"],
[168 + _SINCE_HOUR_MARGIN, "7 days"]]
#30 DAYS EPISODE AVAILABILITY
#SINCE_LIST = [[0, _SINCE_FOREVER_LABEL],
# [ 4, "4 hours"], [8, "8 hours"], [12, "12 hours"], [16, "16 hours"], [20, "20 hours"],
# [ 24 + _SINCE_HOUR_MARGIN, "1 day" ], [ 48 + _SINCE_HOUR_MARGIN, "2 days"],
# [ 72 + _SINCE_HOUR_MARGIN, "3 days"], [ 96 + _SINCE_HOUR_MARGIN, "4 days"],
# [120 + _SINCE_HOUR_MARGIN, "5 days"], [144 + _SINCE_HOUR_MARGIN, "6 days"],
# [168 + _SINCE_HOUR_MARGIN, "1 week"], [336 + _SINCE_HOUR_MARGIN, "2 weeks"],
# [504 + _SINCE_HOUR_MARGIN, "3 weeks"], [672 + _SINCE_HOUR_MARGIN, "4 weeks"]]
class Preset:
# "preset-file": filename in folder ~/.get_iplayer/presets
RADIO = settings.get_config().get("radio", "preset-file")
TV = settings.get_config().get("tv", "preset-file")
class ProgType:
##ALL = "all"
#ALL = "radio,tv"
RADIO = "radio"
TV = "tv"
CH4 = "ch4"
ITV = "itv"
####
# List of key-value pairs
#NOTE Cannot extend a constant list: RADIO = [[None, "Genre"]].extend(...)
#WORKAROUND see get_iplayer_gui.py (at least in Python 2.7)
# RADIO = [[None, "Genre"]] --> #RADIO = [["", "Genre"]]
class Channels:
RADIO = _ALL_CHANNELS_LABEL + "," + settings.get_config().get("radio", "channels")
TV = _ALL_CHANNELS_LABEL + "," + settings.get_config().get("tv", "channels")
CH4 = _ALL_CHANNELS_LABEL
ITV = _ALL_CHANNELS_LABEL
class Categories:
#@staticmethod
def _merge_keys(key_value_list):
key_list = [row[KEY_INDEX] for row in key_value_list]
keys = ""
for i, key in enumerate(key_list):
#if i == 0 or not key:
if not key:
# Skip predefined or user-defined "all" filters
continue
keys += key
if (i < len(key_list) - 1):
keys += ","
# Use set to avoid duplicates
key_set = set(keys.split(","))
keys = ",".join(key_set)
return keys
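    # Example (sketch): _merge_keys([["", "Categories"], ["Comedy", "Comedy"],
    #                                ["Drama,Films", "Drama & Films"]])
    # yields the keys "Comedy", "Drama" and "Films" joined by commas; the order
    # is undefined because a set is used to drop duplicates.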
# No filter list
ALL = [["", _ALL_CATEGORIES_LABEL]]
    # The filter lists below consist of all categories in the first item, followed by the categories separately
RADIO = ast.literal_eval(settings.get_config().get("radio", "categories"))
# Prepend
RADIO.insert(0, [_merge_keys(RADIO), _ALL_CATEGORIES_LABEL])
TV = ast.literal_eval(settings.get_config().get("tv", "categories"))
# Prepend
TV.insert(0, [_merge_keys(TV), _ALL_CATEGORIES_LABEL])
####
class SinceListIndex:
FOREVER = 0
#FUTURE = 1
class SearchResultColumn:
DOWNLOAD = 0
PID = 1
INDEX = 2
SERIES = 3
EPISODE = 4
CATEGORIES = 5
CHANNELS = 6
THUMBNAIL_SMALL = 7
AVAILABLE = 8
DURATION = 9
# Additional data derived from fields above in this class
LOCATE_SEARCH_TERM = 10
####
def precheck(quiet=False):
log_output = ""
# Check required program(s)
# Call get_iplayer directly, instead of wrapping it in "shell code", to check whether it is installed or not
process_output = command.run(_GET_IPLAYER_PROG + " --usage", quiet=True, temp_pathname=settings.TEMP_PATHNAME)
#TODO not Linux specific ("not found")
if quiet and "not found" in process_output:
# command.run() already logs the error (logger.warning())
log_output += "WARNING:{0}".format(process_output)
# Check get_iplayer preset files
if not string.str2bool(settings.get_config().get(config.NOSECTION, "disable-presets")):
pathname = os.path.join(os.path.expanduser("~"), ".get_iplayer", "presets")
for preset in [Preset.RADIO, Preset.TV]:
filename = os.path.join(pathname, preset)
if not os.path.exists(filename):
msg = "preset file {0} does not exist".format(filename)
logger.warning(msg)
log_output += "WARNING:{0}".format(msg)
return log_output
#NOTE Logging is not fully initialized yet
#check_preset_files()
####
def categories(search_text, preset=None, prog_type=None):
""" Run get_iplayer --list=categories.
    Return a table of key-value pairs where both the key and the value are the category name.
"""
cmd = _GET_IPLAYER_PROG
if preset:
cmd += " --preset=" + preset
if prog_type:
cmd += " --type=" + prog_type
cmd += " --list=categories --nocopyright"
if search_text:
cmd += " \"" + search_text + "\""
process_output = command.run(cmd, quiet=True)
lines = process_output.splitlines()
output_lines = []
for line in lines:
# Skip empty or message lines
if line and line[0] and not line.startswith("INFO:") and not line.startswith("Matches:"):
# Strip the count number from the categories name
categories_key = line.rsplit(" ", 1)[0].rstrip()
categories_value = categories_key
output_lines.append([categories_key, categories_value])
return output_lines
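# Sketch only (not called anywhere in this module): shows how the key-value
# pairs returned by categories() above could be consumed; the preset/type
# combination is an assumption.
def _example_categories_usage():
    for key, value in categories("", preset=Preset.RADIO, prog_type=ProgType.RADIO):
        logger.debug("category %s = %s", key, value)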
def channels(search_text, preset=None, prog_type=None, compact=False):
""" Run get_iplayer --list=channel. @compact is False: strip leading "BBC " substring.
Return comma-separated list of channels.
"""
cmd = _GET_IPLAYER_PROG
if preset:
cmd += " --preset=" + preset
if prog_type:
cmd += " --type=" + prog_type
cmd += " --list=channel --nocopyright"
if search_text:
cmd += " \"" + search_text + "\""
process_output = command.run(cmd, quiet=True)
lines = process_output.splitlines()
first_value = True
output_line = ""
for line in lines:
# Skip empty or message lines
if line and line[0] and not line.startswith("INFO:") and not line.startswith("Matches:"):
if first_value:
first_value = False
else:
output_line += ","
if compact and line.startswith("BBC "):
# Remove leading "BBC " substring
line = line[len("BBC "):]
# Strip the count number from the channel name
output_line += line.rsplit(" ", 1)[0].rstrip()
return output_line
def search(search_text, preset=None, prog_type=None,
channels=None, exclude_channels=None,
categories=None, exclude_categories=None,
since=0, future=False):
""" Run get_iplayer (--search).
Return search result in a table with columns listed in SearchResultColumn.
If cached search results are present, return that instead.
"""
output_lines = search_cache.get(prog_type)
if output_lines is not None:
# Skip 'get_iplayer --search' when search results have been cached
return output_lines
####
# PERL_UNICODE=S : avoid "Wide character in print" warning/error messages
#cmd = _GET_IPLAYER_PROG
cmd = "PERL_UNICODE=S " + _GET_IPLAYER_PROG
#if not preset:
# #cmd += " --type=all"
# cmd += " --type=" + ProgType.ALL
#else:
if preset:
cmd += " --preset=" + preset
if prog_type:
cmd += " --type=" + prog_type
if channels:
cmd += " --channel=\"" + channels + "\""
if exclude_channels:
cmd += " --exclude-channel=\"" + exclude_channels + "\""
if categories:
cmd += " --category=\"" + categories + "\""
if exclude_categories:
cmd += " --exclude-category=\"" + exclude_categories + "\""
if since:
cmd += " --since=" + str(since)
if future:
cmd += " --future"
cmd += " --listformat=\"<pid>|<index>|<name>|<episode> ~ <desc>|<categories>|<channel>|<thumbnail>|<available>|<duration>\""
# --fields: perform the same search as with "--long" plus on "pid"
cmd += " --fields=\"name,episode,desc,pid\" --nocopyright"
if search_text:
# Simple exclude search option
if search_text.startswith("-"):
cmd += " --exclude=\"" + search_text[1:] + "\""
else:
cmd += " \"" + search_text + "\""
else:
# Wildcard search
cmd += " \".*\""
process_output = command.run(cmd)
# Convert the process output lines directly to lists, matching the GtkTreeStore model data. Do not create intermediate data/interfaces
lines = process_output.splitlines()
output_lines = []
title_prev = None
level = 0
copy = False
for line in lines:
if not "|" in line:
# Skip (log) message lines
continue;
#NOTE with "def __len__()" in a metaclass: l = line.split("|", len(SearchResultColumn) - 1)
l = line.split("|", 11 - 1) # , len(SearchResultColumn) - 1)
        # Make sure the line array contains at least 11 items (avoid IndexError exceptions)
        # This is better than catching IndexError exceptions below, which would discard the whole episode line
        # TODO sanitize process_output.
        # An episode description sometimes contains a newline character or a | character.
        # Split() will only split the first line
for unused in range(len(l), 11): # , len(SearchResultColumn.attributes))
l.extend([''])
#ALTERNATIVE catch exceptions
# Skip empty lines
if l[0]:
# Match string containing only spaces
#ALTERNATIVE
#1) all(c in " " for c in l)
#2) p = re.compile('[ -]+$'); p.match(l)
#3) re.match("^[ ]+$", l)
if level == 0 and l[2] != title_prev:
# Going from series level (parent/root/level 0) to episode level (child/leave/level 1)
level = 1
copy = True
# if title_prev:
# Add series line
try:
output_lines.append([False, None, None, l[2], None, l[4], l[5], l[6], l[7], l[8], l[2]])
except IndexError: # as exc:
pass
elif level == 1 and l[2] != title_prev:
# Going from episode level (child/leave/level 1) to series level (parent/root/level 0)
level = 0
copy = False
if level == 1 and copy:
# Add an episode line
try:
if l[3].startswith(" ~ "):
# No episode title
output_lines.append([False, l[0], l[1], None, l[3][len(" ~ "):], l[4], l[5], l[6], l[7], l[8], None])
elif l[3].endswith(" ~ "):
# No episode description
output_lines.append([False, l[0], l[1], None, l[3][:len(l[3])-len(" ~ ")], l[4], l[5], l[6], l[7], l[8], None])
else:
output_lines.append([False, l[0], l[1], None, l[3], l[4], l[5], l[6], l[7], l[8], None])
except IndexError: # as exc:
pass
title_prev = l[2]
return output_lines
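# Sketch only (not called anywhere in this module): shows how the rows
# returned by search() above map onto SearchResultColumn; the search text
# and the 24-hour window are arbitrary example values.
def _example_search_usage():
    rows = search("news", preset=Preset.TV, prog_type=ProgType.TV, since=24)
    for row in rows:
        logger.debug("series=%s episode=%s", row[SearchResultColumn.SERIES],
                     row[SearchResultColumn.EPISODE])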
def get(search_term_list, pid=True, pvr_queue=False, preset=None, prog_type=None,
alt_recording_mode=False, dry_run=False, force=False, output_path=None,
categories=None, future=False):
""" Run get_iplayer --get, get_iplayer --pid or get_iplayer --pvrqueue.
If @pid is true, then @search_term_list contains pids.
Return tuple: launched boolean, process output string.
"""
if preset == Preset.RADIO:
output_path = RADIO_DOWNLOAD_PATH
elif preset == Preset.TV:
output_path = TV_DOWNLOAD_PATH
else:
output_path = None
#WORKAROUND Preset can be None: disable-presets is true AND data models and configuration are based on presets, not on programme types
#if preset and string.str2bool(settings.get_config().get(preset, "run-in-terminal")):
# terminal_prog = settings.get_config().get(config.NOSECTION, "terminal-emulator")
#else:
# terminal_prog = None
preset_fallback = None
if preset:
preset_fallback = preset
else:
# Determine preset from programme type
if prog_type == ProgType.RADIO:
preset_fallback = Preset.RADIO
elif prog_type == ProgType.TV:
            preset_fallback = Preset.TV
if prog_type and string.str2bool(settings.get_config().get(preset_fallback, "run-in-terminal")):
terminal_prog = settings.get_config().get(config.NOSECTION, "terminal-emulator")
else:
terminal_prog = None
if alt_recording_mode:
if prog_type == ProgType.CH4:
alt_radio_modes = ""
alt_tv_modes = "flashnormal"
elif prog_type == ProgType.ITV:
alt_radio_modes = ""
alt_tv_modes = "itvnormal,itvhigh,itvlow"
else:
alt_radio_modes = settings.get_config().get(Preset.RADIO, "recording-modes")
alt_tv_modes = settings.get_config().get(Preset.TV, "recording-modes")
#cmd = "( for i in"
#for search_term_row in search_term_list:
# cmd += " " + search_term_row[SearchTermColumn.PID_OR_INDEX]
#cmd += "; do " + _GET_IPLAYER_PROG
cmd = ""
for i, search_term in enumerate(search_term_list):
cmd += _GET_IPLAYER_PROG + " --hash"
if preset:
cmd += " --preset=" + preset
#WORKAROUND Preset can be None: disable-presets is true AND models and configuration are based on presets, not on programme types
# if alt_recording_mode:
# if preset == Preset.RADIO and alt_radio_modes:
# #cmd += " --modes=\"" + alt_radio_modes + "\""
# cmd += " --radiomode=\"" + alt_radio_modes + "\""
# elif preset == Preset.TV and alt_tv_modes:
# #cmd += " --modes=\"" + alt_tv_modes + "\""
# cmd += " --tvmode=\"" + alt_tv_modes + "\""
if alt_recording_mode:
if preset_fallback == Preset.RADIO and alt_radio_modes:
#cmd += " --modes=\"" + alt_radio_modes + "\""
cmd += " --radiomode=\"" + alt_radio_modes + "\""
elif preset_fallback == Preset.TV and alt_tv_modes:
#cmd += " --modes=\"" + alt_tv_modes + "\""
cmd += " --tvmode=\"" + alt_tv_modes + "\""
if prog_type:
cmd += " --type=" + prog_type
cmd += " --nocopyright"
if force:
cmd += " --force --overwrite"
if output_path:
cmd += " --output=\"" + output_path + "\""
#if pvr_queue or future:
if pvr_queue:
if not preset:
                return (False, None)
# Must explicitly specify programme type and PID on the command line when in pvr queue mode
cmd += " --pvrqueue --pid="
#cmd += " --pvr-exclude=" + ",".join(exclude_search_term_list)
elif pid:
cmd += " --pid="
else:
cmd += " --get "
##cmd += "\"$i\" ; done"
#cmd += "$i; done )"
if search_term:
#TEMP if search_term is a PID and the PID is numeric,
# then add a leading non-digit character to the PID
# so that get_iplayer will not assume the search_term to be an index
if " " not in search_term and prog_type in [Channels.CH4, Channels.ITV]:
search_term = " " + search_term
# search_term_list could be a set of episode indices, so don't surround them with quotes
cmd += search_term
if (i < len(search_term_list) - 1):
#cmd += "; "
cmd += "; echo '----'; "
if pvr_queue or dry_run:
launched = True
process_output = command.run(cmd, dry_run=dry_run, temp_pathname=settings.TEMP_PATHNAME)
else:
#CommandQueue.CommandQueue().run(...)
launched = command_queue.run(cmd, temp_pathname=settings.TEMP_PATHNAME,
terminal_prog=terminal_prog, terminal_title="get_iplayer get")
process_output = None
return (launched, process_output)
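# Sketch only (not called anywhere in this module): queues two programmes for
# download by PID; the PID strings are hypothetical placeholders.
def _example_get_usage():
    launched, process_output = get(["p0000001", "p0000002"], pid=True,
                                   preset=Preset.TV, prog_type=ProgType.TV)
    if launched:
        logger.info("get_iplayer download command launched")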
def info(pid, search_term, preset=None, prog_type=None, proxy_disabled=False, future=False):
""" Run 'get_iplayer --info [--pid=<pid>] [<search term>]'.
    Return a table of [property name, property value] pairs parsed from the --info output.
"""
#if not pid:
# return ""
# Only useful from outside the UK:
# If proxy_disabled is true then info retrieval may be faster but the info
# will not contain proper values for "modes" and "tvmodes" (the available TV download file sizes)
cmd = _GET_IPLAYER_PROG + " --info"
if preset:
cmd += " --preset=" + preset
if prog_type:
cmd += " --type=" + prog_type
if proxy_disabled:
cmd += " --proxy=0"
if future:
cmd += " --future"
cmd += " --nocopyright"
cmd += " --thumbsize=512"
# --fields: perform the same search as with --long plus on PID
#cmd += " --fields=\"name,episode,desc,pid\"
cmd += " --pid=" + pid
if search_term:
cmd += " --long" + " \"" + search_term + "\""
process_output = command.run(cmd)
lines = process_output.splitlines()
output_lines = []
for line in lines:
# Skip empty or message lines
if line and line[0] and not line.startswith("INFO:") and not line.startswith("Matches:"):
l = line.split(":", 1)
# Match key-value pairs
if len(l) == 2 and l[1]:
output_lines.append([l[0], l[1].lstrip()])
return output_lines
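# Sketch only (not called anywhere in this module): turns the property list
# returned by info() above into a dict; the PID is a hypothetical placeholder.
def _example_info_usage():
    properties = dict(info("p0000001", None, preset=Preset.RADIO, prog_type=ProgType.RADIO))
    for name, value in properties.items():
        logger.debug("%s: %s", name, value)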
def refresh(preset=None, prog_type=None, channels=None, exclude_channels=None, force=False, future=False):
""" Run get_iplayer --refresh. Return error code. """
if search_cache.has_cache(prog_type):
# Skip 'get_iplayer --refresh' when search results have been cached
return
####
#if not preset:
# #preset = Preset.RADIO + "," + Preset.TV
# preset = "all"
cmd = _GET_IPLAYER_PROG + " --refresh"
if future:
cmd += " --refresh-future"
if channels:
#cmd += " --channel=\"" + channel + "\""
cmd += " --refresh-include=\"" + channels + "\""
if exclude_channels:
#cmd += " --exclude-channel=\"" + exclude_channel + "\""
cmd += " --refresh-exclude=\"" + exclude_channels + "\""
#if preset:
# cmd += " --preset=" + preset
if prog_type:
cmd += " --type=" + prog_type
if force:
cmd += " --force"
cmd += " --nocopyright"
if preset is None:
ret1 = command.run(cmd + " --preset=" + Preset.RADIO, temp_pathname=settings.TEMP_PATHNAME)
ret2 = command.run(cmd + " --preset=" + Preset.TV, temp_pathname=settings.TEMP_PATHNAME)
return ret2 if ret2 != 0 else ret1
else:
return command.run(cmd + " --preset=" + preset)
|
vvv1559/intellij-community
|
refs/heads/master
|
python/testData/formatter/hangClosingParenthesisInFunctionDefinition.py
|
30
|
def func(
x,
y,
z
):
pass
|
ray-zhong/github_trend_spider
|
refs/heads/master
|
ENV/Lib/site-packages/setuptools/command/install.py
|
529
|
from distutils.errors import DistutilsArgError
import inspect
import glob
import warnings
import platform
import distutils.command.install as orig
import setuptools
# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for
# now. See https://github.com/pypa/setuptools/issues/199/
_install = orig.install
class install(orig.install):
"""Use easy_install to install the package, w/dependencies"""
user_options = orig.install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
('single-version-externally-managed', None,
"used by system package builders to create 'flat' eggs"),
]
boolean_options = orig.install.boolean_options + [
'old-and-unmanageable', 'single-version-externally-managed',
]
new_commands = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
def initialize_options(self):
orig.install.initialize_options(self)
self.old_and_unmanageable = None
self.single_version_externally_managed = None
def finalize_options(self):
orig.install.finalize_options(self)
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system"
" packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return orig.install.handle_extra_path(self)
# Ignore extra_path when installing an egg (or being run by another
        # command without --root or --single-version-externally-managed).
self.path_file = None
self.extra_dirs = ''
def run(self):
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return orig.install.run(self)
if not self._called_from_setup(inspect.currentframe()):
# Run in backward-compatibility mode to support bdist_* commands.
orig.install.run(self)
else:
self.do_egg_install()
@staticmethod
def _called_from_setup(run_frame):
"""
Attempt to detect whether run() was called from setup() or by another
command. If called by setup(), the parent caller will be the
'run_command' method in 'distutils.dist', and *its* caller will be
the 'run_commands' method. If called any other way, the
immediate caller *might* be 'run_command', but it won't have been
called by 'run_commands'. Return True in that case or if a call stack
is unavailable. Return False otherwise.
"""
if run_frame is None:
msg = "Call stack not available. bdist_* commands may fail."
warnings.warn(msg)
if platform.python_implementation() == 'IronPython':
msg = "For best results, pass -X:Frames to enable call stack."
warnings.warn(msg)
return True
res = inspect.getouterframes(run_frame)[2]
caller, = res[:1]
info = inspect.getframeinfo(caller)
caller_module = caller.f_globals.get('__name__', '')
return (
caller_module == 'distutils.dist'
and info.function == 'run_commands'
)
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
cmd = easy_install(
self.distribution, args="x", root=self.root, record=self.record,
)
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
# pick up setup-dir .egg files only: no .egg-info
cmd.package_index.scan(glob.glob('*.egg'))
self.run_command('bdist_egg')
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
if setuptools.bootstrap_install_from:
# Bootstrap self-installation of setuptools
args.insert(0, setuptools.bootstrap_install_from)
cmd.args = args
cmd.run()
setuptools.bootstrap_install_from = None
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = (
[cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
install.new_commands
)
|
ecederstrand/django
|
refs/heads/master
|
tests/admin_scripts/app_raising_messages/models.py
|
391
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core import checks
from django.db import models
class ModelRaisingMessages(models.Model):
@classmethod
def check(self, **kwargs):
return [
checks.Warning(
'First warning',
hint='Hint',
obj='obj'
),
checks.Warning(
'Second warning',
hint=None,
obj='a'
),
checks.Error(
'An error',
hint='Error hint',
obj=None,
)
]
|
sserrot/champion_relationships
|
refs/heads/master
|
venv/Lib/site-packages/nbformat/v1/__init__.py
|
27
|
"""The main module for the v1 notebook format."""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .nbbase import (
NotebookNode,
new_code_cell, new_text_cell, new_notebook
)
from .nbjson import reads as reads_json, writes as writes_json
from .nbjson import reads as read_json, writes as write_json
from .nbjson import to_notebook as to_notebook_json
from .convert import upgrade
|
joopert/home-assistant
|
refs/heads/dev
|
homeassistant/components/tikteck/light.py
|
5
|
"""Support for Tikteck lights."""
import logging
import tikteck
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
Light,
)
from homeassistant.const import CONF_DEVICES, CONF_NAME, CONF_PASSWORD
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
SUPPORT_TIKTECK_LED = SUPPORT_BRIGHTNESS | SUPPORT_COLOR
DEVICE_SCHEMA = vol.Schema(
{vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tikteck platform."""
lights = []
for address, device_config in config[CONF_DEVICES].items():
device = {}
device["name"] = device_config[CONF_NAME]
device["password"] = device_config[CONF_PASSWORD]
device["address"] = address
light = TikteckLight(device)
if light.is_valid:
lights.append(light)
add_entities(lights)
class TikteckLight(Light):
"""Representation of a Tikteck light."""
def __init__(self, device):
"""Initialize the light."""
self._name = device["name"]
self._address = device["address"]
self._password = device["password"]
self._brightness = 255
self._hs = [0, 0]
self._state = False
self.is_valid = True
self._bulb = tikteck.tikteck(self._address, "Smart Light", self._password)
if self._bulb.connect() is False:
self.is_valid = False
_LOGGER.error("Failed to connect to bulb %s, %s", self._address, self._name)
@property
def unique_id(self):
"""Return the ID of this light."""
return self._address
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def hs_color(self):
"""Return the color property."""
return self._hs
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_TIKTECK_LED
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def assumed_state(self):
"""Return the assumed state."""
return True
def set_state(self, red, green, blue, brightness):
"""Set the bulb state."""
return self._bulb.set_state(red, green, blue, brightness)
def turn_on(self, **kwargs):
"""Turn the specified light on."""
self._state = True
hs_color = kwargs.get(ATTR_HS_COLOR)
brightness = kwargs.get(ATTR_BRIGHTNESS)
if hs_color is not None:
self._hs = hs_color
if brightness is not None:
self._brightness = brightness
rgb = color_util.color_hs_to_RGB(*self._hs)
self.set_state(rgb[0], rgb[1], rgb[2], self.brightness)
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the specified light off."""
self._state = False
self.set_state(0, 0, 0, 0)
self.schedule_update_ha_state()
|
gujiawen/flask_web
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pkg_resources/_vendor/__init__.py
|
12133432
| |
jeenalee/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/treebuilders/__init__.py
|
1730
|
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing the node and its children serialized according
to the format used in the unit tests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
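# Example (sketch): obtaining the default etree-based TreeBuilder class as
# described in the docstring above; it is typically passed to the parser
# (e.g. HTMLParser(tree=getTreeBuilder("etree"))) rather than used directly.
#   TreeBuilder = getTreeBuilder("etree")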
|
timj/scons
|
refs/heads/master
|
test/ARGUMENTS.py
|
5
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
foo = open('foo.out', 'wb')
for k in sorted(ARGUMENTS.keys()):
foo.write(k + " = " + ARGUMENTS[k] + "\\n")
foo.close()
""")
test.run(arguments='a=1 bz=3 xx=sd zzz=foo=bar .')
test.fail_test(test.read('foo.out') != """a = 1
bz = 3
xx = sd
zzz = foo=bar
""")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
FinalAngel/django-cms
|
refs/heads/release/3.4.x
|
cms/test_utils/project/objectpermissionsapp/models.py
|
13
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
class UserObjectPermissionManager(models.Manager):
def assign_perm(self, perm, user, obj):
"""
Assigns permission with given ``perm`` for an instance ``obj`` and
``user``.
"""
if getattr(obj, 'pk', None) is None:
raise Exception("Object %s needs to be persisted first" % obj)
ctype = ContentType.objects.get_for_model(obj)
permission = Permission.objects.get(content_type=ctype, codename=perm)
kwargs = {'permission': permission, 'user': user}
kwargs['content_type'] = ctype
kwargs['object_pk'] = obj.pk
obj_perm, created = self.get_or_create(**kwargs) # @UnusedVariable
return obj_perm
def remove_perm(self, perm, user, obj):
"""
Removes permission ``perm`` for an instance ``obj`` and given ``user``.
"""
if getattr(obj, 'pk', None) is None:
raise Exception("Object %s needs to be persisted first" % obj)
filters = {
'permission__codename': perm,
'permission__content_type': ContentType.objects.get_for_model(obj),
'user': user,
}
filters['object_pk'] = obj.pk
self.filter(**filters).delete()
class UserObjectPermission(models.Model):
permission = models.ForeignKey(Permission)
content_type = models.ForeignKey(ContentType)
object_pk = models.CharField(_('object ID'), max_length=255)
content_object = GenericForeignKey(fk_field='object_pk')
user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'))
objects = UserObjectPermissionManager()
def save(self, *args, **kwargs):
content_type = ContentType.objects.get_for_model(self.content_object)
if content_type != self.permission.content_type:
raise ValidationError("Cannot persist permission not designed for "
"this class (permission's type is %r and object's type is %r)"
% (self.permission.content_type, content_type))
return super(UserObjectPermission, self).save(*args, **kwargs)
class Meta:
unique_together = ['user', 'permission', 'object_pk']
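# Example (sketch): assigning and removing a per-object permission through the
# custom manager defined above; ``user``, ``page`` and the ``'change_page'``
# codename are hypothetical objects that would need to exist in the database.
#   UserObjectPermission.objects.assign_perm('change_page', user, page)
#   UserObjectPermission.objects.remove_perm('change_page', user, page)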
|
JimCircadian/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/other_mu_dir/a/__init__.py
|
12133432
| |
florian-dacosta/OCB
|
refs/heads/8.0
|
addons/website/tests/test_ui.py
|
342
|
import openerp.tests
class TestUi(openerp.tests.HttpCase):
def test_01_public_homepage(self):
self.phantom_js("/", "console.log('ok')", "openerp.website.snippet")
def test_03_admin_homepage(self):
self.phantom_js("/", "console.log('ok')", "openerp.website.editor", login='admin')
def test_04_admin_tour_banner(self):
self.phantom_js("/", "openerp.Tour.run('banner', 'test')", "openerp.Tour.tours.banner", login='admin')
# vim:et:
|
BonexGu/Blik2D-SDK
|
refs/heads/master
|
Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/python/kernel_tests/rnn_test.py
|
2
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import timeit
import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Plus1RNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def __call__(self, input_, state, scope=None):
return (input_ + 1, state + 1)
class ScalarStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def __call__(self, input_, state, scope=None):
return (input_, state + 1)
class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
rnn.dynamic_rnn(
cell,
array_ops.stack(inputs),
dtype=dtypes.float32,
sequence_length=[[4]])
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
batch_size = 512
num_units = 512
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
def _create_static_rnn():
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
def _create_dynamic_rnn():
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
(max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _timer(sess, ops):
  # Warm up
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
variables_lib.global_variables_initializer().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
(batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Halve the sequence length, full static unroll
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
delta_unroll_half, delta_half_seq_len / delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + final_state,
trainable_variables)
return control_flow_ops.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Run with concatenated states (default)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
variables_lib.global_variables_initializer().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
variables_lib.global_variables_initializer().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
delta_tuple_state, delta_concat_state / delta_tuple_state))
return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell,
inputs_t,
sequence_length=sequence_length,
swap_memory=swap_memory,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
variables_lib.global_variables_initializer().run()
no_swap = _timer(sess, ops)
# Memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
variables_lib.global_variables_initializer().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
(batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
swap_memory, nn):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(seqlen)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(nn):
if dynamic:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
else:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
dynamic, elapsed,
elapsed / seqlen))
class BenchmarkRNN(test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
self.report_benchmark(
name="graph_creation_time_static_T%02d" % max_time,
iters=5,
wall_time=s_dt)
self.report_benchmark(
name="graph_creation_time_dynamic_T%02d" % max_time,
iters=5,
wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
"vs. Dynamic Unroll LSTM")
print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
"\t dt(dynamic)/dt(static)")
for batch_size in (256,):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
num_units, use_gpu)
self.report_benchmark(
name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
max_time, num_units)
self.report_benchmark(
name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=no_swap)
self.report_benchmark(
name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
"vs. Half Static Unroll")
print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
"\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
for batch_size in (128,):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
for batch_size in (
16,
128,):
for max_time in (50,):
for num_units in (
16,
128,):
for use_gpu in (False, True):
c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=c_dt)
self.report_benchmark(
name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=t_dt)
def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
"""The memory swapping test for the SOSP submission."""
print("Calculation: Long LSTM Sequence")
print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
batch_size = 512
seqlen = 800
num_units = 512
dynamic = True
swap_memory = True
# Some warming up.
if swap_memory:
rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
dynamic, swap_memory, 2)
# Measure the performance.
for slen in xrange(100, 1100, 100):
rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
swap_memory, 3)
if __name__ == "__main__":
test.main()
|
tempbottle/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_tools/test_gprof2html.py
|
75
|
"""Tests for the gprof2html script in the Tools directory."""
import os
import sys
import importlib
import unittest
from unittest import mock
import tempfile
from test.test_tools import scriptsdir, skip_if_missing, import_tool
skip_if_missing()
class Gprof2htmlTests(unittest.TestCase):
def setUp(self):
self.gprof = import_tool('gprof2html')
oldargv = sys.argv
def fixup():
sys.argv = oldargv
self.addCleanup(fixup)
sys.argv = []
def test_gprof(self):
        # Issue #14508: this used to fail with a NameError.
with mock.patch.object(self.gprof, 'webbrowser') as wmock, \
tempfile.TemporaryDirectory() as tmpdir:
fn = os.path.join(tmpdir, 'abc')
open(fn, 'w').close()
sys.argv = ['gprof2html', fn]
self.gprof.main()
self.assertTrue(wmock.open.called)
if __name__ == '__main__':
unittest.main()
|
asgard-lab/neutron
|
refs/heads/master
|
neutron/tests/tempest/services/identity/v2/__init__.py
|
12133432
| |
yglazko/socorro
|
refs/heads/master
|
socorro/unittest/collector/__init__.py
|
12133432
| |
Raekkeri/nexus
|
refs/heads/master
|
nexus/templatetags/nexus_helpers.py
|
1
|
from django import template
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from nexus import conf
from nexus.modules import NexusModule
register = template.Library()
def nexus_media_prefix():
return conf.MEDIA_PREFIX.rstrip('/')
register.simple_tag(nexus_media_prefix)
def show_navigation(context):
site = context.get('nexus_site', NexusModule.get_global('site'))
request = NexusModule.get_request()
category_link_set = SortedDict([(k, {
'label': v,
'links': [],
}) for k, v in site.get_categories()])
for namespace, module in site._registry.iteritems():
module, category = module
if module.permission and not request.user.has_perm(module.permission):
continue
home_url = None
if 'request' in context:
home_url = module.get_home_url(context['request'])
if not home_url:
continue
active = request.path.startswith(home_url)
if category not in category_link_set:
if category:
label = site.get_category_label(category)
else:
label = None
category_link_set[category] = {
'label': label,
'links': []
}
category_link_set[category]['links'].append((module.get_title(), home_url, active))
category_link_set[category]['active'] = active
return {
'nexus_site': site,
'category_link_set': category_link_set.itervalues(),
}
register.inclusion_tag('nexus/navigation.html', takes_context=True)(show_navigation)
|
terencehonles/mailman
|
refs/heads/master
|
src/mailman/commands/tests/test_create.py
|
3
|
# Copyright (C) 2011-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Test `bin/mailman create`."""
from __future__ import absolute_import, unicode_literals
__metaclass__ = type
__all__ = [
]
import sys
import unittest
from mailman.app.lifecycle import create_list
from mailman.commands.cli_lists import Create
from mailman.testing.layers import ConfigLayer
class FakeArgs:
language = None
owners = []
quiet = False
domain = None
listname = None
notify = False
class FakeParser:
def __init__(self):
self.message = None
def error(self, message):
self.message = message
sys.exit(1)
class TestCreate(unittest.TestCase):
"""Test `bin/mailman create`."""
layer = ConfigLayer
def setUp(self):
self.command = Create()
self.command.parser = FakeParser()
self.args = FakeArgs()
def test_cannot_create_duplicate_list(self):
# Cannot create a mailing list if it already exists.
create_list('test@example.com')
self.args.listname = ['test@example.com']
try:
self.command.process(self.args)
except SystemExit:
pass
self.assertEqual(self.command.parser.message,
'List already exists: test@example.com')
def test_invalid_posting_address(self):
# Cannot create a mailing list with an invalid posting address.
self.args.listname = ['foo']
try:
self.command.process(self.args)
except SystemExit:
pass
self.assertEqual(self.command.parser.message,
'Illegal list name: foo')
def test_invalid_owner_addresses(self):
# Cannot create a list with invalid owner addresses. LP: #778687
self.args.listname = ['test@example.com']
self.args.domain = True
self.args.owners = ['main=True']
try:
self.command.process(self.args)
except SystemExit:
pass
self.assertEqual(self.command.parser.message,
'Illegal owner addresses: main=True')
|
osmfj/sotmjp-website
|
refs/heads/master
|
sotmjp/account/models.py
|
12133432
| |
punalpatel/st2
|
refs/heads/master
|
st2tests/st2tests/fixtures/packs/runners/test_querymodule/query/__init__.py
|
12133432
| |
nlharris/narrative
|
refs/heads/master
|
src/biokbase/NarrativeJobService/__init__.py
|
12133432
| |
BrickText/BrickText
|
refs/heads/master
|
redactor/__init__.py
|
12133432
| |
lidiamcfreitas/FenixScheduleMaker
|
refs/heads/master
|
ScheduleMaker/brython/www/src/Lib/test/test_unicode_file.py
|
120
|
# Test some Unicode file name semantics
# We don't test many operations on files other than
# that their names can be used with Unicode characters.
import os, glob, time, shutil
import unicodedata
import unittest
from test.support import (run_unittest, rmtree,
TESTFN_ENCODING, TESTFN_UNICODE, TESTFN_UNENCODABLE, create_empty_file)
if not os.path.supports_unicode_filenames:
try:
TESTFN_UNICODE.encode(TESTFN_ENCODING)
except (UnicodeError, TypeError):
# Either the file system encoding is None, or the file name
# cannot be encoded in the file system encoding.
raise unittest.SkipTest("No Unicode filesystem semantics on this platform.")
def remove_if_exists(filename):
if os.path.exists(filename):
os.unlink(filename)
class TestUnicodeFiles(unittest.TestCase):
# The 'do_' functions are the actual tests. They generally assume the
# file already exists etc.
# Do all the tests we can given only a single filename. The file should
# exist.
def _do_single(self, filename):
self.assertTrue(os.path.exists(filename))
self.assertTrue(os.path.isfile(filename))
self.assertTrue(os.access(filename, os.R_OK))
self.assertTrue(os.path.exists(os.path.abspath(filename)))
self.assertTrue(os.path.isfile(os.path.abspath(filename)))
self.assertTrue(os.access(os.path.abspath(filename), os.R_OK))
os.chmod(filename, 0o777)
os.utime(filename, None)
os.utime(filename, (time.time(), time.time()))
# Copy/rename etc tests using the same filename
self._do_copyish(filename, filename)
# Filename should appear in glob output
self.assertTrue(
os.path.abspath(filename)==os.path.abspath(glob.glob(filename)[0]))
# basename should appear in listdir.
path, base = os.path.split(os.path.abspath(filename))
file_list = os.listdir(path)
# Normalize the unicode strings, as round-tripping the name via the OS
# may return a different (but equivalent) value.
base = unicodedata.normalize("NFD", base)
file_list = [unicodedata.normalize("NFD", f) for f in file_list]
self.assertIn(base, file_list)
# Tests that copy, move, etc one file to another.
def _do_copyish(self, filename1, filename2):
# Should be able to rename the file using either name.
self.assertTrue(os.path.isfile(filename1)) # must exist.
os.rename(filename1, filename2 + ".new")
self.assertFalse(os.path.isfile(filename2))
self.assertTrue(os.path.isfile(filename1 + '.new'))
os.rename(filename1 + ".new", filename2)
self.assertFalse(os.path.isfile(filename1 + '.new'))
self.assertTrue(os.path.isfile(filename2))
shutil.copy(filename1, filename2 + ".new")
os.unlink(filename1 + ".new") # remove using equiv name.
# And a couple of moves, one using each name.
shutil.move(filename1, filename2 + ".new")
self.assertFalse(os.path.exists(filename2))
self.assertTrue(os.path.exists(filename1 + '.new'))
shutil.move(filename1 + ".new", filename2)
self.assertFalse(os.path.exists(filename2 + '.new'))
self.assertTrue(os.path.exists(filename1))
# Note - due to the implementation of shutil.move,
# it tries a rename first. This only fails on Windows when on
# different file systems - and this test can't ensure that.
# So we test the shutil.copy2 function, which is the thing most
# likely to fail.
shutil.copy2(filename1, filename2 + ".new")
self.assertTrue(os.path.isfile(filename1 + '.new'))
os.unlink(filename1 + ".new")
self.assertFalse(os.path.exists(filename2 + '.new'))
def _do_directory(self, make_name, chdir_name):
cwd = os.getcwd()
if os.path.isdir(make_name):
rmtree(make_name)
os.mkdir(make_name)
try:
os.chdir(chdir_name)
try:
cwd_result = os.getcwd()
name_result = make_name
cwd_result = unicodedata.normalize("NFD", cwd_result)
name_result = unicodedata.normalize("NFD", name_result)
self.assertEqual(os.path.basename(cwd_result),name_result)
finally:
os.chdir(cwd)
finally:
os.rmdir(make_name)
    # The '_test' functions are 'entry points with params' - i.e., what the
    # top-level 'test' functions would be if they could take params
def _test_single(self, filename):
remove_if_exists(filename)
create_empty_file(filename)
try:
self._do_single(filename)
finally:
os.unlink(filename)
self.assertTrue(not os.path.exists(filename))
# and again with os.open.
f = os.open(filename, os.O_CREAT)
os.close(f)
try:
self._do_single(filename)
finally:
os.unlink(filename)
# The 'test' functions are unittest entry points, and simply call our
# _test functions with each of the filename combinations we wish to test
def test_single_files(self):
self._test_single(TESTFN_UNICODE)
if TESTFN_UNENCODABLE is not None:
self._test_single(TESTFN_UNENCODABLE)
def test_directories(self):
# For all 'equivalent' combinations:
# Make dir with encoded, chdir with unicode, checkdir with encoded
        # (or unicode/encoded/unicode, etc.)
ext = ".dir"
self._do_directory(TESTFN_UNICODE+ext, TESTFN_UNICODE+ext)
# Our directory name that can't use a non-unicode name.
if TESTFN_UNENCODABLE is not None:
self._do_directory(TESTFN_UNENCODABLE+ext,
TESTFN_UNENCODABLE+ext)
def test_main():
run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
brandond/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/django_manage.py
|
57
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all
management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb,
test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run
with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
    aliases: [virtual_env]
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
clear:
description:
- Clear the existing files before trying to copy or link the original file.
- Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically.
required: false
default: no
type: bool
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
type: bool
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
      - Will skip over out-of-order missing migrations. This parameter can only be used with the I(migrate) command.
required: false
version_added: "1.3"
type: bool
merge:
description:
      - Will run out-of-order or missing migrations as they are not rollback migrations. This parameter can only be used with the 'migrate' command.
required: false
version_added: "1.3"
type: bool
link:
description:
      - Will create links to the files instead of copying them. This parameter can only be used with the 'collectstatic' command.
required: false
version_added: "1.3"
type: bool
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python",
for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage:
command: cleanup
app_path: "{{ django_dir }}"
# Load the initial_data fixture into the application
- django_manage:
command: loaddata
app_path: "{{ django_dir }}"
fixtures: "{{ initial_data }}"
# Run syncdb on the application
- django_manage:
command: syncdb
app_path: "{{ django_dir }}"
settings: "{{ settings_app_name }}"
pythonpath: "{{ settings_dir }}"
virtualenv: "{{ virtualenv_dir }}"
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage:
command: test
app_path: "{{ django_dir }}"
apps: main.SmokeTest
# Create an initial superuser.
- django_manage:
command: "createsuperuser --noinput --username=admin --email=admin@example.com"
app_path: "{{ django_dir }}"
"""
import os
import sys
from ansible.module_utils.basic import AnsibleModule
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(venv_param, 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
        vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
return "Already exists" not in line
def flush_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def syncdb_filter_output(line):
return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)
def migrate_filter_output(line):
return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
def collectstatic_filter_output(line):
return line and "0 static files" not in line
def main():
command_allowed_param_map = dict(
cleanup=(),
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
syncdb=('database', ),
test=('failfast', 'testrunner', 'liveserver', 'apps', ),
validate=(),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
command_required_param_map = dict(
loaddata=('fixtures', ),
)
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
'syncdb',
'migrate',
'test',
'collectstatic',
)
# These params are allowed for certain commands only
specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
# These params are automatically added to the command if present
general_params = ('settings', 'pythonpath', 'database',)
specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
end_of_command_params = ('apps', 'cache_table', 'fixtures')
module = AnsibleModule(
argument_spec=dict(
command=dict(default=None, required=True),
app_path=dict(default=None, required=True, type='path'),
settings=dict(default=None, required=False),
pythonpath=dict(default=None, required=False, aliases=['python_path']),
virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
apps=dict(default=None, required=False),
cache_table=dict(default=None, required=False),
clear=dict(default=None, required=False, type='bool'),
database=dict(default=None, required=False),
failfast=dict(default='no', required=False, type='bool', aliases=['fail_fast']),
fixtures=dict(default=None, required=False),
liveserver=dict(default=None, required=False, aliases=['live_server']),
testrunner=dict(default=None, required=False, aliases=['test_runner']),
skip=dict(default=None, required=False, type='bool'),
merge=dict(default=None, required=False, type='bool'),
link=dict(default=None, required=False, type='bool'),
),
)
command = module.params['command']
app_path = module.params['app_path']
virtualenv = module.params['virtualenv']
for param in specific_params:
value = module.params[param]
if param in specific_boolean_params:
value = module.boolean(value)
if value and param not in command_allowed_param_map[command]:
module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
for param in command_required_param_map.get(command, ()):
if not module.params[param]:
module.fail_json(msg='%s param is required for command=%s' % (param, command))
_ensure_virtualenv(module)
cmd = "./manage.py %s" % (command, )
if command in noinput_commands:
cmd = '%s --noinput' % cmd
for param in general_params:
if module.params[param]:
cmd = '%s --%s=%s' % (cmd, param, module.params[param])
for param in specific_boolean_params:
if module.boolean(module.params[param]):
cmd = '%s --%s' % (cmd, param)
# these params always get tacked on the end of the command
for param in end_of_command_params:
if module.params[param]:
cmd = '%s %s' % (cmd, module.params[param])
rc, out, err = module.run_command(cmd, cwd=app_path)
if rc != 0:
if command == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'Already exists.'
else:
if "Unknown command:" in err:
_fail(module, cmd, err, "Unknown django command: %s" % command)
_fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
changed = False
lines = out.split('\n')
filt = globals().get(command + "_filter_output", None)
if filt:
filtered_output = list(filter(filt, lines))
if len(filtered_output):
changed = True
module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
settings=module.params['settings'], pythonpath=module.params['pythonpath'])
if __name__ == '__main__':
main()
|
BMJHayward/django
|
refs/heads/master
|
tests/template_tests/tests.py
|
183
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.contrib.auth.models import Group
from django.core import urlresolvers
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import UNKNOWN_SOURCE
from django.test import SimpleTestCase, override_settings
class TemplateTests(SimpleTestCase):
def test_string_origin(self):
template = Engine().from_string('string template')
self.assertEqual(template.origin.name, UNKNOWN_SOURCE)
self.assertEqual(template.origin.loader_name, None)
self.assertEqual(template.source, 'string template')
@override_settings(SETTINGS_MODULE=None)
def test_url_reverse_no_settings_module(self):
"""
#9005 -- url tag shouldn't require settings.SETTINGS_MODULE to
be set.
"""
t = Engine(debug=True).from_string('{% url will_not_match %}')
c = Context()
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(c)
def test_url_reverse_view_name(self):
"""
        #19827 -- url tag should keep original stack trace when reraising
exception.
"""
t = Engine().from_string('{% url will_not_match %}')
c = Context()
try:
t.render(c)
except urlresolvers.NoReverseMatch:
tb = sys.exc_info()[2]
depth = 0
while tb.tb_next is not None:
tb = tb.tb_next
depth += 1
self.assertGreater(depth, 5,
"The traceback context was lost when reraising the traceback. See #19827")
def test_no_wrapped_exception(self):
"""
        #16770 -- The template system doesn't wrap exceptions, but annotates
them.
"""
engine = Engine(debug=True)
c = Context({"coconuts": lambda: 42 / 0})
t = engine.from_string("{{ coconuts }}")
with self.assertRaises(ZeroDivisionError) as e:
t.render(c)
debug = e.exception.template_debug
self.assertEqual(debug['start'], 0)
self.assertEqual(debug['end'], 14)
def test_invalid_block_suggestion(self):
"""
#7876 -- Error messages should include the unexpected block name.
"""
engine = Engine()
with self.assertRaises(TemplateSyntaxError) as e:
engine.from_string("{% if 1 %}lala{% endblock %}{% endif %}")
self.assertEqual(
e.exception.args[0],
"Invalid block tag: 'endblock', expected 'elif', 'else' or 'endif'",
)
def test_compile_filter_expression_error(self):
"""
        #19819 -- Make sure the correct token is highlighted for
FilterExpression errors.
"""
engine = Engine(debug=True)
msg = "Could not parse the remainder: '@bar' from 'foo@bar'"
with self.assertRaisesMessage(TemplateSyntaxError, msg) as e:
engine.from_string("{% if 1 %}{{ foo@bar }}{% endif %}")
debug = e.exception.template_debug
self.assertEqual((debug['start'], debug['end']), (10, 23))
self.assertEqual((debug['during']), '{{ foo@bar }}')
def test_compile_tag_error(self):
"""
Errors raised while compiling nodes should include the token
information.
"""
engine = Engine(
debug=True,
libraries={'bad_tag': 'template_tests.templatetags.bad_tag'},
)
with self.assertRaises(RuntimeError) as e:
engine.from_string("{% load bad_tag %}{% badtag %}")
self.assertEqual(e.exception.template_debug['during'], '{% badtag %}')
def test_super_errors(self):
"""
        #18169 -- NoReverseMatch should not be silenced in block.super.
"""
engine = Engine(app_dirs=True)
t = engine.get_template('included_content.html')
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(Context())
def test_debug_tag_non_ascii(self):
"""
#23060 -- Test non-ASCII model representation in debug output.
"""
group = Group(name="清風")
c1 = Context({"objs": [group]})
t1 = Engine().from_string('{% debug %}')
self.assertIn("清風", t1.render(c1))
def test_extends_generic_template(self):
"""
#24338 -- Allow extending django.template.backends.django.Template
objects.
"""
engine = Engine()
parent = engine.from_string('{% block content %}parent{% endblock %}')
child = engine.from_string(
'{% extends parent %}{% block content %}child{% endblock %}')
self.assertEqual(child.render(Context({'parent': parent})), 'child')
|
slisson/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/core/handlers/base.py
|
71
|
import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
from django.utils.log import getLogger
logger = getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
mw_module, mw_classname = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
# Setup default url resolver for this thread, this code is outside
# the try/except so we don't get a spurious "unbound local
# variable" exception in the event an exception is raised before
# resolver is set
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception, e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
# Complain if the view returned None (a common error).
if response is None:
try:
view_name = callback.func_name # If it's a function
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__' # If it's a class
raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
except http.Http404, e:
logger.warning('Not Found: %s' % request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
try:
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
logger.warning('Forbidden (Permission denied): %s' % request.path,
extra={
'status_code': 403,
'request': request
})
response = http.HttpResponseForbidden('<h1>Permission denied</h1>')
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
# Reset URLconf for this thread on the way out for complete
# isolation of request.urlconf
urlresolvers.set_urlconf(None)
try:
# If the response supports deferred rendering, apply template
            # response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response.render()
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
logger.error('Internal Server Error: %s' % request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request':request
}
)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
raise exc_info[1], None, exc_info[2]
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_unicode(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', u'')
if not script_url:
script_url = environ.get('REDIRECT_URL', u'')
if script_url:
return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_unicode(environ.get('SCRIPT_NAME', u''))
|
flacjacket/pywayland
|
refs/heads/main
|
pywayland/scanner/method.py
|
1
|
# Copyright 2015 Sean Vig
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from dataclasses import dataclass
from typing import Dict, Iterable, List, Optional, Tuple
from .argument import Argument
from .description import Description
from .element import Element
from .printer import Printer
@dataclass(frozen=True) # type: ignore
class Method(Element, abc.ABC):
"""Scanner for methods
Corresponds to event and requests defined on an interface
"""
name: str
since: Optional[str]
description: Optional[Description]
arg: List[Argument]
def imports(
self, interface: str, module_imports: Dict[str, str]
) -> List[Tuple[str, str]]:
"""Get the imports required for each of the interfaces
:param interface:
The name of the interface that the method is a part of.
:param module_imports:
            A mapping from the name of an interface to the name of the
            protocol module that the interface comes from.
:return:
A list of 2-tuples, each specifying the path to an imported
module and the imported class.
"""
current_protocol = module_imports[interface]
imports = []
for arg in self.arg:
if arg.interface is None:
continue
if arg.interface == interface:
continue
import_class = arg.interface_class
import_protocol = module_imports[arg.interface]
if current_protocol == import_protocol:
import_path = ".{}".format(arg.interface)
else:
import_path = "..{}".format(import_protocol)
imports.append((import_path, import_class))
return imports
@property
@abc.abstractmethod
def method_type(self) -> str:
pass
@property
@abc.abstractmethod
def method_args(self) -> Iterable[str]:
pass
@abc.abstractmethod
def output_doc_params(self, printer: Printer) -> None:
pass
@abc.abstractmethod
def output_body(self, printer: Printer, opcode: int) -> None:
pass
def output(
self,
printer: Printer,
opcode: int,
in_class: str,
module_imports: Dict[str, str],
) -> None:
"""Generate the output for the given method to the printer"""
if len(self.arg) > 0:
printer(f"@{in_class}.{self.method_type}(")
with printer.indented():
for arg in self.arg:
arg.output(printer)
if self.since:
printer(f"version={self.since},")
printer(")")
else:
if self.since:
printer(f"@{in_class}.{self.method_type}(version={self.since})")
else:
printer(f"@{in_class}.{self.method_type}()")
# Generate the definition of the method and args
args = ", ".join(["self"] + list(self.method_args))
printer("def {}({}):".format(self.name, args))
with printer.indented():
# Write the documentation
self.output_doc(printer)
# Write out the body of the method
self.output_body(printer, opcode)
def output_doc(self, printer: Printer) -> None:
"""Output the documentation for the interface"""
if self.description:
self.description.output(printer)
else:
printer('"""' + self.name)
# Parameter and returns documentation
if self.arg:
printer()
self.output_doc_params(printer)
printer('"""')
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
instal/script.module.liveresolver/lib/js2py/constructors/jsboolean.py
|
33
|
from js2py.base import *
BooleanPrototype.define_own_property('constructor', {'value': Boolean,
'enumerable': False,
'writable': True,
'configurable': True})
Boolean.define_own_property('prototype', {'value': BooleanPrototype,
'enumerable': False,
'writable': False,
'configurable': False})
|
BackupGGCode/sphinx
|
refs/heads/master
|
doc/conf.py
|
3
|
# -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, re
# If your extensions are in another directory, add it here.
#sys.path.append(os.path.dirname(__file__))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Sphinx'
copyright = '2008, Georg Brandl'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import sphinx
version = sphinx.__released__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'sphinxdoc.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps page names to templates.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
html_additional_pages = {'index': 'index.html'}
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
html_use_opensearch = 'http://sphinx.pocoo.org'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sphinxdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('contents', 'sphinx.tex', 'Sphinx Documentation',
'Georg Brandl', 'manual', 1)]
latex_logo = '_static/sphinx.png'
#latex_use_parts = True
# Additional stuff for the LaTeX preamble.
latex_elements = {
'fontpkg': '\\usepackage{palatino}'
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Extension interface
# -------------------
from sphinx import addnodes
dir_sig_re = re.compile(r'\.\. ([^:]+)::(.*)$')
def parse_directive(env, sig, signode):
if not sig.startswith('.'):
dec_sig = '.. %s::' % sig
signode += addnodes.desc_name(dec_sig, dec_sig)
return sig
m = dir_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
dec_name = '.. %s::' % name
signode += addnodes.desc_name(dec_name, dec_name)
signode += addnodes.desc_addname(args, args)
return name
def parse_role(env, sig, signode):
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
m = event_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
def setup(app):
from sphinx.ext.autodoc import cut_lines
app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_description_unit('directive', 'dir', 'pair: %s; directive', parse_directive)
app.add_description_unit('role', 'role', 'pair: %s; role', parse_role)
app.add_description_unit('confval', 'confval', 'pair: %s; configuration value')
app.add_description_unit('event', 'event', 'pair: %s; event', parse_event)
|
westurner/pyglobalgoals
|
refs/heads/master
|
notebooks/globalgoals-pyglobalgoals.py.py
|
1
|
# coding: utf-8
# # @TheGlobalGoals for Sustainable Development
# ## Background
#
# * Homepage: **http://www.globalgoals.org/**
# - Twitter: https://twitter.com/TheGlobalGoals
# - Instagram: https://instagram.com/TheGlobalGoals/
# - Facebook: https://www.facebook.com/globalgoals.org
# - YouTube: https://www.youtube.com/channel/UCRfuAYy7MesZmgOi1Ezy0ng/
# - Hashtag: **#GlobalGoals**
# - https://twitter.com/hashtag/GlobalGoals
# - https://instagram.com/explore/tags/GlobalGoals/
# - https://www.facebook.com/hashtag/GlobalGoals
# - Hashtag: #TheGlobalGoals
# - https://twitter.com/hashtag/TheGlobalGoals
# - https://instagram.com/explore/tags/TheGlobalGoals/
# - https://www.facebook.com/hashtag/TheGlobalGoals
#
#
# ### pyglobalgoals
#
# * Homepage: https://github.com/westurner/pyglobalgoals
# * Src: https://github.com/westurner/pyglobalgoals
# * Download: https://github.com/westurner/pyglobalgoals/releases
#
# ### Objectives
#
# * [x] ENH: Read and parse TheGlobalGoals from globalgoals.org
# * [x] ENH: Download (HTTP GET) each GlobalGoal tile image to ``./notebooks/data/images/``
# * [-] ENH: Generate e.g. tweets for each GlobalGoal (e.g. **##gg17** / **##GG17**)
# * [x] ENH: Save TheGlobalGoals to a JSON-LD document
# * [-] ENH: Save TheGlobalGoals with Schema.org RDF vocabulary (as JSON-LD)
# * [-] ENH: Save TheGlobalGoals as ReStructuredText with headings and images
# * [-] ENH: Save TheGlobalGoals as Markdown with headings and images
# * [-] ENH: Save TheGlobalGoals as RDFa with headings and images
# * [ ] ENH: Save TheGlobalGoals as RDFa with images like http://globalgoals.org/
# * [-] DOC: Add narrative documentation where necessary
# * [-] REF: Refactor and extract methods from ``./notebooks/`` to ``./pyglobalgoals/``
#
# ## Implementation
#
# * Python package: [**pyglobalgoals**](#pyglobalgoals)
#
# * Jupyter notebook: **``./notebooks/globalgoals-pyglobalgoals.py.ipynb``**
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/globalgoals-pyglobalgoals.py.py
# * Src: https://github.com/westurner/pyglobalgoals/blob/develop/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/v0.1.2/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/v0.2.1/notebooks/globalgoals-pyglobalgoals.py.ipynb
#
# * [x] Download HTML with requests
# * [x] Parse HTML with beautifulsoup
# * [x] Generate JSON[-LD] with ``collections.OrderedDict``
# * [-] REF: Functional methods -> more formal type model -> ``pyglobalgoals.<...>``
#
#
# * [JSON-LD](#JSONLD) document: **``./notebooks/data/globalgoals.jsonld``**
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/data/globalgoals.jsonld
#
#
# ### JSON-LD
#
# * Wikipedia: https://en.wikipedia.org/wiki/JSON-LD
# * Homepage: http://json-ld.org/
# * Docs: http://json-ld.org/playground/
# * Hashtag: #JSONLD
#
# ### RDFa
#
# * Wikipedia: https://en.wikipedia.org/wiki/RDFa
# * Standard: http://www.w3.org/TR/rdfa-core/
# * Docs: http://www.w3.org/TR/rdfa-primer/
# * Hashtag: #RDFa
# In[1]:
#!conda install -y beautiful-soup docutils jinja2 requests
get_ipython().system(u"pip install -U beautifulsoup4 jinja2 'requests<2.8' requests-cache version-information # tweepy")
import bs4
import jinja2
import requests
import requests_cache
requests_cache.install_cache('pyglobalgoals_cache')
#!pip install -U version_information
get_ipython().magic(u'load_ext version_information')
get_ipython().magic(u'version_information jupyter, bs4, jinja2, requests, requests_cache, version_information')
# In[2]:
url = "http://www.globalgoals.org/"
req = requests.get(url)
#print(req)
#print(sorted(dir(req)))
#req.<TAB>
#req??<[Ctrl-]Enter>
if not req.ok:
raise Exception(req)
content = req.content
print(content[:20])
# In[ ]:
# In[3]:
bs = bs4.BeautifulSoup(req.content)
print(bs.prettify())
# In[4]:
tiles = bs.find_all(class_='goal-tile-wrapper')
print(tiles)
# In[5]:
tile = tiles[0]
print(tile)
# In[6]:
link = tile.findNext('a')
img = link.findNext('img')
img_title = img['alt'][:-5]
img_src = img['src']
link_href = link['href']
example = {'name': img_title, 'img_src': img_src, 'href': link_href}
print(example)
# In[7]:
import collections
def get_data_from_goal_tile_wrapper_div(node, n=None):
link = node.findNext('a')
img = link.findNext('img')
img_title = img['alt'][:-5]
img_src = img['src']
link_href = link['href']
output = collections.OrderedDict({'@type': 'un:GlobalGoal'})
if n:
output['n'] = n
output['name'] = img_title
output['image'] = img_src
output['url'] = link_href
return output
def get_goal_tile_data(bs):
for i, tile in enumerate(bs.find_all(class_='goal-tile-wrapper'), 1):
yield get_data_from_goal_tile_wrapper_div(tile, n=i)
tiles = list(get_goal_tile_data(bs))
import json
print(json.dumps(tiles, indent=2))
goal_tiles = tiles[:-1]
# In[ ]:
# In[8]:
import codecs
from path import Path
def build_default_context():
context = collections.OrderedDict()
# context["dc"] = "http://purl.org/dc/elements/1.1/"
context["schema"] = "http://schema.org/"
# context["xsd"] = "http://www.w3.org/2001/XMLSchema#"
# context["ex"] = "http://example.org/vocab#"
# context["ex:contains"] = {
# "@type": "@id"
# }
# default attrs (alternative: prefix each with schema:)
# schema.org/Thing == schema:Thing (!= schema:thing)
context["name"] = "http://schema.org/name"
context["image"] = {
"@type": "@id",
"@id": "http://schema.org/image"
}
context["url"] = {
"@type": "@id",
"@id":"http://schema.org/url"
}
context["description"] = {
"@type": "http://schema.org/Text",
"@id": "http://schema.org/description"
}
return context
DEFAULT_CONTEXT = build_default_context()
def goal_tiles_to_jsonld(nodes, context=None, default_context=DEFAULT_CONTEXT):
data = collections.OrderedDict()
if context is None and default_context is not None:
data['@context'] = build_default_context()
elif context:
data['@context'] = context
elif default_context:
data['@context'] = default_context
data['@graph'] = nodes
return data
DATA_DIR = Path('.') / 'data'
#DATA_DIR = Path(__file__).dirname
#DATA_DIR = determine_path_to(current_notebook) # PWD initially defaults to nb.CWD
DATA_DIR.makedirs_p()
GLOBAL_GOALS_JSONLD_PATH = DATA_DIR / 'globalgoals.jsonld'
def write_global_goals_jsonld(goal_tiles, path=GLOBAL_GOALS_JSONLD_PATH):
goal_tiles_jsonld = goal_tiles_to_jsonld(goal_tiles)
with codecs.open(path, 'w', 'utf8') as fileobj:
json.dump(goal_tiles_jsonld, fileobj, indent=2)
def read_global_goals_jsonld(path=GLOBAL_GOALS_JSONLD_PATH, prettyprint=True):
with codecs.open(path, 'r', 'utf8') as fileobj:
global_goals_dict = json.load(fileobj,
object_pairs_hook=collections.OrderedDict)
return global_goals_dict
def print_json_dumps(global_goals_dict, indent=2):
print(json.dumps(global_goals_dict, indent=indent))
write_global_goals_jsonld(goal_tiles)
global_goals_dict = read_global_goals_jsonld(path=GLOBAL_GOALS_JSONLD_PATH)
assert global_goals_dict == goal_tiles_to_jsonld(goal_tiles)
print_json_dumps(global_goals_dict)
# In[9]:
def build_tweet_for_goal_tile(node):
return '##gg{n} {name} {url} {image} @TheGlobalGoals #GlobalGoals'.format(**node)
tweets = list(build_tweet_for_goal_tile(tile) for tile in goal_tiles)
tweets
# In[10]:
for node in goal_tiles:
img_basename = node['image'].split('/')[-1]
node['image_basename'] = img_basename
node['tweet_txt'] = build_tweet_for_goal_tile(node)
print(json.dumps(goal_tiles, indent=2))
# In[11]:
#!conda install -y pycurl
try:
import pycurl
except ImportError as e:
import warnings
warnings.warn(unicode(e))
def pycurl_download_file(url, dest_path, follow_redirects=True):
with open(dest_path, 'wb') as f:
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, f)
if follow_redirects:
c.setopt(c.FOLLOWLOCATION, True)
c.perform()
c.close()
return (url, dest_path)
# In[12]:
import requests
def requests_download_file(url, dest_path, **kwargs):
local_filename = url.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(dest_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return (url, dest_path)
# In[13]:
import urllib
def urllib_urlretrieve_download_file(url, dest_path):
"""
* https://docs.python.org/2/library/urllib.html#urllib.urlretrieve
"""
    (filename, headers) = urllib.urlretrieve(url, dest_path)
return (url, filename)
# In[14]:
def deduplicate_on_attr(nodes, attr='image_basename'):
attrindex = collections.OrderedDict()
for node in nodes:
attrindex.setdefault(node[attr], [])
attrindex[node[attr]].append(node)
return attrindex
def check_for_key_collisions(dict_of_lists):
for name, _nodes in dict_of_lists.items():
if len(_nodes) > 1:
            raise Exception('duplicate filenames: %r' % ((name, _nodes),))
attrindex = deduplicate_on_attr(goal_tiles, attr='image_basename')
check_for_key_collisions(attrindex)
#
IMG_DIR = DATA_DIR / 'images'
IMG_DIR.makedirs_p()
def download_goal_tile_images(nodes, img_path):
for node in nodes:
dest_path = img_path / node['image_basename']
source_url = node['image']
(url, dest) = requests_download_file(source_url, dest_path)
node['image_path'] = dest
print((node['n'], node['name']))
print((node['image_path']))
# time.sleep(1) # see: requests_cache
download_goal_tile_images(goal_tiles, IMG_DIR)
tiles_jsonld = goal_tiles_to_jsonld(goal_tiles)
print(json.dumps(tiles_jsonld, indent=2))
# In[15]:
#import jupyter.display as display
import IPython.display as display
display.Image(goal_tiles[0]['image_path'])
# In[16]:
import IPython.display
for tile in goal_tiles:
x = IPython.display.Image(tile['image_path'])
x
# In[17]:
import IPython.display
def display_goal_images():
for tile in goal_tiles:
yield IPython.display.Image(tile['image_path'])
x = list(display_goal_images())
#pp(x)
IPython.display.display(*x)
# In[18]:
import string
print(string.punctuation)
NOT_URI_CHARS = dict.fromkeys(string.punctuation + string.digits)
NOT_URI_CHARS.pop('-')
NOT_URI_CHARS.pop('_')
def _slugify(txt):
"""an ~approximate slugify function for human-readable URI #fragments"""
txt = txt.strip().lower()
chars = (
(c if c != ' ' else '-') for c in txt if
c not in NOT_URI_CHARS)
return u''.join(chars)
def _slugify_single_dash(txt):
"""
* unlike docutils, this function does not strip stopwords like 'and' and 'or'
TODO: locate this method in docutils
"""
def _one_dash_only(txt):
count = 0
for char in txt:
if char == '-':
count += 1
else:
if count:
yield '-'
yield char
count = 0
return u''.join(_one_dash_only(_slugify(txt)))
for node in goal_tiles:
node['name_numbered'] = "%d. %s" % (node['n'], node['name'])
node['slug_rst'] = _slugify_single_dash(node['name'])
node['slug_md'] = _slugify_single_dash(node['name'])
print_json_dumps(goal_tiles)
# In[19]:
import IPython.display
def display_goal_images():
for tile in goal_tiles:
yield IPython.display.Markdown("## %s" % tile['name_numbered'])
yield IPython.display.Image(tile['image_path'])
yield IPython.display.Markdown(tile['tweet_txt'].replace('##', '\##'))
x = list(display_goal_images())
#pp(x)
IPython.display.display(*x)
# In[20]:
TMPL_RST = """
The Global Goals
******************
.. contents::
{% for node in nodes %}
{{ node['name_numbered'] }}
======================================================
| {{ node['url'] }}
.. image:: {{ node['image'] }}{# node['image_path'] #}
:target: {{ node['url'] }}
:alt: {{ node['name'] }}
..
{{ node['tweet_txt'] }}
{% endfor %}
"""
tmpl_rst = jinja2.Template(TMPL_RST)
output_rst = tmpl_rst.render(nodes=goal_tiles)
print(output_rst)
# In[21]:
output_rst_path = DATA_DIR / 'globalgoals.rst'
with codecs.open(output_rst_path, 'w', encoding='utf-8') as f:
f.write(output_rst)
print("# wrote goals to %r" % output_rst_path)
# In[22]:
import docutils.core
output_rst_html = docutils.core.publish_string(output_rst, writer_name='html')
print(bs4.BeautifulSoup(output_rst_html).find(id='the-global-goals'))
# In[23]:
IPython.display.HTML(output_rst_html)
# In[24]:
TMPL_MD = """
# The Global Goals
**Contents:**
{% for node in nodes %}
* [{{ node['name_numbered'] }}](#{{ node['slug_md'] }})
{%- endfor %}
{% for node in nodes %}
## {{ node['name_numbered'] }}
{{ node['url'] }}
[![{{node['name_numbered']}}]({{ node['image'] }})]({{ node['url'] }})
> {{ node['tweet_txt'] }}
{% endfor %}
"""
tmpl_md = jinja2.Template(TMPL_MD)
output_markdown = tmpl_md.render(nodes=goal_tiles)
print(output_markdown)
# In[25]:
output_md_path = DATA_DIR / 'globalgoals.md'
with codecs.open(output_md_path, 'w', encoding='utf-8') as f:
f.write(output_markdown)
print("# wrote goals to %r" % output_md_path)
# In[26]:
IPython.display.Markdown(output_markdown)
# In[27]:
context = dict(nodes=goal_tiles)
# In[28]:
TMPL_HTML = """
<h1>The Global Goals</h1>
<h2>Contents:</h2>
{% for node in nodes %}
<li><a href="#{{node.slug_md}}">{{node.name_numbered}}</a></li>
{%- endfor %}
{% for node in nodes %}
<div class="goal-tile">
<h2><a name="#{{node.slug_md}}">{{ node.name_numbered }}</a></h2>
<a href="{{node.url}}">{{node.url}} </a>
<a href="{{node.url}}">
<img src="{{node.image}}" alt="{{node.name_numbered}}"/>{{node.url}} </a>
<div style="margin-left: 12px">
{{ node.tweet_txt }}
</div>
</div>
{% endfor %}
"""
tmpl_html = jinja2.Template(TMPL_HTML)
output_html = tmpl_html.render(**context)
print(output_html)
# In[29]:
output_html_path = DATA_DIR / 'globalgoals.html'
with codecs.open(output_html_path, 'w', encoding='utf-8') as f:
f.write(output_html)
print("# wrote goals to %r" % output_html_path)
# In[30]:
IPython.display.HTML(output_html)
# In[31]:
import jinja2
# TODO: prefix un:
TMPL_RDFA_HTML5 = ("""
<div prefix="schema: http://schema.org/
un: http://schema.un.org/#">
<h1>The Global Goals</h1>
<h2>Contents:</h2>
{%- for node in nodes %}
<li><a href="#{{node.slug_md}}">{{node.name_numbered}}</a></li>
{%- endfor %}
{% for node in nodes %}
<div class="goal-tile" resource="{{node.url}}" typeof="un:GlobalGoal">
<div style="display:none">
<meta property="schema:name">{{node.name}}</meta>
<meta property="schema:image">{{node.image}}</meta>
<meta property="#n">{{node.n}}</meta>
</div>
<h2><a name="#{{node.slug_md}}">{{ node.name_numbered }}</a></h2>
<a property="schema:url" href="{{node.url}}">{{node.url}} </a>
<a href="{{node.url}}">
<img src="{{node.image}}" alt="{{node.name_numbered}}"/>{{node.url}} </a>
<div style="margin-left: 12px">
{{ node.tweet_txt }}
</div>
</div>
{% endfor %}
</div>
"""
)
tmpl_rdfa_html5 = jinja2.Template(TMPL_RDFA_HTML5)
output_rdfa_html5 = tmpl_rdfa_html5.render(**context)
print(output_rdfa_html5)
# In[32]:
output_rdfa_html5_path = DATA_DIR / 'globalgoals.rdfa.html5.html'
with codecs.open(output_rdfa_html5_path, 'w', encoding='utf-8') as f:
    f.write(output_rdfa_html5)
print("# wrote goals to %r" % output_rdfa_html5_path)
# In[33]:
IPython.display.HTML(output_rdfa_html5)
# In[34]:
# tmpl_html
# tmpl_rdfa_html5
import difflib
for line in difflib.unified_diff(
TMPL_HTML.splitlines(),
TMPL_RDFA_HTML5.splitlines()):
print(line)
|
gobabiertoAR/datos.gob.ar
|
refs/heads/master
|
ckanext/gobar_theme/package_controller.py
|
2
|
from ckan.controllers.package \
import PackageController, _encode_params, search_url, render, NotAuthorized, check_access, abort, get_action, log
from urllib import urlencode
from pylons import config
from paste.deploy.converters import asbool
from ckan.lib.render import deprecated_lazy_render
import ckan.lib.maintain as maintain
import ckan.lib.helpers as h
import ckan.model as model
import ckan.plugins as p
from ckan.common import OrderedDict, _, request, c, g
import ckan.logic as logic
from ckan.lib.search import SearchError
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.lib.base as base
import cgi
CACHE_PARAMETERS = ['__cache', '__no_cache__']
NotFound = logic.NotFound
ValidationError = logic.ValidationError
redirect = base.redirect
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
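# Helpers for the "organization" facet: given an organization name, they collect the
# names of all of its descendant organizations and build a Solr fq clause such as
# ' organization:(parent OR child-a OR child-b)', so datasets published by
# sub-organizations are also matched.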
def collect_descendants(organization_list):
partial = []
for organization in organization_list:
partial.append(organization['name'])
if 'children' in organization and len(organization['children']) > 0:
partial += collect_descendants(organization['children'])
return partial
def search_organization(organization_name, organizations_branch=None):
if organizations_branch is None:
organizations_branch = logic.get_action('group_tree')({}, {'type': 'organization'})
for organization in organizations_branch:
if organization['name'] == organization_name:
if 'children' in organization and len(organization['children']) > 0:
return collect_descendants(organization['children'])
elif 'children' in organization and len(organization['children']) > 0:
inner_search = search_organization(organization_name, organization['children'])
if len(inner_search) > 0:
return inner_search
return []
def custom_organization_filter(organization_name):
descendant_organizations = search_organization(organization_name)
if descendant_organizations and len(descendant_organizations) > 0:
descendant_organizations_filter = ' OR '.join(descendant_organizations)
organization_filter = '(%s OR %s)' % (organization_name, descendant_organizations_filter)
else:
organization_filter = organization_name
return ' organization:%s' % organization_filter
class GobArPackageController(PackageController):
def search(self):
package_type = self._guess_package_type()
try:
context = {'model': model, 'user': c.user or c.author, 'auth_user_obj': c.userobj}
check_access('site_read', context)
except NotAuthorized:
abort(401, _('Not authorized to see this page'))
q = c.q = request.params.get('q', u'')
c.query_error = False
page = self._get_page_number(request.params)
limit = g.datasets_per_page
params_nopage = [(k, v) for k, v in request.params.items() if k != 'page']
def drill_down_url(alternative_url=None, **by):
return h.add_url_param(
alternative_url=alternative_url,
controller='package',
action='search',
new_params=by
)
c.drill_down_url = drill_down_url
def remove_field(key, value=None, replace=None):
return h.remove_url_param(key, value=value, replace=replace, controller='package', action='search')
c.remove_field = remove_field
sort_by = request.params.get('sort', None)
params_nosort = [(k, v) for k, v in params_nopage if k != 'sort']
def _sort_by(fields):
params = params_nosort[:]
if fields:
sort_string = ', '.join('%s %s' % f for f in fields)
params.append(('sort', sort_string))
return search_url(params, package_type)
c.sort_by = _sort_by
if not sort_by:
c.sort_by_fields = []
else:
c.sort_by_fields = [field.split()[0] for field in sort_by.split(',')]
def pager_url(q=None, page=None):
params = list(params_nopage)
params.append(('page', page))
return search_url(params, package_type)
c.search_url_params = urlencode(_encode_params(params_nopage))
try:
c.fields = []
c.fields_grouped = {}
search_extras = {}
fq = ''
for (param, value) in request.params.items():
if param not in ['q', 'page', 'sort'] \
and len(value) and not param.startswith('_'):
if not param.startswith('ext_'):
c.fields.append((param, value))
if param != 'organization':
fq += ' %s:"%s"' % (param, value)
else:
fq += custom_organization_filter(value)
if param not in c.fields_grouped:
c.fields_grouped[param] = [value]
else:
c.fields_grouped[param].append(value)
else:
search_extras[param] = value
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
if package_type and package_type != 'dataset':
fq += ' +dataset_type:{type}'.format(type=package_type)
elif not asbool(config.get('ckan.search.show_all_types', 'False')):
fq += ' +dataset_type:dataset'
facets = OrderedDict()
default_facet_titles = {
'organization': _('Organizations'),
'groups': _('Groups'),
'tags': _('Tags'),
'res_format': _('Formats'),
'license_id': _('Licenses'),
}
for facet in g.facets:
if facet in default_facet_titles:
facets[facet] = default_facet_titles[facet]
else:
facets[facet] = facet
for plugin in p.PluginImplementations(p.IFacets):
facets = plugin.dataset_facets(facets, package_type)
c.facet_titles = facets
data_dict = {
'q': q,
'fq': fq.strip(),
'facet.field': facets.keys(),
'rows': limit,
'start': (page - 1) * limit,
'sort': sort_by,
'extras': search_extras
}
query = get_action('package_search')(context, data_dict)
c.sort_by_selected = query['sort']
c.page = h.Page(
collection=query['results'],
page=page,
url=pager_url,
item_count=query['count'],
items_per_page=limit
)
c.facets = query['facets']
c.search_facets = query['search_facets']
c.page.items = query['results']
except SearchError, se:
log.error('Dataset search error: %r', se.args)
c.query_error = True
c.facets = {}
c.search_facets = {}
c.page = h.Page(collection=[])
c.search_facets_limits = {}
for facet in c.search_facets.keys():
try:
if facet != 'organization':
limit = int(request.params.get('_%s_limit' % facet, g.facets_default_number))
else:
limit = None
except ValueError:
error_description = _('Parameter "{parameter_name}" is not an integer')
error_description = error_description.format(parameter_name='_%s_limit' % facet)
abort(400, error_description)
c.search_facets_limits[facet] = limit
maintain.deprecate_context_item('facets', 'Use `c.search_facets` instead.')
self._setup_template_variables(context, {}, package_type=package_type)
return render(self._search_template(package_type), extra_vars={'dataset_type': package_type})
def new_resource(self, id, data=None, errors=None, error_summary=None):
''' FIXME: This is a temporary action to allow styling of the
forms. '''
if request.method == 'POST' and not data:
save_action = request.params.get('save')
data = data or \
clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.POST))))
# we don't want to include save as it is part of the form
del data['save']
resource_id = data['id']
del data['id']
self._validate_resource(data)
context = {'model': model, 'session': model.Session,
'user': c.user, 'auth_user_obj': c.userobj}
if save_action == 'go-dataset':
# go to first step
h.redirect_to(controller='package', action='edit', id=id)
# see if we have any data that we are trying to save
data_provided = False
def boolean_value(value):
return value and value != u'[]'
for key, value in data.iteritems():
if ((boolean_value(value) or isinstance(value, cgi.FieldStorage))
and key != 'resource_type' and key != 'license_id'):
data_provided = True
break
if not data_provided and save_action != "go-dataset-complete":
# see if we have added any resources
try:
data_dict = get_action('package_show')(context, {'id': id})
except NotAuthorized:
abort(403, _('Unauthorized to update dataset'))
except NotFound:
abort(404, _('The dataset {id} could not be found.'
).format(id=id))
if not len(data_dict['resources']):
# no data so keep on page
msg = _('You must add at least one data resource')
# On new templates do not use flash message
if asbool(config.get('ckan.legacy_templates')):
h.flash_error(msg)
h.redirect_to(controller='package',
action='new_resource', id=id)
else:
errors = {}
error_summary = {_('Error'): msg}
return self.new_resource(id, data, errors,
error_summary)
# XXX race condition if another user edits/deletes
data_dict = get_action('package_show')(context, {'id': id})
get_action('package_update')(
dict(context, allow_state_change=True),
dict(data_dict, state='active'))
h.redirect_to(controller='package', action='read', id=id)
data['package_id'] = id
try:
if resource_id:
data['id'] = resource_id
get_action('resource_update')(context, data)
else:
get_action('resource_create')(context, data)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.new_resource(id, data, errors, error_summary)
except NotAuthorized:
abort(403, _('Unauthorized to create a resource'))
except NotFound:
abort(404, _('The dataset {id} could not be found.'
).format(id=id))
if save_action == 'go-metadata':
# XXX race condition if another user edits/deletes
data_dict = get_action('package_show')(context, {'id': id})
get_action('package_update')(
dict(context, allow_state_change=True),
dict(data_dict, state='active'))
h.redirect_to(controller='package', action='read', id=id)
elif save_action == 'go-dataset':
# go to first stage of add dataset
h.redirect_to(controller='package', action='edit', id=id)
elif save_action == 'go-dataset-complete':
# go to first stage of add dataset
h.redirect_to(controller='package', action='read', id=id)
elif save_action == 'save-draft':
h.redirect_to(controller='package', action='read', id=id)
else:
# add more resources
h.redirect_to(controller='package', action='new_resource',
id=id)
# get resources for sidebar
context = {'model': model, 'session': model.Session,
'user': c.user, 'auth_user_obj': c.userobj}
try:
pkg_dict = get_action('package_show')(context, {'id': id})
except NotFound:
abort(404, _('The dataset {id} could not be found.').format(id=id))
try:
check_access(
'resource_create', context, {"package_id": pkg_dict["id"]})
except NotAuthorized:
abort(403, _('Unauthorized to create a resource for this package'))
package_type = pkg_dict['type'] or 'dataset'
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'new',
'resource_form_snippet': self._resource_form(package_type),
'dataset_type': package_type}
vars['pkg_name'] = id
# required for nav menu
vars['pkg_dict'] = pkg_dict
template = 'package/new_resource_not_draft.html'
if pkg_dict['state'].startswith('draft'):
vars['stage'] = ['complete', 'active']
template = 'package/new_resource.html'
return render(template, extra_vars=vars)
def new(self, data=None, errors=None, error_summary=None):
if data and 'type' in data:
package_type = data['type']
else:
package_type = self._guess_package_type(True)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'save': 'save' in request.params}
# Package needs to have an organization group in the call to
# check_access and also to save it
try:
check_access('package_create', context)
except NotAuthorized:
abort(401, _('Unauthorized to create a package'))
if context['save'] and not data:
return self._save_new(context, package_type=package_type)
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.params, ignore_keys=CACHE_PARAMETERS))))
c.resources_json = h.json.dumps(data.get('resources', []))
# convert tags if not supplied in data
if data and not data.get('tag_string'):
data['tag_string'] = ', '.join(
h.dict_list_reduce(data.get('tags', {}), 'name'))
errors = errors or {}
error_summary = error_summary or {}
# in the phased add dataset we need to know that
# we have already completed stage 1
stage = ['active']
if data.get('state', '').startswith('draft'):
stage = ['active', 'complete']
# if we are creating from a group then this allows the group to be
# set automatically
data['group_id'] = request.params.get('group') or \
request.params.get('groups__0__id')
form_snippet = self._package_form(package_type=package_type)
form_vars = {'data': data, 'errors': errors,
'error_summary': error_summary,
'action': 'new', 'stage': stage,
'dataset_type': package_type,
}
c.errors_json = h.json.dumps(errors)
self._setup_template_variables(context, {},
package_type=package_type)
new_template = self._new_template(package_type)
c.form = deprecated_lazy_render(
new_template,
form_snippet,
lambda: render(form_snippet, extra_vars=form_vars),
'use of c.form is deprecated. please see '
'ckan/templates/package/base_form_page.html for an example '
'of the new way to include the form snippet'
)
return render(new_template,
extra_vars={'form_vars': form_vars, 'form_snippet': form_snippet, 'dataset_type': package_type})
def _save_new(self, context, package_type=None):
# The staged add-dataset flow reuses this action while the dataset is only
# partially created, so we need to know whether we are actually updating an
# existing draft or creating a genuinely new dataset.
is_an_update = False
ckan_phase = request.params.get('_ckan_phase')
from ckan.lib.search import SearchIndexError
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.POST))))
if ckan_phase:
# prevent clearing of groups etc
context['allow_partial_update'] = True
# sort the tags
if 'tag_string' in data_dict:
data_dict['tags'] = self._tag_string_to_list(data_dict['tag_string'])
self._validate_dataset(data_dict)
if data_dict.get('pkg_name'):
is_an_update = True
# This is actually an update not a save
data_dict['id'] = data_dict['pkg_name']
del data_dict['pkg_name']
# don't change the dataset state
data_dict['state'] = 'draft'
# this is actually an edit not a save
pkg_dict = get_action('package_update')(context, data_dict)
if request.params['save'] == 'go-metadata':
# redirect to add metadata
url = h.url_for(controller='package', action='new_metadata', id=pkg_dict['name'])
elif request.params['save'] == 'save-draft':
url = h.url_for(controller='package', action='read', id=pkg_dict['name'])
else:
# redirect to add dataset resources
url = h.url_for(controller='package', action='new_resource', id=pkg_dict['name'])
redirect(url)
# Make sure we don't index this dataset
if request.params['save'] not in ['go-resource', 'go-metadata']:
data_dict['state'] = 'draft'
# allow the state to be changed
context['allow_state_change'] = True
data_dict['type'] = package_type
context['message'] = data_dict.get('log_message', '')
pkg_dict = get_action('package_create')(context, data_dict)
if ckan_phase and request.params['save'] != 'save-draft':
url = h.url_for(controller='package', action='new_resource', id=pkg_dict['name'])
redirect(url)
elif request.params['save'] == 'save-draft':
url = h.url_for(controller='package', action='read', id=pkg_dict['name'])
redirect(url)
self._form_save_redirect(pkg_dict['name'], 'new', package_type=package_type)
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound, e:
abort(404, _('Dataset not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except SearchIndexError, e:
try:
exc_str = unicode(repr(e.args))
except Exception: # We don't like bare excepts
exc_str = unicode(str(e))
abort(500, _(u'Unable to add package to search index.') + exc_str)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
if is_an_update:
# we need to get the state of the dataset to show the stage we
# are on.
pkg_dict = get_action('package_show')(context, data_dict)
data_dict['state'] = pkg_dict['state']
return self.edit(data_dict['id'], data_dict,
errors, error_summary)
data_dict['state'] = 'none'
return self.new(data_dict, errors, error_summary)
def edit(self, id, data=None, errors=None, error_summary=None):
package_type = self._get_package_type(id)
context = {'model': model, 'session': model.Session,
'user': c.user, 'auth_user_obj': c.userobj,
'save': 'save' in request.params}
if context['save'] and not data:
return self._save_edit(id, context, package_type=package_type)
try:
c.pkg_dict = get_action('package_show')(dict(context,
for_view=True),
{'id': id})
context['for_edit'] = True
old_data = get_action('package_show')(context, {'id': id})
# old_data comes from the database; data is passed back by the user when
# there is a validation error. Prefer the user's data where present.
if data:
old_data.update(data)
data = old_data
except (NotFound, NotAuthorized):
abort(404, _('Dataset not found'))
# are we doing a multiphase add?
if data.get('state', '').startswith('draft'):
c.form_action = h.url_for(controller='package', action='new')
c.form_style = 'new'
return self.new(data=data, errors=errors,
error_summary=error_summary)
c.pkg = context.get("package")
c.resources_json = h.json.dumps(data.get('resources', []))
try:
check_access('package_update', context)
except NotAuthorized:
abort(403, _('User %r not authorized to edit %s') % (c.user, id))
# convert tags if not supplied in data
if data and not data.get('tag_string'):
data['tag_string'] = ', '.join(h.dict_list_reduce(
c.pkg_dict.get('tags', {}), 'name'))
errors = errors or {}
form_snippet = self._package_form(package_type=package_type)
form_vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'edit',
'dataset_type': package_type,
}
c.errors_json = h.json.dumps(errors)
self._setup_template_variables(context, {'id': id},
package_type=package_type)
# we have already completed stage 1
form_vars['stage'] = ['active']
if data.get('state', '').startswith('draft'):
form_vars['stage'] = ['active', 'complete']
edit_template = self._edit_template(package_type)
return render(edit_template,
extra_vars={'form_vars': form_vars,
'form_snippet': form_snippet,
'dataset_type': package_type})
def _save_edit(self, name_or_id, context, package_type=None):
from ckan.lib.search import SearchIndexError
log.debug('Package save request name: %s POST: %r',
name_or_id, request.POST)
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.POST))))
self._validate_dataset(data_dict)
if '_ckan_phase' in data_dict:
# we allow partial updates to not destroy existing resources
context['allow_partial_update'] = True
if 'tag_string' in data_dict:
data_dict['tags'] = self._tag_string_to_list(
data_dict['tag_string'])
del data_dict['_ckan_phase']
del data_dict['save']
context['message'] = data_dict.get('log_message', '')
data_dict['id'] = name_or_id
pkg = get_action('package_update')(context, data_dict)
c.pkg = context['package']
c.pkg_dict = pkg
self._form_save_redirect(pkg['name'], 'edit',
package_type=package_type)
except NotAuthorized:
abort(403, _('Unauthorized to read package %s') % name_or_id)
except NotFound, e:
abort(404, _('Dataset not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except SearchIndexError, e:
try:
exc_str = unicode(repr(e.args))
except Exception: # We don't like bare excepts
exc_str = unicode(str(e))
abort(500, _(u'Unable to update search index.') + exc_str)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(name_or_id, data_dict, errors, error_summary)
def _validate_length(self, data, attribute, max_length):
if len(data.get(attribute, u'')) > max_length:
raise ValidationError("%s must not have more than %s characters." % (attribute, max_length))
def _validate_resource(self, data_dict):
max_name_characters = 150
max_desc_characters = 200
self._validate_length(data_dict, 'name', max_name_characters)
self._validate_length(data_dict, 'description', max_desc_characters)
def _validate_dataset(self, data_dict):
max_title_characters = 100
max_desc_characters = 500
self._validate_length(data_dict, 'title', max_title_characters)
self._validate_length(data_dict, 'notes', max_desc_characters)
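# Hedged illustration (not part of the original module): the validators above are
# meant to reject over-long form fields before the CKAN action layer is reached,
# e.g. a call along the lines of
#   controller._validate_resource({'name': 'x' * 200, 'description': 'ok'})
# is expected to raise ValidationError("name must not have more than 150 characters.").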
|
EPDCenter/android_kernel_bq_qc
|
refs/heads/testoc
|
tools/perf/util/setup.py
|
2554
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
perf = Extension('perf',
sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf])
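# Hedged usage note (not part of the original file): this setup script is normally
# driven by the perf build rather than run by hand; the Makefile invokes it roughly as
#   python2 util/setup.py --quiet build_ext
# and the resulting `perf` extension module can be smoke-tested with
#   python2 -c "import perf"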
|
conikuvat/edegal
|
refs/heads/master
|
backend/edegal_site/wsgi.py
|
1
|
"""
WSGI config for edegal2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edegal_site.settings")
application = get_wsgi_application()
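# Hedged usage note (not part of the original file): any WSGI server can serve the
# `application` object defined above, for example
#   gunicorn edegal_site.wsgi:application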
|
agfor/chipy.org
|
refs/heads/master
|
chipy_org/deploy/fcgi.py
|
21
|
from django.core.servers.fastcgi import runfastcgi
import pinax.env
# setup the environment for Django and Pinax
pinax.env.setup_environ(__file__)
# pass off handling to FastCGI
runfastcgi(method="threaded", daemonize="false")
|
BoltzmannBrain/nupic.research
|
refs/heads/master
|
htmresearch/algorithms/hierarchical_clustering.py
|
12
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy
import scipy.cluster.hierarchy
import scipy.sparse
from nupic.algorithms.KNNClassifier import KNNClassifier
class LinkageNotComputedException(Exception):
pass
class HierarchicalClustering(object):
"""
Implements hierarchical agglomerative clustering on the output of a
classification network.
The dissimilarity measure used is the negative overlap between SDRs.
There are 3 steps that must be performed to use the class.
1) The class must be initialized with a KNNClassifier instance, from which it
extracts training vectors.
2) The `cluster()` method must be called with a string parameter specifying
the linkage function for agglomerative clustering. See docstring on `cluster`
for more information. This method can take significant time to execute.
3) Once `cluster()` is called, the visualization methods can be called.
Currently supported visualization methods are `getDendrogram()` and
`getClusterPrototypes()`.
Note that steps 2 and 3 above can be repeated to visualize the same data
clustered using different linkage functions.
Example
=======
hc = HierarchicalClustering(knn)
hc.cluster("complete")
prototypes = hc.getClusterPrototypes(20, 5)
"""
def __init__(self, knn):
"""
Initialization for HierarchicalClustering object.
@param knn (nupic.algorithms.KNNClassifier) Populated instance of KNN
classifier from which to draw training vectors.
"""
self._knn = knn
self._overlaps = None
self._linkage = None
def cluster(self, linkageMethod="single"):
"""
Perform hierarchical clustering on training vectors using specified linkage
method. Results can be obtained using getLinkageMatrix(), etc.
@param linkageMethod (string) Linkage method for computing between-class
dissimilarities. Valid options are: "single" (aka min), "complete"
(aka max), "average", and "weighted". For more information, see
http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.cluster.hierarchy.linkage.html
"""
if self._overlaps is None:
self._populateOverlaps()
overlaps = self._overlaps
linkage = scipy.cluster.hierarchy.linkage(-overlaps, method=linkageMethod)
self._linkage = linkage
def getLinkageMatrix(self):
"""
Returns a linkage matrix of the form defined by
http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.cluster.hierarchy.linkage.html
"""
if self._linkage is None:
raise LinkageNotComputedException
return self._linkage.copy()
def getDendrogram(self, truncate_mode=None, p=30):
linkage = self.getLinkageMatrix()
linkage[:,2] -= numpy.min(linkage[:,2])
n = linkage.shape[0]
def llf(id):
if id < n:
return "leaf: " + str(id)
else:
return '[%d %d %1.2f]' % (id, 2, linkage[n-id,3])
fig = plt.figure(figsize=(10,8), dpi=400)
ax = plt.axes()
scipy.cluster.hierarchy.dendrogram(linkage,
p=p, truncate_mode=truncate_mode, ax=ax, orientation="right", leaf_font_size=5,
no_labels=False, leaf_rotation=45.0, show_leaf_counts=True, leaf_label_func=llf,
color_threshold=50)
for label in ax.get_yticklabels():
label.set_fontsize(4)
return fig
def getClusterPrototypes(self, numClusters, numPrototypes=1):
"""
Create numClusters flat clusters and find approximately numPrototypes
prototypes per flat cluster. Returns an array with each row containing the
indices of the prototypes for a single flat cluster.
@param numClusters (int) Number of flat clusters to return (approximate).
@param numPrototypes (int) Number of prototypes to return per cluster.
@returns (tuple of numpy.ndarray) The first element is an array with rows
containing the indices of the prototypes for a single flat cluster.
If a cluster has fewer than numPrototypes members, missing indices are
filled in with -1. The second element is an array of number of elements
in each cluster.
"""
linkage = self.getLinkageMatrix()
linkage[:, 2] -= linkage[:, 2].min()
clusters = scipy.cluster.hierarchy.fcluster(
linkage, numClusters, criterion="maxclust")
prototypes = []
clusterSizes = []
for cluster_id in numpy.unique(clusters):
ids = numpy.arange(len(clusters))[clusters == cluster_id]
clusterSizes.append(len(ids))
if len(ids) > numPrototypes:
cluster_prototypes = HierarchicalClustering._getPrototypes(
ids, self._overlaps, numPrototypes)
else:
cluster_prototypes = numpy.ones(numPrototypes) * -1
cluster_prototypes[:len(ids)] = ids
prototypes.append(cluster_prototypes)
return numpy.vstack(prototypes).astype(int), numpy.array(clusterSizes)
##################
# Helper Methods #
##################
@staticmethod
def _getPrototypes(indices, overlaps, topNumber=1):
"""
Given a compressed overlap array and a set of indices specifying a subset
of those in that array, return the set of topNumber indices of vectors that
have maximum average overlap with other vectors in `indices`.
@param indices (arraylike) Array of indices for which to get prototypes.
@param overlaps (numpy.ndarray) Condensed array of overlaps of the form
returned by _computeOverlaps().
@param topNumber (int) The number of prototypes to return. Optional,
defaults to 1.
@returns (numpy.ndarray) Array of indices of prototypes
"""
# find the number of data points based on the length of the overlap array
# solves for n: len(overlaps) = n(n-1)/2
n = numpy.roots([1, -1, -2 * len(overlaps)]).max()
k = len(indices)
indices = numpy.array(indices, dtype=int)
rowIdxs = numpy.ndarray((k, k-1), dtype=int)
colIdxs = numpy.ndarray((k, k-1), dtype=int)
for i in xrange(k):
rowIdxs[i, :] = indices[i]
colIdxs[i, :i] = indices[:i]
colIdxs[i, i:] = indices[i+1:]
idx = HierarchicalClustering._condensedIndex(rowIdxs, colIdxs, n)
subsampledOverlaps = overlaps[idx]
meanSubsampledOverlaps = subsampledOverlaps.mean(1)
biggestOverlapSubsetIdxs = numpy.argsort(
-meanSubsampledOverlaps)[:topNumber]
return indices[biggestOverlapSubsetIdxs]
@staticmethod
def _condensedIndex(indicesA, indicesB, n):
"""
Given a set of n points for which pairwise overlaps are stored in a flat
array X in the format returned by _computeOverlaps (upper triangular of the
overlap matrix in row-major order), this function returns the indices in X
that correspond to the overlaps for the pairs of points specified.
Example
-------
Consider the case with n = 5 data points for which pairwise overlaps are
stored in array X, which has length 10 = n(n-1)/2. To obtain the overlap
of points 2 and 3 and the overlap of points 4 and 1, call
idx = _condensedIndex([2, 4], [3, 1], 5)  # idx == [7, 6]
Note: Since X does not contain the diagonal (self-comparisons), it is
invalid to pass arrays such that indicesA[i] == indicesB[i] for any i.
@param indicesA (arraylike) First dimension of pairs of datapoint indices
@param indicesB (arraylike) Second dimension of pairs of datapoint indices
@param n (int) Number of datapoints
@returns (numpy.ndarray) Indices in condensed overlap matrix containing
specified overlaps. Dimension will be same as indicesA and indicesB.
"""
indicesA = numpy.array(indicesA, dtype=int)
indicesB = numpy.array(indicesB, dtype=int)
n = int(n)
# Ensure that there are no self-comparisons
assert (indicesA != indicesB).all()
# re-arrange indices to ensure that rowIxs[i] < colIxs[i] for all i
rowIxs = numpy.where(indicesA < indicesB, indicesA, indicesB)
colIxs = numpy.where(indicesA < indicesB, indicesB, indicesA)
flatIxs = rowIxs * (n - 1) - (rowIxs + 1) * rowIxs / 2 + colIxs - 1
return flatIxs
def _populateOverlaps(self):
sparseDataMatrix = HierarchicalClustering._extractVectorsFromKNN(self._knn)
self._overlaps = HierarchicalClustering._computeOverlaps(sparseDataMatrix)
@staticmethod
def _extractVectorsFromKNN(knn):
dim = len(knn.getPattern(0, sparseBinaryForm=False))
sparseRowList = []
for i in xrange(knn._numPatterns):
nzIndices = knn.getPattern(i, sparseBinaryForm=True)
sparseRow = scipy.sparse.csr_matrix(
(numpy.ones(len(nzIndices), dtype=bool),
(numpy.zeros(len(nzIndices)), nzIndices)),
shape=(1,dim))
sparseRowList.append(sparseRow)
sparseDataMatrix = scipy.sparse.vstack(sparseRowList)
return sparseDataMatrix
@staticmethod
def _computeOverlaps(data, selfOverlaps=False, dtype="int16"):
"""
Calculates all pairwise overlaps between the rows of the input. Returns an
array of all n(n-1)/2 values in the upper triangular portion of the
pairwise overlap matrix. Values are returned in row-major order.
@param data (scipy.sparse.csr_matrix) A CSR sparse matrix with one vector
per row. Any non-zero value is considered an active bit.
@param selfOverlaps (boolean) If true, include diagonal (density) values
from the pairwise similarity matrix. Then the returned vector has
n(n+1)/2 elements. Optional, defaults to False.
@param dtype (string) Data type of returned array in numpy dtype format.
Optional, defaults to 'int16'.
@returns (numpy.ndarray) A vector of pairwise overlaps as described above.
"""
nVectors = data.shape[0]
nDims = data.shape[1]
nPairs = (nVectors+1)*nVectors/2 if selfOverlaps else (
nVectors*(nVectors-1)/2)
overlaps = numpy.ndarray(nPairs, dtype=dtype)
pos = 0
for i in xrange(nVectors):
start = i if selfOverlaps else i+1
a = data[i]
b = data[start:]
newOverlaps = a.multiply(b).getnnz(1)
run = newOverlaps.shape[0]
overlaps[pos:pos+run] = newOverlaps
pos += run
return overlaps
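# --- Hedged end-to-end sketch (not part of the original module) ---
# Assuming a populated KNNClassifier instance `knn`, the intended flow is roughly:
#   hc = HierarchicalClustering(knn)
#   hc.cluster("average")
#   fig = hc.getDendrogram(truncate_mode="lastp", p=25)
#   prototypes, sizes = hc.getClusterPrototypes(numClusters=10, numPrototypes=3)
# The condensed-index helper can be sanity-checked directly; for n = 5 points the
# upper-triangular, row-major layout places pair (2, 3) at position 7 and (1, 4) at 6:
#   HierarchicalClustering._condensedIndex([2, 4], [3, 1], 5)  # -> array([7, 6])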
|
izhukov/ansible
|
refs/heads/devel
|
v2/ansible/plugins/action/unarchive.py
|
10
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pipes
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=dict()):
''' handler for unarchive operations '''
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
copy = self._task.args.get('copy', True)
creates = self._task.args.get('creates', None)
if source is None or dest is None:
return dict(failed=True, msg="src (or content) and dest are required")
if not tmp:
tmp = self._make_tmp_path()
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
module_args_tmp = "path=%s" % creates
result = self._execute_module(module_name='stat', module_args=dict(path=creates))
stat = result.get('stat', None)
if stat and stat.get('exists', False):
return dict(skipped=True, msg=("skipped, since %s exists" % creates))
dest = self._remote_expand_user(dest, tmp) # CCTODO: Fix path for Windows hosts.
source = os.path.expanduser(source)
if copy:
# FIXME: the original file stuff needs to be reworked
if '_original_file' in task_vars:
source = self._loader.path_dwim_relative(task_vars['_original_file'], 'files', source)
else:
source = self._loader.path_dwim(source)
remote_checksum = self._remote_checksum(tmp, dest)
if remote_checksum == '4':
    return dict(failed=True, msg="python isn't present on the system. Unable to compute checksum")
elif remote_checksum != '3':
    return dict(failed=True, msg="dest '%s' must be an existing dir" % dest)
if copy:
# transfer the file to a remote tmp location
tmp_src = tmp + 'source'
self._connection.put_file(source, tmp_src)
# handle diff mode client side
# handle check mode client side
# fix file permissions when the copy is done as a different user
if copy:
if self._connection_info.become and self._connection_info.become_user != 'root':
# FIXME: noop stuff needs to be reworked
#if not self.runner.noop_on_check(task_vars):
# self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
self._remote_chmod(tmp, 'a+r', tmp_src)
# Build temporary module_args.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
original_basename=os.path.basename(source),
),
)
# make sure check mode is passed on correctly
# FIXME: noop again, probably doesn't need to be done here anymore?
#if self.runner.noop_on_check(task_vars):
# new_module_args['CHECKMODE'] = True
else:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
original_basename=os.path.basename(source),
),
)
# make sure check mode is passed on correctly
# FIXME: noop again, probably doesn't need to be done here anymore?
#if self.runner.noop_on_check(task_vars):
# module_args += " CHECKMODE=True"
# execute the unarchive module now, with the updated args
return self._execute_module(module_args=new_module_args)
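# Hedged usage note (not part of the original plugin): this action backs the
# `unarchive` module, which a play of that era would typically invoke roughly as
#   - unarchive: src=files/site.tar.gz dest=/var/www/site copy=yes creates=/var/www/site/index.html
# The `creates` handling above short-circuits the task when that path already exists
# on the target host.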
|
Evgenus/wiki-rest-json-api-test
|
refs/heads/master
|
wikiapi/app.py
|
1
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('wikiapi.config')
app.config.from_pyfile('application.cfg', silent=True)
db = SQLAlchemy(app)
from wikiapi import views, models
db.create_all()
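# Hedged usage note (not part of the original file): with this layout the app is
# typically served by pointing a WSGI runner at the module, e.g.
#   gunicorn wikiapi.app:app
# Note that db.create_all() above runs at import time, so tables are created the
# first time the module is loaded.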
|